
Merge r1337003 through r1346681 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3092@1346682 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 13 years ago
Parent
Current commit
4e2a3935b1
100 changed files, with 4,435 additions and 8,365 deletions
  1. 1 1
      BUILDING.txt
  2. 1 1
      dev-support/test-patch.sh
  3. 4 1
      hadoop-assemblies/pom.xml
  4. 0 4
      hadoop-client/pom.xml
  5. 4 1
      hadoop-common-project/hadoop-annotations/pom.xml
  6. 4 1
      hadoop-common-project/hadoop-auth-examples/pom.xml
  7. 4 1
      hadoop-common-project/hadoop-auth/pom.xml
  8. 1 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
  9. 138 12
      hadoop-common-project/hadoop-common/CHANGES.txt
  10. 4 0
      hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  11. 10 7
      hadoop-common-project/hadoop-common/pom.xml
  12. 4 4
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
  13. 11 3
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
  14. 77 0
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
  15. 3 3
      hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
  16. 36 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  17. 16 22
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
  18. 10 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  19. 9 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
  20. 61 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  21. 8 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
  22. 2 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
  23. 5 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  24. 151 44
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
  25. 15 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
  26. 206 51
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
  27. 29 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
  28. 9 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java
  29. 27 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java
  30. 199 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAZKUtil.java
  31. 5 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
  32. 101 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCProtocol.java
  33. 98 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java
  34. 579 96
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
  35. 34 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
  36. 29 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
  37. 90 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java
  38. 18 15
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolPB.java
  39. 88 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolServerSideTranslatorPB.java
  40. 25 10
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  41. 8 7
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java
  42. 20 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
  43. 5 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
  44. 7 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Writable.java
  45. 6 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
  46. 13 11
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  47. 27 7
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
  48. 57 18
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  49. 6 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
  50. 1 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/Node.java
  51. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java
  52. 65 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
  53. 6 2
      hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
  54. 6 0
      hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-policy.xml
  55. 3 3
      hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
  56. 12 0
      hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto
  57. 23 4
      hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto
  58. 29 13
      hadoop-common-project/hadoop-common/src/main/proto/ZKFCProtocol.proto
  59. 15 1
      hadoop-common-project/hadoop-common/src/main/proto/hadoop_rpc.proto
  60. 70 0
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  61. 32 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
  62. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/empty-configuration.xml
  63. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
  64. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
  65. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
  66. 14 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
  67. 2 4
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java
  68. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
  69. 72 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegationTokenSupport.java
  70. 17 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ActiveStandbyElectorTestUtil.java
  71. 452 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
  72. 71 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
  73. 52 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummySharedResource.java
  74. 319 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
  75. 80 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
  76. 65 6
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
  77. 20 13
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java
  78. 135 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAZKUtil.java
  79. 385 256
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
  80. 156 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverControllerStress.java
  81. 13 6
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ZKFCTestUtil.java
  82. 3 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/lib/TestStaticUserWebFilter.java
  83. 24 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
  84. 19 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
  85. 2 0
      hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
  86. 0 400
      hadoop-common-project/hadoop-common/src/test/system/aop/org/apache/hadoop/test/system/DaemonProtocolAspect.aj
  87. 0 41
      hadoop-common-project/hadoop-common/src/test/system/c++/runAs/Makefile.in
  88. 0 5117
      hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure
  89. 0 65
      hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure.ac
  90. 0 59
      hadoop-common-project/hadoop-common/src/test/system/c++/runAs/main.c
  91. 0 111
      hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.c
  92. 0 59
      hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.h.in
  93. 0 68
      hadoop-common-project/hadoop-common/src/test/system/conf/hadoop-policy-system-test.xml
  94. 0 599
      hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java
  95. 0 537
      hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java
  96. 0 86
      hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ControlAction.java
  97. 0 204
      hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java
  98. 0 77
      hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfo.java
  99. 0 159
      hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfoImpl.java
  100. 0 90
      hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProxyUserDefinitions.java

+ 1 - 1
BUILDING.txt

@@ -9,7 +9,7 @@ Requirements:
 * Forrest 0.8 (if generating docs)
 * Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.4.1+ (for MapReduce and HDFS)
-* Autotools (if compiling native code)
+* CMake 2.6 or newer (if compiling native code)
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)

 ----------------------------------------------------------------------------------

+ 1 - 1
dev-support/test-patch.sh

@@ -601,7 +601,7 @@ checkFindbugsWarnings () {
      $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.xml \
      $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.html
    if [[ $newFindbugsWarnings > 0 ]] ; then
-      JIRA_COMMENT_FOOTER="Findbugs warnings: $BUILD_URL/artifact/trunk/$(basename $BASEDIR)/patchprocess/newPatchFindbugsWarnings${module_suffix}.html
+      JIRA_COMMENT_FOOTER="Findbugs warnings: $BUILD_URL/artifact/trunk/patchprocess/newPatchFindbugsWarnings${module_suffix}.html
$JIRA_COMMENT_FOOTER"
    fi
  done

+ 4 - 1
hadoop-assemblies/pom.xml

@@ -15,7 +15,10 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>

+ 0 - 4
hadoop-client/pom.xml

@@ -107,10 +107,6 @@
          <groupId>org.aspectj</groupId>
          <artifactId>aspectjrt</artifactId>
        </exclusion>
-        <exclusion>
-          <groupId>org.apache.avro</groupId>
-          <artifactId>avro</artifactId>
-        </exclusion>
        <exclusion>
          <groupId>org.apache.avro</groupId>
          <artifactId>avro-ipc</artifactId>

+ 4 - 1
hadoop-common-project/hadoop-annotations/pom.xml

@@ -12,7 +12,10 @@
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.apache.hadoop</groupId>

+ 4 - 1
hadoop-common-project/hadoop-auth-examples/pom.xml

@@ -12,7 +12,10 @@
 See the License for the specific language governing permissions and
 limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.apache.hadoop</groupId>

+ 4 - 1
hadoop-common-project/hadoop-auth/pom.xml

@@ -12,7 +12,10 @@
 See the License for the specific language governing permissions and
 limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.apache.hadoop</groupId>

+ 1 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java

@@ -84,7 +84,7 @@ public class KerberosName {
    try {
      defaultRealm = KerberosUtil.getDefaultRealm();
    } catch (Exception ke) {
-        LOG.warn("Kerberos krb5 configuration not found, setting default realm to empty");
+        LOG.debug("Kerberos krb5 configuration not found, setting default realm to empty");
        defaultRealm="";
    }
  }

+ 138 - 12
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -12,6 +12,9 @@ Trunk (unreleased changes)
    HADOOP-8135. Add ByteBufferReadable interface to FSDataInputStream. (Henry
    Robinson via atm)

+    HDFS-3042. Automatic failover support for NameNode HA (todd)
+    (see dedicated section below for breakdown of subtasks)
+
  IMPROVEMENTS

    HADOOP-8017. Configure hadoop-main pom to get rid of M2E plugin execution
@@ -63,10 +66,22 @@ Trunk (unreleased changes)

    HADOOP-8290. Remove remaining references to hadoop.native.lib (harsh)

-    HADOOP-8285 Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
-
    HADOOP-8308. Support cross-project Jenkins builds. (tomwhite)

+    HADOOP-8297. Writable javadocs don't carry default constructor (harsh)
+
+    HADOOP-8360. empty-configuration.xml fails xml validation
+    (Radim Kolar via harsh)
+
+    HADOOP-8367 Improve documentation of declaringClassProtocolName in rpc headers 
+                (Sanjay Radia)
+
+    HADOOP-8415. Add getDouble() and setDouble() in
+    org.apache.hadoop.conf.Configuration (Jan van der Lugt via harsh)
+
+    HADOOP-7659. fs -getmerge isn't guaranteed to work well over non-HDFS
+    filesystems (harsh)
+
  BUG FIXES

    HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName.
@@ -135,19 +150,129 @@ Trunk (unreleased changes)
    HADOOP-8375. test-patch should stop immediately once it has found
    compilation errors (bobby)

+    HADOOP-8395. Text shell command unnecessarily demands that a
+    SequenceFile's key class be WritableComparable (harsh)
+
+    HADOOP-8413. test-patch.sh gives out the wrong links for
+    newPatchFindbugsWarnings (Colin Patrick McCabe via bobby)
+
+    HADOOP-6871. When the value of a configuration key is set to its
+    unresolved form, it causes the IllegalStateException in
+    Configuration.get() stating that substitution depth is too large.
+    (Arvind Prabhakar via harsh)
+
  OPTIMIZATIONS

    HADOOP-7761. Improve the performance of raw comparisons. (todd)

-Release 2.0.0 - UNRELEASED 
+  BREAKDOWN OF HDFS-3042 SUBTASKS

-  INCOMPATIBLE CHANGES
+    HADOOP-8220. ZKFailoverController doesn't handle failure to become active
+    correctly (todd)
+
+    HADOOP-8228. Auto HA: Refactor tests and add stress tests. (todd)
+
+    HADOOP-8215. Security support for ZK Failover controller (todd)
+
+    HADOOP-8245. Fix flakiness in TestZKFailoverController (todd)
+
+    HADOOP-8257. TestZKFailoverControllerStress occasionally fails with Mockito
+    error (todd)
+
+    HADOOP-8260. Replace ClientBaseWithFixes with our own modified copy of the
+    class (todd)
+
+    HADOOP-8246. Auto-HA: automatically scope znode by nameservice ID (todd)
+
+    HADOOP-8247. Add a config to enable auto-HA, which disables manual
+    FailoverController (todd)
+
+    HADOOP-8306. ZKFC: improve error message when ZK is not running. (todd)
+
+    HADOOP-8279. Allow manual failover to be invoked when auto-failover is
+    enabled. (todd)
+
+    HADOOP-8276. Auto-HA: add config for java options to pass to zkfc daemon
+    (todd via eli)
+
+    HADOOP-8405. ZKFC tests leak ZK instances. (todd)

-    HADOOP-7920. Remove Avro Rpc. (suresh)
+Release 2.0.1-alpha - UNRELEASED
+
+  INCOMPATIBLE CHANGES

    HADOOP-8388. Remove unused BlockLocation serialization.
    (Colin Patrick McCabe via eli)

+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    HADOOP-8340. SNAPSHOT build versions should compare as less than their eventual
+    final release. (todd)
+
+    HADOOP-8361. Avoid out-of-memory problems when deserializing strings.
+    (Colin Patrick McCabe via eli)
+
+    HADOOP-8224. Don't hardcode hdfs.audit.logger in the scripts.
+    (Tomohiko Kinebuchi via eli)
+
+    HADOOP-8398. Cleanup BlockLocation. (eli)
+
+    HADOOP-8422. Deprecate FileSystem#getDefault* and getServerDefault
+    methods that don't take a Path argument. (eli)
+
+    HADOOP-8323. Add javadoc and tests for Text.clear() behavior (harsh)
+
+    HADOOP-8358. Config-related WARN for dfs.web.ugi can be avoided. (harsh)
+
+    HADOOP-8450. Remove src/test/system. (eli)
+
+  BUG FIXES
+
+    HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
+    starting with a numeric character. (Junping Du via suresh)
+
+    HADOOP-8393. hadoop-config.sh missing variable exports, causes Yarn jobs to fail with ClassNotFoundException MRAppMaster. (phunt via tucu)
+
+    HADOOP-8316. Audit logging should be disabled by default. (eli)
+
+    HADOOP-8400. All commands warn "Kerberos krb5 configuration not found" when security is not enabled. (tucu)
+
+    HADOOP-8406. CompressionCodecFactory.CODEC_PROVIDERS iteration is
+    thread-unsafe (todd)
+
+    HADOOP-8287. etc/hadoop is missing hadoop-env.sh (eli)
+
+    HADOOP-8408. MR doesn't work with a non-default ViewFS mount table
+    and security enabled. (atm via eli)
+
+    HADOOP-8329. Build fails with Java 7. (eli)
+
+    HADOOP-8268. A few pom.xml across Hadoop project
+    may fail XML validation. (Radim Kolar via harsh)
+
+    HADOOP-8444. Fix the tests FSMainOperationsBaseTest.java and
+    FileContextMainOperationsBaseTest.java to avoid potential
+    test failure (Madhukara Phatak via harsh)
+
+    HADOOP-8452. DN logs backtrace when running under jsvc and /jmx is loaded 
+    (Andy Isaacson via bobby)
+
+    HADOOP-8460. Document proper setting of HADOOP_PID_DIR and 
+    HADOOP_SECURE_DN_PID_DIR (bobby)
+
+    HADOOP-8466. hadoop-client POM incorrectly excludes avro. (bmahe via tucu)
+
+    HADOOP-8481. update BUILDING.txt to talk about cmake rather than autotools.
+    (Colin Patrick McCabe via eli)
+
+Release 2.0.0-alpha - 05-23-2012
+
+  INCOMPATIBLE CHANGES
+
+    HADOOP-7920. Remove Avro Rpc. (suresh)
+
  NEW FEATURES

    HADOOP-7773. Add support for protocol buffer based RPC engine.
@@ -305,11 +430,15 @@ Release 2.0.0 - UNRELEASED
    HADOOP-8356. FileSystem service loading mechanism should print the FileSystem 
    impl it is failing to load (tucu)

-    HADOOP-8340. SNAPSHOT build versions should compare as less than their eventual
-    final release. (todd)
+    HADOOP-8353. hadoop-daemon.sh and yarn-daemon.sh can be misleading on stop.
+    (Roman Shaposhnik via atm)

-    HADOOP-8361. Avoid out-of-memory problems when deserializing strings.
-    (Colin Patrick McCabe via eli)
+    HADOOP-8113. Correction to BUILDING.txt: HDFS needs ProtocolBuffer, too
+    (not just MapReduce). Contributed by Eugene Koontz.
+
+    HADOOP-8285 Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
+
+    HADOOP-8366 Use ProtoBuf for RpcResponseHeader (sanjay radia)

  OPTIMIZATIONS

@@ -444,9 +573,6 @@ Release 2.0.0 - UNRELEASED
    HADOOP-8359. Fix javadoc warnings in Configuration.  (Anupam Seth via
    szetszwo)

-    HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
-    starting with a numeric character. (Junping Du via suresh)
-
  BREAKDOWN OF HADOOP-7454 SUBTASKS

    HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)

+ 4 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -290,5 +290,9 @@
      <!-- protobuf generated code -->
      <Class name="~org\.apache\.hadoop\.ha\.proto\.HAServiceProtocolProtos.*"/>
    </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ha\.proto\.ZKFCProtocolProtos.*"/>
+    </Match>

 </FindBugsFilter>

+ 10 - 7
hadoop-common-project/hadoop-common/pom.xml

@@ -12,7 +12,10 @@
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.apache.hadoop</groupId>
@@ -340,7 +343,7 @@
                <echo file="target/compile-proto.sh">
                    PROTO_DIR=src/main/proto
                    JAVA_DIR=target/generated-sources/java
-                    which cygpath 2> /dev/null
+                    which cygpath 2&gt; /dev/null
                    if [ $? = 1 ]; then
                      IS_WIN=false
                    else
@@ -348,8 +351,8 @@
                      WIN_PROTO_DIR=`cygpath --windows $PROTO_DIR`
                      WIN_JAVA_DIR=`cygpath --windows $JAVA_DIR`
                    fi
-                    mkdir -p $JAVA_DIR 2> /dev/null
-                    for PROTO_FILE in `ls $PROTO_DIR/*.proto 2> /dev/null`
+                    mkdir -p $JAVA_DIR 2&gt; /dev/null
+                    for PROTO_FILE in `ls $PROTO_DIR/*.proto 2&gt; /dev/null`
                    do
                        if [ "$IS_WIN" = "true" ]; then
                          protoc -I$WIN_PROTO_DIR --java_out=$WIN_JAVA_DIR $PROTO_FILE
@@ -375,7 +378,7 @@
                <echo file="target/compile-test-proto.sh">
                    PROTO_DIR=src/test/proto
                    JAVA_DIR=target/generated-test-sources/java
-                    which cygpath 2> /dev/null
+                    which cygpath 2&gt; /dev/null
                    if [ $? = 1 ]; then
                      IS_WIN=false
                    else
@@ -383,8 +386,8 @@
                      WIN_PROTO_DIR=`cygpath --windows $PROTO_DIR`
                      WIN_JAVA_DIR=`cygpath --windows $JAVA_DIR`
                    fi
-                    mkdir -p $JAVA_DIR 2> /dev/null
-                    for PROTO_FILE in `ls $PROTO_DIR/*.proto 2> /dev/null`
+                    mkdir -p $JAVA_DIR 2&gt; /dev/null
+                    for PROTO_FILE in `ls $PROTO_DIR/*.proto 2&gt; /dev/null`
                    do
                        if [ "$IS_WIN" = "true" ]; then
                          protoc -I$WIN_PROTO_DIR --java_out=$WIN_JAVA_DIR $PROTO_FILE

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

@@ -172,7 +172,7 @@ IFS=

if [ "$HADOOP_COMMON_HOME" = "" ]; then
  if [ -d "${HADOOP_PREFIX}/$HADOOP_COMMON_DIR" ]; then
-    HADOOP_COMMON_HOME=$HADOOP_PREFIX
+    export HADOOP_COMMON_HOME=$HADOOP_PREFIX
  fi
fi

@@ -252,7 +252,7 @@ HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
# put hdfs in classpath if present
if [ "$HADOOP_HDFS_HOME" = "" ]; then
  if [ -d "${HADOOP_PREFIX}/$HDFS_DIR" ]; then
-    HADOOP_HDFS_HOME=$HADOOP_PREFIX
+    export HADOOP_HDFS_HOME=$HADOOP_PREFIX
  fi
fi

@@ -269,7 +269,7 @@ CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/$HDFS_DIR'/*'
# put yarn in classpath if present
if [ "$YARN_HOME" = "" ]; then
  if [ -d "${HADOOP_PREFIX}/$YARN_DIR" ]; then
-    YARN_HOME=$HADOOP_PREFIX
+    export YARN_HOME=$HADOOP_PREFIX
  fi
fi

@@ -286,7 +286,7 @@ CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_DIR'/*'
# put mapred in classpath if present AND different from YARN
if [ "$HADOOP_MAPRED_HOME" = "" ]; then
  if [ -d "${HADOOP_PREFIX}/$MAPRED_DIR" ]; then
-    HADOOP_MAPRED_HOME=$HADOOP_PREFIX
+    export HADOOP_MAPRED_HOME=$HADOOP_PREFIX
  fi
fi


+ 11 - 3
hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh

@@ -109,8 +109,10 @@ fi
export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"}
export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"}
+export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-"INFO,NullAppender"}
log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
+HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}

# Set default scheduling priority
if [ "$HADOOP_NICENESS" = "" ]; then
@@ -139,7 +141,7 @@ case $startStop in
    echo starting $command, logging to $log
    cd "$HADOOP_PREFIX"
    case $command in
-      namenode|secondarynamenode|datanode|dfs|dfsadmin|fsck|balancer)
+      namenode|secondarynamenode|datanode|dfs|dfsadmin|fsck|balancer|zkfc)
        if [ -z "$HADOOP_HDFS_HOME" ]; then
          hdfsScript="$HADOOP_PREFIX"/bin/hdfs
        else
@@ -162,9 +164,15 @@ case $startStop in
  (stop)

    if [ -f $pid ]; then
-      if kill -0 `cat $pid` > /dev/null 2>&1; then
+      TARGET_PID=`cat $pid`
+      if kill -0 $TARGET_PID > /dev/null 2>&1; then
        echo stopping $command
-        kill `cat $pid`
+        kill $TARGET_PID
+        sleep $HADOOP_STOP_TIMEOUT
+        if kill -0 $TARGET_PID > /dev/null 2>&1; then
+          echo "$command did not stop gracefully after $HADOOP_STOP_TIMEOUT seconds: killing with kill -9"
+          kill -9 $TARGET_PID
+        fi
      else
        echo no $command to stop
      fi

+ 77 - 0
hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh

@@ -0,0 +1,77 @@
+# Copyright 2011 The Apache Software Foundation
+# 
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.
+export JAVA_HOME=${JAVA_HOME}
+
+# The jsvc implementation to use. Jsvc is required to run secure datanodes.
+#export JSVC_HOME=${JSVC_HOME}
+
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+
+# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+  if [ "$HADOOP_CLASSPATH" ]; then
+    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+  else
+    export HADOOP_CLASSPATH=$f
+  fi
+done
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
+export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+
+# The directory where pid files are stored. /tmp by default.
+# NOTE: this should be set to a directory that can only be written to by 
+#       the user that will run the hadoop daemons.  Otherwise there is the
+#       potential for a symlink attack.
+export HADOOP_PID_DIR=${HADOOP_PID_DIR}
+export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/conf/log4j.properties

@@ -102,7 +102,7 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 #
 #Security appender
 #
-hadoop.security.logger=INFO,console
+hadoop.security.logger=INFO,NullAppender
 hadoop.security.log.maxfilesize=256MB
 hadoop.security.log.maxbackupindex=20
 log4j.category.SecurityLogger=${hadoop.security.logger}
@@ -126,7 +126,7 @@ log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
 #
 # hdfs audit logging
 #
-hdfs.audit.logger=INFO,console
+hdfs.audit.logger=INFO,NullAppender
 hdfs.audit.log.maxfilesize=256MB
 hdfs.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
@@ -141,7 +141,7 @@ log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
 #
 # mapred audit logging
 #
-mapred.audit.logger=INFO,console
+mapred.audit.logger=INFO,NullAppender
 mapred.audit.log.maxfilesize=256MB
 mapred.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}

+ 36 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -617,7 +617,13 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    }
    Matcher match = varPat.matcher("");
    String eval = expr;
+    Set<String> evalSet = new HashSet<String>();
    for(int s=0; s<MAX_SUBST; s++) {
+      if (evalSet.contains(eval)) {
+        // Cyclic resolution pattern detected. Return current expression.
+        return eval;
+      }
+      evalSet.add(eval);
      match.reset(eval);
      if (!match.find()) {
        return eval;
@@ -917,6 +923,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
      return defaultValue;
    return Float.parseFloat(valueString);
  }
+
  /**
   * Set the value of the <code>name</code> property to a <code>float</code>.
   * 
@@ -926,6 +933,35 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
  public void setFloat(String name, float value) {
    set(name,Float.toString(value));
  }
+
+  /** 
+   * Get the value of the <code>name</code> property as a <code>double</code>.  
+   * If no such property exists, the provided default value is returned,
+   * or if the specified value is not a valid <code>double</code>,
+   * then an error is thrown.
+   *
+   * @param name property name.
+   * @param defaultValue default value.
+   * @throws NumberFormatException when the value is invalid
+   * @return property value as a <code>double</code>, 
+   *         or <code>defaultValue</code>. 
+   */
+  public double getDouble(String name, double defaultValue) {
+    String valueString = getTrimmed(name);
+    if (valueString == null)
+      return defaultValue;
+    return Double.parseDouble(valueString);
+  }
+
+  /**
+   * Set the value of the <code>name</code> property to a <code>double</code>.
+   * 
+   * @param name property name.
+   * @param value property value.
+   */
+  public void setDouble(String name, double value) {
+    set(name,Double.toString(value));
+  }
 
  /** 
   * Get the value of the <code>name</code> property as a <code>boolean</code>.  
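
The two hunks above land HADOOP-8415 (double-precision accessors) and HADOOP-6871 (variable substitution now terminates when it re-encounters an expression it has already tried to resolve, instead of throwing IllegalStateException). A minimal sketch of the resulting behavior; the property names are invented for illustration:

import org.apache.hadoop.conf.Configuration;

public class ConfigurationDoubleDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // HADOOP-8415: round-trip a double-valued property.
    conf.setDouble("example.sampling.rate", 0.05);
    double rate = conf.getDouble("example.sampling.rate", 1.0); // 0.05
    double fallback = conf.getDouble("example.not.set", 1.0);   // 1.0

    // HADOOP-6871: a self-referencing value is returned unresolved
    // rather than failing with "substitution depth too large".
    conf.set("example.cycle", "${example.cycle}");
    String unresolved = conf.get("example.cycle"); // "${example.cycle}"

    System.out.println(rate + " " + fallback + " " + unresolved);
  }
}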

+ 16 - 22
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java

@@ -17,29 +17,23 @@
 */
package org.apache.hadoop.fs;

-import java.io.DataInput;
-import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-
-/*
- * A BlockLocation lists hosts, offset and length
- * of block. 
- * 
+
+/**
+ * Represents the network location of a block, information about the hosts
+ * that contain block replicas, and other block metadata (E.g. the file
+ * offset associated with the block, length, whether it is corrupt, etc).
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class BlockLocation {
-  private String[] hosts; //hostnames of datanodes
-  private String[] names; //hostname:portNumber of datanodes
-  private String[] topologyPaths; // full path name in network topology
-  private long offset;  //offset of the of the block in the file
+  private String[] hosts; // Datanode hostnames
+  private String[] names; // Datanode IP:xferPort for accessing the block
+  private String[] topologyPaths; // Full path name in network topology
+  private long offset;  // Offset of the block in the file
  private long length;
  private boolean corrupt;

@@ -105,7 +99,7 @@ public class BlockLocation {
   * Get the list of hosts (hostname) hosting this block
   */
  public String[] getHosts() throws IOException {
-    if ((hosts == null) || (hosts.length == 0)) {
+    if (hosts == null || hosts.length == 0) {
      return new String[0];
    } else {
      return hosts;
@@ -113,25 +107,25 @@ public class BlockLocation {
  }

  /**
-   * Get the list of names (hostname:port) hosting this block
+   * Get the list of names (IP:xferPort) hosting this block
   */
  public String[] getNames() throws IOException {
-    if ((names == null) || (names.length == 0)) {
+    if (names == null || names.length == 0) {
      return new String[0];
    } else {
-      return this.names;
+      return names;
    }
  }

  /**
   * Get the list of network topology paths for each of the hosts.
-   * The last component of the path is the host.
+   * The last component of the path is the "name" (IP:xferPort).
   */
  public String[] getTopologyPaths() throws IOException {
-    if ((topologyPaths == null) || (topologyPaths.length == 0)) {
+    if (topologyPaths == null || topologyPaths.length == 0) {
      return new String[0];
    } else {
-      return this.topologyPaths;
+      return topologyPaths;
    }
  }
  
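
With the Writable plumbing removed, BlockLocation is a plain value class populated by the filesystem. A small sketch of reading the fields this diff documents, via the standard FileSystem#getFileBlockLocations call (the path is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;

public class BlockLocationDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/user/demo/data.seq"); // illustrative path
    FileStatus stat = fs.getFileStatus(p);

    // One BlockLocation per block overlapping the requested byte range.
    BlockLocation[] blocks = fs.getFileBlockLocations(stat, 0, stat.getLen());
    for (BlockLocation b : blocks) {
      System.out.println("offset=" + b.getOffset() + " length=" + b.getLength());
      for (String name : b.getNames()) {        // datanode IP:xferPort
        System.out.println("  name: " + name);
      }
      for (String host : b.getHosts()) {        // datanode hostnames
        System.out.println("  host: " + host);
      }
      for (String tp : b.getTopologyPaths()) {  // rack path ending in IP:xferPort
        System.out.println("  topology: " + tp);
      }
    }
  }
}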

+ 10 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.fs;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.http.lib.StaticUserWebFilter;

/** 
 * This class contains constants for configuration keys used
@@ -116,6 +117,8 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
      "security.refresh.user.mappings.protocol.acl";
  public static final String 
  SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl";
+  public static final String 
+  SECURITY_ZKFC_PROTOCOL_ACL = "security.zkfc.protocol.acl";
  
  public static final String HADOOP_SECURITY_TOKEN_SERVICE_USE_IP =
      "hadoop.security.token.service.use_ip";
@@ -161,5 +164,12 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
    "ha.failover-controller.cli-check.rpc-timeout.ms";
  public static final int HA_FC_CLI_CHECK_TIMEOUT_DEFAULT = 20000;

+  /** Static user web-filter properties.
+   * See {@link StaticUserWebFilter}.
+   */
+  public static final String HADOOP_HTTP_STATIC_USER =
+    "hadoop.http.staticuser.user";
+  public static final String DEFAULT_HADOOP_HTTP_STATIC_USER =
+    "dr.who";
}

+ 9 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java

@@ -44,6 +44,9 @@ import org.apache.hadoop.classification.InterfaceStability;
 * else append to an existing file.</li>
 * <li> CREATE|OVERWRITE - to create a file if it does not exist, 
 * else overwrite an existing file.</li>
+ * <li> SYNC_BLOCK - to force closed blocks to the disk device.
+ * In addition {@link Syncable#hsync()} should be called after each write,
+ * if true synchronous behavior is required.</li>
 * </ol>
 * 
 * Following combination is not valid and will result in 
@@ -71,7 +74,12 @@ public enum CreateFlag {
  /**
   * Append to a file. See javadoc for more description.
   */
-  APPEND((short) 0x04);
+  APPEND((short) 0x04),
+
+  /**
+   * Force closed blocks to disk. Similar to POSIX O_SYNC. See javadoc for description.
+   */
+  SYNC_BLOCK((short) 0x08);

  private final short mode;

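A hedged sketch of the new flag in use (path and sizes are illustrative): SYNC_BLOCK is combined with CREATE in an EnumSet and handed to the create() overload added to FileSystem in the next diff; as the javadoc above notes, hsync() is still required after each write for true synchronous behavior:

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.FsPermission;

public class SyncBlockDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/sync-block-demo"); // illustrative path

    // CREATE|SYNC_BLOCK: create the file and force closed blocks to disk.
    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.SYNC_BLOCK);
    FSDataOutputStream out = fs.create(p, FsPermission.getDefault(), flags,
        4096, (short) 1, fs.getDefaultBlockSize(p), null);
    out.write("hello".getBytes("UTF-8"));
    out.hsync();   // per the javadoc, needed for true synchronous behavior
    out.close();
  }
}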

+ 61 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -615,7 +615,9 @@ public abstract class FileSystem extends Configured implements Closeable {
   * Return a set of server default configuration values
   * @return server default configuration values
   * @throws IOException
+   * @deprecated use {@link #getServerDefaults(Path)} instead
   */
+  @Deprecated
  public FsServerDefaults getServerDefaults() throws IOException {
    Configuration conf = getConf();
    return new FsServerDefaults(getDefaultBlockSize(), 
@@ -828,6 +830,30 @@ public abstract class FileSystem extends Configured implements Closeable {
      long blockSize,
      Progressable progress) throws IOException;
  
+  /**
+   * Create an FSDataOutputStream at the indicated Path with write-progress
+   * reporting.
+   * @param f the file name to open
+   * @param permission
+   * @param flags {@link CreateFlag}s to use for this stream.
+   * @param bufferSize the size of the buffer to be used.
+   * @param replication required block replication for the file.
+   * @param blockSize
+   * @param progress
+   * @throws IOException
+   * @see #setPermission(Path, FsPermission)
+   */
+  public FSDataOutputStream create(Path f,
+      FsPermission permission,
+      EnumSet<CreateFlag> flags,
+      int bufferSize,
+      short replication,
+      long blockSize,
+      Progressable progress) throws IOException {
+    // only DFS support this
+    return create(f, permission, flags.contains(CreateFlag.OVERWRITE), bufferSize, replication, blockSize, progress);
+  }
+  

  /*.
   * This create has been added to support the FileContext that processes
@@ -952,10 +978,35 @@ public abstract class FileSystem extends Configured implements Closeable {
   public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
       boolean overwrite, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {
-     throw new IOException("createNonRecursive unsupported for this filesystem "
-         + this.getClass());
+     return createNonRecursive(f, permission,
+         overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
+             : EnumSet.of(CreateFlag.CREATE), bufferSize,
+             replication, blockSize, progress);
   }

+   /**
+    * Opens an FSDataOutputStream at the indicated Path with write-progress
+    * reporting. Same as create(), except fails if parent directory doesn't
+    * already exist.
+    * @param f the file name to open
+    * @param permission
+    * @param flags {@link CreateFlag}s to use for this stream.
+    * @param bufferSize the size of the buffer to be used.
+    * @param replication required block replication for the file.
+    * @param blockSize
+    * @param progress
+    * @throws IOException
+    * @see #setPermission(Path, FsPermission)
+    * @deprecated API only for 0.20-append
+    */
+    @Deprecated
+    public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
+        EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
+        Progressable progress) throws IOException {
+      throw new IOException("createNonRecursive unsupported for this filesystem "
+          + this.getClass());
+    }
+
  /**
   * Creates the given Path as a brand-new zero-length file.  If
   * create fails, or if it already existed, return false.
@@ -1939,8 +1990,12 @@ public abstract class FileSystem extends Configured implements Closeable {
    return getFileStatus(f).getBlockSize();
  }

-  /** Return the number of bytes that large input files should be optimally
-   * be split into to minimize i/o time. */
+  /**
+   * Return the number of bytes that large input files should be optimally
+   * be split into to minimize i/o time.
+   * @deprecated use {@link #getDefaultBlockSize(Path)} instead
+   */
+  @Deprecated
  public long getDefaultBlockSize() {
    // default to 32MB: large enough to minimize the impact of seeks
    return getConf().getLong("fs.local.block.size", 32 * 1024 * 1024);
@@ -1958,7 +2013,9 @@ public abstract class FileSystem extends Configured implements Closeable {

  /**
   * Get the default replication.
+   * @deprecated use {@link #getDefaultReplication(Path)} instead
   */
+  @Deprecated
  public short getDefaultReplication() { return 1; }

  /**
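
The deprecations above (HADOOP-8422) steer callers to the Path-qualified variants, which can differ per filesystem once mount tables or federated namespaces are involved; a before/after sketch under that assumption, with an invented path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;

public class PathQualifiedDefaultsDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/user/demo/data.seq"); // illustrative path

    // Deprecated by this change: the no-arg variants cannot account for
    // the filesystem actually backing a given path.
    long blockSize = fs.getDefaultBlockSize();        // deprecated
    short replication = fs.getDefaultReplication();   // deprecated

    // Preferred: the Path-qualified variants referenced by the new javadoc.
    blockSize = fs.getDefaultBlockSize(p);
    replication = fs.getDefaultReplication(p);
    FsServerDefaults defaults = fs.getServerDefaults(p);
    System.out.println(blockSize + " " + replication + " " + defaults.getBlockSize());
  }
}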

+ 8 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -307,6 +307,12 @@ public class RawLocalFileSystem extends FileSystem {
    return FileUtil.fullyDelete(f);
  }
 
+  /**
+   * {@inheritDoc}
+   *
+   * (<b>Note</b>: Returned list is not sorted in any given order,
+   * due to reliance on Java's {@link File#list()} API.)
+   */
  public FileStatus[] listStatus(Path f) throws IOException {
    File localf = pathToFile(f);
    FileStatus[] results;
@@ -316,7 +322,7 @@ public class RawLocalFileSystem extends FileSystem {
    }
    if (localf.isFile()) {
      return new FileStatus[] {
-        new RawLocalFileStatus(localf, getDefaultBlockSize(), this) };
+        new RawLocalFileStatus(localf, getDefaultBlockSize(f), this) };
    }

    String[] names = localf.list();
@@ -444,7 +450,7 @@ public class RawLocalFileSystem extends FileSystem {
  public FileStatus getFileStatus(Path f) throws IOException {
    File path = pathToFile(f);
    if (path.exists()) {
-      return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(), this);
+      return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(f), this);
    } else {
      throw new FileNotFoundException("File " + f + " does not exist");
    }
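
Because the new javadoc warns that listStatus() inherits the unspecified ordering of File#list(), callers needing a deterministic order can sort the returned array themselves; a minimal sketch (the directory and comparator choice are illustrative):

import java.util.Arrays;
import java.util.Comparator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SortedListStatusDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    FileStatus[] statuses = fs.listStatus(new Path("/tmp"));

    // Returned order is not guaranteed; sort by path for determinism.
    Arrays.sort(statuses, new Comparator<FileStatus>() {
      @Override
      public int compare(FileStatus a, FileStatus b) {
        return a.getPath().toString().compareTo(b.getPath().toString());
      }
    });
    for (FileStatus s : statuses) {
      System.out.println(s.getPath());
    }
  }
}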

+ 2 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java

@@ -34,7 +34,6 @@ import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.util.ReflectionUtils;
@@ -136,7 +135,7 @@ class Display extends FsCommand {

  protected class TextRecordInputStream extends InputStream {
    SequenceFile.Reader r;
-    WritableComparable<?> key;
+    Writable key;
    Writable val;

    DataInputBuffer inbuf;
@@ -148,7 +147,7 @@ class Display extends FsCommand {
      r = new SequenceFile.Reader(lconf, 
          SequenceFile.Reader.file(fpath));
      key = ReflectionUtils.newInstance(
-          r.getKeyClass().asSubclass(WritableComparable.class), lconf);
+          r.getKeyClass().asSubclass(Writable.class), lconf);
      val = ReflectionUtils.newInstance(
          r.getValueClass().asSubclass(Writable.class), lconf);
      inbuf = new DataInputBuffer();

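Relaxing the key type from WritableComparable to Writable lets "hadoop fs -text" display sequence files whose key class is serializable but defines no ordering. A standalone sketch of the same read loop (argument handling is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Writable;
    import org.apache.hadoop.util.ReflectionUtils;

    public class SequenceFileDump {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        SequenceFile.Reader reader = new SequenceFile.Reader(
            conf, SequenceFile.Reader.file(new Path(args[0])));
        try {
          // Only Writable is required of the key class, matching the
          // relaxed asSubclass() check in the patch above.
          Writable key = ReflectionUtils.newInstance(
              reader.getKeyClass().asSubclass(Writable.class), conf);
          Writable val = ReflectionUtils.newInstance(
              reader.getValueClass().asSubclass(Writable.class), conf);
          while (reader.next(key, val)) {
            System.out.println(key + "\t" + val);
          }
        } finally {
          reader.close();
        }
      }
    }
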
+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -233,6 +233,11 @@ public class ViewFileSystem extends FileSystem {
       fsState.resolve(getUriPath(f), true);
     return res.isInternalDir() ? null : res.targetFileSystem.getHomeDirectory();
   }
+  
+  @Override
+  public String getCanonicalServiceName() {
+    return null;
+  }
 
   @Override
   public URI getUri() {

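Returning null advertises that a viewfs mount table has no single canonical service, so delegation-token machinery should not try to fetch a token against the viewfs URI itself. A hedged sketch of what a caller observes, assuming a mount table is already configured (the viewfs URI is illustrative):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class ServiceNameProbe {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();  // assumes viewfs mounts configured
        FileSystem fs = FileSystem.get(URI.create("viewfs:///"), conf);
        String service = fs.getCanonicalServiceName();
        // null means "no canonical service": token helpers must fall
        // back to the mounted filesystems instead.
        System.out.println(service == null ? "no canonical service" : service);
      }
    }
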
+ 151 - 44
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -29,6 +29,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.zookeeper.data.ACL;
 import org.apache.zookeeper.KeeperException;
@@ -81,9 +82,15 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
    */
   public interface ActiveStandbyElectorCallback {
     /**
-     * This method is called when the app becomes the active leader
+     * This method is called when the app becomes the active leader.
+     * If the service fails to become active, it should throw
+     * ServiceFailedException. This will cause the elector to
+     * sleep for a short period, then re-join the election.
+     * 
+     * Callback implementations are expected to manage their own
+     * timeouts (e.g. when making an RPC to a remote node).
      */
-    void becomeActive();
+    void becomeActive() throws ServiceFailedException;
 
     /**
      * This method is called when the app becomes a standby
@@ -134,7 +141,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
   public static final Log LOG = LogFactory.getLog(ActiveStandbyElector.class);
 
-  private static final int NUM_RETRIES = 3;
+  static int NUM_RETRIES = 3;
+  private static final int SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE = 1000;
 
   private static enum ConnectionState {
     DISCONNECTED, CONNECTED, TERMINATED
@@ -154,6 +162,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   private final String zkHostPort;
   private final int zkSessionTimeout;
   private final List<ACL> zkAcl;
+  private final List<ZKAuthInfo> zkAuthInfo;
   private byte[] appData;
   private final String zkLockFilePath;
   private final String zkBreadCrumbPath;
@@ -185,6 +194,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
    *          znode under which to create the lock
    * @param acl
    *          ZooKeeper ACL's
+   * @param authInfo a list of authentication credentials to add to the
+   *                 ZK connection
    * @param app
    *          reference to callback interface object
    * @throws IOException
@@ -192,6 +203,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
    */
   public ActiveStandbyElector(String zookeeperHostPorts,
       int zookeeperSessionTimeout, String parentZnodeName, List<ACL> acl,
+      List<ZKAuthInfo> authInfo,
       ActiveStandbyElectorCallback app) throws IOException,
       HadoopIllegalArgumentException {
     if (app == null || acl == null || parentZnodeName == null
@@ -201,6 +213,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     zkHostPort = zookeeperHostPorts;
     zkSessionTimeout = zookeeperSessionTimeout;
     zkAcl = acl;
+    zkAuthInfo = authInfo;
     appClient = app;
     znodeWorkingDir = parentZnodeName;
     zkLockFilePath = znodeWorkingDir + "/" + LOCK_FILENAME;
@@ -227,8 +240,6 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   public synchronized void joinElection(byte[] data)
       throws HadoopIllegalArgumentException {
     
-    LOG.debug("Attempting active election");
-
     if (data == null) {
       throw new HadoopIllegalArgumentException("data cannot be null");
     }
@@ -236,6 +247,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     appData = new byte[data.length];
     System.arraycopy(data, 0, appData, 0, data.length);
 
+    LOG.debug("Attempting active election for " + this);
     joinElectionInternal();
   }
   
@@ -259,6 +271,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
    */
   public synchronized void ensureParentZNode()
       throws IOException, InterruptedException {
+    Preconditions.checkState(!wantToBeInElection,
+        "ensureParentZNode() may not be called while in the election");
+
     String pathParts[] = znodeWorkingDir.split("/");
     Preconditions.checkArgument(pathParts.length >= 1 &&
         "".equals(pathParts[0]),
@@ -292,6 +307,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
    */
   public synchronized void clearParentZNode()
       throws IOException, InterruptedException {
+    Preconditions.checkState(!wantToBeInElection,
+        "clearParentZNode() may not be called while in the election");
+
     try {
       LOG.info("Recursively deleting " + znodeWorkingDir + " from ZK...");
 
@@ -360,7 +378,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
         createConnection();
       }
       Stat stat = new Stat();
-      return zkClient.getData(zkLockFilePath, false, stat);
+      return getDataWithRetries(zkLockFilePath, false, stat);
     } catch(KeeperException e) {
       Code code = e.code();
       if (isNodeDoesNotExist(code)) {
@@ -380,13 +398,17 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       String name) {
     if (isStaleClient(ctx)) return;
     LOG.debug("CreateNode result: " + rc + " for path: " + path
-        + " connectionState: " + zkConnectionState);
+        + " connectionState: " + zkConnectionState +
+        "  for " + this);
 
     Code code = Code.get(rc);
     if (isSuccess(code)) {
       // we successfully created the znode. we are the leader. start monitoring
-      becomeActive();
-      monitorActiveStatus();
+      if (becomeActive()) {
+        monitorActiveStatus();
+      } else {
+        reJoinElectionAfterFailureToBecomeActive();
+      }
       return;
     }
 
@@ -433,8 +455,13 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   public synchronized void processResult(int rc, String path, Object ctx,
       Stat stat) {
     if (isStaleClient(ctx)) return;
+    
+    assert wantToBeInElection :
+        "Got a StatNode result after quitting election";
+    
     LOG.debug("StatNode result: " + rc + " for path: " + path
-        + " connectionState: " + zkConnectionState);
+        + " connectionState: " + zkConnectionState + " for " + this);
+        
 
     Code code = Code.get(rc);
     if (isSuccess(code)) {
@@ -442,7 +469,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       // creation was retried
       if (stat.getEphemeralOwner() == zkClient.getSessionId()) {
         // we own the lock znode. so we are the leader
-        becomeActive();
+        if (!becomeActive()) {
+          reJoinElectionAfterFailureToBecomeActive();
+        }
       } else {
         // we dont own the lock znode. so we are a standby.
         becomeStandby();
@@ -470,20 +499,37 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       }
       errorMessage = errorMessage
           + ". Not retrying further znode monitoring connection errors.";
+    } else if (isSessionExpired(code)) {
+      // This isn't fatal - the client Watcher will re-join the election
+      LOG.warn("Lock monitoring failed because session was lost");
+      return;
     }
 
     fatalError(errorMessage);
   }
 
   /**
-   * interface implementation of Zookeeper watch events (connection and node)
+   * We failed to become active. Re-join the election, but
+   * sleep for a few seconds after terminating our existing
+   * session, so that other nodes have a chance to become active.
+   * The failure to become active is already logged inside
+   * becomeActive().
+   */
+  private void reJoinElectionAfterFailureToBecomeActive() {
+    reJoinElection(SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE);
+  }
+
+  /**
+   * interface implementation of Zookeeper watch events (connection and node),
+   * proxied by {@link WatcherWithClientRef}.
    */
   synchronized void processWatchEvent(ZooKeeper zk, WatchedEvent event) {
     Event.EventType eventType = event.getType();
     if (isStaleClient(zk)) return;
     LOG.debug("Watcher event type: " + eventType + " with state:"
         + event.getState() + " for path:" + event.getPath()
-        + " connectionState: " + zkConnectionState);
+        + " connectionState: " + zkConnectionState
+        + " for " + this);
 
     if (eventType == Event.EventType.None) {
       // the connection state has changed
@@ -494,7 +540,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
         // be undone
         ConnectionState prevConnectionState = zkConnectionState;
         zkConnectionState = ConnectionState.CONNECTED;
-        if (prevConnectionState == ConnectionState.DISCONNECTED) {
+        if (prevConnectionState == ConnectionState.DISCONNECTED &&
+            wantToBeInElection) {
           monitorActiveStatus();
         }
         break;
@@ -511,7 +558,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
         // call listener to reconnect
         LOG.info("Session expired. Entering neutral mode and rejoining...");
         enterNeutralMode();
-        reJoinElection();
+        reJoinElection(0);
         break;
       default:
         fatalError("Unexpected Zookeeper watch event state: "
@@ -559,16 +606,21 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   protected synchronized ZooKeeper getNewZooKeeper() throws IOException {
     ZooKeeper zk = new ZooKeeper(zkHostPort, zkSessionTimeout, null);
     zk.register(new WatcherWithClientRef(zk));
+    for (ZKAuthInfo auth : zkAuthInfo) {
+      zk.addAuthInfo(auth.getScheme(), auth.getAuth());
+    }
     return zk;
   }
 
   private void fatalError(String errorMessage) {
+    LOG.fatal(errorMessage);
     reset();
     appClient.notifyFatalError(errorMessage);
   }
 
   private void monitorActiveStatus() {
-    LOG.debug("Monitoring active leader");
+    assert wantToBeInElection;
+    LOG.debug("Monitoring active leader for " + this);
     statRetryCount = 0;
     monitorLockNodeAsync();
   }
@@ -586,7 +638,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     createLockNodeAsync();
   }
 
-  private void reJoinElection() {
+  private void reJoinElection(int sleepTime) {
     LOG.info("Trying to re-establish ZK session");
     
     // Some of the test cases rely on expiring the ZK sessions and
@@ -599,12 +651,30 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     sessionReestablishLockForTests.lock();
     try {
       terminateConnection();
+      sleepFor(sleepTime);
+      
       joinElectionInternal();
     } finally {
       sessionReestablishLockForTests.unlock();
     }
   }
-  
+
+  /**
+   * Sleep for the given number of milliseconds.
+   * This is non-static, and separated out, so that unit tests
+   * can override the behavior not to sleep.
+   */
+  @VisibleForTesting
+  protected void sleepFor(int sleepMs) {
+    if (sleepMs > 0) {
+      try {
+        Thread.sleep(sleepMs);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
   @VisibleForTesting
   void preventSessionReestablishmentForTests() {
     sessionReestablishLockForTests.lock();
@@ -616,8 +686,12 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   }
   
   @VisibleForTesting
-  long getZKSessionIdForTests() {
-    return zkClient.getSessionId();
+  synchronized long getZKSessionIdForTests() {
+    if (zkClient != null) {
+      return zkClient.getSessionId();
+    } else {
+      return -1;
+    }
   }
   
   @VisibleForTesting
@@ -629,17 +703,13 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     int connectionRetryCount = 0;
     boolean success = false;
     while(!success && connectionRetryCount < NUM_RETRIES) {
-      LOG.debug("Establishing zookeeper connection");
+      LOG.debug("Establishing zookeeper connection for " + this);
       try {
         createConnection();
         success = true;
       } catch(IOException e) {
         LOG.warn(e);
-        try {
-          Thread.sleep(5000);
-        } catch(InterruptedException e1) {
-          LOG.warn(e1);
-        }
+        sleepFor(5000);
       }
       ++connectionRetryCount;
     }
@@ -647,14 +717,24 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   }
 
   private void createConnection() throws IOException {
+    if (zkClient != null) {
+      try {
+        zkClient.close();
+      } catch (InterruptedException e) {
+        throw new IOException("Interrupted while closing ZK",
+            e);
+      }
+      zkClient = null;
+    }
     zkClient = getNewZooKeeper();
+    LOG.debug("Created new connection for " + this);
   }
   
-  private void terminateConnection() {
+  void terminateConnection() {
     if (zkClient == null) {
       return;
     }
-    LOG.debug("Terminating ZK connection");
+    LOG.debug("Terminating ZK connection for " + this);
     ZooKeeper tempZk = zkClient;
     zkClient = null;
     try {
@@ -670,20 +750,24 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     terminateConnection();
   }
 
-  private void becomeActive() {
+  private boolean becomeActive() {
     assert wantToBeInElection;
-    if (state != State.ACTIVE) {
-      try {
-        Stat oldBreadcrumbStat = fenceOldActive();
-        writeBreadCrumbNode(oldBreadcrumbStat);
-      } catch (Exception e) {
-        LOG.warn("Exception handling the winning of election", e);
-        reJoinElection();
-        return;
-      }
-      LOG.debug("Becoming active");
-      state = State.ACTIVE;
+    if (state == State.ACTIVE) {
+      // already active
+      return true;
+    }
+    try {
+      Stat oldBreadcrumbStat = fenceOldActive();
+      writeBreadCrumbNode(oldBreadcrumbStat);
+      
+      LOG.debug("Becoming active for " + this);
       appClient.becomeActive();
+      state = State.ACTIVE;
+      return true;
+    } catch (Exception e) {
+      LOG.warn("Exception handling the winning of election", e);
+      // Caller will handle quitting and rejoining the election.
+      return false;
    }
  }
 
@@ -779,7 +863,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
   private void becomeStandby() {
     if (state != State.STANDBY) {
-      LOG.debug("Becoming standby");
+      LOG.debug("Becoming standby for " + this);
       state = State.STANDBY;
       appClient.becomeStandby();
     }
@@ -787,7 +871,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
   private void enterNeutralMode() {
     if (state != State.NEUTRAL) {
-      LOG.debug("Entering neutral mode");
+      LOG.debug("Entering neutral mode for " + this);
       state = State.NEUTRAL;
       appClient.enterNeutralMode();
     }
@@ -814,6 +898,15 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     });
   }
 
+  private byte[] getDataWithRetries(final String path, final boolean watch,
+      final Stat stat) throws InterruptedException, KeeperException {
+    return zkDoWithRetries(new ZKAction<byte[]>() {
+      public byte[] run() throws KeeperException, InterruptedException {
+        return zkClient.getData(path, watch, stat);
+      }
+    });
+  }
+
   private Stat setDataWithRetries(final String path, final byte[] data,
       final int version) throws InterruptedException, KeeperException {
     return zkDoWithRetries(new ZKAction<Stat>() {
@@ -884,8 +977,14 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
     @Override
     public void process(WatchedEvent event) {
-      ActiveStandbyElector.this.processWatchEvent(
-          zk, event);
+      try {
+        ActiveStandbyElector.this.processWatchEvent(
+            zk, event);
+      } catch (Throwable t) {
+        fatalError(
+            "Failed to process watcher event " + event + ": " +
+            StringUtils.stringifyException(t));
+      }
     }
   }
 
@@ -913,5 +1012,13 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     }
     return false;
   }
+  
+  @Override
+  public String toString() {
+    return "elector id=" + System.identityHashCode(this) +
+      " appData=" +
+      ((appData == null) ? "null" : StringUtils.byteToHexString(appData)) + 
+      " cb=" + appClient;
+  }
 
 }

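A hedged sketch of a client of the revised elector contract: becomeActive() may now throw ServiceFailedException (triggering a sleep-and-rejoin instead of a fatal error), and the constructor takes the new authInfo list. The ZK addresses and callback bodies are illustrative; the remaining callback methods are assumed from this class's interface, and the sketch lives in org.apache.hadoop.ha because ZKAuthInfo is package-private:

    package org.apache.hadoop.ha;

    import java.util.List;
    import org.apache.zookeeper.ZooDefs.Ids;
    import org.apache.zookeeper.data.ACL;

    public class ElectorSketch {
      static class SketchCallback
          implements ActiveStandbyElector.ActiveStandbyElectorCallback {
        @Override
        public void becomeActive() throws ServiceFailedException {
          // Throwing here no longer kills the elector: it sleeps
          // briefly and re-joins the election.
          if (!tryStartActiveServices()) {
            throw new ServiceFailedException("could not go active");
          }
        }
        @Override
        public void becomeStandby() { /* stop active services */ }
        @Override
        public void enterNeutralMode() { /* connection lost; hold state */ }
        @Override
        public void notifyFatalError(String errorMessage) { /* abort */ }
        @Override
        public void fenceOldActive(byte[] oldActiveData) { /* fence it */ }

        private boolean tryStartActiveServices() { return true; }  // placeholder
      }

      public static void main(String[] args) throws Exception {
        List<ACL> acl = Ids.OPEN_ACL_UNSAFE;  // illustrative ACL
        ActiveStandbyElector elector = new ActiveStandbyElector(
            "zk1:2181,zk2:2181,zk3:2181", 5000, "/sketch-election", acl,
            HAZKUtil.parseAuth(null),  // empty auth list, per parseAuth()
            new SketchCallback());
        elector.ensureParentZNode();
        elector.joinElection("node1".getBytes("UTF-8"));
      }
    }
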
+ 15 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java

@@ -27,6 +27,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ipc.RPC;
 
 import com.google.common.base.Preconditions;
@@ -48,9 +50,12 @@ public class FailoverController {
   
   private final Configuration conf;
 
+  private final RequestSource requestSource;
   
-  public FailoverController(Configuration conf) {
+  public FailoverController(Configuration conf,
+      RequestSource source) {
     this.conf = conf;
+    this.requestSource = source;
     
     this.gracefulFenceTimeout = getGracefulFenceTimeout(conf);
     this.rpcTimeoutToNewActive = getRpcTimeoutToNewActive(conf);
@@ -100,7 +105,7 @@ public class FailoverController {
       toSvcStatus = toSvc.getServiceStatus();
     } catch (IOException e) {
       String msg = "Unable to get service state for " + target;
-      LOG.error(msg, e);
+      LOG.error(msg + ": " + e.getLocalizedMessage());
       throw new FailoverFailedException(msg, e);
     }
 
@@ -122,7 +127,7 @@ public class FailoverController {
     }
 
     try {
-      HAServiceProtocolHelper.monitorHealth(toSvc);
+      HAServiceProtocolHelper.monitorHealth(toSvc, createReqInfo());
     } catch (HealthCheckFailedException hce) {
       throw new FailoverFailedException(
           "Can't failover to an unhealthy service", hce);
@@ -132,7 +137,10 @@ public class FailoverController {
     }
   }
   
-  
+  private StateChangeRequestInfo createReqInfo() {
+    return new StateChangeRequestInfo(requestSource);
+  }
+
   /**
    * Try to get the HA state of the node at the given address. This
    * function is guaranteed to be "quick" -- ie it has a short timeout
@@ -143,7 +151,7 @@ public class FailoverController {
     HAServiceProtocol proxy = null;
     try {
       proxy = svc.getProxy(conf, gracefulFenceTimeout);
-      proxy.transitionToStandby();
+      proxy.transitionToStandby(createReqInfo());
       return true;
     } catch (ServiceFailedException sfe) {
       LOG.warn("Unable to gracefully make " + svc + " standby (" +
@@ -198,7 +206,8 @@ public class FailoverController {
     Throwable cause = null;
     try {
       HAServiceProtocolHelper.transitionToActive(
-          toSvc.getProxy(conf, rpcTimeoutToNewActive));
+          toSvc.getProxy(conf, rpcTimeoutToNewActive),
+          createReqInfo());
     } catch (ServiceFailedException sfe) {
       LOG.error("Unable to make " + toSvc + " active (" +
           sfe.getMessage() + "). Failing back.");

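A short sketch of driving a manual failover through this class after the change: the caller must now declare who is asking, and the target service can use that to accept or reject the transition. The two targets are assumed to be resolved elsewhere (e.g. by an HAAdmin subclass):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ha.FailoverController;
    import org.apache.hadoop.ha.FailoverFailedException;
    import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
    import org.apache.hadoop.ha.HAServiceTarget;

    public class FailoverSketch {
      static int runFailover(Configuration conf,
          HAServiceTarget fromNode, HAServiceTarget toNode) {
        // Tag every RPC this controller issues as user-initiated.
        FailoverController fc =
            new FailoverController(conf, RequestSource.REQUEST_BY_USER);
        try {
          fc.failover(fromNode, toNode,
              false /* forceFence */, false /* forceActive */);
          return 0;
        } catch (FailoverFailedException ffe) {
          System.err.println("Failover failed: " + ffe.getLocalizedMessage());
          return -1;
        }
      }
    }
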
+ 206 - 51
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java

@@ -19,11 +19,11 @@ package org.apache.hadoop.ha;
 
 import java.io.IOException;
 import java.io.PrintStream;
+import java.util.Arrays;
 import java.util.Map;
 
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
@@ -33,9 +33,12 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 
 /**
@@ -49,6 +52,13 @@ public abstract class HAAdmin extends Configured implements Tool {
   
   private static final String FORCEFENCE  = "forcefence";
   private static final String FORCEACTIVE = "forceactive";
+  
+  /**
+   * Undocumented flag which allows an administrator to use manual failover
+   * state transitions even when auto-failover is enabled. This is an unsafe
+   * operation, which is why it is not documented in the usage below.
+   */
+  private static final String FORCEMANUAL = "forcemanual";
   private static final Log LOG = LogFactory.getLog(HAAdmin.class);
 
   private int rpcTimeoutForChecks = -1;
@@ -79,6 +89,7 @@ public abstract class HAAdmin extends Configured implements Tool {
   /** Output stream for errors, for use in tests */
   protected PrintStream errOut = System.err;
   PrintStream out = System.out;
+  private RequestSource requestSource = RequestSource.REQUEST_BY_USER;
 
   protected abstract HAServiceTarget resolveTarget(String string);
 
@@ -106,63 +117,83 @@ public abstract class HAAdmin extends Configured implements Tool {
     errOut.println("Usage: HAAdmin [" + cmd + " " + usage.args + "]");
   }
 
-  private int transitionToActive(final String[] argv)
+  private int transitionToActive(final CommandLine cmd)
       throws IOException, ServiceFailedException {
-    if (argv.length != 2) {
+    String[] argv = cmd.getArgs();
+    if (argv.length != 1) {
      errOut.println("transitionToActive: incorrect number of arguments");
       printUsage(errOut, "-transitionToActive");
       return -1;
     }
-    
-    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(
+    HAServiceTarget target = resolveTarget(argv[0]);
+    if (!checkManualStateManagementOK(target)) {
+      return -1;
+    }
+    HAServiceProtocol proto = target.getProxy(
         getConf(), 0);
-    HAServiceProtocolHelper.transitionToActive(proto);
+    HAServiceProtocolHelper.transitionToActive(proto, createReqInfo());
     return 0;
   }
 
-  private int transitionToStandby(final String[] argv)
+  private int transitionToStandby(final CommandLine cmd)
       throws IOException, ServiceFailedException {
-    if (argv.length != 2) {
+    String[] argv = cmd.getArgs();
+    if (argv.length != 1) {
       errOut.println("transitionToStandby: incorrect number of arguments");
       printUsage(errOut, "-transitionToStandby");
       return -1;
     }
     
-    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(
+    HAServiceTarget target = resolveTarget(argv[0]);
+    if (!checkManualStateManagementOK(target)) {
+      return -1;
+    }
+    HAServiceProtocol proto = target.getProxy(
        getConf(), 0);
-    HAServiceProtocolHelper.transitionToStandby(proto);
+    HAServiceProtocolHelper.transitionToStandby(proto, createReqInfo());
     return 0;
   }
+  /**
+   * Ensure that we are allowed to manually manage the HA state of the target
+   * service. If automatic failover is configured, then the automatic
+   * failover controllers should be doing state management, and it is generally
+   * an error to use the HAAdmin command line to do so.
+   * 
+   * @param target the target to check
+   * @return true if manual state management is allowed
+   */
+  private boolean checkManualStateManagementOK(HAServiceTarget target) {
+    if (target.isAutoFailoverEnabled()) {
+      if (requestSource != RequestSource.REQUEST_BY_USER_FORCED) {
+        errOut.println(
+            "Automatic failover is enabled for " + target + "\n" +
+            "Refusing to manually manage HA state, since it may cause\n" +
+            "a split-brain scenario or other incorrect state.\n" +
+            "If you are very sure you know what you are doing, please \n" +
+            "specify the " + FORCEMANUAL + " flag.");
+        return false;
+      } else {
+        LOG.warn("Proceeding with manual HA state management even though\n" +
+            "automatic failover is enabled for " + target);
+        return true;
+      }
+    }
+    return true;
+  }
 
-  private int failover(final String[] argv)
-      throws IOException, ServiceFailedException {
-    boolean forceFence = false;
-    boolean forceActive = false;
-
-    Options failoverOpts = new Options();
-    // "-failover" isn't really an option but we need to add
-    // it to appease CommandLineParser
-    failoverOpts.addOption("failover", false, "failover");
-    failoverOpts.addOption(FORCEFENCE, false, "force fencing");
-    failoverOpts.addOption(FORCEACTIVE, false, "force failover");
+  private StateChangeRequestInfo createReqInfo() {
+    return new StateChangeRequestInfo(requestSource);
+  }
 
-    CommandLineParser parser = new GnuParser();
-    CommandLine cmd;
+  private int failover(CommandLine cmd)
+      throws IOException, ServiceFailedException {
+    boolean forceFence = cmd.hasOption(FORCEFENCE);
+    boolean forceActive = cmd.hasOption(FORCEACTIVE);
 
-    try {
-      cmd = parser.parse(failoverOpts, argv);
-      forceFence = cmd.hasOption(FORCEFENCE);
-      forceActive = cmd.hasOption(FORCEACTIVE);
-    } catch (ParseException pe) {
-      errOut.println("failover: incorrect arguments");
-      printUsage(errOut, "-failover");
-      return -1;
-    }
-    
     int numOpts = cmd.getOptions() == null ? 0 : cmd.getOptions().length;
     final String[] args = cmd.getArgs();
 
-    if (numOpts > 2 || args.length != 2) {
+    if (numOpts > 3 || args.length != 2) {
       errOut.println("failover: incorrect arguments");
       printUsage(errOut, "-failover");
       return -1;
@@ -171,7 +202,30 @@ public abstract class HAAdmin extends Configured implements Tool {
     HAServiceTarget fromNode = resolveTarget(args[0]);
     HAServiceTarget toNode = resolveTarget(args[1]);
     
-    FailoverController fc = new FailoverController(getConf());
+    // Check that auto-failover is consistently configured for both nodes.
+    Preconditions.checkState(
+        fromNode.isAutoFailoverEnabled() ==
+          toNode.isAutoFailoverEnabled(),
+          "Inconsistent auto-failover configs between %s and %s!",
+          fromNode, toNode);
+    
+    if (fromNode.isAutoFailoverEnabled()) {
+      if (forceFence || forceActive) {
+        // -forceActive doesn't make sense with auto-HA, since, if the node
+        // is not healthy, then its ZKFC will immediately quit the election
+        // again the next time a health check runs.
+        //
+        // -forceFence doesn't seem to have any real use cases with auto-HA
+        // so it isn't implemented.
+        errOut.println(FORCEFENCE + " and " + FORCEACTIVE + " flags not " +
+            "supported with auto-failover enabled.");
+        return -1;
+      }
+      return gracefulFailoverThroughZKFCs(toNode);
+    }
+    
+    FailoverController fc = new FailoverController(getConf(),
+        requestSource);
     
     try {
       fc.failover(fromNode, toNode, forceFence, forceActive); 
@@ -182,19 +236,44 @@ public abstract class HAAdmin extends Configured implements Tool {
     }
     return 0;
   }
+  
+
+  /**
+   * Initiate a graceful failover by talking to the target node's ZKFC.
+   * This sends an RPC to the ZKFC, which coordinates the failover.
+   * 
+   * @param toNode the node to fail to
+   * @return status code (0 for success)
+   * @throws IOException if failover does not succeed
+   */
+  private int gracefulFailoverThroughZKFCs(HAServiceTarget toNode)
+      throws IOException {
+
+    int timeout = FailoverController.getRpcTimeoutToNewActive(getConf());
+    ZKFCProtocol proxy = toNode.getZKFCProxy(getConf(), timeout);
+    try {
+      proxy.gracefulFailover();
+      out.println("Failover to " + toNode + " successful");
+    } catch (ServiceFailedException sfe) {
+      errOut.println("Failover failed: " + sfe.getLocalizedMessage());
+      return -1;
+    }
+
+    return 0;
+  }
 
-  private int checkHealth(final String[] argv)
+  private int checkHealth(final CommandLine cmd)
       throws IOException, ServiceFailedException {
-    if (argv.length != 2) {
+    String[] argv = cmd.getArgs();
+    if (argv.length != 1) {
       errOut.println("checkHealth: incorrect number of arguments");
       printUsage(errOut, "-checkHealth");
       return -1;
     }
-    
-    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(
+    HAServiceProtocol proto = resolveTarget(argv[0]).getProxy(
         getConf(), rpcTimeoutForChecks);
     try {
-      HAServiceProtocolHelper.monitorHealth(proto);
+      HAServiceProtocolHelper.monitorHealth(proto, createReqInfo());
     } catch (HealthCheckFailedException e) {
       errOut.println("Health check failed: " + e.getLocalizedMessage());
       return -1;
@@ -202,15 +281,16 @@ public abstract class HAAdmin extends Configured implements Tool {
     return 0;
   }
 
-  private int getServiceState(final String[] argv)
+  private int getServiceState(final CommandLine cmd)
       throws IOException, ServiceFailedException {
-    if (argv.length != 2) {
+    String[] argv = cmd.getArgs();
+    if (argv.length != 1) {
       errOut.println("getServiceState: incorrect number of arguments");
       printUsage(errOut, "-getServiceState");
       return -1;
     }
 
-    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(
+    HAServiceProtocol proto = resolveTarget(argv[0]).getProxy(
         getConf(), rpcTimeoutForChecks);
     out.println(proto.getServiceStatus().getState());
     return 0;
@@ -263,26 +343,101 @@ public abstract class HAAdmin extends Configured implements Tool {
       printUsage(errOut);
       return -1;
     }
+    
+    if (!USAGE.containsKey(cmd)) {
+      errOut.println(cmd.substring(1) + ": Unknown command");
+      printUsage(errOut);
+      return -1;
+    }
+    
+    Options opts = new Options();
+
+    // Add command-specific options
+    if ("-failover".equals(cmd)) {
+      addFailoverCliOpts(opts);
+    }
+    // Mutative commands take FORCEMANUAL option
+    if ("-transitionToActive".equals(cmd) ||
+        "-transitionToStandby".equals(cmd) ||
+        "-failover".equals(cmd)) {
+      opts.addOption(FORCEMANUAL, false,
+          "force manual control even if auto-failover is enabled");
+    }
+         
+    CommandLine cmdLine = parseOpts(cmd, opts, argv);
+    if (cmdLine == null) {
+      // error already printed
+      return -1;
+    }
+    
+    if (cmdLine.hasOption(FORCEMANUAL)) {
+      if (!confirmForceManual()) {
+        LOG.fatal("Aborted");
+        return -1;
+      }
+      // Instruct the NNs to honor this request even if they're
+      // configured for manual failover.
+      requestSource = RequestSource.REQUEST_BY_USER_FORCED;
+    }
 
     if ("-transitionToActive".equals(cmd)) {
-      return transitionToActive(argv);
+      return transitionToActive(cmdLine);
     } else if ("-transitionToStandby".equals(cmd)) {
-      return transitionToStandby(argv);
+      return transitionToStandby(cmdLine);
     } else if ("-failover".equals(cmd)) {
-      return failover(argv);
+      return failover(cmdLine);
     } else if ("-getServiceState".equals(cmd)) {
-      return getServiceState(argv);
+      return getServiceState(cmdLine);
     } else if ("-checkHealth".equals(cmd)) {
-      return checkHealth(argv);
+      return checkHealth(cmdLine);
     } else if ("-help".equals(cmd)) {
       return help(argv);
     } else {
-      errOut.println(cmd.substring(1) + ": Unknown command");
-      printUsage(errOut);
-      return -1;
+      // we already checked command validity above, so getting here
+      // would be a coding error
+      throw new AssertionError("Should not get here, command: " + cmd);
     } 
   }
   
+  private boolean confirmForceManual() throws IOException {
+     return ToolRunner.confirmPrompt(
+        "You have specified the " + FORCEMANUAL + " flag. This flag is " +
+        "dangerous, as it can induce a split-brain scenario that WILL " +
+        "CORRUPT your HDFS namespace, possibly irrecoverably.\n" +
+        "\n" +
+        "It is recommended not to use this flag, but instead to shut down the " +
+        "cluster and disable automatic failover if you prefer to manually " +
+        "manage your HA state.\n" +
+        "\n" +
+        "You may abort safely by answering 'n' or hitting ^C now.\n" +
+        "\n" +
+        "Are you sure you want to continue?");
+  }
+
+  /**
+   * Add CLI options which are specific to the failover command and no
+   * others.
+   */
+  private void addFailoverCliOpts(Options failoverOpts) {
+    failoverOpts.addOption(FORCEFENCE, false, "force fencing");
+    failoverOpts.addOption(FORCEACTIVE, false, "force failover");
+    // Don't add FORCEMANUAL, since that's added separately for all commands
+    // that change state.
+  }
+  
+  private CommandLine parseOpts(String cmdName, Options opts, String[] argv) {
+    try {
+      // Strip off the first arg, since that's just the command name
+      argv = Arrays.copyOfRange(argv, 1, argv.length); 
+      return new GnuParser().parse(opts, argv);
+    } catch (ParseException pe) {
+      errOut.println(cmdName.substring(1) +
+          ": incorrect arguments");
+      printUsage(errOut, cmdName);
+      return null;
+    }
+  }
+  
   private int help(String[] argv) {
     if (argv.length != 2) {
       printUsage(errOut, "-help");

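A standalone sketch of the parsing scheme introduced above: strip the command name, then let commons-cli's GnuParser separate flags from positional arguments (the argv values are illustrative):

    import java.util.Arrays;
    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.GnuParser;
    import org.apache.commons.cli.Options;
    import org.apache.commons.cli.ParseException;

    public class OptsSketch {
      public static void main(String[] args) throws ParseException {
        String[] argv = {"-failover", "-forcefence", "nn1", "nn2"};
        Options opts = new Options();
        opts.addOption("forcefence", false, "force fencing");
        opts.addOption("forceactive", false, "force failover");
        opts.addOption("forcemanual", false, "force manual control");
        // Drop argv[0] (the command name) before parsing, as parseOpts does.
        CommandLine cmd = new GnuParser().parse(
            opts, Arrays.copyOfRange(argv, 1, argv.length));
        System.out.println("forcefence=" + cmd.hasOption("forcefence"));
        System.out.println("args=" + Arrays.toString(cmd.getArgs()));
      }
    }
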
+ 29 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java

@@ -60,6 +60,31 @@ public interface HAServiceProtocol {
       return name;
     }
   }
+  
+  public static enum RequestSource {
+    REQUEST_BY_USER,
+    REQUEST_BY_USER_FORCED,
+    REQUEST_BY_ZKFC;
+  }
+  
+  /**
+   * Information describing the source for a request to change state.
+   * This is used to differentiate requests from automatic vs CLI
+   * failover controllers, and in the future may include epoch
+   * information.
+   */
+  public static class StateChangeRequestInfo {
+    private final RequestSource source;
+
+    public StateChangeRequestInfo(RequestSource source) {
+      super();
+      this.source = source;
+    }
+
+    public RequestSource getSource() {
+      return source;
+    }
+  }
 
   /**
    * Monitor the health of service. This periodically called by the HA
@@ -95,7 +120,8 @@ public interface HAServiceProtocol {
    * @throws IOException
    *           if other errors happen
    */
-  public void transitionToActive() throws ServiceFailedException,
+  public void transitionToActive(StateChangeRequestInfo reqInfo)
+                                   throws ServiceFailedException,
                                           AccessControlException,
                                           IOException;
 
@@ -110,7 +136,8 @@ public interface HAServiceProtocol {
    * @throws IOException
    *           if other errors happen
    */
-  public void transitionToStandby() throws ServiceFailedException,
+  public void transitionToStandby(StateChangeRequestInfo reqInfo)
+                                    throws ServiceFailedException,
                                            AccessControlException,
                                            IOException;
 

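The enum plus request-info wrapper let the receiving service make policy decisions based on who asked. A hypothetical server-side guard; the autoFailoverEnabled field and class name are illustrative, not part of this interface:

    import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
    import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
    import org.apache.hadoop.security.AccessControlException;

    public class RequestGuard {
      private final boolean autoFailoverEnabled;  // hypothetical service flag

      public RequestGuard(boolean autoFailoverEnabled) {
        this.autoFailoverEnabled = autoFailoverEnabled;
      }

      /** Reject manual, unforced requests when a ZKFC owns this service. */
      void checkStateChangeAllowed(StateChangeRequestInfo reqInfo)
          throws AccessControlException {
        RequestSource src = reqInfo.getSource();
        if (autoFailoverEnabled &&
            src != RequestSource.REQUEST_BY_ZKFC &&
            src != RequestSource.REQUEST_BY_USER_FORCED) {
          throw new AccessControlException(
              "Manual HA state management is disallowed while " +
              "automatic failover is enabled");
        }
      }
    }
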
+ 9 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java

@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ipc.RemoteException;
 
 /**
@@ -30,7 +31,8 @@ import org.apache.hadoop.ipc.RemoteException;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class HAServiceProtocolHelper {
-  public static void monitorHealth(HAServiceProtocol svc)
+  public static void monitorHealth(HAServiceProtocol svc,
+      StateChangeRequestInfo reqInfo)
       throws IOException {
     try {
       svc.monitorHealth();
@@ -39,19 +41,21 @@ public class HAServiceProtocolHelper {
     }
   }
 
-  public static void transitionToActive(HAServiceProtocol svc)
+  public static void transitionToActive(HAServiceProtocol svc,
+      StateChangeRequestInfo reqInfo)
       throws IOException {
     try {
-      svc.transitionToActive();
+      svc.transitionToActive(reqInfo);
     } catch (RemoteException e) {
       throw e.unwrapRemoteException(ServiceFailedException.class);
    }
  }
 
-  public static void transitionToStandby(HAServiceProtocol svc)
+  public static void transitionToStandby(HAServiceProtocol svc,
+      StateChangeRequestInfo reqInfo)
      throws IOException {
     try {
-      svc.transitionToStandby();
+      svc.transitionToStandby(reqInfo);
     } catch (RemoteException e) {
       throw e.unwrapRemoteException(ServiceFailedException.class);
     }

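The helper's value is the unwrapping: over an IPC proxy, a ServiceFailedException arrives wrapped in a RemoteException, and these static methods restore the specific type so callers can catch it directly. A small hedged usage sketch, with proto and reqInfo assumed constructed as elsewhere in this patch:

    static void makeActive(HAServiceProtocol proto,
        StateChangeRequestInfo reqInfo) throws IOException {
      try {
        HAServiceProtocolHelper.transitionToActive(proto, reqInfo);
      } catch (ServiceFailedException sfe) {
        // Without the helper, this specific type would stay wrapped
        // inside a RemoteException from the IPC layer.
        System.err.println("Transition failed: " + sfe.getLocalizedMessage());
      }
    }
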
+ 27 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ha.protocolPB.ZKFCProtocolClientSideTranslatorPB;
 import org.apache.hadoop.net.NetUtils;
 
 import com.google.common.collect.Maps;
@@ -48,6 +49,11 @@ public abstract class HAServiceTarget {
    */
   public abstract InetSocketAddress getAddress();
 
+  /**
+   * @return the IPC address of the ZKFC on the target node
+   */
+  public abstract InetSocketAddress getZKFCAddress();
+
   /**
    * @return a Fencer implementation configured for this target node
    */
@@ -76,6 +82,20 @@ public abstract class HAServiceTarget {
         confCopy, factory, timeoutMs);
   }
   
+  /**
+   * @return a proxy to the ZKFC which is associated with this HA service.
+   */
+  public ZKFCProtocol getZKFCProxy(Configuration conf, int timeoutMs)
+      throws IOException {
+    Configuration confCopy = new Configuration(conf);
+    // Lower the timeout so we quickly fail to connect
+    confCopy.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
+    SocketFactory factory = NetUtils.getDefaultSocketFactory(confCopy);
+    return new ZKFCProtocolClientSideTranslatorPB(
+        getZKFCAddress(),
+        confCopy, factory, timeoutMs);
+  }
+  
   public final Map<String, String> getFencingParameters() {
     Map<String, String> ret = Maps.newHashMap();
     addFencingParameters(ret);
@@ -99,4 +119,11 @@ public abstract class HAServiceTarget {
     ret.put(HOST_SUBST_KEY, getAddress().getHostName());
     ret.put(PORT_SUBST_KEY, String.valueOf(getAddress().getPort()));
   }
+
+  /**
+   * @return true if auto failover should be considered enabled
+   */
+  public boolean isAutoFailoverEnabled() {
+    return false;
+  }
 }

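Since HAServiceTarget is abstract, each concrete target decides whether a ZKFC manages it; isAutoFailoverEnabled() defaults to false. A hedged sketch of a subclass opting in, declared abstract here so the remaining abstract members (which this diff does not show) can stay elided; the host and port are illustrative:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.ha.HAServiceTarget;

    abstract class AutoHaTargetSketch extends HAServiceTarget {
      @Override
      public InetSocketAddress getZKFCAddress() {
        // Illustrative ZKFC IPC endpoint for this target.
        return new InetSocketAddress("nn1.example.com", 8019);
      }

      @Override
      public boolean isAutoFailoverEnabled() {
        // Advertise that a ZKFC coordinates this service, which makes
        // HAAdmin refuse unforced manual transitions.
        return true;
      }
    }
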
+ 199 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAZKUtil.java

@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Id;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Splitter;
+import com.google.common.collect.Lists;
+import com.google.common.io.Files;
+
+/**
+ * Utilities for working with ZooKeeper.
+ */
+@InterfaceAudience.Private
+public class HAZKUtil {
+  
+  /**
+   * Parse ACL permission string, partially borrowed from
+   * ZooKeeperMain private method
+   */
+  private static int getPermFromString(String permString) {
+    int perm = 0;
+    for (int i = 0; i < permString.length(); i++) {
+      char c = permString.charAt(i); 
+      switch (c) {
+      case 'r':
+        perm |= ZooDefs.Perms.READ;
+        break;
+      case 'w':
+        perm |= ZooDefs.Perms.WRITE;
+        break;
+      case 'c':
+        perm |= ZooDefs.Perms.CREATE;
+        break;
+      case 'd':
+        perm |= ZooDefs.Perms.DELETE;
+        break;
+      case 'a':
+        perm |= ZooDefs.Perms.ADMIN;
+        break;
+      default:
+        throw new BadAclFormatException(
+            "Invalid permission '" + c + "' in permission string '" +
+            permString + "'");
+      }
+    }
+    return perm;
+  }
+
+  /**
+   * Parse comma separated list of ACL entries to secure generated nodes, e.g.
+   * <code>sasl:hdfs/host1@MY.DOMAIN:cdrwa,sasl:hdfs/host2@MY.DOMAIN:cdrwa</code>
+   *
+   * @return ACL list
+   * @throws HadoopIllegalArgumentException if an ACL is invalid
+   */
+  public static List<ACL> parseACLs(String aclString) {
+    List<ACL> acl = Lists.newArrayList();
+    if (aclString == null) {
+      return acl;
+    }
+    
+    List<String> aclComps = Lists.newArrayList(
+        Splitter.on(',').omitEmptyStrings().trimResults()
+        .split(aclString));
+    for (String a : aclComps) {
+      // from ZooKeeperMain private method
+      int firstColon = a.indexOf(':');
+      int lastColon = a.lastIndexOf(':');
+      if (firstColon == -1 || lastColon == -1 || firstColon == lastColon) {
+        throw new BadAclFormatException(
+            "ACL '" + a + "' not of expected form scheme:id:perm");
+      }
+
+      ACL newAcl = new ACL();
+      newAcl.setId(new Id(a.substring(0, firstColon), a.substring(
+          firstColon + 1, lastColon)));
+      newAcl.setPerms(getPermFromString(a.substring(lastColon + 1)));
+      acl.add(newAcl);
+    }
+    
+    return acl;
+  }
+  
+  /**
+   * Parse a comma-separated list of authentication mechanisms. Each
+   * such mechanism should be of the form 'scheme:auth' -- the same
+   * syntax used for the 'addAuth' command in the ZK CLI.
+   * 
+   * @param authString the comma-separated auth mechanisms
+   * @return a list of parsed authentications
+   */
+  public static List<ZKAuthInfo> parseAuth(String authString) {
+    List<ZKAuthInfo> ret = Lists.newArrayList();
+    if (authString == null) {
+      return ret;
+    }
+    
+    List<String> authComps = Lists.newArrayList(
+        Splitter.on(',').omitEmptyStrings().trimResults()
+        .split(authString));
+    
+    for (String comp : authComps) {
+      String parts[] = comp.split(":", 2);
+      if (parts.length != 2) {
+        throw new BadAuthFormatException(
+            "Auth '" + comp + "' not of expected form scheme:auth");
+      }
+      ret.add(new ZKAuthInfo(parts[0],
+          parts[1].getBytes(Charsets.UTF_8)));
+    }
+    return ret;
+  }
+  
+  /**
+   * Because ZK ACLs and authentication information may be secret,
+   * allow the configuration values to be indirected through a file
+   * by specifying the configuration as "@/path/to/file". If this
+   * syntax is used, this function will return the contents of the file
+   * as a String.
+   * 
+   * @param valInConf the value from the Configuration 
+   * @return either the same value, or the contents of the referenced
+   * file if the configured value starts with "@"
+   * @throws IOException if the file cannot be read
+   */
+  public static String resolveConfIndirection(String valInConf)
+      throws IOException {
+    if (valInConf == null) return null;
+    if (!valInConf.startsWith("@")) {
+      return valInConf;
+    }
+    String path = valInConf.substring(1).trim();
+    return Files.toString(new File(path), Charsets.UTF_8).trim();
+  }
+
+  /**
+   * An authentication token passed to ZooKeeper.addAuthInfo
+   */
+  static class ZKAuthInfo {
+    private final String scheme;
+    private final byte[] auth;
+    
+    public ZKAuthInfo(String scheme, byte[] auth) {
+      super();
+      this.scheme = scheme;
+      this.auth = auth;
+    }
+
+    String getScheme() {
+      return scheme;
+    }
+
+    byte[] getAuth() {
+      return auth;
+    }
+  }
+
+  static class BadAclFormatException extends HadoopIllegalArgumentException {
+    private static final long serialVersionUID = 1L;
+
+    public BadAclFormatException(String message) {
+      super(message);
+    }
+  }
+  
+  static class BadAuthFormatException extends HadoopIllegalArgumentException {
+    private static final long serialVersionUID = 1L;
+
+    public BadAuthFormatException(String message) {
+      super(message);
+    }
+  }
+
+}
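
The ZKFC below reads its ZooKeeper ACL and auth settings through these helpers. A minimal sketch of how they compose; the class name and the sample ACL string are illustrative only, and HAZKUtil is a @InterfaceAudience.Private API:

    import org.apache.hadoop.ha.HAZKUtil;
    import org.apache.zookeeper.data.ACL;

    public class ZKAclParseSketch {
      public static void main(String[] args) throws Exception {
        // A value starting with "@" would instead be replaced by the
        // contents of the named file, keeping secrets out of the config.
        String aclConf = HAZKUtil.resolveConfIndirection(
            "sasl:hdfs/host1@MY.DOMAIN:cdrwa,world:anyone:r");
        for (ACL acl : HAZKUtil.parseACLs(aclConf)) {
          // getPerms() is the OR of the ZooDefs.Perms bits parsed from "cdrwa" etc.
          System.out.println(acl.getId().getScheme() + ":"
              + acl.getId().getId() + " -> " + acl.getPerms());
        }
      }
    }

A malformed entry such as "world:anyone" (only one colon) surfaces as BadAclFormatException, which extends HadoopIllegalArgumentException and is therefore unchecked.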

+ 5 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java

@@ -22,6 +22,7 @@ import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -43,7 +44,8 @@ import com.google.common.base.Preconditions;
  * Classes which need callbacks should implement the {@link Callback}
  * interface.
  */
-class HealthMonitor {
+@InterfaceAudience.Private
+public class HealthMonitor {
   private static final Log LOG = LogFactory.getLog(
       HealthMonitor.class);
 
@@ -75,7 +77,8 @@ class HealthMonitor {
   private HAServiceStatus lastServiceState = new HAServiceStatus(
       HAServiceState.INITIALIZING);
   
-  enum State {
+  @InterfaceAudience.Private
+  public enum State {
     /**
      * The health monitor is still starting up.
      */
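
With HealthMonitor and its State enum now public, other daemons can observe health transitions the same way the ZKFC's HealthCallbacks (further down) does. A hedged sketch; the class name is hypothetical, and it sits in org.apache.hadoop.ha because this patch does not widen the nested Callback interface itself:

    package org.apache.hadoop.ha;

    // Hypothetical callback: log each transition reported by a HealthMonitor,
    // registered via monitor.addCallback(new LoggingHealthCallback()).
    class LoggingHealthCallback implements HealthMonitor.Callback {
      @Override
      public void enteredState(HealthMonitor.State newState) {
        System.out.println("Monitored service entered state " + newState);
      }
    }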

+ 101 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCProtocol.java

@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.retry.Idempotent;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.KerberosInfo;
+
+import java.io.IOException;
+
+/**
+ * Protocol exposed by the ZKFailoverController, allowing for graceful
+ * failover.
+ */
+@KerberosInfo(
+    serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface ZKFCProtocol {
+  /**
+   * Initial version of the protocol
+   */
+  public static final long versionID = 1L;
+
+  /**
+   * Request that this service yield from the active node election for the
+   * specified time period.
+   * 
+   * If the node is not currently active, it simply prevents any attempts
+   * to become active for the specified time period. Otherwise, it first
+   * tries to transition the local service to standby state, and then quits
+   * the election.
+   * 
+   * If the attempt to transition to standby succeeds, then the ZKFC receiving
+   * this RPC will delete its own breadcrumb node in ZooKeeper. Thus, the
+   * next node to become active will not run any fencing process. Otherwise,
+   * the breadcrumb will be left, such that the next active will fence this
+   * node.
+   * 
+   * After the specified time period elapses, the node will attempt to re-join
+   * the election, provided that its service is healthy.
+   * 
+   * If the node has previously been instructed to cede active, and is still
+   * within the specified time period, the later command's time period will
+   * take precedence, resetting the timer.
+   * 
+   * A call to cedeActive which specifies a 0 or negative time period will
+   * allow the target node to immediately rejoin the election, so long as
+   * it is healthy.
+   *  
+   * @param millisToCede period for which the node should not attempt to
+   * become active
+   * @throws IOException if the operation fails
+   * @throws AccessControlException if the operation is disallowed
+   */
+  @Idempotent
+  public void cedeActive(int millisToCede)
+      throws IOException, AccessControlException;
+  
+  /**
+   * Request that this node try to become active through a graceful failover.
+   * 
+   * If the node is already active, this is a no-op and simply returns success
+   * without taking any further action.
+   * 
+   * If the node is not healthy, it will throw an exception indicating that it
+   * is not able to become active.
+   * 
+   * If the node is healthy and not active, it will try to initiate a graceful
+   * failover to become active, returning only when it has successfully become
+   * active. See {@link ZKFailoverController#gracefulFailoverToYou()} for the
+   * implementation details.
+   * 
+   * If the node fails to successfully coordinate the failover, throws an
+   * exception indicating the reason for failure.
+   * 
+   * @throws IOException if graceful failover fails
+   * @throws AccessControlException if the operation is disallowed
+   */
+  @Idempotent
+  public void gracefulFailover()
+      throws IOException, AccessControlException;
+}
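
To make the cedeActive() timing rules concrete, here is a hedged sketch of a maintenance helper; the method name and timeout values are illustrative, and getZKFCProxy() is the accessor the failover controller below uses to reach a peer ZKFC:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ha.HAServiceTarget;
    import org.apache.hadoop.ha.ZKFCProtocol;

    public class CedeActiveSketch {
      /** Keep a node out of the active election for 10s, then let it rejoin. */
      static void bounceActiveRole(HAServiceTarget target, Configuration conf)
          throws IOException {
        ZKFCProtocol zkfc = target.getZKFCProxy(conf, 5000 /* rpc timeout, ms */);
        zkfc.cedeActive(10000); // if standby transition succeeds, no fencing
        // ... perform maintenance; a second call would reset the timer ...
        zkfc.cedeActive(-1);    // <= 0 lets the node rejoin at once, if healthy
      }
    }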

+ 98 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java

@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.ZKFCProtocolService;
+import org.apache.hadoop.ha.protocolPB.ZKFCProtocolPB;
+import org.apache.hadoop.ha.protocolPB.ZKFCProtocolServerSideTranslatorPB;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RPC.Server;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+
+import com.google.protobuf.BlockingService;
+
+@InterfaceAudience.LimitedPrivate("HDFS")
+@InterfaceStability.Evolving
+public class ZKFCRpcServer implements ZKFCProtocol {
+
+  private static final int HANDLER_COUNT = 3;
+  private final ZKFailoverController zkfc;
+  private Server server;
+
+  ZKFCRpcServer(Configuration conf,
+      InetSocketAddress bindAddr,
+      ZKFailoverController zkfc,
+      PolicyProvider policy) throws IOException {
+    this.zkfc = zkfc;
+    
+    RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
+        ProtobufRpcEngine.class);
+    ZKFCProtocolServerSideTranslatorPB translator =
+        new ZKFCProtocolServerSideTranslatorPB(this);
+    BlockingService service = ZKFCProtocolService
+        .newReflectiveBlockingService(translator);
+    this.server = RPC.getServer(
+        ZKFCProtocolPB.class,
+        service, bindAddr.getHostName(),
+            bindAddr.getPort(), HANDLER_COUNT, false, conf,
+            null /*secretManager*/);
+    
+    // set service-level authorization security policy
+    if (conf.getBoolean(
+        CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
+      server.refreshServiceAcl(conf, policy);
+    }
+
+  }
+  
+  void start() {
+    this.server.start();
+  }
+
+  public InetSocketAddress getAddress() {
+    return server.getListenerAddress();
+  }
+
+  void stopAndJoin() throws InterruptedException {
+    this.server.stop();
+    this.server.join();
+  }
+  
+  @Override
+  public void cedeActive(int millisToCede) throws IOException,
+      AccessControlException {
+    zkfc.checkRpcAdminAccess();
+    zkfc.cedeActive(millisToCede);
+  }
+
+  @Override
+  public void gracefulFailover() throws IOException, AccessControlException {
+    zkfc.checkRpcAdminAccess();
+    zkfc.gracefulFailoverToYou();
+  }
+
+}
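
Note that both RPC methods funnel through checkRpcAdminAccess() before touching the controller, and refreshServiceAcl() only runs when service-level authorization is switched on. A small hedged sketch of the flag involved (standard Hadoop configuration; the class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class ZkfcAuthzSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // With this set, the ZKFCRpcServer constructor applies the
        // PolicyProvider supplied by the concrete ZKFC via refreshServiceAcl().
        conf.setBoolean(
            CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
      }
    }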

+ 579 - 96
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java

@@ -18,79 +18,143 @@
 package org.apache.hadoop.ha;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.security.PrivilegedAction;
+import java.security.PrivilegedExceptionAction;
+import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo;
 import org.apache.hadoop.ha.HealthMonitor.State;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooDefs.Ids;
+import org.apache.hadoop.util.ToolRunner;
 import org.apache.zookeeper.data.ACL;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 @InterfaceAudience.LimitedPrivate("HDFS")
-public abstract class ZKFailoverController implements Tool {
+public abstract class ZKFailoverController {
 
   static final Log LOG = LogFactory.getLog(ZKFailoverController.class);
   
-  // TODO: this should be namespace-scoped
   public static final String ZK_QUORUM_KEY = "ha.zookeeper.quorum";
   private static final String ZK_SESSION_TIMEOUT_KEY = "ha.zookeeper.session-timeout.ms";
   private static final int ZK_SESSION_TIMEOUT_DEFAULT = 5*1000;
   private static final String ZK_PARENT_ZNODE_KEY = "ha.zookeeper.parent-znode";
+  public static final String ZK_ACL_KEY = "ha.zookeeper.acl";
+  private static final String ZK_ACL_DEFAULT = "world:anyone:rwcda";
+  public static final String ZK_AUTH_KEY = "ha.zookeeper.auth";
   static final String ZK_PARENT_ZNODE_DEFAULT = "/hadoop-ha";
 
+  /**
+   * All of the conf keys used by the ZKFC. This is used in order to allow
+   * them to be overridden on a per-nameservice or per-namenode basis.
+   */
+  protected static final String[] ZKFC_CONF_KEYS = new String[] {
+    ZK_QUORUM_KEY,
+    ZK_SESSION_TIMEOUT_KEY,
+    ZK_PARENT_ZNODE_KEY,
+    ZK_ACL_KEY,
+    ZK_AUTH_KEY
+  };
+  
+
   /** Unable to format the parent znode in ZK */
   static final int ERR_CODE_FORMAT_DENIED = 2;
   /** The parent znode doesn't exist in ZK */
   static final int ERR_CODE_NO_PARENT_ZNODE = 3;
   /** Fencing is not properly configured */
   static final int ERR_CODE_NO_FENCER = 4;
+  /** Automatic failover is not enabled */
+  static final int ERR_CODE_AUTO_FAILOVER_NOT_ENABLED = 5;
+  /** Cannot connect to ZooKeeper */
+  static final int ERR_CODE_NO_ZK = 6;
   
-  private Configuration conf;
+  protected Configuration conf;
+  private String zkQuorum;
+  protected final HAServiceTarget localTarget;
 
   private HealthMonitor healthMonitor;
   private ActiveStandbyElector elector;
-
-  private HAServiceTarget localTarget;
-
-  private String parentZnode;
+  protected ZKFCRpcServer rpcServer;
 
   private State lastHealthState = State.INITIALIZING;
 
   /** Set if a fatal error occurs */
   private String fatalError = null;
 
-  @Override
-  public void setConf(Configuration conf) {
+  /**
+   * A future nanotime before which the ZKFC will not join the election.
+   * This is used during graceful failover.
+   */
+  private long delayJoiningUntilNanotime = 0;
+
+  /** Executor on which {@link #scheduleRecheck(long)} schedules events */
+  private ScheduledExecutorService delayExecutor =
+    Executors.newScheduledThreadPool(1,
+        new ThreadFactoryBuilder().setDaemon(true)
+            .setNameFormat("ZKFC Delay timer #%d")
+            .build());
+
+  private ActiveAttemptRecord lastActiveAttemptRecord;
+  private Object activeAttemptRecordLock = new Object();
+
+  protected ZKFailoverController(Configuration conf, HAServiceTarget localTarget) {
+    this.localTarget = localTarget;
     this.conf = conf;
-    localTarget = getLocalTarget();
   }
   
 
   protected abstract byte[] targetToData(HAServiceTarget target);
-  protected abstract HAServiceTarget getLocalTarget();  
   protected abstract HAServiceTarget dataToTarget(byte[] data);
+  protected abstract void loginAsFCUser() throws IOException;
+  protected abstract void checkRpcAdminAccess()
+      throws AccessControlException, IOException;
+  protected abstract InetSocketAddress getRpcAddressToBindTo();
+  protected abstract PolicyProvider getPolicyProvider();
 
+  /**
+   * Return the name of a znode inside the configured parent znode in which
+   * the ZKFC will do all of its work. This is so that multiple federated
+   * nameservices can run on the same ZK quorum without having to manually
+   * configure them to separate subdirectories.
+   */
+  protected abstract String getScopeInsideParentNode();
 
-  @Override
-  public Configuration getConf() {
-    return conf;
+  public HAServiceTarget getLocalTarget() {
+    return localTarget;
   }
-
-  @Override
+  
   public int run(final String[] args) throws Exception {
-    // TODO: need to hook DFS here to find the NN keytab info, etc,
-    // similar to what DFSHAAdmin does. Annoying that this is in common.
+    if (!localTarget.isAutoFailoverEnabled()) {
+      LOG.fatal("Automatic failover is not enabled for " + localTarget + "." +
+          " Please ensure that automatic failover is enabled in the " +
+          "configuration before running the ZK failover controller.");
+      return ERR_CODE_AUTO_FAILOVER_NOT_ENABLED;
+    }
+    loginAsFCUser();
     try {
       return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
         @Override
@@ -99,6 +163,10 @@ public abstract class ZKFailoverController implements Tool {
             return doRun(args);
           } catch (Exception t) {
             throw new RuntimeException(t);
+          } finally {
+            if (elector != null) {
+              elector.terminateConnection();
+            }
           }
         }
       });
@@ -107,6 +175,7 @@ public abstract class ZKFailoverController implements Tool {
     }
   }
   
+
   private int doRun(String[] args)
       throws HadoopIllegalArgumentException, IOException, InterruptedException {
     initZK();
@@ -129,11 +198,23 @@ public abstract class ZKFailoverController implements Tool {
       }
     }
     
-    if (!elector.parentZNodeExists()) {
-      LOG.fatal("Unable to start failover controller. " +
-          "Parent znode does not exist.\n" +
-          "Run with -formatZK flag to initialize ZooKeeper.");
-      return ERR_CODE_NO_PARENT_ZNODE;
+    try {
+      if (!elector.parentZNodeExists()) {
+        LOG.fatal("Unable to start failover controller. " +
+            "Parent znode does not exist.\n" +
+            "Run with -formatZK flag to initialize ZooKeeper.");
+        return ERR_CODE_NO_PARENT_ZNODE;
+      }
+    } catch (IOException ioe) {
+      if (ioe.getCause() instanceof KeeperException.ConnectionLossException) {
+        LOG.fatal("Unable to start failover controller. Unable to connect " +
+            "to ZooKeeper quorum at " + zkQuorum + ". Please check the " +
+            "configured value for " + ZK_QUORUM_KEY + " and ensure that " +
+            "ZooKeeper is running.");
+        return ERR_CODE_NO_ZK;
+      } else {
+        throw ioe;
+      }
     }
 
     try {
@@ -145,8 +226,18 @@ public abstract class ZKFailoverController implements Tool {
       return ERR_CODE_NO_FENCER;
     }
 
+    initRPC();
     initHM();
-    mainLoop();
+    startRPC();
+    try {
+      mainLoop();
+    } finally {
+      rpcServer.stopAndJoin();
+      
+      elector.quitElection(true);
+      healthMonitor.shutdown();
+      healthMonitor.join();
+    }
     return 0;
   }
 
@@ -181,6 +272,7 @@ public abstract class ZKFailoverController implements Tool {
   }
 
   private boolean confirmFormat() {
+    String parentZnode = getParentZnode();
     System.err.println(
         "===============================================\n" +
         "The configured parent znode " + parentZnode + " already exists.\n" +
@@ -206,16 +298,40 @@ public abstract class ZKFailoverController implements Tool {
     healthMonitor.addCallback(new HealthCallbacks());
     healthMonitor.start();
   }
+  
+  protected void initRPC() throws IOException {
+    InetSocketAddress bindAddr = getRpcAddressToBindTo();
+    rpcServer = new ZKFCRpcServer(conf, bindAddr, this, getPolicyProvider());
+  }
+
+  protected void startRPC() throws IOException {
+    rpcServer.start();
+  }
+
 
   private void initZK() throws HadoopIllegalArgumentException, IOException {
-    String zkQuorum = conf.get(ZK_QUORUM_KEY);
+    zkQuorum = conf.get(ZK_QUORUM_KEY);
     int zkTimeout = conf.getInt(ZK_SESSION_TIMEOUT_KEY,
         ZK_SESSION_TIMEOUT_DEFAULT);
-    parentZnode = conf.get(ZK_PARENT_ZNODE_KEY,
-        ZK_PARENT_ZNODE_DEFAULT);
-    // TODO: need ZK ACL support in config, also maybe auth!
-    List<ACL> zkAcls = Ids.OPEN_ACL_UNSAFE;
+    // Parse ACLs from configuration.
+    String zkAclConf = conf.get(ZK_ACL_KEY, ZK_ACL_DEFAULT);
+    zkAclConf = HAZKUtil.resolveConfIndirection(zkAclConf);
+    List<ACL> zkAcls = HAZKUtil.parseACLs(zkAclConf);
+    if (zkAcls.isEmpty()) {
+      zkAcls = Ids.CREATOR_ALL_ACL;
+    }
+    
+    // Parse authentication from configuration.
+    String zkAuthConf = conf.get(ZK_AUTH_KEY);
+    zkAuthConf = HAZKUtil.resolveConfIndirection(zkAuthConf);
+    List<ZKAuthInfo> zkAuths;
+    if (zkAuthConf != null) {
+      zkAuths = HAZKUtil.parseAuth(zkAuthConf);
+    } else {
+      zkAuths = Collections.emptyList();
+    }
 
+    // Sanity check configuration.
     Preconditions.checkArgument(zkQuorum != null,
         "Missing required configuration '%s' for ZooKeeper quorum",
         ZK_QUORUM_KEY);
@@ -224,9 +340,19 @@ public abstract class ZKFailoverController implements Tool {
     
 
     elector = new ActiveStandbyElector(zkQuorum,
-        zkTimeout, parentZnode, zkAcls, new ElectorCallbacks());
+        zkTimeout, getParentZnode(), zkAcls, zkAuths,
+        new ElectorCallbacks());
   }
   
+  private String getParentZnode() {
+    String znode = conf.get(ZK_PARENT_ZNODE_KEY,
+        ZK_PARENT_ZNODE_DEFAULT);
+    if (!znode.endsWith("/")) {
+      znode += "/";
+    }
+    return znode + getScopeInsideParentNode();
+  }
+
   private synchronized void mainLoop() throws InterruptedException {
     while (fatalError == null) {
       wait();
@@ -242,16 +368,30 @@ public abstract class ZKFailoverController implements Tool {
     notifyAll();
   }
   
-  private synchronized void becomeActive() {
+  private synchronized void becomeActive() throws ServiceFailedException {
     LOG.info("Trying to make " + localTarget + " active...");
     try {
       HAServiceProtocolHelper.transitionToActive(localTarget.getProxy(
-          conf, FailoverController.getRpcTimeoutToNewActive(conf)));
-      LOG.info("Successfully transitioned " + localTarget +
-          " to active state");
+          conf, FailoverController.getRpcTimeoutToNewActive(conf)),
+          createReqInfo());
+      String msg = "Successfully transitioned " + localTarget +
+          " to active state";
+      LOG.info(msg);
+      recordActiveAttempt(new ActiveAttemptRecord(true, msg));
+
     } catch (Throwable t) {
-      LOG.fatal("Couldn't make " + localTarget + " active", t);
-      elector.quitElection(true);
+      String msg = "Couldn't make " + localTarget + " active";
+      LOG.fatal(msg, t);
+      
+      recordActiveAttempt(new ActiveAttemptRecord(false, msg + "\n" +
+          StringUtils.stringifyException(t)));
+
+      if (t instanceof ServiceFailedException) {
+        throw (ServiceFailedException)t;
+      } else {
+        throw new ServiceFailedException("Couldn't transition to active",
+            t);
+      }
 /*
 * TODO:
 * we need to make sure that if we get fenced and then quickly restarted,
@@ -264,12 +404,79 @@ public abstract class ZKFailoverController implements Tool {
     }
   }
 
+  /**
+   * Store the results of the last attempt to become active.
+   * This is used so that, during manually initiated failover,
+   * we can report back the results of the attempt to become active
+   * to the initiator of the failover.
+   */
+  private void recordActiveAttempt(
+      ActiveAttemptRecord record) {
+    synchronized (activeAttemptRecordLock) {
+      lastActiveAttemptRecord = record;
+      activeAttemptRecordLock.notifyAll();
+    }
+  }
+
+  /**
+   * Wait until one of the following events:
+   * <ul>
+   * <li>Another thread publishes the results of an attempt to become active
+   * using {@link #recordActiveAttempt(ActiveAttemptRecord)}</li>
+   * <li>The node enters bad health status</li>
+   * <li>The specified timeout elapses</li>
+   * </ul>
+   * 
+   * @param timeoutMillis number of millis to wait
+   * @return the published record, or null if the timeout elapses or the
+   * service becomes unhealthy 
+   * @throws InterruptedException if the thread is interrupted.
+   */
+  private ActiveAttemptRecord waitForActiveAttempt(int timeoutMillis)
+      throws InterruptedException {
+    long st = System.nanoTime();
+    long waitUntil = st + TimeUnit.NANOSECONDS.convert(
+        timeoutMillis, TimeUnit.MILLISECONDS);
+    
+    do {
+      // periodically check health state, because entering an
+      // unhealthy state could prevent us from ever attempting to
+      // become active. We can detect this and respond to the user
+      // immediately.
+      synchronized (this) {
+        if (lastHealthState != State.SERVICE_HEALTHY) {
+          // early out if service became unhealthy
+          return null;
+        }
+      }
+
+      synchronized (activeAttemptRecordLock) {
+        if ((lastActiveAttemptRecord != null &&
+            lastActiveAttemptRecord.nanoTime >= st)) {
+          return lastActiveAttemptRecord;
+        }
+        // Only wait 1sec so that we periodically recheck the health state
+        // above.
+        activeAttemptRecordLock.wait(1000);
+      }
+    } while (System.nanoTime() < waitUntil);
+    
+    // Timeout elapsed.
+    LOG.warn(timeoutMillis + "ms timeout elapsed waiting for an attempt " +
+        "to become active");
+    return null;
+  }
+
+  private StateChangeRequestInfo createReqInfo() {
+    return new StateChangeRequestInfo(RequestSource.REQUEST_BY_ZKFC);
+  }
+
   private synchronized void becomeStandby() {
     LOG.info("ZK Election indicated that " + localTarget +
         " should become standby");
     try {
       int timeout = FailoverController.getGracefulFenceTimeout(conf);
-      localTarget.getProxy(conf, timeout).transitionToStandby();
+      localTarget.getProxy(conf, timeout).transitionToStandby(createReqInfo());
       LOG.info("Successfully transitioned " + localTarget +
           " to standby state");
     } catch (Exception e) {
@@ -279,27 +486,336 @@ public abstract class ZKFailoverController implements Tool {
       // at the same time.
     }
   }
+  
+
+  private synchronized void fenceOldActive(byte[] data) {
+    HAServiceTarget target = dataToTarget(data);
+    
+    try {
+      doFence(target);
+    } catch (Throwable t) {
+      recordActiveAttempt(new ActiveAttemptRecord(false, "Unable to fence old active: " + StringUtils.stringifyException(t)));
+      Throwables.propagate(t);
+    }
+  }
+  
+  private void doFence(HAServiceTarget target) {
+    LOG.info("Should fence: " + target);
+    boolean gracefulWorked = new FailoverController(conf,
+        RequestSource.REQUEST_BY_ZKFC).tryGracefulFence(target);
+    if (gracefulWorked) {
+      // It's possible that it's in standby but just about to go into active,
+      // no? Is there some race here?
+      LOG.info("Successfully transitioned " + target + " to standby " +
+          "state without fencing");
+      return;
+    }
+    
+    try {
+      target.checkFencingConfigured();
+    } catch (BadFencingConfigurationException e) {
+      LOG.error("Couldn't fence old active " + target, e);
+      recordActiveAttempt(new ActiveAttemptRecord(false, "Unable to fence old active"));
+      throw new RuntimeException(e);
+    }
+    
+    if (!target.getFencer().fence(target)) {
+      throw new RuntimeException("Unable to fence " + target);
+    }
+  }
+
+
+  /**
+   * Request from graceful failover to cede active role. Causes
+   * this ZKFC to transition its local node to standby, then quit
+   * the election for the specified period of time, after which it
+   * will rejoin iff it is healthy.
+   */
+  void cedeActive(final int millisToCede)
+      throws AccessControlException, ServiceFailedException, IOException {
+    try {
+      UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          doCedeActive(millisToCede);
+          return null;
+        }
+      });
+    } catch (InterruptedException e) {
+      throw new IOException(e);
+    }
+  }
+  
+  private void doCedeActive(int millisToCede) 
+      throws AccessControlException, ServiceFailedException, IOException {
+    int timeout = FailoverController.getGracefulFenceTimeout(conf);
+
+    // Lock elector to maintain lock ordering of elector -> ZKFC
+    synchronized (elector) {
+      synchronized (this) {
+        if (millisToCede <= 0) {
+          delayJoiningUntilNanotime = 0;
+          recheckElectability();
+          return;
+        }
+  
+        LOG.info("Requested by " + UserGroupInformation.getCurrentUser() +
+            " at " + Server.getRemoteAddress() + " to cede active role.");
+        boolean needFence = false;
+        try {
+          localTarget.getProxy(conf, timeout).transitionToStandby(createReqInfo());
+          LOG.info("Successfully ensured local node is in standby mode");
+        } catch (IOException ioe) {
+          LOG.warn("Unable to transition local node to standby: " +
+              ioe.getLocalizedMessage());
+          LOG.warn("Quitting election but indicating that fencing is " +
+              "necessary");
+          needFence = true;
+        }
+        delayJoiningUntilNanotime = System.nanoTime() +
+            TimeUnit.MILLISECONDS.toNanos(millisToCede);
+        elector.quitElection(needFence);
+      }
+    }
+    recheckElectability();
+  }
+  
+  /**
+   * Coordinate a graceful failover to this node.
+   * @throws ServiceFailedException if the node fails to become active
+   * @throws IOException some other error occurs
+   */
+  void gracefulFailoverToYou() throws ServiceFailedException, IOException {
+    try {
+      UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          doGracefulFailover();
+          return null;
+        }
+        
+      });
+    } catch (InterruptedException e) {
+      throw new IOException(e);
+    }
+  }
+
+  /**
+   * Coordinate a graceful failover. This proceeds in several phases:
+   * 1) Pre-flight checks: ensure that the local node is healthy, and
+   * thus a candidate for failover.
+   * 2) Determine the current active node. If it is the local node, no
+   * need to failover - return success.
+   * 3) Ask that node to yield from the election for a number of seconds.
+   * 4) Allow the normal election path to run in other threads. Wait until
+   * we either become unhealthy or we see an election attempt recorded by
+   * the normal code path.
+   * 5) Allow the old active to rejoin the election, so a future
+   * failback is possible.
+   */
+  private void doGracefulFailover()
+      throws ServiceFailedException, IOException, InterruptedException {
+    int timeout = FailoverController.getGracefulFenceTimeout(conf) * 2;
+    
+    // Phase 1: pre-flight checks
+    checkEligibleForFailover();
+    
+    // Phase 2: determine old/current active node. Check that we're not
+    // ourselves active, etc.
+    HAServiceTarget oldActive = getCurrentActive();
+    if (oldActive == null) {
+      // No node is currently active. So, if we aren't already
+      // active ourselves by means of a normal election, then there's
+      // probably something preventing us from becoming active.
+      throw new ServiceFailedException(
+          "No other node is currently active.");
+    }
+    
+    if (oldActive.getAddress().equals(localTarget.getAddress())) {
+      LOG.info("Local node " + localTarget + " is already active. " +
+          "No need to failover. Returning success.");
+      return;
+    }
+    
+    // Phase 3: ask the old active to yield from the election.
+    LOG.info("Asking " + oldActive + " to cede its active state for " +
+        timeout + "ms");
+    ZKFCProtocol oldZkfc = oldActive.getZKFCProxy(conf, timeout);
+    oldZkfc.cedeActive(timeout);
+
+    // Phase 4: wait for the normal election to make the local node
+    // active.
+    ActiveAttemptRecord attempt = waitForActiveAttempt(timeout + 60000);
+    
+    if (attempt == null) {
+      // We didn't even make an attempt to become active.
+      synchronized(this) {
+        if (lastHealthState != State.SERVICE_HEALTHY) {
+          throw new ServiceFailedException("Unable to become active. " +
+            "Service became unhealthy while trying to failover.");          
+        }
+      }
+      
+      throw new ServiceFailedException("Unable to become active. " +
+          "Local node did not get an opportunity to do so from ZooKeeper, " +
+          "or the local node took too long to transition to active.");
+    }
+
+    // Phase 5. At this point, we made some attempt to become active. So we
+    // can tell the old active to rejoin if it wants. This allows a quick
+    // fail-back if we immediately crash.
+    oldZkfc.cedeActive(-1);
+    
+    if (attempt.succeeded) {
+      LOG.info("Successfully became active. " + attempt.status);
+    } else {
+      // Propagate failure
+      String msg = "Failed to become active. " + attempt.status;
+      throw new ServiceFailedException(msg);
+    }
+  }
+
+  /**
+   * Ensure that the local node is in a healthy state, and thus
+   * eligible for graceful failover.
+   * @throws ServiceFailedException if the node is unhealthy
+   */
+  private synchronized void checkEligibleForFailover()
+      throws ServiceFailedException {
+    // Check health
+    if (this.getLastHealthState() != State.SERVICE_HEALTHY) {
+      throw new ServiceFailedException(
+          localTarget + " is not currently healthy. " +
+          "Cannot be failover target");
+    }
+  }
+
+  /**
+   * @return an {@link HAServiceTarget} for the current active node
+   * in the cluster, or null if no node is active.
+   * @throws IOException if a ZK-related issue occurs
+   * @throws InterruptedException if thread is interrupted 
+   */
+  private HAServiceTarget getCurrentActive()
+      throws IOException, InterruptedException {
+    synchronized (elector) {
+      synchronized (this) {
+        byte[] activeData;
+        try {
+          activeData = elector.getActiveData();
+        } catch (ActiveNotFoundException e) {
+          return null;
+        } catch (KeeperException ke) {
+          throw new IOException(
+              "Unexpected ZooKeeper issue fetching active node info", ke);
+        }
+        
+        HAServiceTarget oldActive = dataToTarget(activeData);
+        return oldActive;
+      }
+    }
+  }
+
+  /**
+   * Check the current state of the service, and join the election
+   * if it should be in the election.
+   */
+  private void recheckElectability() {
+    // Maintain lock ordering of elector -> ZKFC
+    synchronized (elector) {
+      synchronized (this) {
+        boolean healthy = lastHealthState == State.SERVICE_HEALTHY;
+    
+        long remainingDelay = delayJoiningUntilNanotime - System.nanoTime(); 
+        if (remainingDelay > 0) {
+          if (healthy) {
+            LOG.info("Would have joined master election, but this node is " +
+                "prohibited from doing so for " +
+                TimeUnit.NANOSECONDS.toMillis(remainingDelay) + " more ms");
+          }
+          scheduleRecheck(remainingDelay);
+          return;
+        }
+    
+        switch (lastHealthState) {
+        case SERVICE_HEALTHY:
+          elector.joinElection(targetToData(localTarget));
+          break;
+          
+        case INITIALIZING:
+          LOG.info("Ensuring that " + localTarget + " does not " +
+              "participate in active master election");
+          elector.quitElection(false);
+          break;
+    
+        case SERVICE_UNHEALTHY:
+        case SERVICE_NOT_RESPONDING:
+          LOG.info("Quitting master election for " + localTarget +
+              " and marking that fencing is necessary");
+          elector.quitElection(true);
+          break;
+          
+        case HEALTH_MONITOR_FAILED:
+          fatalError("Health monitor failed!");
+          break;
+          
+        default:
+          throw new IllegalArgumentException("Unhandled state: " + lastHealthState);
+        }
+      }
+    }
+  }
+  
+  /**
+   * Schedule a call to {@link #recheckElectability()} in the future.
+   */
+  private void scheduleRecheck(long whenNanos) {
+    delayExecutor.schedule(
+        new Runnable() {
+          @Override
+          public void run() {
+            try {
+              recheckElectability();
+            } catch (Throwable t) {
+              fatalError("Failed to recheck electability: " +
+                  StringUtils.stringifyException(t));
+            }
+          }
+        },
+        whenNanos, TimeUnit.NANOSECONDS);
+  }
 
   /**
    * @return the last health state passed to the FC
    * by the HealthMonitor.
    */
   @VisibleForTesting
-  State getLastHealthState() {
+  synchronized State getLastHealthState() {
     return lastHealthState;
   }
+
+  private synchronized void setLastHealthState(HealthMonitor.State newState) {
+    LOG.info("Local service " + localTarget +
+        " entered state: " + newState);
+    lastHealthState = newState;
+  }
   
   @VisibleForTesting
   ActiveStandbyElector getElectorForTests() {
     return elector;
   }
+  
+  @VisibleForTesting
+  ZKFCRpcServer getRpcServerForTests() {
+    return rpcServer;
+  }
 
   /**
    * Callbacks from elector
    */
   class ElectorCallbacks implements ActiveStandbyElectorCallback {
     @Override
-    public void becomeActive() {
+    public void becomeActive() throws ServiceFailedException {
       ZKFailoverController.this.becomeActive();
     }
 
@@ -319,31 +835,13 @@ public abstract class ZKFailoverController implements Tool {
 
     @Override
     public void fenceOldActive(byte[] data) {
-      HAServiceTarget target = dataToTarget(data);
-      
-      LOG.info("Should fence: " + target);
-      boolean gracefulWorked = new FailoverController(conf)
-          .tryGracefulFence(target);
-      if (gracefulWorked) {
-        // It's possible that it's in standby but just about to go into active,
-        // no? Is there some race here?
-        LOG.info("Successfully transitioned " + target + " to standby " +
-            "state without fencing");
-        return;
-      }
-      
-      try {
-        target.checkFencingConfigured();
-      } catch (BadFencingConfigurationException e) {
-        LOG.error("Couldn't fence old active " + target, e);
-        // TODO: see below todo
-        throw new RuntimeException(e);
-      }
-      
-      if (!target.getFencer().fence(target)) {
-        // TODO: this will end up in some kind of tight loop,
-        // won't it? We need some kind of backoff
-        throw new RuntimeException("Unable to fence " + target);
+      ZKFailoverController.this.fenceOldActive(data);
+    }
+    
+    @Override
+    public String toString() {
+      synchronized (ZKFailoverController.this) {
+        return "Elector callbacks for " + localTarget;
       }
     }
   }
@@ -354,36 +852,21 @@ public abstract class ZKFailoverController implements Tool {
   class HealthCallbacks implements HealthMonitor.Callback {
     @Override
     public void enteredState(HealthMonitor.State newState) {
-      LOG.info("Local service " + localTarget +
-          " entered state: " + newState);
-      switch (newState) {
-      case SERVICE_HEALTHY:
-        LOG.info("Joining master election for " + localTarget);
-        elector.joinElection(targetToData(localTarget));
-        break;
-        
-      case INITIALIZING:
-        LOG.info("Ensuring that " + localTarget + " does not " +
-            "participate in active master election");
-        elector.quitElection(false);
-        break;
-
-      case SERVICE_UNHEALTHY:
-      case SERVICE_NOT_RESPONDING:
-        LOG.info("Quitting master election for " + localTarget +
-            " and marking that fencing is necessary");
-        elector.quitElection(true);
-        break;
-        
-      case HEALTH_MONITOR_FAILED:
-        fatalError("Health monitor failed!");
-        break;
-        
-      default:
-        throw new IllegalArgumentException("Unhandled state:" + newState);
-      }
-      
-      lastHealthState = newState;
+      setLastHealthState(newState);
+      recheckElectability();
     }
   }
+  
+  private static class ActiveAttemptRecord {
+    private final boolean succeeded;
+    private final String status;
+    private final long nanoTime;
+    
+    public ActiveAttemptRecord(boolean succeeded, String status) {
+      this.succeeded = succeeded;
+      this.status = status;
+      this.nanoTime = System.nanoTime();
+    }
+  }
+
 }
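
The new ZK_ACL_KEY and ZK_AUTH_KEY settings accept the same "@file" indirection that HAZKUtil resolves, and getParentZnode() appends the nameservice scope automatically. A hedged configuration sketch; the quorum hosts, digest value, and file path are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ha.ZKFailoverController;

    public class ZkfcConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(ZKFailoverController.ZK_QUORUM_KEY,
            "zk1:2181,zk2:2181,zk3:2181");
        // Lock the election znodes down to a digest identity; initZK()
        // parses this with HAZKUtil.parseACLs().
        conf.set(ZKFailoverController.ZK_ACL_KEY,
            "digest:zkfcs:PLACEHOLDERHASH=:rwcda");
        // "@file" keeps the matching secret out of the config file itself.
        conf.set(ZKFailoverController.ZK_AUTH_KEY, "@/etc/hadoop/zkfc-auth");
      }
    }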

+ 34 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java

@@ -30,13 +30,14 @@ import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto;
+import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto;
+import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -57,10 +58,6 @@ public class HAServiceProtocolClientSideTranslatorPB implements
   private final static RpcController NULL_CONTROLLER = null;
   private final static MonitorHealthRequestProto MONITOR_HEALTH_REQ = 
       MonitorHealthRequestProto.newBuilder().build();
-  private final static TransitionToActiveRequestProto TRANSITION_TO_ACTIVE_REQ = 
-      TransitionToActiveRequestProto.newBuilder().build();
-  private final static TransitionToStandbyRequestProto TRANSITION_TO_STANDBY_REQ = 
-      TransitionToStandbyRequestProto.newBuilder().build();
   private final static GetServiceStatusRequestProto GET_SERVICE_STATUS_REQ = 
       GetServiceStatusRequestProto.newBuilder().build();
   
@@ -94,18 +91,25 @@ public class HAServiceProtocolClientSideTranslatorPB implements
   }
 
   @Override
-  public void transitionToActive() throws IOException {
+  public void transitionToActive(StateChangeRequestInfo reqInfo) throws IOException {
     try {
-      rpcProxy.transitionToActive(NULL_CONTROLLER, TRANSITION_TO_ACTIVE_REQ);
+      TransitionToActiveRequestProto req =
+          TransitionToActiveRequestProto.newBuilder()
+            .setReqInfo(convert(reqInfo)).build();
+
+      rpcProxy.transitionToActive(NULL_CONTROLLER, req);
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
   }
 
   @Override
-  public void transitionToStandby() throws IOException {
+  public void transitionToStandby(StateChangeRequestInfo reqInfo) throws IOException {
     try {
-      rpcProxy.transitionToStandby(NULL_CONTROLLER, TRANSITION_TO_STANDBY_REQ);
+      TransitionToStandbyRequestProto req =
+        TransitionToStandbyRequestProto.newBuilder()
+          .setReqInfo(convert(reqInfo)).build();
+      rpcProxy.transitionToStandby(NULL_CONTROLLER, req);
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -143,6 +147,27 @@ public class HAServiceProtocolClientSideTranslatorPB implements
     }
   }
   
+  private HAStateChangeRequestInfoProto convert(StateChangeRequestInfo reqInfo) {
+    HARequestSource src;
+    switch (reqInfo.getSource()) {
+    case REQUEST_BY_USER:
+      src = HARequestSource.REQUEST_BY_USER;
+      break;
+    case REQUEST_BY_USER_FORCED:
+      src = HARequestSource.REQUEST_BY_USER_FORCED;
+      break;
+    case REQUEST_BY_ZKFC:
+      src = HARequestSource.REQUEST_BY_ZKFC;
+      break;
+    default:
+      throw new IllegalArgumentException("Bad source: " + reqInfo.getSource());
+    }
+    return HAStateChangeRequestInfoProto.newBuilder()
+        .setReqSource(src)
+        .build();
+  }
+
+
   @Override
   public void close() {
     RPC.stopProxy(rpcProxy);
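
Callers of the translated protocol now have to say who is requesting the transition. A hedged sketch of driving the new signature; the helper method is illustrative, and the enum value mirrors the convert() mapping above:

    import java.io.IOException;

    import org.apache.hadoop.ha.HAServiceProtocol;
    import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
    import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;

    public class ReqInfoSketch {
      /** Request an active transition attributed to a human operator. */
      static void makeActive(HAServiceProtocol proxy) throws IOException {
        StateChangeRequestInfo reqInfo =
            new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER);
        proxy.transitionToActive(reqInfo); // serialized via convert() above
      }
    }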

+ 29 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java

@@ -19,12 +19,17 @@ package org.apache.hadoop.ha.protocolPB;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto;
+import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto;
@@ -56,6 +61,8 @@ public class HAServiceProtocolServerSideTranslatorPB implements
       TransitionToActiveResponseProto.newBuilder().build();
   private static final TransitionToStandbyResponseProto TRANSITION_TO_STANDBY_RESP = 
       TransitionToStandbyResponseProto.newBuilder().build();
+  private static final Log LOG = LogFactory.getLog(
+      HAServiceProtocolServerSideTranslatorPB.class);
   
   public HAServiceProtocolServerSideTranslatorPB(HAServiceProtocol server) {
     this.server = server;
@@ -71,13 +78,33 @@ public class HAServiceProtocolServerSideTranslatorPB implements
       throw new ServiceException(e);
     }
   }
+  
+  private StateChangeRequestInfo convert(HAStateChangeRequestInfoProto proto) {
+    RequestSource src;
+    switch (proto.getReqSource()) {
+    case REQUEST_BY_USER:
+      src = RequestSource.REQUEST_BY_USER;
+      break;
+    case REQUEST_BY_USER_FORCED:
+      src = RequestSource.REQUEST_BY_USER_FORCED;
+      break;
+    case REQUEST_BY_ZKFC:
+      src = RequestSource.REQUEST_BY_ZKFC;
+      break;
+    default:
+      LOG.warn("Unknown request source: " + proto.getReqSource());
+      src = null;
+    }
+    
+    return new StateChangeRequestInfo(src);
+  }
 
   @Override
   public TransitionToActiveResponseProto transitionToActive(
       RpcController controller, TransitionToActiveRequestProto request)
       throws ServiceException {
     try {
-      server.transitionToActive();
+      server.transitionToActive(convert(request.getReqInfo()));
       return TRANSITION_TO_ACTIVE_RESP;
     } catch(IOException e) {
       throw new ServiceException(e);
@@ -89,7 +116,7 @@ public class HAServiceProtocolServerSideTranslatorPB implements
       RpcController controller, TransitionToStandbyRequestProto request)
       throws ServiceException {
     try {
-      server.transitionToStandby();
+      server.transitionToStandby(convert(request.getReqInfo()));
       return TRANSITION_TO_STANDBY_RESP;
     } catch(IOException e) {
       throw new ServiceException(e);

+ 90 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java

@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha.protocolPB;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import javax.net.SocketFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.ZKFCProtocol;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.CedeActiveRequestProto;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.GracefulFailoverRequestProto;
+import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtocolTranslator;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+
+public class ZKFCProtocolClientSideTranslatorPB implements
+  ZKFCProtocol, Closeable, ProtocolTranslator {
+
+  private final static RpcController NULL_CONTROLLER = null;
+  private final ZKFCProtocolPB rpcProxy;
+
+  public ZKFCProtocolClientSideTranslatorPB(
+      InetSocketAddress addr, Configuration conf,
+      SocketFactory socketFactory, int timeout) throws IOException {
+    RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
+        ProtobufRpcEngine.class);
+    rpcProxy = RPC.getProxy(ZKFCProtocolPB.class,
+        RPC.getProtocolVersion(ZKFCProtocolPB.class), addr,
+        UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout);
+  }
+
+  @Override
+  public void cedeActive(int millisToCede) throws IOException,
+      AccessControlException {
+    try {
+      CedeActiveRequestProto req = CedeActiveRequestProto.newBuilder()
+          .setMillisToCede(millisToCede)
+          .build();
+      rpcProxy.cedeActive(NULL_CONTROLLER, req);      
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void gracefulFailover() throws IOException, AccessControlException {
+    try {
+      rpcProxy.gracefulFailover(NULL_CONTROLLER,
+          GracefulFailoverRequestProto.getDefaultInstance());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+
+  @Override
+  public void close() {
+    RPC.stopProxy(rpcProxy);
+  }
+
+  @Override
+  public Object getUnderlyingProxyObject() {
+    return rpcProxy;
+  }
+}

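Editor's note: a minimal usage sketch of the new client translator, not part of the patch. The ZKFC address, port, and timeout below are assumed values.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.protocolPB.ZKFCProtocolClientSideTranslatorPB;
import org.apache.hadoop.net.NetUtils;

public class CedeActiveExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // 8019 is an assumed ZKFC RPC address; substitute the real one.
    InetSocketAddress addr = new InetSocketAddress("localhost", 8019);
    ZKFCProtocolClientSideTranslatorPB zkfc =
        new ZKFCProtocolClientSideTranslatorPB(addr, conf,
            NetUtils.getDefaultSocketFactory(conf), 15000 /* timeout, ms */);
    try {
      zkfc.cedeActive(10000); // drop out of the election for 10 seconds
    } finally {
      zkfc.close();           // stops the underlying RPC proxy
    }
  }
}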
+ 18 - 15
hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNProtocol.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolPB.java

@@ -15,22 +15,25 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+package org.apache.hadoop.ha.protocolPB;
 
-package org.apache.hadoop.hdfs.test.system;
-
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.ZKFCProtocolService;
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.test.system.DaemonProtocol;
 
-/**
- * Client side API exposed from Namenode.
- * Actual implementations are likely to be injected
- *
- * The protocol has to be annotated so KerberosInfo can be filled in during
- * creation of a ipc.Client connection
- */
 @KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
-public interface NNProtocol extends DaemonProtocol {
-  public static final long versionID = 1L;
-}
+    serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
+@ProtocolInfo(protocolName = "org.apache.hadoop.ha.ZKFCProtocol", 
+    protocolVersion = 1)
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface ZKFCProtocolPB extends
+    ZKFCProtocolService.BlockingInterface, VersionedProtocol {
+  /**
+   * If any methods need annotation, it can be added here
+   */
+}

+ 88 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolServerSideTranslatorPB.java

@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha.protocolPB;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ha.ZKFCProtocol;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.CedeActiveRequestProto;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.CedeActiveResponseProto;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.GracefulFailoverRequestProto;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.GracefulFailoverResponseProto;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class ZKFCProtocolServerSideTranslatorPB implements
+    ZKFCProtocolPB {
+  private final ZKFCProtocol server;
+  
+  public ZKFCProtocolServerSideTranslatorPB(ZKFCProtocol server) {
+    this.server = server;
+  }
+
+  @Override
+  public CedeActiveResponseProto cedeActive(RpcController controller,
+      CedeActiveRequestProto request) throws ServiceException {
+    try {
+      server.cedeActive(request.getMillisToCede());
+      return CedeActiveResponseProto.getDefaultInstance();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public GracefulFailoverResponseProto gracefulFailover(
+      RpcController controller, GracefulFailoverRequestProto request)
+      throws ServiceException {
+    try {
+      server.gracefulFailover();
+      return GracefulFailoverResponseProto.getDefaultInstance();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public long getProtocolVersion(String protocol, long clientVersion)
+      throws IOException {
+    return RPC.getProtocolVersion(ZKFCProtocolPB.class);
+  }
+
+  @Override
+  public ProtocolSignature getProtocolSignature(String protocol,
+      long clientVersion, int clientMethodsHash) throws IOException {
+    if (!protocol.equals(RPC.getProtocolName(ZKFCProtocolPB.class))) {
+      throw new IOException("Serverside implements " +
+          RPC.getProtocolName(ZKFCProtocolPB.class) +
+          ". The following requested protocol is unknown: " + protocol);
+    }
+
+    return ProtocolSignature.getProtocolSignature(clientMethodsHash,
+        RPC.getProtocolVersion(ZKFCProtocolPB.class),
+        HAServiceProtocolPB.class);
+  }
+
+}

+ 25 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -96,7 +96,7 @@ public class HttpServer implements FilterContainer {
   // The ServletContext attribute where the daemon Configuration
   // gets stored.
   public static final String CONF_CONTEXT_ATTRIBUTE = "hadoop.conf";
-  static final String ADMINS_ACL = "admins.acl";
+  public static final String ADMINS_ACL = "admins.acl";
   public static final String SPNEGO_FILTER = "SpnegoFilter";
 
   public static final String BIND_ADDRESS = "bind.address";
@@ -792,7 +792,7 @@ public class HttpServer implements FilterContainer {
    * 
    * @param servletContext
    * @param request
-   * @param response
+   * @param response used to send the error response if user does not have admin access.
    * @return true if admin-authorized, false otherwise
    * @throws IOException
    */
@@ -814,18 +814,33 @@ public class HttpServer implements FilterContainer {
                          "authorized to access this page.");
                          "authorized to access this page.");
       return false;
       return false;
     }
     }
+    
+    if (servletContext.getAttribute(ADMINS_ACL) != null &&
+        !userHasAdministratorAccess(servletContext, remoteUser)) {
+      response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User "
+          + remoteUser + " is unauthorized to access this page.");
+      return false;
+    }
+
+    return true;
+  }
+
+  /**
+   * Get the admin ACLs from the given ServletContext and check if the given
+   * user is in the ACL.
+   * 
+   * @param servletContext the context containing the admin ACL.
+   * @param remoteUser the remote user to check for.
+   * @return true if the user is present in the ACL, false if no ACL is set or
+   *         the user is not present
+   */
+  public static boolean userHasAdministratorAccess(ServletContext servletContext,
+      String remoteUser) {
     AccessControlList adminsAcl = (AccessControlList) servletContext
     AccessControlList adminsAcl = (AccessControlList) servletContext
         .getAttribute(ADMINS_ACL);
         .getAttribute(ADMINS_ACL);
     UserGroupInformation remoteUserUGI =
     UserGroupInformation remoteUserUGI =
         UserGroupInformation.createRemoteUser(remoteUser);
         UserGroupInformation.createRemoteUser(remoteUser);
-    if (adminsAcl != null) {
-      if (!adminsAcl.isUserAllowed(remoteUserUGI)) {
-        response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User "
-            + remoteUser + " is unauthorized to access this page.");
-        return false;
-      }
-    }
-    return true;
+    return adminsAcl != null && adminsAcl.isUserAllowed(remoteUserUGI);
   }
   }
 
 
   /**
   /**

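Editor's note: a hypothetical servlet fragment showing how the newly public userHasAdministratorAccess helper might gate an admin-only page; the servlet itself is an illustration, not part of the patch.

import java.io.IOException;

import javax.servlet.ServletContext;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.hadoop.http.HttpServer;

public class AdminOnlyServlet extends HttpServlet {
  @Override
  protected void doGet(HttpServletRequest req, HttpServletResponse resp)
      throws IOException {
    ServletContext ctx = getServletContext();
    String user = req.getRemoteUser();
    // Returns false when no ACL is configured or the user is not in it.
    if (user == null || !HttpServer.userHasAdministratorAccess(ctx, user)) {
      resp.sendError(HttpServletResponse.SC_UNAUTHORIZED,
          "Admin access required");
      return;
    }
    resp.getWriter().println("hello, admin");
  }
}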
+ 8 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java

@@ -37,15 +37,15 @@ import org.apache.hadoop.http.FilterInitializer;
 
 import javax.servlet.Filter;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
+
 /**
  * Provides a servlet filter that pretends to authenticate a fake user (Dr.Who)
  * so that the web UI is usable for a secure cluster without authentication.
  */
 public class StaticUserWebFilter extends FilterInitializer {
   static final String DEPRECATED_UGI_KEY = "dfs.web.ugi";
-  
-  static final String USERNAME_KEY = "hadoop.http.staticuser.user";
-  static final String USERNAME_DEFAULT = "dr.who";
 
   private static final Log LOG = LogFactory.getLog(StaticUserWebFilter.class);
 
@@ -112,7 +112,7 @@ public class StaticUserWebFilter extends FilterInitializer {
 
     @Override
     public void init(FilterConfig conf) throws ServletException {
-      this.username = conf.getInitParameter(USERNAME_KEY);
+      this.username = conf.getInitParameter(HADOOP_HTTP_STATIC_USER);
       this.user = new User(username);
     }
     
@@ -123,7 +123,7 @@ public class StaticUserWebFilter extends FilterInitializer {
     HashMap<String, String> options = new HashMap<String, String>();
     
     String username = getUsernameFromConf(conf);
-    options.put(USERNAME_KEY, username);
+    options.put(HADOOP_HTTP_STATIC_USER, username);
 
     container.addFilter("static_user_filter", 
                         StaticUserFilter.class.getName(), 
@@ -139,11 +139,12 @@ public class StaticUserWebFilter extends FilterInitializer {
       // We can't use the normal configuration deprecation mechanism here
       // since we need to split out the username from the configured UGI.
       LOG.warn(DEPRECATED_UGI_KEY + " should not be used. Instead, use " + 
-               USERNAME_KEY + ".");
+          HADOOP_HTTP_STATIC_USER + ".");
       String[] parts = oldStyleUgi.split(",");
       return parts[0];
     } else {
-      return conf.get(USERNAME_KEY, USERNAME_DEFAULT);
+      return conf.get(HADOOP_HTTP_STATIC_USER,
+        DEFAULT_HADOOP_HTTP_STATIC_USER);
     }
   }
 

+ 20 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java

@@ -807,7 +807,7 @@ public class SequenceFile {
   }
   
   /** Write key/value pairs to a sequence-format file. */
-  public static class Writer implements java.io.Closeable {
+  public static class Writer implements java.io.Closeable, Syncable {
     private Configuration conf;
     FSDataOutputStream out;
     boolean ownOutputStream = true;
@@ -1193,13 +1193,31 @@ public class SequenceFile {
       }
     }
 
-    /** flush all currently written data to the file system */
+    /**
+     * flush all currently written data to the file system
+     * @deprecated Use {@link #hsync()} or {@link #hflush()} instead
+     */
+    @Deprecated
     public void syncFs() throws IOException {
       if (out != null) {
         out.hflush();  // flush contents to file system
       }
     }
 
+    @Override
+    public void hsync() throws IOException {
+      if (out != null) {
+        out.hsync();
+      }
+    }
+
+    @Override
+    public void hflush() throws IOException {
+      if (out != null) {
+        out.hflush();
+      }
+    }
+    
     /** Returns the configuration of this file. */
     Configuration getConf() { return conf; }
     

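Editor's note: a short sketch of the new Syncable surface on SequenceFile.Writer. hflush() pushes buffered data toward the filesystem (what the deprecated syncFs() did) and hsync() additionally requests a durable sync where the underlying stream supports it. The path and key/value types are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class SyncableWriterExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    SequenceFile.Writer writer = SequenceFile.createWriter(
        fs, conf, new Path("/tmp/example.seq"),  // assumed path
        IntWritable.class, Text.class);
    try {
      writer.append(new IntWritable(1), new Text("one"));
      writer.hflush();  // replaces the now-deprecated syncFs()
      writer.append(new IntWritable(2), new Text("two"));
      writer.hsync();   // stronger durability, where supported
    } finally {
      writer.close();
    }
  }
}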
+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java

@@ -236,6 +236,11 @@ public class Text extends BinaryComparable
 
   /**
    * Clear the string to empty.
+   *
+   * <em>Note</em>: For performance reasons, this call does not clear the
+   * underlying byte array that is retrievable via {@link #getBytes()}.
+   * In order to free the byte-array memory, call {@link #set(byte[])}
+   * with an empty byte array (For example, <code>new byte[0]</code>).
    */
   public void clear() {
     length = 0;

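Editor's note: the caveat in that javadoc is easy to demonstrate; the sketch below is an illustration, not part of the patch.

import org.apache.hadoop.io.Text;

public class TextClearExample {
  public static void main(String[] args) {
    Text t = new Text("a reasonably long string that allocates a buffer");
    t.clear();
    // Logical length is 0, but the backing array is still allocated.
    System.out.println("length after clear: " + t.getLength());
    System.out.println("backing array size: " + t.getBytes().length);
    // Per the javadoc above, setting an empty byte array is the
    // documented way to let the old buffer be reclaimed.
    t.set(new byte[0]);
  }
}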
+ 7 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Writable.java

@@ -39,20 +39,23 @@ import org.apache.hadoop.classification.InterfaceStability;
  * <p>Example:</p>
  * <p><blockquote><pre>
  *     public class MyWritable implements Writable {
- *       // Some data     
+ *       // Some data
  *       private int counter;
  *       private long timestamp;
- *       
+ *
+ *       // Default constructor to allow (de)serialization
+ *       MyWritable() { }
+ *
  *       public void write(DataOutput out) throws IOException {
  *         out.writeInt(counter);
  *         out.writeLong(timestamp);
  *       }
- *       
+ *
  *       public void readFields(DataInput in) throws IOException {
  *         counter = in.readInt();
  *         timestamp = in.readLong();
  *       }
- *       
+ *
  *       public static MyWritable read(DataInput in) throws IOException {
  *         MyWritable w = new MyWritable();
  *         w.readFields(in);

+ 6 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java

@@ -109,8 +109,12 @@ public class CompressionCodecFactory {
     List<Class<? extends CompressionCodec>> result
       = new ArrayList<Class<? extends CompressionCodec>>();
     // Add codec classes discovered via service loading
-    for (CompressionCodec codec : CODEC_PROVIDERS) {
-      result.add(codec.getClass());
+    synchronized (CODEC_PROVIDERS) {
+      // CODEC_PROVIDERS is a lazy collection. Synchronize so it is
+      // thread-safe. See HADOOP-8406.
+      for (CompressionCodec codec : CODEC_PROVIDERS) {
+        result.add(codec.getClass());
+      }
     }
     // Add codec classes from configuration
     String codecsString = conf.get("io.compression.codecs");

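Editor's note: the pattern being fixed here, in isolation. ServiceLoader populates its provider list lazily and is not thread-safe, so concurrent iteration must be serialized on the loader (HADOOP-8406). A minimal sketch of the same idiom:

import java.util.ArrayList;
import java.util.List;
import java.util.ServiceLoader;

import org.apache.hadoop.io.compress.CompressionCodec;

public class CodecDiscovery {
  private static final ServiceLoader<CompressionCodec> CODEC_PROVIDERS =
      ServiceLoader.load(CompressionCodec.class);

  public static List<Class<? extends CompressionCodec>> discoveredCodecs() {
    List<Class<? extends CompressionCodec>> result =
        new ArrayList<Class<? extends CompressionCodec>>();
    // Iteration may trigger lazy provider loading inside the ServiceLoader,
    // so it must not run concurrently from multiple threads.
    synchronized (CODEC_PROVIDERS) {
      for (CompressionCodec codec : CODEC_PROVIDERS) {
        result.add(codec.getClass());
      }
    }
    return result;
  }
}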
+ 13 - 11
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -53,6 +53,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadOperationProto;
+import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto;
+import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
@@ -845,24 +847,24 @@ public class Client {
       touch();
       
       try {
-        int id = in.readInt();                    // try to read an id
-
+        RpcResponseHeaderProto response = 
+            RpcResponseHeaderProto.parseDelimitedFrom(in);
+        int callId = response.getCallId();
         if (LOG.isDebugEnabled())
-          LOG.debug(getName() + " got value #" + id);
-
-        Call call = calls.get(id);
+          LOG.debug(getName() + " got value #" + callId);
 
-        int state = in.readInt();     // read call status
-        if (state == Status.SUCCESS.state) {
+        Call call = calls.get(callId);
+        RpcStatusProto status = response.getStatus();
+        if (status == RpcStatusProto.SUCCESS) {
           Writable value = ReflectionUtils.newInstance(valueClass, conf);
           value.readFields(in);                 // read value
           call.setRpcResponse(value);
-          calls.remove(id);
-        } else if (state == Status.ERROR.state) {
+          calls.remove(callId);
+        } else if (status == RpcStatusProto.ERROR) {
           call.setException(new RemoteException(WritableUtils.readString(in),
                                                 WritableUtils.readString(in)));
-          calls.remove(id);
-        } else if (state == Status.FATAL.state) {
+          calls.remove(callId);
+        } else if (status == RpcStatusProto.FATAL) {
           // Close the connection
           markClosed(new RemoteException(WritableUtils.readString(in), 
                                          WritableUtils.readString(in)));

+ 27 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java

@@ -396,24 +396,44 @@ public class ProtobufRpcEngine implements RpcEngine {
        * it is.</li>
        * </ol>
        */
-      public Writable call(RPC.Server server, String protocol,
+      public Writable call(RPC.Server server, String connectionProtocolName,
           Writable writableRequest, long receiveTime) throws Exception {
         RpcRequestWritable request = (RpcRequestWritable) writableRequest;
         HadoopRpcRequestProto rpcRequest = request.message;
         String methodName = rpcRequest.getMethodName();
-        String protoName = rpcRequest.getDeclaringClassProtocolName();
+        
+        
+        /** 
+         * RPCs for a particular interface (ie protocol) are done using an
+         * IPC connection that is set up using rpcProxy.
+         * The rpcProxy has a declared protocol name that is 
+         * sent from client to server at connection time. 
+         * 
+         * Each RPC call also sends a protocol name 
+         * (called declaringClassProtocolName). This name is usually the same
+         * as the connection protocol name except in some cases. 
+         * For example, meta-protocols such as ProtocolInfoProto, which get
+         * info about the protocol, reuse the connection but need to indicate
+         * that the actual protocol is different (i.e. the protocol is
+         * ProtocolInfoProto); in this case
+         * the declaringClassProtocolName field is set to the ProtocolInfoProto.
+         */
+
+        String declaringClassProtoName = 
+            rpcRequest.getDeclaringClassProtocolName();
         long clientVersion = rpcRequest.getClientProtocolVersion();
         if (server.verbose)
-          LOG.info("Call: protocol=" + protocol + ", method=" + methodName);
+          LOG.info("Call: connectionProtocolName=" + connectionProtocolName + 
+              ", method=" + methodName);
         
-        ProtoClassProtoImpl protocolImpl = getProtocolImpl(server, protoName,
-            clientVersion);
+        ProtoClassProtoImpl protocolImpl = getProtocolImpl(server, 
+                              declaringClassProtoName, clientVersion);
         BlockingService service = (BlockingService) protocolImpl.protocolImpl;
         MethodDescriptor methodDescriptor = service.getDescriptorForType()
             .findMethodByName(methodName);
         if (methodDescriptor == null) {
-          String msg = "Unknown method " + methodName + " called on " + protocol
-              + " protocol.";
+          String msg = "Unknown method " + methodName + " called on " 
+                                + connectionProtocolName + " protocol.";
           LOG.warn(msg);
           throw new RpcServerException(msg);
         }

+ 57 - 18
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -1339,7 +1339,7 @@ public abstract class Server {
               + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
               + ") is configured as simple. Please configure another method "
               + "like kerberos or digest.");
-            setupResponse(authFailedResponse, authFailedCall, Status.FATAL,
+            setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL,
                 null, ae.getClass().getName(), ae.getMessage());
             responder.doRespond(authFailedCall);
             throw ae;
@@ -1420,7 +1420,7 @@ public abstract class Server {
         Call fakeCall =  new Call(-1, null, this);
         // Versions 3 and greater can interpret this exception
         // response in the same manner
-        setupResponse(buffer, fakeCall, Status.FATAL,
+        setupResponseOldVersionFatal(buffer, fakeCall,
             null, VersionMismatch.class.getName(), errMsg);
 
         responder.doRespond(fakeCall);
@@ -1443,7 +1443,7 @@ public abstract class Server {
       ByteArrayOutputStream buffer = new ByteArrayOutputStream();
 
       Call fakeCall = new Call(-1, null, this);
-      setupResponse(buffer, fakeCall, Status.FATAL, null,
+      setupResponse(buffer, fakeCall, RpcStatusProto.FATAL, null,
           IpcException.class.getName(), errMsg);
       responder.doRespond(fakeCall);
     }
@@ -1579,7 +1579,7 @@ public abstract class Server {
             new Call(header.getCallId(), null, this);
         ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
 
-        setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null,
+        setupResponse(responseBuffer, readParamsFailedCall, RpcStatusProto.FATAL, null,
             IOException.class.getName(),
             "Unknown rpc kind "  + header.getRpcKind());
         responder.doRespond(readParamsFailedCall);
@@ -1597,7 +1597,7 @@ public abstract class Server {
             new Call(header.getCallId(), null, this);
         ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
 
-        setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null,
+        setupResponse(responseBuffer, readParamsFailedCall, RpcStatusProto.FATAL, null,
             t.getClass().getName(),
             "IPC server unable to read call parameters: " + t.getMessage());
         responder.doRespond(readParamsFailedCall);
@@ -1627,7 +1627,7 @@ public abstract class Server {
         rpcMetrics.incrAuthorizationSuccesses();
       } catch (AuthorizationException ae) {
         rpcMetrics.incrAuthorizationFailures();
-        setupResponse(authFailedResponse, authFailedCall, Status.FATAL, null,
+        setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL, null,
             ae.getClass().getName(), ae.getMessage());
         responder.doRespond(authFailedCall);
         return false;
@@ -1725,8 +1725,8 @@ public abstract class Server {
             // responder.doResponse() since setupResponse may use
             // SASL to encrypt response data and SASL enforces
             // its own message ordering.
-            setupResponse(buf, call, (error == null) ? Status.SUCCESS
-                : Status.ERROR, value, errorClass, error);
+            setupResponse(buf, call, (error == null) ? RpcStatusProto.SUCCESS
+                : RpcStatusProto.ERROR, value, errorClass, error);
             
             // Discard the large buf and reset it back to smaller size 
             // to free up heap
@@ -1859,40 +1859,79 @@ public abstract class Server {
   /**
    * Setup response for the IPC Call.
    * 
-   * @param response buffer to serialize the response into
+   * @param responseBuf buffer to serialize the response into
    * @param call {@link Call} to which we are setting up the response
-   * @param status {@link Status} of the IPC call
+   * @param status of the IPC call
    * @param rv return value for the IPC Call, if the call was successful
    * @param errorClass error class, if the call failed
    * @param error error message, if the call failed
    * @throws IOException
    */
-  private void setupResponse(ByteArrayOutputStream response, 
-                             Call call, Status status, 
+  private void setupResponse(ByteArrayOutputStream responseBuf,
+                             Call call, RpcStatusProto status, 
                              Writable rv, String errorClass, String error) 
   throws IOException {
-    response.reset();
-    DataOutputStream out = new DataOutputStream(response);
-    out.writeInt(call.callId);                // write call id
-    out.writeInt(status.state);           // write status
+    responseBuf.reset();
+    DataOutputStream out = new DataOutputStream(responseBuf);
+    RpcResponseHeaderProto.Builder response =  
+        RpcResponseHeaderProto.newBuilder();
+    response.setCallId(call.callId);
+    response.setStatus(status);
 
-    if (status == Status.SUCCESS) {
+
+    if (status == RpcStatusProto.SUCCESS) {
       try {
+        response.build().writeDelimitedTo(out);
         rv.write(out);
       } catch (Throwable t) {
         LOG.warn("Error serializing call response for call " + call, t);
         // Call back to same function - this is OK since the
         // buffer is reset at the top, and since status is changed
         // to ERROR it won't infinite loop.
-        setupResponse(response, call, Status.ERROR,
+        setupResponse(responseBuf, call, RpcStatusProto.ERROR,
             null, t.getClass().getName(),
             StringUtils.stringifyException(t));
         return;
       }
     } else {
+      if (status == RpcStatusProto.FATAL) {
+        response.setServerIpcVersionNum(Server.CURRENT_VERSION);
+      }
+      response.build().writeDelimitedTo(out);
       WritableUtils.writeString(out, errorClass);
       WritableUtils.writeString(out, error);
     }
+    if (call.connection.useWrap) {
+      wrapWithSasl(responseBuf, call);
+    }
+    call.setResponse(ByteBuffer.wrap(responseBuf.toByteArray()));
+  }
+  
+  /**
+   * Setup response for the IPC Call on Fatal Error from a 
+   * client that is using an old version of Hadoop.
+   * The response is serialized using the previous protocol's response
+   * layout.
+   * 
+   * @param response buffer to serialize the response into
+   * @param call {@link Call} to which we are setting up the response
+   * @param rv return value for the IPC Call, if the call was successful
+   * @param errorClass error class, if the call failed
+   * @param error error message, if the call failed
+   * @throws IOException
+   */
+  private void setupResponseOldVersionFatal(ByteArrayOutputStream response, 
+                             Call call,
+                             Writable rv, String errorClass, String error) 
+  throws IOException {
+    final int OLD_VERSION_FATAL_STATUS = -1;
+    response.reset();
+    DataOutputStream out = new DataOutputStream(response);
+    out.writeInt(call.callId);                // write call id
+    out.writeInt(OLD_VERSION_FATAL_STATUS);   // write FATAL_STATUS
+    WritableUtils.writeString(out, errorClass);
+    WritableUtils.writeString(out, error);
+
     if (call.connection.useWrap) {
       wrapWithSasl(response, call);
     }

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java

@@ -34,6 +34,7 @@ import javax.management.MBeanServer;
 import javax.management.MalformedObjectNameException;
 import javax.management.ObjectName;
 import javax.management.ReflectionException;
+import javax.management.RuntimeErrorException;
 import javax.management.RuntimeMBeanException;
 import javax.management.openmbean.CompositeData;
 import javax.management.openmbean.CompositeType;
@@ -317,6 +318,11 @@ public class JMXJsonServlet extends HttpServlet {
         LOG.error("getting attribute "+attName+" of "+oname+" threw an exception", e);
       }
       return;
+    } catch (RuntimeErrorException e) {
+      // RuntimeErrorException happens when an unexpected failure occurs in getAttribute
+      // for example https://issues.apache.org/jira/browse/DAEMON-120
+      LOG.debug("getting attribute "+attName+" of "+oname+" threw an exception", e);
+      return;
     } catch (AttributeNotFoundException e) {
       //Ignored the attribute was not found, which should never happen because the bean
       //just told us that it has this attribute, but if this happens just don't output

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/Node.java

@@ -40,6 +40,7 @@ public interface Node {
    * @param location the location
    */
   public void setNetworkLocation(String location);
+
   /** @return this node's name */
   public String getName();
 

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java

@@ -110,7 +110,7 @@ public class NodeBase implements Node {
    * @return the path of a node
    */
   public static String getPath(Node node) {
-    return node.getNetworkLocation()+PATH_SEPARATOR_STR+node.getName();
+    return node.getNetworkLocation() + PATH_SEPARATOR_STR + node.getName();
   }
   
   /** @return this node's path as its string representation */

+ 65 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java

@@ -18,10 +18,15 @@
 
 package org.apache.hadoop.security.token;
 
+import com.google.common.collect.Maps;
+
+import java.io.ByteArrayInputStream;
 import java.io.DataInput;
+import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Map;
 import java.util.ServiceLoader;
 
 import org.apache.commons.codec.binary.Base64;
@@ -37,6 +42,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.util.ReflectionUtils;
 
 /**
  * The client-side form of the token.
@@ -45,6 +51,9 @@ import org.apache.hadoop.io.WritableUtils;
 @InterfaceStability.Evolving
 public class Token<T extends TokenIdentifier> implements Writable {
   public static final Log LOG = LogFactory.getLog(Token.class);
+  
+  private static Map<Text, Class<? extends TokenIdentifier>> tokenKindMap;
+  
   private byte[] identifier;
   private byte[] password;
   private Text kind;
@@ -100,13 +109,49 @@ public class Token<T extends TokenIdentifier> implements Writable {
   }
 
   /**
-   * Get the token identifier
-   * @return the token identifier
+   * Get the token identifier's byte representation
+   * @return the token identifier's byte representation
    */
   public byte[] getIdentifier() {
     return identifier;
   }
   
+  private static synchronized Class<? extends TokenIdentifier>
+      getClassForIdentifier(Text kind) {
+    if (tokenKindMap == null) {
+      tokenKindMap = Maps.newHashMap();
+      for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) {
+        tokenKindMap.put(id.getKind(), id.getClass());
+      }
+    }
+    Class<? extends TokenIdentifier> cls = tokenKindMap.get(kind);
+    if (cls == null) {
+      LOG.warn("Cannot find class for token kind " + kind);
+       return null;
+    }
+    return cls;
+  }
+  
+  /**
+   * Get the token identifier object, or null if it could not be constructed
+   * (because the class could not be loaded, for example).
+   * @return the token identifier, or null
+   * @throws IOException 
+   */
+  @SuppressWarnings("unchecked")
+  public T decodeIdentifier() throws IOException {
+    Class<? extends TokenIdentifier> cls = getClassForIdentifier(getKind());
+    if (cls == null) {
+      return null;
+    }
+    TokenIdentifier tokenIdentifier = ReflectionUtils.newInstance(cls, null);
+    ByteArrayInputStream buf = new ByteArrayInputStream(identifier);
+    DataInputStream in = new DataInputStream(buf);  
+    tokenIdentifier.readFields(in);
+    in.close();
+    return (T) tokenIdentifier;
+  }
+  
   /**
    * Get the token password/secret
    * @return the token password/secret
@@ -260,16 +305,31 @@ public class Token<T extends TokenIdentifier> implements Writable {
       buffer.append(num);
     }
   }
+  
+  private void identifierToString(StringBuilder buffer) {
+    T id = null;
+    try {
+      id = decodeIdentifier();
+    } catch (IOException e) {
+      // handle in the finally block
+    } finally {
+      if (id != null) {
+        buffer.append("(").append(id).append(")");
+      } else {
+        addBinaryBuffer(buffer, identifier);
+      }
+    }
+  }
 
   @Override
   public String toString() {
     StringBuilder buffer = new StringBuilder();
-    buffer.append("Ident: ");
-    addBinaryBuffer(buffer, identifier);
-    buffer.append(", Kind: ");
+    buffer.append("Kind: ");
     buffer.append(kind.toString());
     buffer.append(", Service: ");
     buffer.append(service.toString());
+    buffer.append(", Ident: ");
+    identifierToString(buffer);
     return buffer.toString();
   }
   

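Editor's note: a minimal sketch of the new decodeIdentifier() call. The concrete TokenIdentifier class is resolved by token kind via ServiceLoader, and null comes back when no matching implementation is on the classpath. The helper below is an illustration, not part of the patch.

import java.io.IOException;

import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenDecodeExample {
  public static void printToken(Token<? extends TokenIdentifier> token)
      throws IOException {
    TokenIdentifier id = token.decodeIdentifier();
    if (id != null) {
      System.out.println("decoded identifier: " + id);
    } else {
      System.out.println("no identifier class for kind " + token.getKind());
    }
    // toString() now leads with Kind and Service and renders the
    // identifier in decoded form when possible.
    System.out.println(token);
  }
}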
+ 6 - 2
hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh

@@ -48,10 +48,14 @@ done
 export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"
 
 # Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_NAMENODE_OPTS"
+export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
 export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+# The ZKFC does not need a large heap, and keeping it small avoids
+# any potential for long GC pauses
+export HADOOP_ZKFC_OPTS="-Xmx256m $HADOOP_ZKFC_OPTS"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-policy.xml

@@ -223,6 +223,12 @@
     <description>ACL for HAService protocol used by HAAdmin to manage the
       active and stand-by states of namenode.</description>
   </property>
+  <property>
+    <name>security.zkfc.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for access to the ZK Failover Controller
+    </description>
+  </property>
 
    <property>
       <name>security.mrhs.client.protocol.acl</name>

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties

@@ -102,7 +102,7 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 #
 #Security appender
 #
-hadoop.security.logger=INFO,console
+hadoop.security.logger=INFO,NullAppender
 hadoop.security.log.maxfilesize=256MB
 hadoop.security.log.maxbackupindex=20
 log4j.category.SecurityLogger=${hadoop.security.logger}
@@ -126,7 +126,7 @@ log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
 #
 # hdfs audit logging
 #
-hdfs.audit.logger=INFO,console
+hdfs.audit.logger=INFO,NullAppender
 hdfs.audit.log.maxfilesize=256MB
 hdfs.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
@@ -141,7 +141,7 @@ log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
 #
 # mapred audit logging
 #
-mapred.audit.logger=INFO,console
+mapred.audit.logger=INFO,NullAppender
 mapred.audit.log.maxfilesize=256MB
 mapred.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}

+ 12 - 0
hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto

@@ -27,6 +27,16 @@ enum HAServiceStateProto {
   STANDBY = 2;
 }
 
+enum HARequestSource {
+  REQUEST_BY_USER = 0;
+  REQUEST_BY_USER_FORCED = 1;
+  REQUEST_BY_ZKFC = 2;
+}
+
+message HAStateChangeRequestInfoProto {
+  required HARequestSource reqSource = 1;
+}
+
 /**
  * void request
  */
@@ -43,6 +53,7 @@ message MonitorHealthResponseProto {
  * void request
  */
 message TransitionToActiveRequestProto { 
+  required HAStateChangeRequestInfoProto reqInfo = 1;
 }
 
 /**
@@ -55,6 +66,7 @@ message TransitionToActiveResponseProto {
  * void request
  */
 message TransitionToStandbyRequestProto { 
+  required HAStateChangeRequestInfoProto reqInfo = 1;
 }
 
 /**

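Editor's note: on the Java side, a client would populate the new message roughly as below. This sketch mirrors what the client-side translator is expected to send; the generated class names follow the HAServiceProtocolProtos imports seen earlier in this change.

import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto;

public class RequestInfoExample {
  public static TransitionToActiveRequestProto manualActiveRequest() {
    HAStateChangeRequestInfoProto reqInfo =
        HAStateChangeRequestInfoProto.newBuilder()
            .setReqSource(HARequestSource.REQUEST_BY_USER) // user-initiated
            .build();
    // reqInfo is required, so every transition request must carry a source.
    return TransitionToActiveRequestProto.newBuilder()
        .setReqInfo(reqInfo)
        .build();
  }
}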
+ 23 - 4
hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto

@@ -19,7 +19,6 @@ option java_package = "org.apache.hadoop.ipc.protobuf";
 option java_outer_classname = "RpcPayloadHeaderProtos";
 option java_generate_equals_and_hash = true;
 
-
 /**
  * This is the rpc payload header. It is sent with every rpc call.
  * 
@@ -34,8 +33,6 @@ option java_generate_equals_and_hash = true;
  *
  */
 
-
-
 /**
  * RpcKind determine the rpcEngine and the serialization of the rpc payload
  */
@@ -54,5 +51,27 @@ enum RpcPayloadOperationProto {
 message RpcPayloadHeaderProto { // the header for the RpcRequest
   optional RpcKindProto rpcKind = 1;
   optional RpcPayloadOperationProto rpcOp = 2;
-  optional uint32 callId = 3; // each rpc has a callId that is also used in response
+  required uint32 callId = 3; // each rpc has a callId that is also used in response
+}
+
+enum RpcStatusProto {
+ SUCCESS = 0;  // RPC succeeded
+ ERROR = 1;    // RPC failed
+ FATAL = 2;    // Fatal error - connection is closed
+}
+
+/**
+ * Rpc Response Header
+ *    - If successful, the response follows after this header
+ *        - length (4 byte int), followed by the response
+ *    - If error or fatal, the exception info follows
+ *        - length (4 byte int) Class name of exception - UTF-8 string
+ *        - length (4 byte int) Stacktrace - UTF-8 string
+ *        - if the strings are null then the length is -1
+ * In case of a fatal error the response contains the server side's IPC version
+ */
+message RpcResponseHeaderProto {
+  required uint32 callId = 1; // callId used in Request
+  required RpcStatusProto status = 2;
+  optional uint32 serverIpcVersionNum = 3; // in case of a fatal IPC error 
+}
 }

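Editor's note: the framing is standard protobuf delimited encoding. The server writes the header with writeDelimitedTo() (as in Server.setupResponse above) and the client reads it back with parseDelimitedFrom() (as in Client). A self-contained round trip:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto;

public class HeaderRoundTrip {
  public static void main(String[] args) throws IOException {
    RpcResponseHeaderProto header = RpcResponseHeaderProto.newBuilder()
        .setCallId(42)
        .setStatus(RpcStatusProto.SUCCESS)
        .build();

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    header.writeDelimitedTo(out); // varint length prefix, then the message

    RpcResponseHeaderProto parsed = RpcResponseHeaderProto
        .parseDelimitedFrom(new ByteArrayInputStream(out.toByteArray()));
    System.out.println("callId=" + parsed.getCallId()
        + " status=" + parsed.getStatus());
  }
}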
+ 29 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNProtocol.java → hadoop-common-project/hadoop-common/src/main/proto/ZKFCProtocol.proto

@@ -16,21 +16,37 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hdfs.test.system;
+option java_package = "org.apache.hadoop.ha.proto";
+option java_outer_classname = "ZKFCProtocolProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+message CedeActiveRequestProto {
+  required uint32 millisToCede = 1;
+}
+
+message CedeActiveResponseProto {
+}
+
+message GracefulFailoverRequestProto {
+}
+
+message GracefulFailoverResponseProto {
+}
 
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.test.system.DaemonProtocol;
 
 /**
- * Client side API exposed from Datanode.
- * Actual implementations are likely to be injected
- *
- * The protocol has to be annotated so KerberosInfo can be filled in during
- * creation of a ipc.Client connection
+ * Protocol provides manual control of the ZK Failover Controllers
  */
-@KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
-public interface DNProtocol extends DaemonProtocol {
-  public static final long versionID = 1L;
+service ZKFCProtocolService {
+  /**
+   * Request that the service cede its active state, and quit the election
+   * for some amount of time
+   */
+  rpc cedeActive(CedeActiveRequestProto)
+      returns(CedeActiveResponseProto);
+
+
+  rpc gracefulFailover(GracefulFailoverRequestProto)
+      returns(GracefulFailoverResponseProto);
 }

+ 15 - 1
hadoop-common-project/hadoop-common/src/main/proto/hadoop_rpc.proto

@@ -38,7 +38,21 @@ message HadoopRpcRequestProto {
   /** Bytes corresponding to the client protobuf request */
   optional bytes request = 2;
   
-  /** protocol name of class declaring the called method */ 
+  /** 
+   * RPCs for a particular interface (ie protocol) are done using an
+   * IPC connection that is set up using rpcProxy.
+   * The rpcProxy has a declared protocol name that is 
+   * sent from client to server at connection time. 
+   * 
+   * Each RPC call also sends a protocol name 
+   * (called declaringClassProtocolName). This name is usually the same
+   * as the connection protocol name except in some cases. 
+   * For example, meta-protocols such as ProtocolInfoProto, which get metainfo
+   * about the protocol, reuse the connection but need to indicate that
+   * the actual protocol is different (i.e. the protocol is
+   * ProtocolInfoProto); in this case
+   * the declaringClassProtocolName field is set to ProtocolInfoProto
+   */
   required string declaringClassProtocolName = 3;
   
   /** protocol version of class declaring the called method */

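To make the distinction concrete, a hypothetical sketch of a request for a meta-protocol call; the HadoopRpcProtos outer class, its Java package, the methodName/clientProtocolVersion fields, and the protocol name string are all assumptions from context, not shown in the hunk above.

import com.google.protobuf.ByteString;
import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto;

class MetaProtocolRequestSketch {
  static HadoopRpcRequestProto build(byte[] serializedArgs) {
    return HadoopRpcRequestProto.newBuilder()
        .setMethodName("getProtocolSignature")          // assumed method name
        .setRequest(ByteString.copyFrom(serializedArgs))
        // the connection was set up for some other protocol, but this call
        // actually targets the protocol-info meta-protocol:
        .setDeclaringClassProtocolName(
            "org.apache.hadoop.ipc.ProtocolMetaInfoPB")  // assumed name
        .setClientProtocolVersion(1)
        .build();
  }
}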
+ 70 - 0
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -944,4 +944,74 @@
   </description>
 </property>
 
+<property>
+  <name>ha.zookeeper.quorum</name>
+  <description>
+    A list of ZooKeeper server addresses, separated by commas, that are
+    to be used by the ZKFailoverController in automatic failover.
+  </description>
+</property>
+
+<property>
+  <name>ha.zookeeper.session-timeout.ms</name>
+  <value>5000</value>
+  <description>
+    The session timeout to use when the ZKFC connects to ZooKeeper.
+    Setting this value to a lower value implies that server crashes
+    will be detected more quickly, but risks triggering failover too
+    aggressively in the case of a transient error or network blip.
+  </description>
+</property>
+
+<property>
+  <name>ha.zookeeper.parent-znode</name>
+  <value>/hadoop-ha</value>
+  <description>
+    The ZooKeeper znode under which the ZK failover controller stores
+    its information. Note that the nameservice ID is automatically
+    appended to this znode, so it is not normally necessary to
+    configure this, even in a federated environment.
+  </description>
+</property>
+
+<property>
+  <name>ha.zookeeper.acl</name>
+  <value>world:anyone:rwcda</value>
+  <description>
+    A comma-separated list of ZooKeeper ACLs to apply to the znodes
+    used by automatic failover. These ACLs are specified in the same
+    format as used by the ZooKeeper CLI.
+
+    If the ACL itself contains secrets, you may instead specify a
+    path to a file, prefixed with the '@' symbol, and the value of
+    this configuration will be loaded from within.
+  </description>
+</property>
+
+<property>
+  <name>ha.zookeeper.auth</name>
+  <value></value>
+  <description>
+    A comma-separated list of ZooKeeper authentications to add when
+    connecting to ZooKeeper. These are specified in the same format
+    as used by the &quot;addauth&quot; command in the ZK CLI. It is
+    important that the authentications specified here are sufficient
+    to access znodes with the ACL specified in ha.zookeeper.acl.
+
+    If the auths contain secrets, you may instead specify a
+    path to a file, prefixed with the '@' symbol, and the value of
+    this configuration will be loaded from within.
+  </description>
+</property>
+
+<!-- Static Web User Filter properties. -->
+<property>
+  <description>
+    The user name to filter as, on static web filters
+    while rendering content. An example use is the HDFS
+    web UI (user to be used for browsing files).
+  </description>
+  <name>hadoop.http.staticuser.user</name>
+  <value>dr.who</value>
+</property>
 </configuration>

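A minimal sketch of reading the new keys from client code, using only the literal key names and defaults given above (a real caller would typically go through constants in CommonConfigurationKeys rather than string literals):

import org.apache.hadoop.conf.Configuration;

class HaZkConfigSketch {
  static void demo() {
    Configuration conf = new Configuration();
    String quorum = conf.get("ha.zookeeper.quorum");   // e.g. "zk1:2181,zk2:2181,zk3:2181"
    int sessionTimeoutMs = conf.getInt("ha.zookeeper.session-timeout.ms", 5000);
    String parentZnode = conf.get("ha.zookeeper.parent-znode", "/hadoop-ha");
    // ACL and auth values may be "@/path/to/file" to load secrets from a
    // file, as the descriptions above note.
    String acl = conf.get("ha.zookeeper.acl", "world:anyone:rwcda");
  }
}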
+ 32 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java

@@ -526,6 +526,29 @@ public class TestConfiguration extends TestCase {
     }
   }
   
+  public void testDoubleValues() throws IOException {
+    out=new BufferedWriter(new FileWriter(CONFIG));
+    startConfig();
+    appendProperty("test.double1", "3.1415");
+    appendProperty("test.double2", "003.1415");
+    appendProperty("test.double3", "-3.1415");
+    appendProperty("test.double4", " -3.1415 ");
+    appendProperty("test.double5", "xyz-3.1415xyz");
+    endConfig();
+    Path fileResource = new Path(CONFIG);
+    conf.addResource(fileResource);
+    assertEquals(3.1415, conf.getDouble("test.double1", 0.0));
+    assertEquals(3.1415, conf.getDouble("test.double2", 0.0));
+    assertEquals(-3.1415, conf.getDouble("test.double3", 0.0));
+    assertEquals(-3.1415, conf.getDouble("test.double4", 0.0));
+    try {
+      conf.getDouble("test.double5", 0.0);
+      fail("Property had invalid double value, but was read successfully.");
+    } catch (NumberFormatException e) {
+      // pass
+    }
+  }
+  
   public void testGetClass() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -976,6 +999,15 @@ public class TestConfiguration extends TestCase {
         "Not returning expected number of classes. Number of returned classes ="
         "Not returning expected number of classes. Number of returned classes ="
             + classes.length, 0, classes.length);
             + classes.length, 0, classes.length);
   }
   }
+
+  public void testInvalidSubstitutation() {
+    String key = "test.random.key";
+    String keyExpression = "${" + key + "}";
+    Configuration configuration = new Configuration();
+    configuration.set(key, keyExpression);
+    String value = configuration.get(key);
+    assertTrue("Unexpected value " + value, value.equals(keyExpression));
+  }
   
   public static void main(String[] argv) throws Exception {
     junit.textui.TestRunner.main(new String[]{

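The two new tests pin down behavior worth restating: getDouble tolerates surrounding whitespace and leading zeros but throws NumberFormatException for embedded garbage, and a self-referential ${...} substitution falls back to the literal expression. A small sketch of both, under those assumptions:

import org.apache.hadoop.conf.Configuration;

class ConfigBehaviorSketch {
  static void demo() {
    Configuration conf = new Configuration();

    conf.set("test.double", " -3.1415 ");
    double d = conf.getDouble("test.double", 0.0);   // -3.1415; whitespace tolerated

    conf.set("test.bad", "xyz-3.1415xyz");
    try {
      conf.getDouble("test.bad", 0.0);               // throws NumberFormatException
    } catch (NumberFormatException expected) {
    }

    conf.set("test.self", "${test.self}");
    String v = conf.get("test.self");                // "${test.self}": an unresolvable
                                                     // substitution returns the expression
  }
}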
+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/empty-configuration.xml

@@ -1,3 +1,4 @@
+<?xml version="1.0"?>
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -14,7 +15,6 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<?xml version="1.0"?>
 <configuration>
 </configuration>
 

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java

@@ -75,7 +75,7 @@ public abstract class FSMainOperationsBaseTest  {
   // A test filter which returns any path containing an 'x' or 'X'
   final private static PathFilter TEST_X_FILTER = new PathFilter() {
     public boolean accept(Path file) {
-      if(file.getName().contains("x") || file.toString().contains("X"))
+      if(file.getName().contains("x") || file.getName().contains("X"))
         return true;
       else
         return false;

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java

@@ -75,7 +75,7 @@ public abstract class FileContextMainOperationsBaseTest  {
   // A test filter which returns any path containing an 'x' or 'X'
   final private static PathFilter TEST_X_FILTER = new PathFilter() {
     public boolean accept(Path file) {
-      if(file.getName().contains("x") || file.toString().contains("X"))
+      if(file.getName().contains("x") || file.getName().contains("X"))
         return true;
       else
         return false;

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java

@@ -113,7 +113,7 @@ public final class FileSystemTestHelper {
 
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
       int blockSize, boolean createParent) throws IOException {
-      return createFile(fSys, path, numBlocks, blockSize, fSys.getDefaultReplication(), true);
+      return createFile(fSys, path, numBlocks, blockSize, fSys.getDefaultReplication(path), true);
   }
 
   public static long createFile(FileSystem fSys, Path path, int numBlocks,

+ 14 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java

@@ -74,6 +74,11 @@ public class TestFilterFileSystem {
         Progressable progress) throws IOException {
       return null;
     }
+    public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
+            EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
+            Progressable progress) throws IOException {
+      return null;
+    }
     public boolean mkdirs(Path f) { return false; }
     public FSDataInputStream open(Path f) { return null; }
     public FSDataOutputStream create(Path f) { return null; }
@@ -123,6 +128,15 @@ public class TestFilterFileSystem {
         Progressable progress) {
       return null;
     }
+    public FSDataOutputStream create(Path f,
+        FsPermission permission,
+        EnumSet<CreateFlag> flags,
+        int bufferSize,
+        short replication,
+        long blockSize,
+        Progressable progress) throws IOException {
+      return null;
+    }
     public String getName() { return null; }
     public boolean delete(Path f) { return false; }
     public short getReplication(Path src) { return 0 ; }

+ 2 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java

@@ -47,11 +47,9 @@ public abstract class S3FileSystemContractBaseTest
   }
   
   public void testBlockSize() throws Exception {
-    
-    long newBlockSize = fs.getDefaultBlockSize() * 2;
-    fs.getConf().setLong("fs.s3.block.size", newBlockSize);
-    
     Path file = path("/test/hadoop/file");
+    long newBlockSize = fs.getDefaultBlockSize(file) * 2;
+    fs.getConf().setLong("fs.s3.block.size", newBlockSize);
     createFile(file);
     assertEquals("Double default block size", newBlockSize,
 	fs.getFileStatus(file).getBlockSize());

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java

@@ -141,11 +141,11 @@ public abstract class NativeS3FileSystemContractBaseTest
   public void testBlockSize() throws Exception {
     Path file = path("/test/hadoop/file");
     createFile(file);
-    assertEquals("Default block size", fs.getDefaultBlockSize(),
+    assertEquals("Default block size", fs.getDefaultBlockSize(file),
     fs.getFileStatus(file).getBlockSize());
 
     // Block size is determined at read time
-    long newBlockSize = fs.getDefaultBlockSize() * 2;
+    long newBlockSize = fs.getDefaultBlockSize(file) * 2;
     fs.getConf().setLong("fs.s3n.block.size", newBlockSize);
     assertEquals("Double default block size", newBlockSize,
     fs.getFileStatus(file).getBlockSize());

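Both S3 test fixes, like the FileSystemTestHelper change above, track the same API shift: getDefaultBlockSize and getDefaultReplication now take the path they describe, so defaults can vary per mount point or per store. A minimal sketch of the new call sites, assuming an already-initialized FileSystem fs:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class PerPathDefaultsSketch {
  static void demo(FileSystem fs) {
    Path p = new Path("/test/hadoop/file");
    long blockSize = fs.getDefaultBlockSize(p);      // may differ per path/store
    short replication = fs.getDefaultReplication(p);
  }
}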
+ 72 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegationTokenSupport.java

@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsConstants;
+import org.junit.Test;
+
+/**
+ * Test ViewFileSystem's support for having delegation tokens fetched and cached
+ * for the file system.
+ * 
+ * Currently this class just ensures that getCanonicalServiceName() always
+ * returns <code>null</code> for ViewFileSystem instances.
+ */
+public class TestViewFileSystemDelegationTokenSupport {
+  
+  private static final String MOUNT_TABLE_NAME = "vfs-cluster";
+
+  /**
+   * Regression test for HADOOP-8408.
+   */
+  @Test
+  public void testGetCanonicalServiceNameWithNonDefaultMountTable()
+      throws URISyntaxException, IOException {
+    
+    Configuration conf = new Configuration();
+    ConfigUtil.addLink(conf, MOUNT_TABLE_NAME, "/user", new URI("file:///"));
+    
+    FileSystem viewFs = FileSystem.get(new URI(FsConstants.VIEWFS_SCHEME +
+        "://" + MOUNT_TABLE_NAME), conf);
+    
+    String serviceName = viewFs.getCanonicalServiceName();
+    assertNull(serviceName);
+  }
+  
+  @Test
+  public void testGetCanonicalServiceNameWithDefaultMountTable()
+      throws URISyntaxException, IOException {
+    
+    Configuration conf = new Configuration();
+    ConfigUtil.addLink(conf, "/user", new URI("file:///"));
+    
+    FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
+    
+    String serviceName = viewFs.getCanonicalServiceName();
+    assertNull(serviceName);
+  }
+
+}

+ 17 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ActiveStandbyElectorTestUtil.java

@@ -19,16 +19,25 @@ package org.apache.hadoop.ha;
 
 import java.util.Arrays;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.apache.zookeeper.data.Stat;
 import org.apache.zookeeper.server.ZooKeeperServer;
 
 public abstract class ActiveStandbyElectorTestUtil {
+  
+  private static final Log LOG = LogFactory.getLog(
+      ActiveStandbyElectorTestUtil.class);
+  private static final long LOG_INTERVAL_MS = 500;
 
   public static void waitForActiveLockData(TestContext ctx,
       ZooKeeperServer zks, String parentDir, byte[] activeData)
       throws Exception {
+    long st = System.currentTimeMillis();
+    long lastPrint = st;
     while (true) {
       if (ctx != null) {
         ctx.checkException();
@@ -42,10 +51,18 @@ public abstract class ActiveStandbyElectorTestUtil {
             Arrays.equals(activeData, data)) {
           return;
         }
+        if (System.currentTimeMillis() > lastPrint + LOG_INTERVAL_MS) {
+          LOG.info("Cur data: " + StringUtils.byteToHexString(data));
+          lastPrint = System.currentTimeMillis();
+        }
       } catch (NoNodeException nne) {
         if (activeData == null) {
           return;
         }
+        if (System.currentTimeMillis() > lastPrint + LOG_INTERVAL_MS) {
+          LOG.info("Cur data: no node");
+          lastPrint = System.currentTimeMillis();
+        }
       }
       Thread.sleep(50);
     }

+ 452 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java

@@ -0,0 +1,452 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ha;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.net.Socket;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.zookeeper.PortAssignment;
+import org.apache.zookeeper.TestableZooKeeper;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.Watcher.Event.KeeperState;
+import org.apache.zookeeper.ZKTestCase;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.server.ServerCnxnFactory;
+import org.apache.zookeeper.server.ServerCnxnFactoryAccessor;
+import org.apache.zookeeper.server.ZKDatabase;
+import org.apache.zookeeper.server.ZooKeeperServer;
+import org.apache.zookeeper.server.persistence.FileTxnLog;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Copy-paste of ClientBase from ZooKeeper, but without any of the
+ * JMXEnv verification. There seems to be a bug, ZOOKEEPER-1438,
+ * which causes spurious failures in the JMXEnv verification when
+ * we run these tests with the upstream ClientBase.
+ */
+public abstract class ClientBaseWithFixes extends ZKTestCase {
+    protected static final Logger LOG = LoggerFactory.getLogger(ClientBaseWithFixes.class);
+
+    public static int CONNECTION_TIMEOUT = 30000;
+    static final File BASETEST =
+        new File(System.getProperty("build.test.dir", "build"));
+
+    protected String hostPort = "127.0.0.1:" + PortAssignment.unique();
+    protected int maxCnxns = 0;
+    protected ServerCnxnFactory serverFactory = null;
+    protected File tmpDir = null;
+    
+    long initialFdCount;
+    
+    public ClientBaseWithFixes() {
+        super();
+    }
+
+    /**
+     * In general don't use this. Only use in the special case that you
+     * want to ignore results (for whatever reason) in your test. Don't
+     * use empty watchers in real code!
+     *
+     */
+    protected class NullWatcher implements Watcher {
+        public void process(WatchedEvent event) { /* nada */ }
+    }
+
+    protected static class CountdownWatcher implements Watcher {
+        // XXX this doesn't need to be volatile! (Should probably be final)
+        volatile CountDownLatch clientConnected;
+        volatile boolean connected;
+
+        public CountdownWatcher() {
+            reset();
+        }
+        synchronized public void reset() {
+            clientConnected = new CountDownLatch(1);
+            connected = false;
+        }
+        synchronized public void process(WatchedEvent event) {
+            if (event.getState() == KeeperState.SyncConnected ||
+                event.getState() == KeeperState.ConnectedReadOnly) {
+                connected = true;
+                notifyAll();
+                clientConnected.countDown();
+            } else {
+                connected = false;
+                notifyAll();
+            }
+        }
+        synchronized boolean isConnected() {
+            return connected;
+        }
+        synchronized void waitForConnected(long timeout) throws InterruptedException, TimeoutException {
+            long expire = System.currentTimeMillis() + timeout;
+            long left = timeout;
+            while(!connected && left > 0) {
+                wait(left);
+                left = expire - System.currentTimeMillis();
+            }
+            if (!connected) {
+                throw new TimeoutException("Did not connect");
+
+            }
+        }
+        synchronized void waitForDisconnected(long timeout) throws InterruptedException, TimeoutException {
+            long expire = System.currentTimeMillis() + timeout;
+            long left = timeout;
+            while(connected && left > 0) {
+                wait(left);
+                left = expire - System.currentTimeMillis();
+            }
+            if (connected) {
+                throw new TimeoutException("Did not disconnect");
+
+            }
+        }
+    }
+
+    protected TestableZooKeeper createClient()
+        throws IOException, InterruptedException
+    {
+        return createClient(hostPort);
+    }
+
+    protected TestableZooKeeper createClient(String hp)
+        throws IOException, InterruptedException
+    {
+        CountdownWatcher watcher = new CountdownWatcher();
+        return createClient(watcher, hp);
+    }
+
+    private LinkedList<ZooKeeper> allClients;
+    private boolean allClientsSetup = false;
+
+    protected TestableZooKeeper createClient(CountdownWatcher watcher, String hp)
+        throws IOException, InterruptedException
+    {
+        return createClient(watcher, hp, CONNECTION_TIMEOUT);
+    }
+
+    protected TestableZooKeeper createClient(CountdownWatcher watcher,
+            String hp, int timeout)
+        throws IOException, InterruptedException
+    {
+        watcher.reset();
+        TestableZooKeeper zk = new TestableZooKeeper(hp, timeout, watcher);
+        if (!watcher.clientConnected.await(timeout, TimeUnit.MILLISECONDS))
+        {
+            Assert.fail("Unable to connect to server");
+        }
+        synchronized(this) {
+            if (!allClientsSetup) {
+                LOG.error("allClients never setup");
+                Assert.fail("allClients never setup");
+            }
+            if (allClients != null) {
+                allClients.add(zk);
+            } else {
+                // test done - close the zk, not needed
+                zk.close();
+            }
+        }
+
+
+        return zk;
+    }
+
+    public static class HostPort {
+        String host;
+        int port;
+        public HostPort(String host, int port) {
+            this.host = host;
+            this.port = port;
+        }
+    }
+    public static List<HostPort> parseHostPortList(String hplist) {
+        ArrayList<HostPort> alist = new ArrayList<HostPort>();
+        for (String hp: hplist.split(",")) {
+            int idx = hp.lastIndexOf(':');
+            String host = hp.substring(0, idx);
+            int port;
+            try {
+                port = Integer.parseInt(hp.substring(idx + 1));
+            } catch(RuntimeException e) {
+                throw new RuntimeException("Problem parsing " + hp + ": " + e.toString());
+            }
+            alist.add(new HostPort(host,port));
+        }
+        return alist;
+    }
+
+    /**
+     * Send the 4letterword
+     * @param host the destination host
+     * @param port the destination port
+     * @param cmd the 4letterword
+     * @return
+     * @throws IOException
+     */
+    public static String send4LetterWord(String host, int port, String cmd)
+        throws IOException
+    {
+        LOG.info("connecting to " + host + " " + port);
+        Socket sock = new Socket(host, port);
+        BufferedReader reader = null;
+        try {
+            OutputStream outstream = sock.getOutputStream();
+            outstream.write(cmd.getBytes());
+            outstream.flush();
+            // this replicates NC - close the output stream before reading
+            sock.shutdownOutput();
+
+            reader =
+                new BufferedReader(
+                        new InputStreamReader(sock.getInputStream()));
+            StringBuilder sb = new StringBuilder();
+            String line;
+            while((line = reader.readLine()) != null) {
+                sb.append(line + "\n");
+            }
+            return sb.toString();
+        } finally {
+            sock.close();
+            if (reader != null) {
+                reader.close();
+            }
+        }
+    }
+
+    public static boolean waitForServerUp(String hp, long timeout) {
+        long start = System.currentTimeMillis();
+        while (true) {
+            try {
+                // if there are multiple hostports, just take the first one
+                HostPort hpobj = parseHostPortList(hp).get(0);
+                String result = send4LetterWord(hpobj.host, hpobj.port, "stat");
+                if (result.startsWith("Zookeeper version:") &&
+                        !result.contains("READ-ONLY")) {
+                    return true;
+                }
+            } catch (IOException e) {
+                // ignore as this is expected
+                LOG.info("server " + hp + " not up " + e);
+            }
+
+            if (System.currentTimeMillis() > start + timeout) {
+                break;
+            }
+            try {
+                Thread.sleep(250);
+            } catch (InterruptedException e) {
+                // ignore
+            }
+        }
+        return false;
+    }
+    public static boolean waitForServerDown(String hp, long timeout) {
+        long start = System.currentTimeMillis();
+        while (true) {
+            try {
+                HostPort hpobj = parseHostPortList(hp).get(0);
+                send4LetterWord(hpobj.host, hpobj.port, "stat");
+            } catch (IOException e) {
+                return true;
+            }
+
+            if (System.currentTimeMillis() > start + timeout) {
+                break;
+            }
+            try {
+                Thread.sleep(250);
+            } catch (InterruptedException e) {
+                // ignore
+            }
+        }
+        return false;
+    }
+
+    public static File createTmpDir() throws IOException {
+        return createTmpDir(BASETEST);
+    }
+    static File createTmpDir(File parentDir) throws IOException {
+        File tmpFile = File.createTempFile("test", ".junit", parentDir);
+        // don't delete tmpFile - this ensures we don't attempt to create
+        // a tmpDir with a duplicate name
+        File tmpDir = new File(tmpFile + ".dir");
+        Assert.assertFalse(tmpDir.exists()); // never true if tmpfile does its job
+        Assert.assertTrue(tmpDir.mkdirs());
+
+        return tmpDir;
+    }
+    private static int getPort(String hostPort) {
+        String[] split = hostPort.split(":");
+        String portstr = split[split.length-1];
+        String[] pc = portstr.split("/");
+        if (pc.length > 1) {
+            portstr = pc[0];
+        }
+        return Integer.parseInt(portstr);
+    }
+
+    static ServerCnxnFactory createNewServerInstance(File dataDir,
+            ServerCnxnFactory factory, String hostPort, int maxCnxns)
+        throws IOException, InterruptedException
+    {
+        ZooKeeperServer zks = new ZooKeeperServer(dataDir, dataDir, 3000);
+        final int PORT = getPort(hostPort);
+        if (factory == null) {
+            factory = ServerCnxnFactory.createFactory(PORT, maxCnxns);
+        }
+        factory.startup(zks);
+        Assert.assertTrue("waiting for server up",
+                   ClientBaseWithFixes.waitForServerUp("127.0.0.1:" + PORT,
+                                              CONNECTION_TIMEOUT));
+
+        return factory;
+    }
+
+    static void shutdownServerInstance(ServerCnxnFactory factory,
+            String hostPort)
+    {
+        if (factory != null) {
+            ZKDatabase zkDb;
+            {
+                ZooKeeperServer zs = getServer(factory);
+        
+                zkDb = zs.getZKDatabase();
+            }
+            factory.shutdown();
+            try {
+                zkDb.close();
+            } catch (IOException ie) {
+                LOG.warn("Error closing logs ", ie);
+            }
+            final int PORT = getPort(hostPort);
+
+            Assert.assertTrue("waiting for server down",
+                       ClientBaseWithFixes.waitForServerDown("127.0.0.1:" + PORT,
+                                                    CONNECTION_TIMEOUT));
+        }
+    }
+
+    /**
+     * Test specific setup
+     */
+    public static void setupTestEnv() {
+        // during the tests we run with 100K prealloc in the logs.
+        // on windows systems prealloc of 64M was seen to take ~15seconds
+        // resulting in test Assert.failure (client timeout on first session).
+        // set env and directly in order to handle static init/gc issues
+        System.setProperty("zookeeper.preAllocSize", "100");
+        FileTxnLog.setPreallocSize(100 * 1024);
+    }
+
+    protected void setUpAll() throws Exception {
+        allClients = new LinkedList<ZooKeeper>();
+        allClientsSetup = true;
+    }
+
+    @Before
+    public void setUp() throws Exception {
+        BASETEST.mkdirs();
+
+        setupTestEnv();
+
+        setUpAll();
+
+        tmpDir = createTmpDir(BASETEST);
+
+        startServer();
+
+        LOG.info("Client test setup finished");
+    }
+
+    protected void startServer() throws Exception {
+        LOG.info("STARTING server");
+        serverFactory = createNewServerInstance(tmpDir, serverFactory, hostPort, maxCnxns);
+    }
+
+    protected void stopServer() throws Exception {
+        LOG.info("STOPPING server");
+        shutdownServerInstance(serverFactory, hostPort);
+        serverFactory = null;
+    }
+
+
+    protected static ZooKeeperServer getServer(ServerCnxnFactory fac) {
+        ZooKeeperServer zs = ServerCnxnFactoryAccessor.getZkServer(fac);
+
+        return zs;
+    }
+
+    protected void tearDownAll() throws Exception {
+        synchronized (this) {
+            if (allClients != null) for (ZooKeeper zk : allClients) {
+                try {
+                    if (zk != null)
+                        zk.close();
+                } catch (InterruptedException e) {
+                    LOG.warn("ignoring interrupt", e);
+                }
+            }
+            allClients = null;
+        }
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        LOG.info("tearDown starting");
+
+        tearDownAll();
+
+        stopServer();
+
+        if (tmpDir != null) {
+            Assert.assertTrue("delete " + tmpDir.toString(), recursiveDelete(tmpDir));
+        }
+
+        // This has to be set to null when the same instance of this class is reused between test cases
+        serverFactory = null;
+    }
+
+    public static boolean recursiveDelete(File d) {
+        if (d.isDirectory()) {
+            File children[] = d.listFiles();
+            for (File f : children) {
+                Assert.assertTrue("delete " + f.toString(), recursiveDelete(f));
+            }
+        }
+        return d.delete();
+    }
+}

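A hedged sketch of how a test might build on this harness; the test body is illustrative and not part of the commit, but it uses only members defined in the class above (hostPort, CONNECTION_TIMEOUT, waitForServerUp):

import org.junit.Assert;
import org.junit.Test;

public class TestWithEmbeddedZk extends ClientBaseWithFixes {
  @Test
  public void testServerIsUp() {
    // setUp() has already started an embedded ZooKeeper server on hostPort.
    Assert.assertTrue(waitForServerUp(hostPort, CONNECTION_TIMEOUT));
  }
}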
+ 71 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java

@@ -22,6 +22,8 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.security.AccessControlException;
@@ -34,13 +36,19 @@ import com.google.common.collect.Lists;
  * a mock implementation.
  */
 class DummyHAService extends HAServiceTarget {
+  public static final Log LOG = LogFactory.getLog(DummyHAService.class);
+  private static final String DUMMY_FENCE_KEY = "dummy.fence.key";
   volatile HAServiceState state;
   HAServiceProtocol proxy;
+  ZKFCProtocol zkfcProxy = null;
   NodeFencer fencer;
   InetSocketAddress address;
   boolean isHealthy = true;
   boolean actUnreachable = false;
-  boolean failToBecomeActive;
+  boolean failToBecomeActive, failToBecomeStandby, failToFence;
+  
+  DummySharedResource sharedResource;
+  public int fenceCount = 0;
   
   static ArrayList<DummyHAService> instances = Lists.newArrayList();
   int index;
@@ -48,7 +56,14 @@ class DummyHAService extends HAServiceTarget {
   DummyHAService(HAServiceState state, InetSocketAddress address) {
     this.state = state;
     this.proxy = makeMock();
-    this.fencer = Mockito.mock(NodeFencer.class);
+    try {
+      Configuration conf = new Configuration();
+      conf.set(DUMMY_FENCE_KEY, DummyFencer.class.getName()); 
+      this.fencer = Mockito.spy(
+          NodeFencer.create(conf, DUMMY_FENCE_KEY));
+    } catch (BadFencingConfigurationException e) {
+      throw new RuntimeException(e);
+    }
     this.address = address;
     synchronized (instances) {
       instances.add(this);
@@ -56,6 +71,10 @@ class DummyHAService extends HAServiceTarget {
     }
   }
   
+  public void setSharedResource(DummySharedResource rsrc) {
+    this.sharedResource = rsrc;
+  }
+  
   private HAServiceProtocol makeMock() {
     return Mockito.spy(new MockHAProtocolImpl());
   }
@@ -65,12 +84,24 @@ class DummyHAService extends HAServiceTarget {
     return address;
   }
 
+  @Override
+  public InetSocketAddress getZKFCAddress() {
+    return null;
+  }
+
   @Override
   public HAServiceProtocol getProxy(Configuration conf, int timeout)
       throws IOException {
     return proxy;
   }
   
+  @Override
+  public ZKFCProtocol getZKFCProxy(Configuration conf, int timeout)
+      throws IOException {
+    assert zkfcProxy != null;
+    return zkfcProxy;
+  }
+  
   @Override
   public NodeFencer getFencer() {
     return fencer;
@@ -80,6 +111,11 @@ class DummyHAService extends HAServiceTarget {
   public void checkFencingConfigured() throws BadFencingConfigurationException {
   }
   
+  @Override
+  public boolean isAutoFailoverEnabled() {
+    return true;
+  }
+
   @Override
   public String toString() {
     return "DummyHAService #" + index;
@@ -101,20 +137,28 @@ class DummyHAService extends HAServiceTarget {
     }
     
     @Override
-    public void transitionToActive() throws ServiceFailedException,
+    public void transitionToActive(StateChangeRequestInfo req) throws ServiceFailedException,
         AccessControlException, IOException {
       checkUnreachable();
       if (failToBecomeActive) {
         throw new ServiceFailedException("injected failure");
       }
-    
+      if (sharedResource != null) {
+        sharedResource.take(DummyHAService.this);
+      }
       state = HAServiceState.ACTIVE;
     }
     
     @Override
-    public void transitionToStandby() throws ServiceFailedException,
+    public void transitionToStandby(StateChangeRequestInfo req) throws ServiceFailedException,
         AccessControlException, IOException {
       checkUnreachable();
+      if (failToBecomeStandby) {
+        throw new ServiceFailedException("injected failure");
+      }
+      if (sharedResource != null) {
+        sharedResource.release(DummyHAService.this);
+      }
       state = HAServiceState.STANDBY;
     }
     
@@ -138,4 +182,26 @@ class DummyHAService extends HAServiceTarget {
     public void close() throws IOException {
     }
   }
+  
+  public static class DummyFencer implements FenceMethod {
+    public void checkArgs(String args) throws BadFencingConfigurationException {
+    }
+
+    @Override
+    public boolean tryFence(HAServiceTarget target, String args)
+        throws BadFencingConfigurationException {
+      LOG.info("tryFence(" + target + ")");
+      DummyHAService svc = (DummyHAService)target;
+      synchronized (svc) {
+        svc.fenceCount++;
+      }
+      if (svc.failToFence) {
+        LOG.info("Injected failure to fence");
+        return false;
+      }
+      svc.sharedResource.release(svc);
+      return true;
+    }
+  }
+
 }

+ 52 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummySharedResource.java

@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import org.junit.Assert;
+
+/**
+ * A fake shared resource, for use in automatic failover testing.
+ * This simulates a real shared resource like a shared edit log.
+ * When the {@link DummyHAService} instances change state or get
+ * fenced, they notify the shared resource, which asserts that
+ * we never have two HA services who think they're holding the
+ * resource at the same time.
+ */
+public class DummySharedResource {
+  private DummyHAService holder = null;
+  private int violations = 0;
+  
+  public synchronized void take(DummyHAService newHolder) {
+    if (holder == null || holder == newHolder) {
+      holder = newHolder;
+    } else {
+      violations++;
+      throw new IllegalStateException("already held by: " + holder);
+    }
+  }
+  
+  public synchronized void release(DummyHAService oldHolder) {
+    if (holder == oldHolder) {
+      holder = null;
+    }
+  }
+  
+  public synchronized void assertNoViolations() {
+    Assert.assertEquals(0, violations);
+  }
+}

+ 319 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java

@@ -0,0 +1,319 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.HealthMonitor.State;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
+import org.apache.zookeeper.KeeperException.NoNodeException;
+import org.apache.zookeeper.data.Stat;
+import org.apache.zookeeper.server.ZooKeeperServer;
+
+import com.google.common.base.Preconditions;
+import com.google.common.primitives.Ints;
+
+/**
+ * Harness for starting two dummy ZK FailoverControllers, associated with
+ * DummyHAServices. This harness starts two such ZKFCs, designated by
+ * indexes 0 and 1, and provides utilities for building tests around them.
+ */
+public class MiniZKFCCluster {
+  private final TestContext ctx;
+  private final ZooKeeperServer zks;
+
+  private DummyHAService svcs[];
+  private DummyZKFCThread thrs[];
+  private Configuration conf;
+  
+  private DummySharedResource sharedResource = new DummySharedResource();
+  
+  private static final Log LOG = LogFactory.getLog(MiniZKFCCluster.class);
+  
+  public MiniZKFCCluster(Configuration conf, ZooKeeperServer zks) {
+    this.conf = conf;
+    // Fast check interval so tests run faster
+    conf.setInt(CommonConfigurationKeys.HA_HM_CHECK_INTERVAL_KEY, 50);
+    conf.setInt(CommonConfigurationKeys.HA_HM_CONNECT_RETRY_INTERVAL_KEY, 50);
+    conf.setInt(CommonConfigurationKeys.HA_HM_SLEEP_AFTER_DISCONNECT_KEY, 50);
+    svcs = new DummyHAService[2];
+    svcs[0] = new DummyHAService(HAServiceState.INITIALIZING,
+        new InetSocketAddress("svc1", 1234));
+    svcs[0].setSharedResource(sharedResource);
+    svcs[1] = new DummyHAService(HAServiceState.INITIALIZING,
+        new InetSocketAddress("svc2", 1234));
+    svcs[1].setSharedResource(sharedResource);
+    
+    this.ctx = new TestContext();
+    this.zks = zks;
+  }
+  
+  /**
+   * Set up two services and their failover controllers. svc1 is started
+   * first, so that it enters ACTIVE state, and then svc2 is started,
+   * which enters STANDBY
+   */
+  public void start() throws Exception {
+    // Format the base dir, should succeed
+    thrs = new DummyZKFCThread[2];
+    thrs[0] = new DummyZKFCThread(ctx, svcs[0]);
+    assertEquals(0, thrs[0].zkfc.run(new String[]{"-formatZK"}));
+    ctx.addThread(thrs[0]);
+    thrs[0].start();
+    
+    LOG.info("Waiting for svc0 to enter active state");
+    waitForHAState(0, HAServiceState.ACTIVE);
+    
+    LOG.info("Adding svc1");
+    thrs[1] = new DummyZKFCThread(ctx, svcs[1]);
+    thrs[1].start();
+    waitForHAState(1, HAServiceState.STANDBY);
+  }
+  
+  /**
+   * Stop the services.
+   * @throws Exception if either of the services had encountered a fatal error
+   */
+  public void stop() throws Exception {
+    for (DummyZKFCThread thr : thrs) {
+      if (thr != null) {
+        thr.interrupt();
+      }
+    }
+    if (ctx != null) {
+      ctx.stop();
+    }
+    sharedResource.assertNoViolations();
+  }
+
+  /**
+   * @return the TestContext implementation used internally. This allows more
+   * threads to be added to the context, etc.
+   */
+  public TestContext getTestContext() {
+    return ctx;
+  }
+  
+  public DummyHAService getService(int i) {
+    return svcs[i];
+  }
+
+  public ActiveStandbyElector getElector(int i) {
+    return thrs[i].zkfc.getElectorForTests();
+  }
+
+  public DummyZKFC getZkfc(int i) {
+    return thrs[i].zkfc;
+  }
+  
+  public void setHealthy(int idx, boolean healthy) {
+    svcs[idx].isHealthy = healthy;
+  }
+
+  public void setFailToBecomeActive(int idx, boolean doFail) {
+    svcs[idx].failToBecomeActive = doFail;
+  }
+
+  public void setFailToBecomeStandby(int idx, boolean doFail) {
+    svcs[idx].failToBecomeStandby = doFail;
+  }
+  
+  public void setFailToFence(int idx, boolean doFail) {
+    svcs[idx].failToFence = doFail;
+  }
+  
+  public void setUnreachable(int idx, boolean unreachable) {
+    svcs[idx].actUnreachable = unreachable;
+  }
+
+  /**
+   * Wait for the given HA service to enter the given HA state.
+   */
+  public void waitForHAState(int idx, HAServiceState state)
+      throws Exception {
+    DummyHAService svc = getService(idx);
+    while (svc.state != state) {
+      ctx.checkException();
+      Thread.sleep(50);
+    }
+  }
+  
+  /**
+   * Wait for the ZKFC to be notified of a change in health state.
+   */
+  public void waitForHealthState(int idx, State state)
+      throws Exception {
+    ZKFCTestUtil.waitForHealthState(thrs[idx].zkfc, state, ctx);
+  }
+
+  /**
+   * Wait for the given elector to enter the given elector state.
+   * @param idx the service index (0 or 1)
+   * @param state the state to wait for
+   * @throws Exception if it times out, or an exception occurs on one
+   * of the ZKFC threads while waiting.
+   */
+  public void waitForElectorState(int idx,
+      ActiveStandbyElector.State state) throws Exception {
+    ActiveStandbyElectorTestUtil.waitForElectorState(ctx,
+        getElector(idx), state);
+  }
+
+  
+
+  /**
+   * Expire the ZK session of the given service. This requires
+   * (and asserts) that the given service be the current active.
+   * @throws NoNodeException if no service holds the lock
+   */
+  public void expireActiveLockHolder(int idx)
+      throws NoNodeException {
+    Stat stat = new Stat();
+    byte[] data = zks.getZKDatabase().getData(
+        DummyZKFC.LOCK_ZNODE, stat, null);
+    
+    assertArrayEquals(Ints.toByteArray(svcs[idx].index), data);
+    long session = stat.getEphemeralOwner();
+    LOG.info("Expiring svc " + idx + "'s zookeeper session " + session);
+    zks.closeSession(session);
+  }
+  
+
+  /**
+   * Wait for the given HA service to become the active lock holder.
+   * If the passed svc is null, waits for there to be no active
+   * lock holder.
+   */
+  public void waitForActiveLockHolder(Integer idx)
+      throws Exception {
+    DummyHAService svc = idx == null ? null : svcs[idx];
+    ActiveStandbyElectorTestUtil.waitForActiveLockData(ctx, zks,
+        DummyZKFC.SCOPED_PARENT_ZNODE,
+        (idx == null) ? null : Ints.toByteArray(svc.index));
+  }
+  
+
+  /**
+   * Expires the ZK session associated with service 'fromIdx', and waits
+   * until service 'toIdx' takes over.
+   * @throws Exception if the target service does not become active
+   */
+  public void expireAndVerifyFailover(int fromIdx, int toIdx)
+      throws Exception {
+    Preconditions.checkArgument(fromIdx != toIdx);
+    
+    getElector(fromIdx).preventSessionReestablishmentForTests();
+    try {
+      expireActiveLockHolder(fromIdx);
+      
+      waitForHAState(fromIdx, HAServiceState.STANDBY);
+      waitForHAState(toIdx, HAServiceState.ACTIVE);
+    } finally {
+      getElector(fromIdx).allowSessionReestablishmentForTests();
+    }
+  }
+
+  /**
+   * Test-thread which runs a ZK Failover Controller corresponding
+   * to a given dummy service.
+   */
+  private class DummyZKFCThread extends TestingThread {
+    private final DummyZKFC zkfc;
+
+    public DummyZKFCThread(TestContext ctx, DummyHAService svc) {
+      super(ctx);
+      this.zkfc = new DummyZKFC(conf, svc);
+    }
+
+    @Override
+    public void doWork() throws Exception {
+      try {
+        assertEquals(0, zkfc.run(new String[0]));
+      } catch (InterruptedException ie) {
+        // Interrupted by main thread, that's OK.
+      }
+    }
+  }
+  
+  static class DummyZKFC extends ZKFailoverController {
+    private static final String DUMMY_CLUSTER = "dummy-cluster";
+    public static final String SCOPED_PARENT_ZNODE =
+      ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT + "/" +
+      DUMMY_CLUSTER;
+    private static final String LOCK_ZNODE = 
+      SCOPED_PARENT_ZNODE + "/" + ActiveStandbyElector.LOCK_FILENAME;
+    private final DummyHAService localTarget;
+    
+    public DummyZKFC(Configuration conf, DummyHAService localTarget) {
+      super(conf, localTarget);
+      this.localTarget = localTarget;
+    }
+
+    @Override
+    protected byte[] targetToData(HAServiceTarget target) {
+      return Ints.toByteArray(((DummyHAService)target).index);
+    }
+    
+    @Override
+    protected HAServiceTarget dataToTarget(byte[] data) {
+      int index = Ints.fromByteArray(data);
+      return DummyHAService.getInstance(index);
+    }
+
+    @Override
+    protected void loginAsFCUser() throws IOException {
+    }
+
+    @Override
+    protected String getScopeInsideParentNode() {
+      return DUMMY_CLUSTER;
+    }
+
+    @Override
+    protected void checkRpcAdminAccess() throws AccessControlException {
+    }
+
+    @Override
+    protected InetSocketAddress getRpcAddressToBindTo() {
+      return new InetSocketAddress(0);
+    }
+
+    @Override
+    protected void initRPC() throws IOException {
+      super.initRPC();
+      localTarget.zkfcProxy = this.getRpcServerForTests();
+    }
+
+    @Override
+    protected PolicyProvider getPolicyProvider() {
+      return null;
+    }
+  }
+}

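A short sketch of driving this harness from a test, using only the public methods defined above; the ZooKeeperServer zks is assumed to come from a fixture such as ClientBaseWithFixes:

import org.apache.hadoop.conf.Configuration;
import org.apache.zookeeper.server.ZooKeeperServer;

class MiniZkfcClusterSketch {
  static void demo(ZooKeeperServer zks) throws Exception {
    MiniZKFCCluster cluster = new MiniZKFCCluster(new Configuration(), zks);
    cluster.start();                         // svc 0 becomes ACTIVE, svc 1 STANDBY
    try {
      cluster.expireAndVerifyFailover(0, 1); // expire svc 0's ZK session; svc 1 takes over
    } finally {
      cluster.stop();                        // also asserts no shared-resource violations
    }
  }
}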
+ 80 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.ha;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.List;
 
 import org.apache.zookeeper.AsyncCallback;
@@ -40,6 +41,7 @@ import org.mockito.Mockito;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException;
+import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo;
 
 public class TestActiveStandbyElector {
 
@@ -51,9 +53,12 @@ public class TestActiveStandbyElector {
   private ActiveStandbyElectorTester elector;
 
   class ActiveStandbyElectorTester extends ActiveStandbyElector {
+    private int sleptFor = 0;
+    
     ActiveStandbyElectorTester(String hostPort, int timeout, String parent,
         List<ACL> acl, ActiveStandbyElectorCallback app) throws IOException {
-      super(hostPort, timeout, parent, acl, app);
+      super(hostPort, timeout, parent, acl,
+          Collections.<ZKAuthInfo>emptyList(), app);
     }
 
     @Override
@@ -61,6 +66,14 @@ public class TestActiveStandbyElector {
       ++count;
       return mockZK;
     }
+    
+    @Override
+    protected void sleepFor(int ms) {
+      // don't sleep in unit tests! Instead, just record the amount of
+      // time slept
+      LOG.info("Would have slept for " + ms + "ms");
+      sleptFor += ms;
+    }
   }
 
   private static final String ZK_PARENT_NAME = "/parent/node";
@@ -146,6 +159,68 @@ public class TestActiveStandbyElector {
     verifyExistCall(1);
   }
   
+  /**
+   * Verify that, when the callback fails to enter active state,
+   * the elector rejoins the election after sleeping for a short period.
+   */
+  @Test
+  public void testFailToBecomeActive() throws Exception {
+    mockNoPriorActive();
+    elector.joinElection(data);
+    Assert.assertEquals(0, elector.sleptFor);
+    
+    Mockito.doThrow(new ServiceFailedException("failed to become active"))
+        .when(mockApp).becomeActive();
+    elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK,
+        ZK_LOCK_NAME);
+    // Should have tried to become active
+    Mockito.verify(mockApp).becomeActive();
+    
+    // should re-join
+    Mockito.verify(mockZK, Mockito.times(2)).create(ZK_LOCK_NAME, data,
+        Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
+    Assert.assertEquals(2, count);
+    Assert.assertTrue(elector.sleptFor > 0);
+  }
+  
+  /**
+   * Verify that, when the callback fails to enter active state after
+   * a ZK disconnect (i.e. from the StatCallback), the elector rejoins
+   * the election after sleeping for a short period.
+   */
+  @Test
+  public void testFailToBecomeActiveAfterZKDisconnect() throws Exception {
+    mockNoPriorActive();
+    elector.joinElection(data);
+    Assert.assertEquals(0, elector.sleptFor);
+
+    elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
+        ZK_LOCK_NAME);
+    Mockito.verify(mockZK, Mockito.times(2)).create(ZK_LOCK_NAME, data,
+        Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
+
+    elector.processResult(Code.NODEEXISTS.intValue(), ZK_LOCK_NAME, mockZK,
+        ZK_LOCK_NAME);
+    verifyExistCall(1);
+
+    Stat stat = new Stat();
+    stat.setEphemeralOwner(1L);
+    Mockito.when(mockZK.getSessionId()).thenReturn(1L);
+
+    // Fake failure to become active from within the stat callback
+    Mockito.doThrow(new ServiceFailedException("fail to become active"))
+        .when(mockApp).becomeActive();
+    elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
+    Mockito.verify(mockApp, Mockito.times(1)).becomeActive();
+    
+    // should re-join
+    Mockito.verify(mockZK, Mockito.times(3)).create(ZK_LOCK_NAME, data,
+        Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
+    Assert.assertEquals(2, count);
+    Assert.assertTrue(elector.sleptFor > 0);
+  }
+
+  
   /**
    * Verify that, if there is a record of a prior active node, the
    * elector asks the application to fence it before becoming active.
@@ -314,6 +389,7 @@ public class TestActiveStandbyElector {
    */
   @Test
   public void testStatNodeRetry() {
+    elector.joinElection(data);
     elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
         (Stat) null);
     elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
@@ -334,6 +410,7 @@ public class TestActiveStandbyElector {
    */
   @Test
   public void testStatNodeError() {
+    elector.joinElection(data);
     elector.processResult(Code.RUNTIMEINCONSISTENCY.intValue(), ZK_LOCK_NAME,
         mockZK, (Stat) null);
     Mockito.verify(mockApp, Mockito.times(0)).enterNeutralMode();
@@ -517,6 +594,8 @@ public class TestActiveStandbyElector {
    */
   @Test
   public void testQuitElection() throws Exception {
+    elector.joinElection(data);
+    Mockito.verify(mockZK, Mockito.times(0)).close();
     elector.quitElection(true);
     Mockito.verify(mockZK, Mockito.times(1)).close();
     // no watches added
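
The hunks above distill a useful pattern for testing retry/backoff logic: override the elector's sleep hook so tests run instantly while still asserting that a backoff would have occurred. A minimal sketch of the pattern, with the harness class name being illustrative rather than part of the patch:

    // Override the elector's sleep hook: record the requested backoff
    // instead of blocking the test thread, then assert on it later.
    class InstantElector extends ActiveStandbyElector {
      int sleptFor = 0;

      InstantElector(String hostPort, int timeout, String parent,
          List<ACL> acl, ActiveStandbyElectorCallback app) throws IOException {
        super(hostPort, timeout, parent, acl,
            Collections.<ZKAuthInfo>emptyList(), app);
      }

      @Override
      protected void sleepFor(int ms) {
        sleptFor += ms;  // no real sleep; tests assert sleptFor > 0
      }
    }
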

+ 65 - 6
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java

@@ -21,15 +21,16 @@ package org.apache.hadoop.ha;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-import java.io.File;
+import java.util.Collections;
 import java.util.UUID;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
+import org.apache.hadoop.ha.ActiveStandbyElector.State;
+import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo;
 import org.apache.log4j.Level;
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.server.ZooKeeperServer;
-import org.apache.zookeeper.test.ClientBase;
 import org.junit.Test;
 import org.mockito.AdditionalMatchers;
 import org.mockito.Mockito;
@@ -39,7 +40,7 @@ import com.google.common.primitives.Ints;
 /**
  * Test for {@link ActiveStandbyElector} using real zookeeper.
  */
-public class TestActiveStandbyElectorRealZK extends ClientBase {
+public class TestActiveStandbyElectorRealZK extends ClientBaseWithFixes {
   static final int NUM_ELECTORS = 2;
   
   static {
@@ -58,8 +59,6 @@ public class TestActiveStandbyElectorRealZK extends ClientBase {
   
   
   @Override
   public void setUp() throws Exception {
-    // build.test.dir is used by zookeeper
-    new File(System.getProperty("build.test.dir", "build")).mkdirs();
     super.setUp();
     
     zkServer = getServer(serverFactory);
@@ -68,7 +67,8 @@ public class TestActiveStandbyElectorRealZK extends ClientBase {
       cbs[i] =  Mockito.mock(ActiveStandbyElectorCallback.class);
       appDatas[i] = Ints.toByteArray(i);
       electors[i] = new ActiveStandbyElector(
-          hostPort, 5000, PARENT_DIR, Ids.OPEN_ACL_UNSAFE, cbs[i]);
+          hostPort, 5000, PARENT_DIR, Ids.OPEN_ACL_UNSAFE,
+          Collections.<ZKAuthInfo>emptyList(), cbs[i]);
     }
   }
   
@@ -196,4 +196,63 @@ public class TestActiveStandbyElectorRealZK extends ClientBase {
 
 
     checkFatalsAndReset();
   }
+  
+  @Test(timeout=15000)
+  public void testHandleSessionExpirationOfStandby() throws Exception {
+    // Let elector 0 be active
+    electors[0].ensureParentZNode();
+    electors[0].joinElection(appDatas[0]);
+    ZooKeeperServer zks = getServer(serverFactory);
+    ActiveStandbyElectorTestUtil.waitForActiveLockData(null,
+        zks, PARENT_DIR, appDatas[0]);
+    Mockito.verify(cbs[0], Mockito.timeout(1000)).becomeActive();
+    checkFatalsAndReset();
+    
+    // Let elector 1 be standby
+    electors[1].joinElection(appDatas[1]);
+    ActiveStandbyElectorTestUtil.waitForElectorState(null, electors[1],
+        State.STANDBY);
+    
+    LOG.info("========================== Expiring standby's session");
+    zks.closeSession(electors[1].getZKSessionIdForTests());
+
+    // Should enter neutral mode when disconnected
+    Mockito.verify(cbs[1], Mockito.timeout(1000)).enterNeutralMode();
+
+    // Should re-join the election and go back to STANDBY
+    ActiveStandbyElectorTestUtil.waitForElectorState(null, electors[1],
+        State.STANDBY);
+    checkFatalsAndReset();
+    
+    LOG.info("========================== Quitting election");
+    electors[1].quitElection(false);
+
+    // Double check that we don't accidentally re-join the election due
+    // to receiving the "expired" event, by quitting elector 0 and
+    // ensuring that elector 1 doesn't become active.
+    electors[0].quitElection(false);
+    
+    Thread.sleep(1000);
+    Mockito.verify(cbs[1], Mockito.never()).becomeActive();
+    ActiveStandbyElectorTestUtil.waitForActiveLockData(null,
+        zks, PARENT_DIR, null);
+
+    checkFatalsAndReset();
+  }
+
+  @Test(timeout=15000)
+  public void testDontJoinElectionOnDisconnectAndReconnect() throws Exception {
+    electors[0].ensureParentZNode();
+
+    stopServer();
+    ActiveStandbyElectorTestUtil.waitForElectorState(
+        null, electors[0], State.NEUTRAL);
+    startServer();
+    waitForServerUp(hostPort, CONNECTION_TIMEOUT);
+    // Have to sleep to allow time for the clients to reconnect.
+    Thread.sleep(2000);
+    Mockito.verify(cbs[0], Mockito.never()).becomeActive();
+    Mockito.verify(cbs[1], Mockito.never()).becomeActive();
+    checkFatalsAndReset();
+  }
 }
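
The two new tests above force failover by expiring a ZooKeeper session from the server side, which delivers an Expired event to the client exactly as a real timeout would. A minimal sketch of the technique, using names from this diff (surrounding harness assumed):

    // Close the session on the embedded server rather than the client:
    // the standby's watcher sees Expired and must rejoin the election.
    ZooKeeperServer zks = getServer(serverFactory);
    long sessionId = electors[1].getZKSessionIdForTests();
    zks.closeSession(sessionId);
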

+ 20 - 13
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java

@@ -27,11 +27,12 @@ import static org.mockito.Mockito.verify;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer;
 import org.apache.hadoop.ha.TestNodeFencer.AlwaysFailFencer;
 import static org.apache.hadoop.ha.TestNodeFencer.setupFencer;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.test.MockitoUtil;
 
 
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -118,7 +119,8 @@ public class TestFailoverController {
   public void testFailoverToUnreadyService() throws Exception {
     DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
-    Mockito.doReturn(STATE_NOT_READY).when(svc2.proxy).getServiceStatus();
+    Mockito.doReturn(STATE_NOT_READY).when(svc2.proxy)
+        .getServiceStatus();
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
     try {
@@ -162,7 +164,7 @@ public class TestFailoverController {
   public void testFailoverFromFaultyServiceSucceeds() throws Exception {
     DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
     Mockito.doThrow(new ServiceFailedException("Failed!"))
-        .when(svc1.proxy).transitionToStandby();
+        .when(svc1.proxy).transitionToStandby(anyReqInfo());
 
 
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
@@ -185,7 +187,7 @@ public class TestFailoverController {
   public void testFailoverFromFaultyServiceFencingFailure() throws Exception {
     DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
     Mockito.doThrow(new ServiceFailedException("Failed!"))
-        .when(svc1.proxy).transitionToStandby();
+        .when(svc1.proxy).transitionToStandby(anyReqInfo());
 
 
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     svc1.fencer = svc2.fencer = setupFencer(AlwaysFailFencer.class.getName());
@@ -284,7 +286,7 @@ public class TestFailoverController {
     DummyHAService svc1 = spy(new DummyHAService(HAServiceState.ACTIVE, svc1Addr));
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     Mockito.doThrow(new ServiceFailedException("Failed!"))
-        .when(svc2.proxy).transitionToActive();
+        .when(svc2.proxy).transitionToActive(anyReqInfo());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
     try {
@@ -295,8 +297,8 @@ public class TestFailoverController {
     }
 
     // svc1 went standby then back to active
-    verify(svc1.proxy).transitionToStandby();
-    verify(svc1.proxy).transitionToActive();
+    verify(svc1.proxy).transitionToStandby(anyReqInfo());
+    verify(svc1.proxy).transitionToActive(anyReqInfo());
     assertEquals(HAServiceState.ACTIVE, svc1.state);
     assertEquals(HAServiceState.STANDBY, svc2.state);
   }
@@ -306,7 +308,7 @@ public class TestFailoverController {
     DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     Mockito.doThrow(new ServiceFailedException("Failed!"))
-        .when(svc2.proxy).transitionToActive();
+        .when(svc2.proxy).transitionToActive(anyReqInfo());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
     try {
@@ -327,7 +329,7 @@ public class TestFailoverController {
     DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     Mockito.doThrow(new ServiceFailedException("Failed!"))
-        .when(svc2.proxy).transitionToActive();
+        .when(svc2.proxy).transitionToActive(anyReqInfo());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
     AlwaysSucceedFencer.fenceCalled = 0;
 
@@ -346,12 +348,16 @@ public class TestFailoverController {
     assertSame(svc2, AlwaysSucceedFencer.fencedSvc);
   }
 
+  private StateChangeRequestInfo anyReqInfo() {
+    return Mockito.<StateChangeRequestInfo>any();
+  }
+
   @Test
   public void testFailureToFenceOnFailbackFailsTheFailback() throws Exception {
     DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     Mockito.doThrow(new IOException("Failed!"))
-        .when(svc2.proxy).transitionToActive();
+        .when(svc2.proxy).transitionToActive(anyReqInfo());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysFailFencer.class.getName());
     AlwaysFailFencer.fenceCalled = 0;
 
@@ -374,10 +380,10 @@ public class TestFailoverController {
   public void testFailbackToFaultyServiceFails() throws Exception {
     DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
     Mockito.doThrow(new ServiceFailedException("Failed!"))
-        .when(svc1.proxy).transitionToActive();
+        .when(svc1.proxy).transitionToActive(anyReqInfo());
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     Mockito.doThrow(new ServiceFailedException("Failed!"))
-        .when(svc2.proxy).transitionToActive();
+        .when(svc2.proxy).transitionToActive(anyReqInfo());
 
 
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
@@ -420,7 +426,8 @@ public class TestFailoverController {
   
   
   private void doFailover(HAServiceTarget tgt1, HAServiceTarget tgt2,
       boolean forceFence, boolean forceActive) throws FailoverFailedException {
-    FailoverController fc = new FailoverController(conf);
+    FailoverController fc = new FailoverController(conf, 
+        RequestSource.REQUEST_BY_USER);
     fc.failover(tgt1, tgt2, forceFence, forceActive);
   }
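
Because transitionToActive/transitionToStandby now take a StateChangeRequestInfo argument, every stub and verification in this file goes through the small anyReqInfo() helper added above, which simply wraps Mockito's typed any() matcher. The equivalent inline form, for reference (a sketch, not part of the patch):

    // Stub the proxy regardless of which request-info value is passed.
    Mockito.doThrow(new ServiceFailedException("Failed!"))
        .when(svc1.proxy).transitionToStandby(
            Mockito.<StateChangeRequestInfo>any());
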
 
 

+ 135 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAZKUtil.java

@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import static org.junit.Assert.*;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.ha.HAZKUtil.BadAclFormatException;
+import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo;
+import org.apache.zookeeper.ZooDefs.Perms;
+import org.apache.zookeeper.data.ACL;
+import org.junit.Test;
+
+import com.google.common.base.Charsets;
+import com.google.common.io.Files;
+
+public class TestHAZKUtil {
+  private static final String TEST_ROOT_DIR = System.getProperty(
+      "test.build.data", "/tmp") + "/TestHAZKUtil";
+  private static final File TEST_FILE = new File(TEST_ROOT_DIR,
+      "test-file");
+  
+  /** A path which is expected not to exist */
+  private static final String BOGUS_FILE = "/xxxx-this-does-not-exist";
+
+  @Test
+  public void testEmptyACL() {
+    List<ACL> result = HAZKUtil.parseACLs("");
+    assertTrue(result.isEmpty());
+  }
+  
+  @Test
+  public void testNullACL() {
+    List<ACL> result = HAZKUtil.parseACLs(null);
+    assertTrue(result.isEmpty());
+  }
+  
+  @Test
+  public void testInvalidACLs() {
+    badAcl("a:b",
+        "ACL 'a:b' not of expected form scheme:id:perm"); // not enough parts
+    badAcl("a",
+        "ACL 'a' not of expected form scheme:id:perm"); // not enough parts
+    badAcl("password:foo:rx",
+        "Invalid permission 'x' in permission string 'rx'");
+  }
+  
+  private static void badAcl(String acls, String expectedErr) {
+    try {
+      HAZKUtil.parseACLs(acls);
+      fail("Should have failed to parse '" + acls + "'");
+    } catch (BadAclFormatException e) {
+      assertEquals(expectedErr, e.getMessage());
+    }
+  }
+
+  @Test
+  public void testGoodACLs() {
+    List<ACL> result = HAZKUtil.parseACLs(
+        "sasl:hdfs/host1@MY.DOMAIN:cdrwa, sasl:hdfs/host2@MY.DOMAIN:ca");
+    ACL acl0 = result.get(0);
+    assertEquals(Perms.CREATE | Perms.DELETE | Perms.READ |
+        Perms.WRITE | Perms.ADMIN, acl0.getPerms());
+    assertEquals("sasl", acl0.getId().getScheme());
+    assertEquals("hdfs/host1@MY.DOMAIN", acl0.getId().getId());
+    
+    ACL acl1 = result.get(1);
+    assertEquals(Perms.CREATE | Perms.ADMIN, acl1.getPerms());
+    assertEquals("sasl", acl1.getId().getScheme());
+    assertEquals("hdfs/host2@MY.DOMAIN", acl1.getId().getId());
+  }
+  
+  @Test
+  public void testEmptyAuth() {
+    List<ZKAuthInfo> result = HAZKUtil.parseAuth("");
+    assertTrue(result.isEmpty());
+  }
+  
+  @Test
+  public void testNullAuth() {
+    List<ZKAuthInfo> result = HAZKUtil.parseAuth(null);
+    assertTrue(result.isEmpty());
+  }
+  
+  @Test
+  public void testGoodAuths() {
+    List<ZKAuthInfo> result = HAZKUtil.parseAuth(
+        "scheme:data,\n   scheme2:user:pass");
+    assertEquals(2, result.size());
+    ZKAuthInfo auth0 = result.get(0);
+    assertEquals("scheme", auth0.getScheme());
+    assertEquals("data", new String(auth0.getAuth()));
+    
+    ZKAuthInfo auth1 = result.get(1);
+    assertEquals("scheme2", auth1.getScheme());
+    assertEquals("user:pass", new String(auth1.getAuth()));
+  }
+  
+  @Test
+  public void testConfIndirection() throws IOException {
+    assertNull(HAZKUtil.resolveConfIndirection(null));
+    assertEquals("x", HAZKUtil.resolveConfIndirection("x"));
+    
+    TEST_FILE.getParentFile().mkdirs();
+    Files.write("hello world", TEST_FILE, Charsets.UTF_8);
+    assertEquals("hello world", HAZKUtil.resolveConfIndirection(
+        "@" + TEST_FILE.getAbsolutePath()));
+    
+    try {
+      HAZKUtil.resolveConfIndirection("@" + BOGUS_FILE);
+      fail("Did not throw for non-existent file reference");
+    } catch (FileNotFoundException fnfe) {
+      assertTrue(fnfe.getMessage().startsWith(BOGUS_FILE));
+    }
+  }
+}
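
Taken together, these tests pin down the small grammar HAZKUtil accepts. A usage sketch under the same APIs (the file path is illustrative, not from the patch):

    // ACLs: comma-separated "scheme:id:perms", perms drawn from r,w,c,d,a.
    List<ACL> acls = HAZKUtil.parseACLs("sasl:hdfs/host1@MY.DOMAIN:cdrwa");
    // Auths: comma/whitespace-separated "scheme:auth-data".
    List<ZKAuthInfo> auths = HAZKUtil.parseAuth("digest:user:pass");
    // A value starting with '@' is treated as a file reference, and the
    // file's contents are returned in its place.
    String secret = HAZKUtil.resolveConfIndirection("@/etc/hadoop/zk-auth.txt");
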

+ 385 - 256
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java

@@ -19,93 +19,58 @@ package org.apache.hadoop.ha;
 
 
 import static org.junit.Assert.*;
 
-import java.io.File;
-import java.net.InetSocketAddress;
+import java.security.NoSuchAlgorithmException;
 
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.HealthMonitor.State;
-import org.apache.hadoop.test.MultithreadedTestUtil;
-import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
-import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
+import org.apache.hadoop.ha.MiniZKFCCluster.DummyZKFC;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
-import org.apache.zookeeper.KeeperException.NoNodeException;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.Stat;
-import org.apache.zookeeper.server.ZooKeeperServer;
-import org.apache.zookeeper.test.ClientBase;
+import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
 
-import com.google.common.primitives.Ints;
-
-public class TestZKFailoverController extends ClientBase {
+public class TestZKFailoverController extends ClientBaseWithFixes {
   private Configuration conf;
-  private DummyHAService svc1;
-  private DummyHAService svc2;
-  private TestContext ctx;
-  private DummyZKFCThread thr1, thr2;
+  private MiniZKFCCluster cluster;
   
   
+  // Set up ZK digest-based credentials for the purposes of the tests,
+  // to make sure all of our functionality works with auth and ACLs
+  // present.
+  private static final String DIGEST_USER_PASS="test-user:test-password";
+  private static final String TEST_AUTH_GOOD =
+    "digest:" + DIGEST_USER_PASS;
+  private static final String DIGEST_USER_HASH;
   static {
-    ((Log4JLogger)ActiveStandbyElector.LOG).getLogger().setLevel(Level.ALL);
+    try {
+      DIGEST_USER_HASH = DigestAuthenticationProvider.generateDigest(
+          DIGEST_USER_PASS);
+    } catch (NoSuchAlgorithmException e) {
+      throw new RuntimeException(e);
+    }
   }
+  private static final String TEST_ACL =
+    "digest:" + DIGEST_USER_HASH + ":rwcda";
   
   
-  @Override
-  public void setUp() throws Exception {
-    // build.test.dir is used by zookeeper
-    new File(System.getProperty("build.test.dir", "build")).mkdirs();
-    super.setUp();
+  static {
+    ((Log4JLogger)ActiveStandbyElector.LOG).getLogger().setLevel(Level.ALL);
   }
   
   @Before
   public void setupConfAndServices() {
     conf = new Configuration();
-    conf.set(ZKFailoverController.ZK_QUORUM_KEY, hostPort);
-    // Fast check interval so tests run faster
-    conf.setInt(CommonConfigurationKeys.HA_HM_CHECK_INTERVAL_KEY, 50);
-    conf.setInt(CommonConfigurationKeys.HA_HM_CONNECT_RETRY_INTERVAL_KEY, 50);
-    conf.setInt(CommonConfigurationKeys.HA_HM_SLEEP_AFTER_DISCONNECT_KEY, 50);
-    svc1 = new DummyHAService(HAServiceState.INITIALIZING,
-        new InetSocketAddress("svc1", 1234));
-    svc2 = new DummyHAService(HAServiceState.INITIALIZING,
-        new InetSocketAddress("svc2", 1234));
-  }
-  
-  /**
-   * Set up two services and their failover controllers. svc1 is started
-   * first, so that it enters ACTIVE state, and then svc2 is started,
-   * which enters STANDBY
-   */
-  private void setupFCs() throws Exception {
-    // Format the base dir, should succeed
-    assertEquals(0, runFC(svc1, "-formatZK"));
+    conf.set(ZKFailoverController.ZK_ACL_KEY, TEST_ACL);
+    conf.set(ZKFailoverController.ZK_AUTH_KEY, TEST_AUTH_GOOD);
 
 
-    ctx = new MultithreadedTestUtil.TestContext();
-    thr1 = new DummyZKFCThread(ctx, svc1);
-    ctx.addThread(thr1);
-    thr1.start();
-    
-    LOG.info("Waiting for svc1 to enter active state");
-    waitForHAState(svc1, HAServiceState.ACTIVE);
-    
-    LOG.info("Adding svc2");
-    thr2 = new DummyZKFCThread(ctx, svc2);
-    thr2.start();
-    waitForHAState(svc2, HAServiceState.STANDBY);
-  }
-  
-  private void stopFCs() throws Exception {
-    if (thr1 != null) {
-      thr1.interrupt();
-    }
-    if (thr2 != null) {
-      thr2.interrupt();
-    }
-    if (ctx != null) {
-      ctx.stop();
-    }
+    conf.set(ZKFailoverController.ZK_QUORUM_KEY, hostPort);
+    this.cluster = new MiniZKFCCluster(conf, getServer(serverFactory));
   }
 
   /**
@@ -114,20 +79,104 @@ public class TestZKFailoverController extends ClientBase {
    */
   @Test(timeout=15000)
   public void testFormatZK() throws Exception {
+    DummyHAService svc = cluster.getService(1);
     // Run without formatting the base dir,
     // should barf
     assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE,
-        runFC(svc1));
+        runFC(svc));
 
 
     // Format the base dir, should succeed
-    assertEquals(0, runFC(svc1, "-formatZK"));
+    assertEquals(0, runFC(svc, "-formatZK"));
 
 
     // Should fail to format if already formatted
     assertEquals(ZKFailoverController.ERR_CODE_FORMAT_DENIED,
-        runFC(svc1, "-formatZK", "-nonInteractive"));
+        runFC(svc, "-formatZK", "-nonInteractive"));
   
   
     // Unless '-force' is on
-    assertEquals(0, runFC(svc1, "-formatZK", "-force"));
+    assertEquals(0, runFC(svc, "-formatZK", "-force"));
+  }
+  
+  /**
+   * Test that if ZooKeeper is not running, the correct error
+   * code is returned.
+   */
+  @Test(timeout=15000)
+  public void testNoZK() throws Exception {
+    stopServer();
+    DummyHAService svc = cluster.getService(1);
+    assertEquals(ZKFailoverController.ERR_CODE_NO_ZK,
+        runFC(svc));
+  }
+  
+  @Test
+  public void testFormatOneClusterLeavesOtherClustersAlone() throws Exception {
+    DummyHAService svc = cluster.getService(1);
+
+    DummyZKFC zkfcInOtherCluster = new DummyZKFC(conf, cluster.getService(1)) {
+      @Override
+      protected String getScopeInsideParentNode() {
+        return "other-scope";
+      }
+    };
+    
+    // Run without formatting the base dir,
+    // should barf
+    assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE,
+        runFC(svc));
+
+    // Format the base dir, should succeed
+    assertEquals(0, runFC(svc, "-formatZK"));
+    
+    // Run the other cluster without formatting, should barf because
+    // it uses a different parent znode
+    assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE,
+        zkfcInOtherCluster.run(new String[]{}));
+    
+    // Should succeed in formatting the second cluster
+    assertEquals(0, zkfcInOtherCluster.run(new String[]{"-formatZK"}));
+
+    // But should not have deleted the original base node from the first
+    // cluster
+    assertEquals(ZKFailoverController.ERR_CODE_FORMAT_DENIED,
+        runFC(svc, "-formatZK", "-nonInteractive"));
+  }
+  
+  /**
+   * Test that automatic failover won't run against a target that hasn't
+   * explicitly enabled the feature.
+   */
+  @Test(timeout=10000)
+  public void testWontRunWhenAutoFailoverDisabled() throws Exception {
+    DummyHAService svc = cluster.getService(1);
+    svc = Mockito.spy(svc);
+    Mockito.doReturn(false).when(svc).isAutoFailoverEnabled();
+    
+    assertEquals(ZKFailoverController.ERR_CODE_AUTO_FAILOVER_NOT_ENABLED,
+        runFC(svc, "-formatZK"));
+    assertEquals(ZKFailoverController.ERR_CODE_AUTO_FAILOVER_NOT_ENABLED,
+        runFC(svc));
+  }
+  
+  /**
+   * Test that, if ACLs are specified in the configuration, that
+   * it sets the ACLs when formatting the parent node.
+   */
+  @Test(timeout=15000)
+  public void testFormatSetsAcls() throws Exception {
+    // Format the base dir, should succeed
+    DummyHAService svc = cluster.getService(1);
+    assertEquals(0, runFC(svc, "-formatZK"));
+
+    ZooKeeper otherClient = createClient();
+    try {
+      // client without auth should not be able to read it
+      Stat stat = new Stat();
+      otherClient.getData(ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT,
+          false, stat);
+      fail("Was able to read data without authenticating!");
+    } catch (KeeperException.NoAuthException nae) {
+      // expected
+    }
   }
   
   /**
@@ -136,14 +185,14 @@ public class TestZKFailoverController extends ClientBase {
    */
   @Test(timeout=15000)
   public void testFencingMustBeConfigured() throws Exception {
-    svc1 = Mockito.spy(svc1);
+    DummyHAService svc = Mockito.spy(cluster.getService(0));
     Mockito.doThrow(new BadFencingConfigurationException("no fencing"))
-        .when(svc1).checkFencingConfigured();
+        .when(svc).checkFencingConfigured();
     // Format the base dir, should succeed
-    assertEquals(0, runFC(svc1, "-formatZK"));
+    assertEquals(0, runFC(svc, "-formatZK"));
     // Try to run the actual FC, should fail without a fencer
     assertEquals(ZKFailoverController.ERR_CODE_NO_FENCER,
-        runFC(svc1));
+        runFC(svc));
   }
   
   /**
@@ -155,66 +204,50 @@ public class TestZKFailoverController extends ClientBase {
   @Test(timeout=15000)
   public void testAutoFailoverOnBadHealth() throws Exception {
     try {
-      setupFCs();
+      cluster.start();
+      DummyHAService svc1 = cluster.getService(1);
       
       
-      LOG.info("Faking svc1 unhealthy, should failover to svc2");
-      svc1.isHealthy = false;
-      LOG.info("Waiting for svc1 to enter standby state");
-      waitForHAState(svc1, HAServiceState.STANDBY);
-      waitForHAState(svc2, HAServiceState.ACTIVE);
+      LOG.info("Faking svc0 unhealthy, should failover to svc1");
+      cluster.setHealthy(0, false);
+      
+      LOG.info("Waiting for svc0 to enter standby state");
+      cluster.waitForHAState(0, HAServiceState.STANDBY);
+      cluster.waitForHAState(1, HAServiceState.ACTIVE);
   
   
-      LOG.info("Allowing svc1 to be healthy again, making svc2 unreachable " +
+      LOG.info("Allowing svc0 to be healthy again, making svc1 unreachable " +
           "and fail to gracefully go to standby");
           "and fail to gracefully go to standby");
-      svc1.isHealthy = true;
-      svc2.actUnreachable = true;
-      
-      // Allow fencing to succeed
-      Mockito.doReturn(true).when(svc2.fencer).fence(Mockito.same(svc2));
-      // Should fail back to svc1 at this point
-      waitForHAState(svc1, HAServiceState.ACTIVE);
-      // and fence svc2
-      Mockito.verify(svc2.fencer).fence(Mockito.same(svc2));
+      cluster.setUnreachable(1, true);
+      cluster.setHealthy(0, true);
+ 
+      // Should fail back to svc0 at this point
+      cluster.waitForHAState(0, HAServiceState.ACTIVE);
+      // and fence svc1
+      Mockito.verify(svc1.fencer).fence(Mockito.same(svc1));
     } finally {
-      stopFCs();
+      cluster.stop();
     }
   }
   
   
   @Test(timeout=15000)
   public void testAutoFailoverOnLostZKSession() throws Exception {
     try {
-      setupFCs();
+      cluster.start();
 
 
-      // Expire svc1, it should fail over to svc2
-      expireAndVerifyFailover(thr1, thr2);
+      // Expire svc0, it should fail over to svc1
+      cluster.expireAndVerifyFailover(0, 1);
       
       
-      // Expire svc2, it should fail back to svc1
-      expireAndVerifyFailover(thr2, thr1);
+      // Expire svc1, it should fail back to svc0
+      cluster.expireAndVerifyFailover(1, 0);
       
       
       LOG.info("======= Running test cases second time to test " +
       LOG.info("======= Running test cases second time to test " +
           "re-establishment =========");
           "re-establishment =========");
-      // Expire svc1, it should fail over to svc2
-      expireAndVerifyFailover(thr1, thr2);
+      // Expire svc0, it should fail over to svc1
+      cluster.expireAndVerifyFailover(0, 1);
       
       
-      // Expire svc2, it should fail back to svc1
-      expireAndVerifyFailover(thr2, thr1);
+      // Expire svc1, it should fail back to svc0
+      cluster.expireAndVerifyFailover(1, 0);
     } finally {
     } finally {
-      stopFCs();
-    }
-  }
-  
-  private void expireAndVerifyFailover(DummyZKFCThread fromThr,
-      DummyZKFCThread toThr) throws Exception {
-    DummyHAService fromSvc = fromThr.zkfc.localTarget;
-    DummyHAService toSvc = toThr.zkfc.localTarget;
-    
-    fromThr.zkfc.getElectorForTests().preventSessionReestablishmentForTests();
-    try {
-      expireActiveLockHolder(fromSvc);
-      
-      waitForHAState(fromSvc, HAServiceState.STANDBY);
-      waitForHAState(toSvc, HAServiceState.ACTIVE);
-    } finally {
-      fromThr.zkfc.getElectorForTests().allowSessionReestablishmentForTests();
+      cluster.stop();
     }
     }
   }
   }
 
 
@@ -225,33 +258,32 @@ public class TestZKFailoverController extends ClientBase {
   @Test(timeout=15000)
   public void testDontFailoverToUnhealthyNode() throws Exception {
     try {
-      setupFCs();
+      cluster.start();
 
 
-      // Make svc2 unhealthy, and wait for its FC to notice the bad health.
-      svc2.isHealthy = false;
-      waitForHealthState(thr2.zkfc,
-          HealthMonitor.State.SERVICE_UNHEALTHY);
+      // Make svc1 unhealthy, and wait for its FC to notice the bad health.
+      cluster.setHealthy(1, false);
+      cluster.waitForHealthState(1, HealthMonitor.State.SERVICE_UNHEALTHY);
       
       
-      // Expire svc1
-      thr1.zkfc.getElectorForTests().preventSessionReestablishmentForTests();
+      // Expire svc0
+      cluster.getElector(0).preventSessionReestablishmentForTests();
       try {
       try {
-        expireActiveLockHolder(svc1);
+        cluster.expireActiveLockHolder(0);
 
 
-        LOG.info("Expired svc1's ZK session. Waiting a second to give svc2" +
+        LOG.info("Expired svc0's ZK session. Waiting a second to give svc1" +
             " a chance to take the lock, if it is ever going to.");
             " a chance to take the lock, if it is ever going to.");
         Thread.sleep(1000);
         Thread.sleep(1000);
         
         
         // Ensure that no one holds the lock.
         // Ensure that no one holds the lock.
-        waitForActiveLockHolder(null);
+        cluster.waitForActiveLockHolder(null);
         
         
       } finally {
       } finally {
-        LOG.info("Allowing svc1's elector to re-establish its connection");
-        thr1.zkfc.getElectorForTests().allowSessionReestablishmentForTests();
+        LOG.info("Allowing svc0's elector to re-establish its connection");
+        cluster.getElector(0).allowSessionReestablishmentForTests();
       }
       }
-      // svc1 should get the lock again
-      waitForActiveLockHolder(svc1);
+      // svc0 should get the lock again
+      cluster.waitForActiveLockHolder(0);
     } finally {
     } finally {
-      stopFCs();
+      cluster.stop();
     }
     }
   }
   }
 
 
@@ -262,29 +294,38 @@ public class TestZKFailoverController extends ClientBase {
   @Test(timeout=15000)
   public void testBecomingActiveFails() throws Exception {
     try {
-      setupFCs();
+      cluster.start();
+      DummyHAService svc1 = cluster.getService(1);
       
       
-      LOG.info("Making svc2 fail to become active");
-      svc2.failToBecomeActive = true;
+      LOG.info("Making svc1 fail to become active");
+      cluster.setFailToBecomeActive(1, true);
       
       
-      LOG.info("Faking svc1 unhealthy, should NOT successfully " +
-          "failover to svc2");
-      svc1.isHealthy = false;
-      waitForHealthState(thr1.zkfc, State.SERVICE_UNHEALTHY);
-      waitForActiveLockHolder(null);
+      LOG.info("Faking svc0 unhealthy, should NOT successfully " +
+          "failover to svc1");
+      cluster.setHealthy(0, false);
+      cluster.waitForHealthState(0, State.SERVICE_UNHEALTHY);
+      cluster.waitForActiveLockHolder(null);
 
 
-      Mockito.verify(svc2.proxy).transitionToActive();
+      
+      Mockito.verify(svc1.proxy, Mockito.timeout(2000).atLeastOnce())
+        .transitionToActive(Mockito.<StateChangeRequestInfo>any());
 
 
-      waitForHAState(svc1, HAServiceState.STANDBY);
-      waitForHAState(svc2, HAServiceState.STANDBY);
+      cluster.waitForHAState(0, HAServiceState.STANDBY);
+      cluster.waitForHAState(1, HAServiceState.STANDBY);
+      
+      LOG.info("Faking svc0 healthy again, should go back to svc0");
+      cluster.setHealthy(0, true);
+      cluster.waitForHAState(0, HAServiceState.ACTIVE);
+      cluster.waitForHAState(1, HAServiceState.STANDBY);
+      cluster.waitForActiveLockHolder(0);
       
       
-      LOG.info("Faking svc1 healthy again, should go back to svc1");
-      svc1.isHealthy = true;
-      waitForHAState(svc1, HAServiceState.ACTIVE);
-      waitForHAState(svc2, HAServiceState.STANDBY);
-      waitForActiveLockHolder(svc1);
+      // Ensure that we can fail back to svc1  once it it is able
+      // to become active (e.g the admin has restarted it)
+      LOG.info("Allowing svc1 to become active, expiring svc0");
+      svc1.failToBecomeActive = false;
+      cluster.expireAndVerifyFailover(0, 1);
     } finally {
-      stopFCs();
+      cluster.stop();
     }
   }
   
   
@@ -296,27 +337,25 @@ public class TestZKFailoverController extends ClientBase {
   @Test(timeout=15000)
   public void testZooKeeperFailure() throws Exception {
     try {
-      setupFCs();
+      cluster.start();
 
 
       // Record initial ZK sessions
-      long session1 = thr1.zkfc.getElectorForTests().getZKSessionIdForTests();
-      long session2 = thr2.zkfc.getElectorForTests().getZKSessionIdForTests();
+      long session0 = cluster.getElector(0).getZKSessionIdForTests();
+      long session1 = cluster.getElector(1).getZKSessionIdForTests();
 
 
       LOG.info("====== Stopping ZK server");
       LOG.info("====== Stopping ZK server");
       stopServer();
       stopServer();
       waitForServerDown(hostPort, CONNECTION_TIMEOUT);
       waitForServerDown(hostPort, CONNECTION_TIMEOUT);
       
       
       LOG.info("====== Waiting for services to enter NEUTRAL mode");
       LOG.info("====== Waiting for services to enter NEUTRAL mode");
-      ActiveStandbyElectorTestUtil.waitForElectorState(ctx,
-          thr1.zkfc.getElectorForTests(),
+      cluster.waitForElectorState(0,
           ActiveStandbyElector.State.NEUTRAL);
-      ActiveStandbyElectorTestUtil.waitForElectorState(ctx,
-          thr2.zkfc.getElectorForTests(),
+      cluster.waitForElectorState(1,
           ActiveStandbyElector.State.NEUTRAL);
 
 
       LOG.info("====== Checking that the services didn't change HA state");
       LOG.info("====== Checking that the services didn't change HA state");
-      assertEquals(HAServiceState.ACTIVE, svc1.state);
-      assertEquals(HAServiceState.STANDBY, svc2.state);
+      assertEquals(HAServiceState.ACTIVE, cluster.getService(0).state);
+      assertEquals(HAServiceState.STANDBY, cluster.getService(1).state);
       
       
       LOG.info("====== Restarting server");
       LOG.info("====== Restarting server");
       startServer();
       startServer();
@@ -324,134 +363,224 @@ public class TestZKFailoverController extends ClientBase {
 
 
       // Nodes should go back to their original states, since they re-obtain
       // the same sessions.
-      ActiveStandbyElectorTestUtil.waitForElectorState(ctx,
-          thr1.zkfc.getElectorForTests(),
-          ActiveStandbyElector.State.ACTIVE);
-      ActiveStandbyElectorTestUtil.waitForElectorState(ctx,
-          thr2.zkfc.getElectorForTests(),
-          ActiveStandbyElector.State.STANDBY);
+      cluster.waitForElectorState(0, ActiveStandbyElector.State.ACTIVE);
+      cluster.waitForElectorState(1, ActiveStandbyElector.State.STANDBY);
       // Check HA states didn't change.
-      ActiveStandbyElectorTestUtil.waitForElectorState(ctx,
-          thr1.zkfc.getElectorForTests(),
-          ActiveStandbyElector.State.ACTIVE);
-      ActiveStandbyElectorTestUtil.waitForElectorState(ctx,
-          thr2.zkfc.getElectorForTests(),
-          ActiveStandbyElector.State.STANDBY);
+      cluster.waitForHAState(0, HAServiceState.ACTIVE);
+      cluster.waitForHAState(1, HAServiceState.STANDBY);
+
       // Check they re-used the same sessions and didn't spuriously reconnect
+      assertEquals(session0,
+          cluster.getElector(0).getZKSessionIdForTests());
       assertEquals(session1,
-          thr1.zkfc.getElectorForTests().getZKSessionIdForTests());
-      assertEquals(session2,
-          thr2.zkfc.getElectorForTests().getZKSessionIdForTests());
+          cluster.getElector(1).getZKSessionIdForTests());
     } finally {
-      stopFCs();
+      cluster.stop();
     }
   }
-
-  /**
-   * Expire the ZK session of the given service. This requires
-   * (and asserts) that the given service be the current active.
-   * @throws NoNodeException if no service holds the lock
-   */
-  private void expireActiveLockHolder(DummyHAService expectedActive)
-      throws NoNodeException {
-    ZooKeeperServer zks = getServer(serverFactory);
-    Stat stat = new Stat();
-    byte[] data = zks.getZKDatabase().getData(
-        ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT + "/" +
-        ActiveStandbyElector.LOCK_FILENAME, stat, null);
-    
-    assertArrayEquals(Ints.toByteArray(expectedActive.index), data);
-    long session = stat.getEphemeralOwner();
-    LOG.info("Expiring svc " + expectedActive + "'s zookeeper session " + session);
-    zks.closeSession(session);
-  }
   
   
   /**
-   * Wait for the given HA service to enter the given HA state.
+   * Test that the ZKFC can gracefully cede its active status.
    */
-  private void waitForHAState(DummyHAService svc, HAServiceState state)
-      throws Exception {
-    while (svc.state != state) {
-      ctx.checkException();
-      Thread.sleep(50);
+  @Test(timeout=15000)
+  public void testCedeActive() throws Exception {
+    try {
+      cluster.start();
+      DummyZKFC zkfc = cluster.getZkfc(0);
+      // It should be in active to start.
+      assertEquals(ActiveStandbyElector.State.ACTIVE,
+          zkfc.getElectorForTests().getStateForTests());
+
+      // Ask it to cede active for 3 seconds. It should respond promptly
+      // (i.e. the RPC itself should not take 3 seconds!)
+      ZKFCProtocol proxy = zkfc.getLocalTarget().getZKFCProxy(conf, 5000);
+      long st = System.currentTimeMillis();
+      proxy.cedeActive(3000);
+      long et = System.currentTimeMillis();
+      assertTrue("RPC to cedeActive took " + (et - st) + " ms",
+          et - st < 1000);
+      
+      // Should be in "INIT" state since it's not in the election
+      // at this point.
+      assertEquals(ActiveStandbyElector.State.INIT,
+          zkfc.getElectorForTests().getStateForTests());
+
+      // After the prescribed 3 seconds, should go into STANDBY state,
+      // since the other node in the cluster would have taken ACTIVE.
+      cluster.waitForElectorState(0, ActiveStandbyElector.State.STANDBY);
+      long et2 = System.currentTimeMillis();
+      assertTrue("Should take ~3 seconds to rejoin. Only took " + (et2 - et) +
+          "ms before rejoining.",
+          et2 - et > 2800);      
+    } finally {
+      cluster.stop();
     }
   }
   
   
-  /**
-   * Wait for the ZKFC to be notified of a change in health state.
-   */
-  private void waitForHealthState(DummyZKFC zkfc, State state)
-      throws Exception {
-    while (zkfc.getLastHealthState() != state) {
-      ctx.checkException();
-      Thread.sleep(50);
+  @Test(timeout=15000)
+  public void testGracefulFailover() throws Exception {
+    try {
+      cluster.start();
+
+      cluster.waitForActiveLockHolder(0);
+      cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
+      cluster.waitForActiveLockHolder(1);
+      cluster.getService(0).getZKFCProxy(conf, 5000).gracefulFailover();
+      cluster.waitForActiveLockHolder(0);
+      
+      assertEquals(0, cluster.getService(0).fenceCount);
+      assertEquals(0, cluster.getService(1).fenceCount);
+    } finally {
+      cluster.stop();
     }
   }
+  
+  @Test(timeout=15000)
+  public void testGracefulFailoverToUnhealthy() throws Exception {
+    try {
+      cluster.start();
 
 
-  /**
-   * Wait for the given HA service to become the active lock holder.
-   * If the passed svc is null, waits for there to be no active
-   * lock holder.
-   */
-  private void waitForActiveLockHolder(DummyHAService svc)
-      throws Exception {
-    ZooKeeperServer zks = getServer(serverFactory);
-    ActiveStandbyElectorTestUtil.waitForActiveLockData(ctx, zks,
-        ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT,
-        (svc == null) ? null : Ints.toByteArray(svc.index));
+      cluster.waitForActiveLockHolder(0);
+
+      // Mark it unhealthy, wait for it to exit election
+      cluster.setHealthy(1, false);
+      cluster.waitForElectorState(1, ActiveStandbyElector.State.INIT);
+      
+      // Ask for failover, it should fail, because it's unhealthy
+      try {
+        cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
+        fail("Did not fail to graceful failover to unhealthy service!");
+      } catch (ServiceFailedException sfe) {
+        GenericTestUtils.assertExceptionContains(
+            cluster.getService(1).toString() + 
+            " is not currently healthy.", sfe);
+      }
+    } finally {
+      cluster.stop();
+    }
   }
+  
+  @Test(timeout=15000)
+  public void testGracefulFailoverFailBecomingActive() throws Exception {
+    try {
+      cluster.start();
 
 
+      cluster.waitForActiveLockHolder(0);
+      cluster.setFailToBecomeActive(1, true);
+      
+      // Ask for failover, it should fail and report back to user.
+      try {
+        cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
+        fail("Did not fail to graceful failover when target failed " +
+            "to become active!");
+      } catch (ServiceFailedException sfe) {
+        GenericTestUtils.assertExceptionContains(
+            "Couldn't make " + cluster.getService(1) + " active", sfe);
+        GenericTestUtils.assertExceptionContains(
+            "injected failure", sfe);
+      }
+      
+      // No fencing
+      assertEquals(0, cluster.getService(0).fenceCount);
+      assertEquals(0, cluster.getService(1).fenceCount);
 
 
-  private int runFC(DummyHAService target, String ... args) throws Exception {
-    DummyZKFC zkfc = new DummyZKFC(target);
-    zkfc.setConf(conf);
-    return zkfc.run(args);
+      // Service 0 should go back to being active after the failed failover
+      cluster.waitForActiveLockHolder(0);
+    } finally {
+      cluster.stop();
+    }
   }
   }
 
 
-  /**
-   * Test-thread which runs a ZK Failover Controller corresponding
-   * to a given dummy service.
-   */
-  private class DummyZKFCThread extends TestingThread {
-    private final DummyZKFC zkfc;
+  @Test(timeout=15000)
+  public void testGracefulFailoverFailBecomingStandby() throws Exception {
+    try {
+      cluster.start();
 
 
-    public DummyZKFCThread(TestContext ctx, DummyHAService svc) {
-      super(ctx);
-      this.zkfc = new DummyZKFC(svc);
-      zkfc.setConf(conf);
+      cluster.waitForActiveLockHolder(0);
+      
+      // Ask for failover when old node fails to transition to standby.
+      // This should trigger fencing, since the cedeActive() command
+      // still works, but leaves the breadcrumb in place.
+      cluster.setFailToBecomeStandby(0, true);
+      cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
+
+      // Check that the old node was fenced
+      assertEquals(1, cluster.getService(0).fenceCount);
+    } finally {
+      cluster.stop();
     }
     }
+  }
+  
+  @Test(timeout=15000)
+  public void testGracefulFailoverFailBecomingStandbyAndFailFence()
+      throws Exception {
+    try {
+      cluster.start();
+
+      cluster.waitForActiveLockHolder(0);
+      
+      // Ask for failover when old node fails to transition to standby.
+      // This should trigger fencing, since the cedeActive() command
+      // still works, but leaves the breadcrumb in place.
+      cluster.setFailToBecomeStandby(0, true);
+      cluster.setFailToFence(0, true);
 
 
-    @Override
-    public void doWork() throws Exception {
       try {
       try {
-        assertEquals(0, zkfc.run(new String[0]));
-      } catch (InterruptedException ie) {
-        // Interrupted by main thread, that's OK.
+        cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
+        fail("Failover should have failed when old node wont fence");
+      } catch (ServiceFailedException sfe) {
+        GenericTestUtils.assertExceptionContains(
+            "Unable to fence " + cluster.getService(0), sfe);
       }
       }
+    } finally {
+      cluster.stop();
     }
     }
   }
   }
-  
-  private static class DummyZKFC extends ZKFailoverController {
-    private final DummyHAService localTarget;
-    
-    public DummyZKFC(DummyHAService localTarget) {
-      this.localTarget = localTarget;
-    }
 
 
-    @Override
-    protected byte[] targetToData(HAServiceTarget target) {
-      return Ints.toByteArray(((DummyHAService)target).index);
-    }
-    
-    @Override
-    protected HAServiceTarget dataToTarget(byte[] data) {
-      int index = Ints.fromByteArray(data);
-      return DummyHAService.getInstance(index);
-    }
+  /**
+   * Test which exercises all of the inputs into ZKFC. This is particularly
+   * useful for running under jcarder to check for lock order violations.
+   */
+  @Test(timeout=30000)
+  public void testOneOfEverything() throws Exception {
+    try {
+      cluster.start();
+      
+      // Failover by session expiration
+      LOG.info("====== Failing over by session expiration");
+      cluster.expireAndVerifyFailover(0, 1);
+      cluster.expireAndVerifyFailover(1, 0);
+      
+      // Restart ZK
+      LOG.info("====== Restarting server");
+      stopServer();
+      waitForServerDown(hostPort, CONNECTION_TIMEOUT);
+      startServer();
+      waitForServerUp(hostPort, CONNECTION_TIMEOUT);
 
 
-    @Override
-    protected HAServiceTarget getLocalTarget() {
-      return localTarget;
+      // Failover by bad health
+      cluster.setHealthy(0, false);
+      cluster.waitForHAState(0, HAServiceState.STANDBY);
+      cluster.waitForHAState(1, HAServiceState.ACTIVE);
+      cluster.setHealthy(1, true);
+      cluster.setHealthy(0, false);
+      cluster.waitForHAState(1, HAServiceState.ACTIVE);
+      cluster.waitForHAState(0, HAServiceState.STANDBY);
+      cluster.setHealthy(0, true);
+      
+      cluster.waitForHealthState(0, State.SERVICE_HEALTHY);
+      
+      // Graceful failovers
+      cluster.getZkfc(1).gracefulFailoverToYou();
+      cluster.getZkfc(0).gracefulFailoverToYou();
+    } finally {
+      cluster.stop();
     }
     }
   }
   }
+
+  private int runFC(DummyHAService target, String ... args) throws Exception {
+    DummyZKFC zkfc = new DummyZKFC(conf, target);
+    return zkfc.run(args);
+  }
+
 }
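
The digest scheme exercised throughout this file stores only a SHA-1 hash of user:password in the znode ACL, while clients authenticate with the clear-text pair. A minimal sketch of deriving such an ACL string, mirroring the test's static initializer (constant values illustrative):

    // generateDigest() throws NoSuchAlgorithmException; the test wraps it
    // in a RuntimeException inside a static initializer.
    String userPass = "test-user:test-password";
    String hash = DigestAuthenticationProvider.generateDigest(userPass);
    String acl = "digest:" + hash + ":rwcda";  // full perms for this identity
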

+ 156 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverControllerStress.java

@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import java.util.Random;
+
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+/**
+ * Stress test for ZKFailoverController.
+ * Starts multiple ZKFCs for dummy services, and then performs many automatic
+ * failovers. While doing so, ensures that a fake "shared resource"
+ * (simulating the shared edits dir) is only owned by one service at a time. 
+ */
+public class TestZKFailoverControllerStress extends ClientBaseWithFixes {
+  
+  private static final int STRESS_RUNTIME_SECS = 30;
+  private static final int EXTRA_TIMEOUT_SECS = 10;
+  
+  private Configuration conf;
+  private MiniZKFCCluster cluster;
+
+  @Before
+  public void setupConfAndServices() throws Exception {
+    conf = new Configuration();
+    conf.set(ZKFailoverController.ZK_QUORUM_KEY, hostPort);
+    this.cluster = new MiniZKFCCluster(conf, getServer(serverFactory));
+  }
+  
+  @After
+  public void stopCluster() throws Exception {
+    cluster.stop();
+  }
+
+  /**
+   * Simply fail back and forth between two services for the
+   * configured amount of time, by expiring their ZK sessions.
+   */
+  @Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
+  public void testExpireBackAndForth() throws Exception {
+    cluster.start();
+    long st = System.currentTimeMillis();
+    long runFor = STRESS_RUNTIME_SECS * 1000;
+
+    int i = 0;
+    while (System.currentTimeMillis() - st < runFor) {
+      // flip flop the services back and forth
+      int from = i % 2;
+      int to = (i + 1) % 2;
+
+      // Expire one service, it should fail over to the other
+      LOG.info("Failing over via expiration from " + from + " to " + to);
+      cluster.expireAndVerifyFailover(from, to);
+
+      i++;
+    }
+  }
+  
+  /**
+   * Randomly expire the ZK sessions of the two ZKFCs. This differs
+   * from the above test in that it is not a controlled failover -
+   * we just do random expirations and expect neither one to ever
+   * generate fatal exceptions.
+   */
+  @Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
+  public void testRandomExpirations() throws Exception {
+    cluster.start();
+    long st = System.currentTimeMillis();
+    long runFor = STRESS_RUNTIME_SECS * 1000;
+
+    Random r = new Random();
+    while (System.currentTimeMillis() - st < runFor) {
+      cluster.getTestContext().checkException();
+      int targetIdx = r.nextInt(2);
+      ActiveStandbyElector target = cluster.getElector(targetIdx);
+      long sessId = target.getZKSessionIdForTests();
+      if (sessId != -1) {
+        LOG.info(String.format("Expiring session %x for svc %d",
+            sessId, targetIdx));
+        getServer(serverFactory).closeSession(sessId);
+      }
+      Thread.sleep(r.nextInt(300));
+    }
+  }
+  
+  /**
+   * Have the services fail their health checks half the time,
+   * causing the master role to bounce back and forth in the
+   * cluster. Meanwhile, ZK is made to disconnect clients every
+   * 50ms, to exercise the retry code and failures to become active.
+   */
+  @Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
+  public void testRandomHealthAndDisconnects() throws Exception {
+    long runFor = STRESS_RUNTIME_SECS * 1000;
+    Mockito.doAnswer(new RandomlyThrow(0))
+        .when(cluster.getService(0).proxy).monitorHealth();
+    Mockito.doAnswer(new RandomlyThrow(1))
+        .when(cluster.getService(1).proxy).monitorHealth();
+    ActiveStandbyElector.NUM_RETRIES = 100;
+    
+    // Don't start until after the above mocking. Otherwise we can get
+    // Mockito errors if the HM calls the proxy in the middle of
+    // setting up the mock.
+    cluster.start();
+    
+    long st = System.currentTimeMillis();
+    while (System.currentTimeMillis() - st < runFor) {
+      cluster.getTestContext().checkException();
+      serverFactory.closeAll();
+      Thread.sleep(50);
+    }
+  }
+  
+  
+  /**
+   * Randomly throw an exception half the time the method is called
+   */
+  @SuppressWarnings("rawtypes")
+  private static class RandomlyThrow implements Answer {
+    private Random r = new Random();
+    private final int svcIdx;
+    public RandomlyThrow(int svcIdx) {
+      this.svcIdx = svcIdx;
+    }
+    @Override
+    public Object answer(InvocationOnMock invocation) throws Throwable {
+      if (r.nextBoolean()) {
+        LOG.info("Throwing an exception for svc " + svcIdx);
+        throw new HealthCheckFailedException("random failure");
+      }
+      return invocation.callRealMethod();
+    }
+  }
+}
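
A minimal, self-contained sketch of the Mockito doAnswer() fault-injection
pattern that RandomlyThrow uses above. The FlakyService interface and every
name below are hypothetical illustrations, not part of this commit:

  import java.util.Random;
  import org.mockito.Mockito;
  import org.mockito.invocation.InvocationOnMock;
  import org.mockito.stubbing.Answer;

  public class RandomFaultInjectionSketch {
    // Hypothetical stand-in for the health-check interface being stubbed.
    interface FlakyService { void monitorHealth() throws Exception; }

    public static void main(String[] args) throws Exception {
      final Random r = new Random();
      FlakyService svc = Mockito.mock(FlakyService.class);
      // Fail roughly half of all health checks, as the stress test does.
      Mockito.doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
          if (r.nextBoolean()) {
            throw new Exception("random failure");
          }
          return null; // a stubbed void method returns null from its Answer
        }
      }).when(svc).monitorHealth();

      int failures = 0;
      for (int i = 0; i < 1000; i++) {
        try { svc.monitorHealth(); } catch (Exception e) { failures++; }
      }
      System.out.println("Injected failures: " + failures + "/1000");
    }
  }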

+ 13 - 6
hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapreduce/ClusterAspect.aj → hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ZKFCTestUtil.java

@@ -15,13 +15,20 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.mapreduce;
+package org.apache.hadoop.ha;
 
-import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
+import org.apache.hadoop.test.MultithreadedTestUtil;
 
-public privileged aspect ClusterAspect {
-
-  public ClientProtocol Cluster.getClientProtocol() {
-    return client;
+public class ZKFCTestUtil {
+  
+  public static void waitForHealthState(ZKFailoverController zkfc,
+      HealthMonitor.State state,
+      MultithreadedTestUtil.TestContext ctx) throws Exception {
+    while (zkfc.getLastHealthState() != state) {
+      if (ctx != null) {
+        ctx.checkException();
+      }
+      Thread.sleep(50);
+    }
   }
 }
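
A hypothetical usage sketch of the helper above (the cluster accessors are the
ones that appear in the ZKFC tests earlier in this diff): it polls every 50ms,
rethrowing any failure recorded in the test context, until the ZKFC reports
the desired health state.

  // Assumes a MiniZKFCCluster field named "cluster", as in the tests above.
  ZKFCTestUtil.waitForHealthState(
      cluster.getZkfc(0),                   // the ZKFC under test
      HealthMonitor.State.SERVICE_HEALTHY,  // state to wait for
      cluster.getTestContext());            // may be null if there is no context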

+ 3 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/lib/TestStaticUserWebFilter.java

@@ -27,6 +27,7 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletRequestWrapper;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.http.lib.StaticUserWebFilter.StaticUserFilter;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
@@ -36,7 +37,7 @@ public class TestStaticUserWebFilter {
   private FilterConfig mockConfig(String username) {
     FilterConfig mock = Mockito.mock(FilterConfig.class);
     Mockito.doReturn(username).when(mock).getInitParameter(
-        StaticUserWebFilter.USERNAME_KEY);
+        CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER);
     return mock;
   }
 
@@ -73,7 +74,7 @@
   @Test
   public void testConfiguration() {
     Configuration conf = new Configuration();
-    conf.set(StaticUserWebFilter.USERNAME_KEY, "joe");
+    conf.set(CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER, "joe");
     assertEquals("joe", StaticUserWebFilter.getUsernameFromConf(conf));
   }
 

+ 24 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java

@@ -241,6 +241,30 @@ public class TestText extends TestCase {
     Text.validateUTF8(utf8, 0, length);
   }
 
+  public void testClear() throws Exception {
+    // Test lengths on an empty text object
+    Text text = new Text();
+    assertEquals(
+        "Actual string on an empty text object must be an empty string",
+        "", text.toString());
+    assertEquals("Underlying byte array length must be zero",
+        0, text.getBytes().length);
+    assertEquals("String's length must be zero",
+        0, text.getLength());
+
+    // Test if clear works as intended
+    text = new Text("abcd\u20acbdcd\u20ac");
+    int len = text.getLength();
+    text.clear();
+    assertEquals("String must be empty after clear()",
+        "", text.toString());
+    assertTrue(
+        "Length of the byte array must not decrease after clear()",
+        text.getBytes().length >= len);
+    assertEquals("Length of the string must be reset to 0 after clear()",
+        0, text.getLength());
+  }
+
   public void testTextText() throws CharacterCodingException {
     Text a=new Text("abc");
     Text b=new Text("a");

+ 19 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java

@@ -18,11 +18,15 @@
 
 package org.apache.hadoop.security.token;
 
+import static junit.framework.Assert.assertEquals;
+
 import java.io.*;
 import java.util.Arrays;
 
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
+import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager;
 
 import junit.framework.TestCase;
 
@@ -94,5 +98,20 @@ public class TestToken extends TestCase {
       checkUrlSafe(encode);
     }
   }
+  
+  public void testDecodeIdentifier() throws IOException {
+    TestDelegationTokenSecretManager secretManager =
+      new TestDelegationTokenSecretManager(0, 0, 0, 0);
+    secretManager.startThreads();
+    TestDelegationTokenIdentifier id = new TestDelegationTokenIdentifier(
+        new Text("owner"), new Text("renewer"), new Text("realUser"));
+    
+    Token<TestDelegationTokenIdentifier> token =
+      new Token<TestDelegationTokenIdentifier>(id, secretManager);
+    TokenIdentifier idCopy = token.decodeIdentifier();
+    
+    assertNotSame(id, idCopy);
+    assertEquals(id, idCopy);
+  }
 
 }

+ 2 - 0
hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier

@@ -0,0 +1,2 @@
+org.apache.hadoop.ipc.TestSaslRPC$TestTokenIdentifier
+org.apache.hadoop.security.token.delegation.TestDelegationToken$TestDelegationTokenIdentifier
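
The two entries above register test TokenIdentifier implementations for JDK
service discovery, which is presumably what lets testDecodeIdentifier() above
turn raw token bytes back into a typed identifier. A minimal sketch of the
underlying mechanism, assuming only standard java.util.ServiceLoader behavior
(the class below is an illustration, not Hadoop code):

  import java.util.ServiceLoader;
  import org.apache.hadoop.security.token.TokenIdentifier;

  public class ListTokenIdentifiers {
    public static void main(String[] args) {
      // Each class named in a META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
      // resource on the classpath is instantiated via its no-arg constructor.
      for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) {
        System.out.println(id.getKind() + " -> " + id.getClass().getName());
      }
    }
  }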

+ 0 - 400
hadoop-common-project/hadoop-common/src/test/system/aop/org/apache/hadoop/test/system/DaemonProtocolAspect.aj

@@ -1,400 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.File;
-import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
-import java.util.HashMap;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.Properties;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Shell.ShellCommandExecutor;
-import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Default DaemonProtocolAspect which is used to provide a default implementation
- * for all the common daemon methods. If a daemon requires a more specialized
- * version of a method, it is the responsibility of the DaemonClient to introduce
- * the same in the woven classes.
- * 
- */
-public aspect DaemonProtocolAspect {
-
-  private boolean DaemonProtocol.ready;
-  
-  @SuppressWarnings("unchecked")
-  private HashMap<Object, List<ControlAction>> DaemonProtocol.actions = 
-    new HashMap<Object, List<ControlAction>>();
-  private static final Log LOG = LogFactory.getLog(
-      DaemonProtocolAspect.class.getName());
-
-  private static FsPermission defaultPermission = new FsPermission(
-     FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.READ_WRITE);
-
-  /**
-   * Set if the daemon process is ready or not. A concrete daemon protocol should
-   * implement pointcuts to determine when the daemon is ready and use the
-   * setter to set the ready state.
-   * 
-   * @param ready
-   *          true if the Daemon is ready.
-   */
-  public void DaemonProtocol.setReady(boolean ready) {
-    this.ready = ready;
-  }
-
-  /**
-   * Checks if the daemon process is alive or not.
-   * 
-   * @throws IOException
-   *           if daemon is not alive.
-   */
-  public void DaemonProtocol.ping() throws IOException {
-  }
-
-  /**
-   * Checks if the daemon process is ready to accept RPC connections after it
-   * finishes initialization. <br/>
-   * 
-   * @return true if ready to accept connection.
-   * 
-   * @throws IOException
-   */
-  public boolean DaemonProtocol.isReady() throws IOException {
-    return ready;
-  }
-
-  /**
-   * Returns the process related information regarding the daemon process. <br/>
-   * 
-   * @return process information.
-   * @throws IOException
-   */
-  public ProcessInfo DaemonProtocol.getProcessInfo() throws IOException {
-    int activeThreadCount = Thread.activeCount();
-    long currentTime = System.currentTimeMillis();
-    long maxmem = Runtime.getRuntime().maxMemory();
-    long freemem = Runtime.getRuntime().freeMemory();
-    long totalmem = Runtime.getRuntime().totalMemory();
-    Map<String, String> envMap = System.getenv();
-    Properties sysProps = System.getProperties();
-    Map<String, String> props = new HashMap<String, String>();
-    for (Map.Entry entry : sysProps.entrySet()) {
-      props.put((String) entry.getKey(), (String) entry.getValue());
-    }
-    ProcessInfo info = new ProcessInfoImpl(activeThreadCount, currentTime,
-        freemem, maxmem, totalmem, envMap, props);
-    return info;
-  }
-
-  public void DaemonProtocol.enable(List<Enum<?>> faults) throws IOException {
-  }
-
-  public void DaemonProtocol.disableAll() throws IOException {
-  }
-
-  public abstract Configuration DaemonProtocol.getDaemonConf()
-    throws IOException;
-
-  public FileStatus DaemonProtocol.getFileStatus(String path, boolean local) 
-    throws IOException {
-    Path p = new Path(path);
-    FileSystem fs = getFS(p, local);
-    p.makeQualified(fs);
-    FileStatus fileStatus = fs.getFileStatus(p);
-    return cloneFileStatus(fileStatus);
-  }
-  
-  /**
-   * Create a file with given permissions in a file system.
-   * @param path - source path where the file has to be created.
-   * @param fileName - file name.
-   * @param permission - file permissions.
-   * @param local - indicates whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void DaemonProtocol.createFile(String path, String fileName, 
-     FsPermission permission, boolean local) throws IOException {
-    Path p = new Path(path); 
-    FileSystem fs = getFS(p, local);
-    Path filePath = new Path(path, fileName);
-    fs.create(filePath);
-    if (permission == null) {
-      fs.setPermission(filePath, defaultPermission);
-    } else {
-      fs.setPermission(filePath, permission);
-    }
-    fs.close();
-  }
-
-  /**
-   * Create a folder with given permissions in a file system.
-   * @param path - source path where the folder has to be created.
-   * @param folderName - folder name.
-   * @param permission - folder permissions.
-   * @param local - indicates whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void DaemonProtocol.createFolder(String path, String folderName, 
-     FsPermission permission, boolean local) throws IOException {
-    Path p = new Path(path);
-    FileSystem fs = getFS(p, local);
-    Path folderPath = new Path(path, folderName);
-    fs.mkdirs(folderPath);
-    if (permission == null) {
-      fs.setPermission(folderPath, defaultPermission);
-    } else {
-      fs.setPermission(folderPath, permission);
-    }
-    fs.close();
-  }
-
-  public FileStatus[] DaemonProtocol.listStatus(String path, boolean local) 
-    throws IOException {
-    Path p = new Path(path);
-    FileSystem fs = getFS(p, local);
-    FileStatus[] status = fs.listStatus(p);
-    if (status != null) {
-      FileStatus[] result = new FileStatus[status.length];
-      int i = 0;
-      for (FileStatus fileStatus : status) {
-        result[i++] = cloneFileStatus(fileStatus);
-      }
-      return result;
-    }
-    return status;
-  }
-
-  /**
-   * FileStatus object may not be serializable. Clone it into raw FileStatus 
-   * object.
-   */
-  private FileStatus DaemonProtocol.cloneFileStatus(FileStatus fileStatus) {
-    return new FileStatus(fileStatus.getLen(),
-        fileStatus.isDir(),
-        fileStatus.getReplication(),
-        fileStatus.getBlockSize(),
-        fileStatus.getModificationTime(),
-        fileStatus.getAccessTime(),
-        fileStatus.getPermission(),
-        fileStatus.getOwner(),
-        fileStatus.getGroup(),
-        fileStatus.getPath());
-  }
-
-  private FileSystem DaemonProtocol.getFS(final Path path, final boolean local)
-      throws IOException {
-    FileSystem ret = null;
-    try {
-      ret = UserGroupInformation.getLoginUser().doAs (
-          new PrivilegedExceptionAction<FileSystem>() {
-            public FileSystem run() throws IOException {
-              FileSystem fs = null;
-              if (local) {
-                fs = FileSystem.getLocal(getDaemonConf());
-              } else {
-                fs = path.getFileSystem(getDaemonConf());
-              }
-              return fs;
-            }
-          });
-    } catch (InterruptedException ie) {
-    }
-    return ret;
-  }
-  
-  @SuppressWarnings("unchecked")
-  public ControlAction[] DaemonProtocol.getActions(Writable key) 
-    throws IOException {
-    synchronized (actions) {
-      List<ControlAction> actionList = actions.get(key);
-      if(actionList == null) {
-        return new ControlAction[0];
-      } else {
-        return (ControlAction[]) actionList.toArray(new ControlAction[actionList
-                                                                      .size()]);
-      }
-    }
-  }
-
-
-  @SuppressWarnings("unchecked")
-  public void DaemonProtocol.sendAction(ControlAction action) 
-      throws IOException {
-    synchronized (actions) {
-      List<ControlAction> actionList = actions.get(action.getTarget());
-      if(actionList == null) {
-        actionList = new ArrayList<ControlAction>();
-        actions.put(action.getTarget(), actionList);
-      }
-      actionList.add(action);
-    } 
-  }
- 
-  @SuppressWarnings("unchecked")
-  public boolean DaemonProtocol.isActionPending(ControlAction action) 
-    throws IOException{
-    synchronized (actions) {
-      List<ControlAction> actionList = actions.get(action.getTarget());
-      if(actionList == null) {
-        return false;
-      } else {
-        return actionList.contains(action);
-      }
-    }
-  }
-  
-  
-  @SuppressWarnings("unchecked")
-  public void DaemonProtocol.removeAction(ControlAction action) 
-    throws IOException {
-    synchronized (actions) {
-      List<ControlAction> actionList = actions.get(action.getTarget());
-      if(actionList == null) {
-        return;
-      } else {
-        actionList.remove(action);
-      }
-    }
-  }
-  
-  public void DaemonProtocol.clearActions() throws IOException {
-    synchronized (actions) {
-      actions.clear();
-    }
-  }
-
-  public String DaemonProtocol.getFilePattern() {
-    // We use the system property hadoop.log.file to get the
-    // pattern to use in the search.
-    String logDir = System.getProperty("hadoop.log.dir");
-    String daemonLogPattern = System.getProperty("hadoop.log.file");
-    if (daemonLogPattern == null || daemonLogPattern.isEmpty()) {
-      return "*";
-    }
-    return  logDir+File.separator+daemonLogPattern+"*";
-  }
-
-  public int DaemonProtocol.getNumberOfMatchesInLogFile(String pattern,
-      String[] list) throws IOException {
-    StringBuffer filePattern = new StringBuffer(getFilePattern());    
-    String[] cmd = null;
-    if (list != null) {
-      StringBuffer filterExpPattern = new StringBuffer();
-      int index=0;
-      for (String excludeExp : list) {
-        if (index++ < list.length -1) {
-           filterExpPattern.append("grep -v " + excludeExp + " | ");
-        } else {
-           filterExpPattern.append("grep -v " + excludeExp + " | wc -l");
-        }
-      }
-      cmd = new String[] {
-                "bash",
-                "-c",
-                "grep "
-                + pattern + " " + filePattern + " | "
-                + filterExpPattern};
-    } else {
-      cmd = new String[] {
-                "bash",
-                "-c",
-                "grep -c "
-                + pattern + " " + filePattern
-                + " | awk -F: '{s+=$2} END {print s}'" };    
-    }
-    ShellCommandExecutor shexec = new ShellCommandExecutor(cmd);
-    shexec.execute();
-    String output = shexec.getOutput();
-    return Integer.parseInt(output.replaceAll("\n", "").trim());
-  }
-
-  /**
-   * This method is used for suspending the process.
-   * @param pid process id
-   * @throws IOException if an I/O error occurs.
-   * @return true if process is suspended otherwise false.
-   */
-  public boolean DaemonProtocol.suspendProcess(String pid) throws IOException {
-    String suspendCmd = getDaemonConf().get("test.system.hdrc.suspend.cmd",
-        "kill -SIGSTOP");
-    String [] command = {"bash", "-c", suspendCmd + " " + pid};
-    ShellCommandExecutor shexec = new ShellCommandExecutor(command);
-    try {
-      shexec.execute();
-    } catch (Shell.ExitCodeException e) {
-      LOG.warn("suspended process throws an exitcode "
-          + "exception for not being suspended the given process id.");
-      return false;
-    }
-    LOG.info("The suspend process command is :"
-        + shexec.toString()
-        + " and the output for the command is "
-        + shexec.getOutput());
-    return true;
-  }
-
-  /**
-   * This method is used for resuming the process.
-   * @param pid process id of the suspended process.
-   * @throws IOException if an I/O error occurs.
-   * @return true if the suspended process is resumed, otherwise false.
-   */
-  public boolean DaemonProtocol.resumeProcess(String pid) throws IOException {
-    String resumeCmd = getDaemonConf().get("test.system.hdrc.resume.cmd",
-        "kill -SIGCONT");
-    String [] command = {"bash", "-c", resumeCmd + " " + pid};
-    ShellCommandExecutor shexec = new ShellCommandExecutor(command);
-    try {
-      shexec.execute();
-    } catch(Shell.ExitCodeException e) {
-        LOG.warn("Resume process throws an exitcode "
-          + "exception for not being resumed the given process id.");
-      return false;
-    }
-    LOG.info("The resume process command is :"
-        + shexec.toString()
-        + " and the output for the command is "
-        + shexec.getOutput());
-    return true;
-  }
-
-  private String DaemonProtocol.user = null;
-  
-  public String DaemonProtocol.getDaemonUser() {
-    return user;
-  }
-  
-  public void DaemonProtocol.setUser(String user) {
-    this.user = user;
-  }
-}
-

+ 0 - 41
hadoop-common-project/hadoop-common/src/test/system/c++/runAs/Makefile.in

@@ -1,41 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-OBJS=main.o runAs.o
-CC=@CC@
-CFLAGS = @CFLAGS@
-BINARY=runAs
-installdir = @prefix@
-
-all: $(OBJS)
-	$(CC) $(CFLAG) -o $(BINARY) $(OBJS)
-
-main.o: runAs.o main.c
-	$(CC) $(CFLAG) -o main.o -c main.c
-
-runAs.o: runAs.h runAs.c
-	$(CC) $(CFLAG) -o runAs.o -c runAs.c
-
-clean:
-	rm -rf $(BINARY) $(OBJS) $(TESTOBJS)
-
-install: all
-	cp $(BINARY) $(installdir)
-
-uninstall:
-	rm -rf $(installdir)/$(BINARY)
-	rm -rf $(BINARY)

+ 0 - 5117
hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure

@@ -1,5117 +0,0 @@
-#! /bin/sh
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.65 for runAs 0.1.
-#
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
-# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
-# Inc.
-#
-# This configure script is free software; the Free Software Foundation
-# gives unlimited permission to copy, distribute and modify it.
-## -------------------- ##
-## M4sh Initialization. ##
-## -------------------- ##
-
-# Be more Bourne compatible
-DUALCASE=1; export DUALCASE # for MKS sh
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
-  emulate sh
-  NULLCMD=:
-  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
-  # is contrary to our usage.  Disable this feature.
-  alias -g '${1+"$@"}'='"$@"'
-  setopt NO_GLOB_SUBST
-else
-  case `(set -o) 2>/dev/null` in #(
-  *posix*) :
-    set -o posix ;; #(
-  *) :
-     ;;
-esac
-fi
-
-
-as_nl='
-'
-export as_nl
-# Printing a long string crashes Solaris 7 /usr/bin/printf.
-as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
-# Prefer a ksh shell builtin over an external printf program on Solaris,
-# but without wasting forks for bash or zsh.
-if test -z "$BASH_VERSION$ZSH_VERSION" \
-    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
-  as_echo='print -r --'
-  as_echo_n='print -rn --'
-elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
-  as_echo='printf %s\n'
-  as_echo_n='printf %s'
-else
-  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
-    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
-    as_echo_n='/usr/ucb/echo -n'
-  else
-    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
-    as_echo_n_body='eval
-      arg=$1;
-      case $arg in #(
-      *"$as_nl"*)
-	expr "X$arg" : "X\\(.*\\)$as_nl";
-	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
-      esac;
-      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
-    '
-    export as_echo_n_body
-    as_echo_n='sh -c $as_echo_n_body as_echo'
-  fi
-  export as_echo_body
-  as_echo='sh -c $as_echo_body as_echo'
-fi
-
-# The user is always right.
-if test "${PATH_SEPARATOR+set}" != set; then
-  PATH_SEPARATOR=:
-  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
-    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
-      PATH_SEPARATOR=';'
-  }
-fi
-
-
-# IFS
-# We need space, tab and new line, in precisely that order.  Quoting is
-# there to prevent editors from complaining about space-tab.
-# (If _AS_PATH_WALK were called with IFS unset, it would disable word
-# splitting by setting IFS to empty value.)
-IFS=" ""	$as_nl"
-
-# Find who we are.  Look in the path if we contain no directory separator.
-case $0 in #((
-  *[\\/]* ) as_myself=$0 ;;
-  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
-  done
-IFS=$as_save_IFS
-
-     ;;
-esac
-# We did not find ourselves, most probably we were run as `sh COMMAND'
-# in which case we are not to be found in the path.
-if test "x$as_myself" = x; then
-  as_myself=$0
-fi
-if test ! -f "$as_myself"; then
-  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
-  exit 1
-fi
-
-# Unset variables that we do not need and which cause bugs (e.g. in
-# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
-# suppresses any "Segmentation fault" message there.  '((' could
-# trigger a bug in pdksh 5.2.14.
-for as_var in BASH_ENV ENV MAIL MAILPATH
-do eval test x\${$as_var+set} = xset \
-  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
-done
-PS1='$ '
-PS2='> '
-PS4='+ '
-
-# NLS nuisances.
-LC_ALL=C
-export LC_ALL
-LANGUAGE=C
-export LANGUAGE
-
-# CDPATH.
-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
-
-if test "x$CONFIG_SHELL" = x; then
-  as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
-  emulate sh
-  NULLCMD=:
-  # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
-  # is contrary to our usage.  Disable this feature.
-  alias -g '\${1+\"\$@\"}'='\"\$@\"'
-  setopt NO_GLOB_SUBST
-else
-  case \`(set -o) 2>/dev/null\` in #(
-  *posix*) :
-    set -o posix ;; #(
-  *) :
-     ;;
-esac
-fi
-"
-  as_required="as_fn_return () { (exit \$1); }
-as_fn_success () { as_fn_return 0; }
-as_fn_failure () { as_fn_return 1; }
-as_fn_ret_success () { return 0; }
-as_fn_ret_failure () { return 1; }
-
-exitcode=0
-as_fn_success || { exitcode=1; echo as_fn_success failed.; }
-as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
-as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
-as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
-if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
-
-else
-  exitcode=1; echo positional parameters were not saved.
-fi
-test x\$exitcode = x0 || exit 1"
-  as_suggested="  as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
-  as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
-  eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
-  test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1
-test \$(( 1 + 1 )) = 2 || exit 1"
-  if (eval "$as_required") 2>/dev/null; then :
-  as_have_required=yes
-else
-  as_have_required=no
-fi
-  if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then :
-
-else
-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-as_found=false
-for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  as_found=:
-  case $as_dir in #(
-	 /*)
-	   for as_base in sh bash ksh sh5; do
-	     # Try only shells that exist, to save several forks.
-	     as_shell=$as_dir/$as_base
-	     if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
-		    { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then :
-  CONFIG_SHELL=$as_shell as_have_required=yes
-		   if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then :
-  break 2
-fi
-fi
-	   done;;
-       esac
-  as_found=false
-done
-$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
-	      { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then :
-  CONFIG_SHELL=$SHELL as_have_required=yes
-fi; }
-IFS=$as_save_IFS
-
-
-      if test "x$CONFIG_SHELL" != x; then :
-  # We cannot yet assume a decent shell, so we have to provide a
-	# neutralization value for shells without unset; and this also
-	# works around shells that cannot unset nonexistent variables.
-	BASH_ENV=/dev/null
-	ENV=/dev/null
-	(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
-	export CONFIG_SHELL
-	exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"}
-fi
-
-    if test x$as_have_required = xno; then :
-  $as_echo "$0: This script requires a shell more modern than all"
-  $as_echo "$0: the shells that I found on your system."
-  if test x${ZSH_VERSION+set} = xset ; then
-    $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should"
-    $as_echo "$0: be upgraded to zsh 4.3.4 or later."
-  else
-    $as_echo "$0: Please tell bug-autoconf@gnu.org about your system,
-$0: including any error possibly output before this
-$0: message. Then install a modern shell, or manually run
-$0: the script under such a shell if you do have one."
-  fi
-  exit 1
-fi
-fi
-fi
-SHELL=${CONFIG_SHELL-/bin/sh}
-export SHELL
-# Unset more variables known to interfere with behavior of common tools.
-CLICOLOR_FORCE= GREP_OPTIONS=
-unset CLICOLOR_FORCE GREP_OPTIONS
-
-## --------------------- ##
-## M4sh Shell Functions. ##
-## --------------------- ##
-# as_fn_unset VAR
-# ---------------
-# Portably unset VAR.
-as_fn_unset ()
-{
-  { eval $1=; unset $1;}
-}
-as_unset=as_fn_unset
-
-# as_fn_set_status STATUS
-# -----------------------
-# Set $? to STATUS, without forking.
-as_fn_set_status ()
-{
-  return $1
-} # as_fn_set_status
-
-# as_fn_exit STATUS
-# -----------------
-# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
-as_fn_exit ()
-{
-  set +e
-  as_fn_set_status $1
-  exit $1
-} # as_fn_exit
-
-# as_fn_mkdir_p
-# -------------
-# Create "$as_dir" as a directory, including parents if necessary.
-as_fn_mkdir_p ()
-{
-
-  case $as_dir in #(
-  -*) as_dir=./$as_dir;;
-  esac
-  test -d "$as_dir" || eval $as_mkdir_p || {
-    as_dirs=
-    while :; do
-      case $as_dir in #(
-      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
-      *) as_qdir=$as_dir;;
-      esac
-      as_dirs="'$as_qdir' $as_dirs"
-      as_dir=`$as_dirname -- "$as_dir" ||
-$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$as_dir" : 'X\(//\)[^/]' \| \
-	 X"$as_dir" : 'X\(//\)$' \| \
-	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X"$as_dir" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)[^/].*/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-      test -d "$as_dir" && break
-    done
-    test -z "$as_dirs" || eval "mkdir $as_dirs"
-  } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir"
-
-
-} # as_fn_mkdir_p
-# as_fn_append VAR VALUE
-# ----------------------
-# Append the text in VALUE to the end of the definition contained in VAR. Take
-# advantage of any shell optimizations that allow amortized linear growth over
-# repeated appends, instead of the typical quadratic growth present in naive
-# implementations.
-if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
-  eval 'as_fn_append ()
-  {
-    eval $1+=\$2
-  }'
-else
-  as_fn_append ()
-  {
-    eval $1=\$$1\$2
-  }
-fi # as_fn_append
-
-# as_fn_arith ARG...
-# ------------------
-# Perform arithmetic evaluation on the ARGs, and store the result in the
-# global $as_val. Take advantage of shells that can avoid forks. The arguments
-# must be portable across $(()) and expr.
-if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
-  eval 'as_fn_arith ()
-  {
-    as_val=$(( $* ))
-  }'
-else
-  as_fn_arith ()
-  {
-    as_val=`expr "$@" || test $? -eq 1`
-  }
-fi # as_fn_arith
-
-
-# as_fn_error ERROR [LINENO LOG_FD]
-# ---------------------------------
-# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
-# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
-# script with status $?, using 1 if that was 0.
-as_fn_error ()
-{
-  as_status=$?; test $as_status -eq 0 && as_status=1
-  if test "$3"; then
-    as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-    $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3
-  fi
-  $as_echo "$as_me: error: $1" >&2
-  as_fn_exit $as_status
-} # as_fn_error
-
-if expr a : '\(a\)' >/dev/null 2>&1 &&
-   test "X`expr 00001 : '.*\(...\)'`" = X001; then
-  as_expr=expr
-else
-  as_expr=false
-fi
-
-if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
-  as_basename=basename
-else
-  as_basename=false
-fi
-
-if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
-  as_dirname=dirname
-else
-  as_dirname=false
-fi
-
-as_me=`$as_basename -- "$0" ||
-$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
-	 X"$0" : 'X\(//\)$' \| \
-	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X/"$0" |
-    sed '/^.*\/\([^/][^/]*\)\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-
-# Avoid depending upon Character Ranges.
-as_cr_letters='abcdefghijklmnopqrstuvwxyz'
-as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-as_cr_Letters=$as_cr_letters$as_cr_LETTERS
-as_cr_digits='0123456789'
-as_cr_alnum=$as_cr_Letters$as_cr_digits
-
-
-  as_lineno_1=$LINENO as_lineno_1a=$LINENO
-  as_lineno_2=$LINENO as_lineno_2a=$LINENO
-  eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
-  test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
-  # Blame Lee E. McMahon (1931-1989) for sed's syntax.  :-)
-  sed -n '
-    p
-    /[$]LINENO/=
-  ' <$as_myself |
-    sed '
-      s/[$]LINENO.*/&-/
-      t lineno
-      b
-      :lineno
-      N
-      :loop
-      s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
-      t loop
-      s/-\n.*//
-    ' >$as_me.lineno &&
-  chmod +x "$as_me.lineno" ||
-    { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
-
-  # Don't try to exec as it changes $[0], causing all sort of problems
-  # (the dirname of $[0] is not the place where we might find the
-  # original and so on.  Autoconf is especially sensitive to this).
-  . "./$as_me.lineno"
-  # Exit status is that of the last command.
-  exit
-}
-
-ECHO_C= ECHO_N= ECHO_T=
-case `echo -n x` in #(((((
--n*)
-  case `echo 'xy\c'` in
-  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
-  xy)  ECHO_C='\c';;
-  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
-       ECHO_T='	';;
-  esac;;
-*)
-  ECHO_N='-n';;
-esac
-
-rm -f conf$$ conf$$.exe conf$$.file
-if test -d conf$$.dir; then
-  rm -f conf$$.dir/conf$$.file
-else
-  rm -f conf$$.dir
-  mkdir conf$$.dir 2>/dev/null
-fi
-if (echo >conf$$.file) 2>/dev/null; then
-  if ln -s conf$$.file conf$$ 2>/dev/null; then
-    as_ln_s='ln -s'
-    # ... but there are two gotchas:
-    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
-    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
-    # In both cases, we have to default to `cp -p'.
-    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
-      as_ln_s='cp -p'
-  elif ln conf$$.file conf$$ 2>/dev/null; then
-    as_ln_s=ln
-  else
-    as_ln_s='cp -p'
-  fi
-else
-  as_ln_s='cp -p'
-fi
-rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
-rmdir conf$$.dir 2>/dev/null
-
-if mkdir -p . 2>/dev/null; then
-  as_mkdir_p='mkdir -p "$as_dir"'
-else
-  test -d ./-p && rmdir ./-p
-  as_mkdir_p=false
-fi
-
-if test -x / >/dev/null 2>&1; then
-  as_test_x='test -x'
-else
-  if ls -dL / >/dev/null 2>&1; then
-    as_ls_L_option=L
-  else
-    as_ls_L_option=
-  fi
-  as_test_x='
-    eval sh -c '\''
-      if test -d "$1"; then
-	test -d "$1/.";
-      else
-	case $1 in #(
-	-*)set "./$1";;
-	esac;
-	case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
-	???[sx]*):;;*)false;;esac;fi
-    '\'' sh
-  '
-fi
-as_executable_p=$as_test_x
-
-# Sed expression to map a string onto a valid CPP name.
-as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
-
-# Sed expression to map a string onto a valid variable name.
-as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
-
-
-test -n "$DJDIR" || exec 7<&0 </dev/null
-exec 6>&1
-
-# Name of the host.
-# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
-# so uname gets run too.
-ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
-
-#
-# Initializations.
-#
-ac_default_prefix=/usr/local
-ac_clean_files=
-ac_config_libobj_dir=.
-LIBOBJS=
-cross_compiling=no
-subdirs=
-MFLAGS=
-MAKEFLAGS=
-
-# Identity of this package.
-PACKAGE_NAME='runAs'
-PACKAGE_TARNAME='runas'
-PACKAGE_VERSION='0.1'
-PACKAGE_STRING='runAs 0.1'
-PACKAGE_BUGREPORT=''
-PACKAGE_URL=''
-
-ac_default_prefix=.
-ac_unique_file="main.c"
-# Factoring default headers for most tests.
-ac_includes_default="\
-#include <stdio.h>
-#ifdef HAVE_SYS_TYPES_H
-# include <sys/types.h>
-#endif
-#ifdef HAVE_SYS_STAT_H
-# include <sys/stat.h>
-#endif
-#ifdef STDC_HEADERS
-# include <stdlib.h>
-# include <stddef.h>
-#else
-# ifdef HAVE_STDLIB_H
-#  include <stdlib.h>
-# endif
-#endif
-#ifdef HAVE_STRING_H
-# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
-#  include <memory.h>
-# endif
-# include <string.h>
-#endif
-#ifdef HAVE_STRINGS_H
-# include <strings.h>
-#endif
-#ifdef HAVE_INTTYPES_H
-# include <inttypes.h>
-#endif
-#ifdef HAVE_STDINT_H
-# include <stdint.h>
-#endif
-#ifdef HAVE_UNISTD_H
-# include <unistd.h>
-#endif"
-
-ac_subst_vars='SET_MAKE
-LTLIBOBJS
-LIBOBJS
-EGREP
-GREP
-CPP
-OBJEXT
-EXEEXT
-ac_ct_CC
-CPPFLAGS
-LDFLAGS
-CFLAGS
-CC
-target_alias
-host_alias
-build_alias
-LIBS
-ECHO_T
-ECHO_N
-ECHO_C
-DEFS
-mandir
-localedir
-libdir
-psdir
-pdfdir
-dvidir
-htmldir
-infodir
-docdir
-oldincludedir
-includedir
-localstatedir
-sharedstatedir
-sysconfdir
-datadir
-datarootdir
-libexecdir
-sbindir
-bindir
-program_transform_name
-prefix
-exec_prefix
-PACKAGE_URL
-PACKAGE_BUGREPORT
-PACKAGE_STRING
-PACKAGE_VERSION
-PACKAGE_TARNAME
-PACKAGE_NAME
-PATH_SEPARATOR
-SHELL'
-ac_subst_files=''
-ac_user_opts='
-enable_option_checking
-with_home
-'
-      ac_precious_vars='build_alias
-host_alias
-target_alias
-CC
-CFLAGS
-LDFLAGS
-LIBS
-CPPFLAGS
-CPP'
-
-
-# Initialize some variables set by options.
-ac_init_help=
-ac_init_version=false
-ac_unrecognized_opts=
-ac_unrecognized_sep=
-# The variables have the same names as the options, with
-# dashes changed to underlines.
-cache_file=/dev/null
-exec_prefix=NONE
-no_create=
-no_recursion=
-prefix=NONE
-program_prefix=NONE
-program_suffix=NONE
-program_transform_name=s,x,x,
-silent=
-site=
-srcdir=
-verbose=
-x_includes=NONE
-x_libraries=NONE
-
-# Installation directory options.
-# These are left unexpanded so users can "make install exec_prefix=/foo"
-# and all the variables that are supposed to be based on exec_prefix
-# by default will actually change.
-# Use braces instead of parens because sh, perl, etc. also accept them.
-# (The list follows the same order as the GNU Coding Standards.)
-bindir='${exec_prefix}/bin'
-sbindir='${exec_prefix}/sbin'
-libexecdir='${exec_prefix}/libexec'
-datarootdir='${prefix}/share'
-datadir='${datarootdir}'
-sysconfdir='${prefix}/etc'
-sharedstatedir='${prefix}/com'
-localstatedir='${prefix}/var'
-includedir='${prefix}/include'
-oldincludedir='/usr/include'
-docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
-infodir='${datarootdir}/info'
-htmldir='${docdir}'
-dvidir='${docdir}'
-pdfdir='${docdir}'
-psdir='${docdir}'
-libdir='${exec_prefix}/lib'
-localedir='${datarootdir}/locale'
-mandir='${datarootdir}/man'
-
-ac_prev=
-ac_dashdash=
-for ac_option
-do
-  # If the previous option needs an argument, assign it.
-  if test -n "$ac_prev"; then
-    eval $ac_prev=\$ac_option
-    ac_prev=
-    continue
-  fi
-
-  case $ac_option in
-  *=*)	ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
-  *)	ac_optarg=yes ;;
-  esac
-
-  # Accept the important Cygnus configure options, so we can diagnose typos.
-
-  case $ac_dashdash$ac_option in
-  --)
-    ac_dashdash=yes ;;
-
-  -bindir | --bindir | --bindi | --bind | --bin | --bi)
-    ac_prev=bindir ;;
-  -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
-    bindir=$ac_optarg ;;
-
-  -build | --build | --buil | --bui | --bu)
-    ac_prev=build_alias ;;
-  -build=* | --build=* | --buil=* | --bui=* | --bu=*)
-    build_alias=$ac_optarg ;;
-
-  -cache-file | --cache-file | --cache-fil | --cache-fi \
-  | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
-    ac_prev=cache_file ;;
-  -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
-  | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
-    cache_file=$ac_optarg ;;
-
-  --config-cache | -C)
-    cache_file=config.cache ;;
-
-  -datadir | --datadir | --datadi | --datad)
-    ac_prev=datadir ;;
-  -datadir=* | --datadir=* | --datadi=* | --datad=*)
-    datadir=$ac_optarg ;;
-
-  -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
-  | --dataroo | --dataro | --datar)
-    ac_prev=datarootdir ;;
-  -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
-  | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
-    datarootdir=$ac_optarg ;;
-
-  -disable-* | --disable-*)
-    ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
-      as_fn_error "invalid feature name: $ac_useropt"
-    ac_useropt_orig=$ac_useropt
-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
-    case $ac_user_opts in
-      *"
-"enable_$ac_useropt"
-"*) ;;
-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
-	 ac_unrecognized_sep=', ';;
-    esac
-    eval enable_$ac_useropt=no ;;
-
-  -docdir | --docdir | --docdi | --doc | --do)
-    ac_prev=docdir ;;
-  -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
-    docdir=$ac_optarg ;;
-
-  -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
-    ac_prev=dvidir ;;
-  -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
-    dvidir=$ac_optarg ;;
-
-  -enable-* | --enable-*)
-    ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
-      as_fn_error "invalid feature name: $ac_useropt"
-    ac_useropt_orig=$ac_useropt
-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
-    case $ac_user_opts in
-      *"
-"enable_$ac_useropt"
-"*) ;;
-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
-	 ac_unrecognized_sep=', ';;
-    esac
-    eval enable_$ac_useropt=\$ac_optarg ;;
-
-  -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
-  | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
-  | --exec | --exe | --ex)
-    ac_prev=exec_prefix ;;
-  -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
-  | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
-  | --exec=* | --exe=* | --ex=*)
-    exec_prefix=$ac_optarg ;;
-
-  -gas | --gas | --ga | --g)
-    # Obsolete; use --with-gas.
-    with_gas=yes ;;
-
-  -help | --help | --hel | --he | -h)
-    ac_init_help=long ;;
-  -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
-    ac_init_help=recursive ;;
-  -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
-    ac_init_help=short ;;
-
-  -host | --host | --hos | --ho)
-    ac_prev=host_alias ;;
-  -host=* | --host=* | --hos=* | --ho=*)
-    host_alias=$ac_optarg ;;
-
-  -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
-    ac_prev=htmldir ;;
-  -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
-  | --ht=*)
-    htmldir=$ac_optarg ;;
-
-  -includedir | --includedir | --includedi | --included | --include \
-  | --includ | --inclu | --incl | --inc)
-    ac_prev=includedir ;;
-  -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
-  | --includ=* | --inclu=* | --incl=* | --inc=*)
-    includedir=$ac_optarg ;;
-
-  -infodir | --infodir | --infodi | --infod | --info | --inf)
-    ac_prev=infodir ;;
-  -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
-    infodir=$ac_optarg ;;
-
-  -libdir | --libdir | --libdi | --libd)
-    ac_prev=libdir ;;
-  -libdir=* | --libdir=* | --libdi=* | --libd=*)
-    libdir=$ac_optarg ;;
-
-  -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
-  | --libexe | --libex | --libe)
-    ac_prev=libexecdir ;;
-  -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
-  | --libexe=* | --libex=* | --libe=*)
-    libexecdir=$ac_optarg ;;
-
-  -localedir | --localedir | --localedi | --localed | --locale)
-    ac_prev=localedir ;;
-  -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
-    localedir=$ac_optarg ;;
-
-  -localstatedir | --localstatedir | --localstatedi | --localstated \
-  | --localstate | --localstat | --localsta | --localst | --locals)
-    ac_prev=localstatedir ;;
-  -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
-  | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
-    localstatedir=$ac_optarg ;;
-
-  -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
-    ac_prev=mandir ;;
-  -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
-    mandir=$ac_optarg ;;
-
-  -nfp | --nfp | --nf)
-    # Obsolete; use --without-fp.
-    with_fp=no ;;
-
-  -no-create | --no-create | --no-creat | --no-crea | --no-cre \
-  | --no-cr | --no-c | -n)
-    no_create=yes ;;
-
-  -no-recursion | --no-recursion | --no-recursio | --no-recursi \
-  | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
-    no_recursion=yes ;;
-
-  -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
-  | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
-  | --oldin | --oldi | --old | --ol | --o)
-    ac_prev=oldincludedir ;;
-  -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
-  | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
-  | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
-    oldincludedir=$ac_optarg ;;
-
-  -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
-    ac_prev=prefix ;;
-  -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
-    prefix=$ac_optarg ;;
-
-  -program-prefix | --program-prefix | --program-prefi | --program-pref \
-  | --program-pre | --program-pr | --program-p)
-    ac_prev=program_prefix ;;
-  -program-prefix=* | --program-prefix=* | --program-prefi=* \
-  | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
-    program_prefix=$ac_optarg ;;
-
-  -program-suffix | --program-suffix | --program-suffi | --program-suff \
-  | --program-suf | --program-su | --program-s)
-    ac_prev=program_suffix ;;
-  -program-suffix=* | --program-suffix=* | --program-suffi=* \
-  | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
-    program_suffix=$ac_optarg ;;
-
-  -program-transform-name | --program-transform-name \
-  | --program-transform-nam | --program-transform-na \
-  | --program-transform-n | --program-transform- \
-  | --program-transform | --program-transfor \
-  | --program-transfo | --program-transf \
-  | --program-trans | --program-tran \
-  | --progr-tra | --program-tr | --program-t)
-    ac_prev=program_transform_name ;;
-  -program-transform-name=* | --program-transform-name=* \
-  | --program-transform-nam=* | --program-transform-na=* \
-  | --program-transform-n=* | --program-transform-=* \
-  | --program-transform=* | --program-transfor=* \
-  | --program-transfo=* | --program-transf=* \
-  | --program-trans=* | --program-tran=* \
-  | --progr-tra=* | --program-tr=* | --program-t=*)
-    program_transform_name=$ac_optarg ;;
-
-  -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
-    ac_prev=pdfdir ;;
-  -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
-    pdfdir=$ac_optarg ;;
-
-  -psdir | --psdir | --psdi | --psd | --ps)
-    ac_prev=psdir ;;
-  -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
-    psdir=$ac_optarg ;;
-
-  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
-  | -silent | --silent | --silen | --sile | --sil)
-    silent=yes ;;
-
-  -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
-    ac_prev=sbindir ;;
-  -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
-  | --sbi=* | --sb=*)
-    sbindir=$ac_optarg ;;
-
-  -sharedstatedir | --sharedstatedir | --sharedstatedi \
-  | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
-  | --sharedst | --shareds | --shared | --share | --shar \
-  | --sha | --sh)
-    ac_prev=sharedstatedir ;;
-  -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
-  | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
-  | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
-  | --sha=* | --sh=*)
-    sharedstatedir=$ac_optarg ;;
-
-  -site | --site | --sit)
-    ac_prev=site ;;
-  -site=* | --site=* | --sit=*)
-    site=$ac_optarg ;;
-
-  -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
-    ac_prev=srcdir ;;
-  -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
-    srcdir=$ac_optarg ;;
-
-  -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
-  | --syscon | --sysco | --sysc | --sys | --sy)
-    ac_prev=sysconfdir ;;
-  -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
-  | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
-    sysconfdir=$ac_optarg ;;
-
-  -target | --target | --targe | --targ | --tar | --ta | --t)
-    ac_prev=target_alias ;;
-  -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
-    target_alias=$ac_optarg ;;
-
-  -v | -verbose | --verbose | --verbos | --verbo | --verb)
-    verbose=yes ;;
-
-  -version | --version | --versio | --versi | --vers | -V)
-    ac_init_version=: ;;
-
-  -with-* | --with-*)
-    ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
-      as_fn_error "invalid package name: $ac_useropt"
-    ac_useropt_orig=$ac_useropt
-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
-    case $ac_user_opts in
-      *"
-"with_$ac_useropt"
-"*) ;;
-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
-	 ac_unrecognized_sep=', ';;
-    esac
-    eval with_$ac_useropt=\$ac_optarg ;;
-
-  -without-* | --without-*)
-    ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
-      as_fn_error "invalid package name: $ac_useropt"
-    ac_useropt_orig=$ac_useropt
-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
-    case $ac_user_opts in
-      *"
-"with_$ac_useropt"
-"*) ;;
-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
-	 ac_unrecognized_sep=', ';;
-    esac
-    eval with_$ac_useropt=no ;;
-
-  --x)
-    # Obsolete; use --with-x.
-    with_x=yes ;;
-
-  -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
-  | --x-incl | --x-inc | --x-in | --x-i)
-    ac_prev=x_includes ;;
-  -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
-  | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
-    x_includes=$ac_optarg ;;
-
-  -x-libraries | --x-libraries | --x-librarie | --x-librari \
-  | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
-    ac_prev=x_libraries ;;
-  -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
-  | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
-    x_libraries=$ac_optarg ;;
-
-  -*) as_fn_error "unrecognized option: \`$ac_option'
-Try \`$0 --help' for more information."
-    ;;
-
-  *=*)
-    ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
-    # Reject names that are not valid shell variable names.
-    case $ac_envvar in #(
-      '' | [0-9]* | *[!_$as_cr_alnum]* )
-      as_fn_error "invalid variable name: \`$ac_envvar'" ;;
-    esac
-    eval $ac_envvar=\$ac_optarg
-    export $ac_envvar ;;
-
-  *)
-    # FIXME: should be removed in autoconf 3.0.
-    $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
-    expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
-      $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
-    : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
-    ;;
-
-  esac
-done
-
-if test -n "$ac_prev"; then
-  ac_option=--`echo $ac_prev | sed 's/_/-/g'`
-  as_fn_error "missing argument to $ac_option"
-fi
-
-if test -n "$ac_unrecognized_opts"; then
-  case $enable_option_checking in
-    no) ;;
-    fatal) as_fn_error "unrecognized options: $ac_unrecognized_opts" ;;
-    *)     $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
-  esac
-fi
-
-# Check all directory arguments for consistency.
-for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
-		datadir sysconfdir sharedstatedir localstatedir includedir \
-		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
-		libdir localedir mandir
-do
-  eval ac_val=\$$ac_var
-  # Remove trailing slashes.
-  case $ac_val in
-    */ )
-      ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
-      eval $ac_var=\$ac_val;;
-  esac
-  # Be sure to have absolute directory names.
-  case $ac_val in
-    [\\/$]* | ?:[\\/]* )  continue;;
-    NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
-  esac
-  as_fn_error "expected an absolute directory name for --$ac_var: $ac_val"
-done
-
-# There might be people who depend on the old broken behavior: `$host'
-# used to hold the argument of --host etc.
-# FIXME: To remove some day.
-build=$build_alias
-host=$host_alias
-target=$target_alias
-
-# FIXME: To remove some day.
-if test "x$host_alias" != x; then
-  if test "x$build_alias" = x; then
-    cross_compiling=maybe
-    $as_echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
-    If a cross compiler is detected then cross compile mode will be used." >&2
-  elif test "x$build_alias" != "x$host_alias"; then
-    cross_compiling=yes
-  fi
-fi
-
-ac_tool_prefix=
-test -n "$host_alias" && ac_tool_prefix=$host_alias-
-
-test "$silent" = yes && exec 6>/dev/null
-
-
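-# Sanity check on the working directory: `pwd` must succeed, and `ls -di`
-# must report the same inode before and after a round trip through $ac_pwd.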
-ac_pwd=`pwd` && test -n "$ac_pwd" &&
-ac_ls_di=`ls -di .` &&
-ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
-  as_fn_error "working directory cannot be determined"
-test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
-  as_fn_error "pwd does not report name of working directory"
-
-
-# Find the source files, if location was not specified.
-if test -z "$srcdir"; then
-  ac_srcdir_defaulted=yes
-  # Try the directory containing this script, then the parent directory.
-  ac_confdir=`$as_dirname -- "$as_myself" ||
-$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$as_myself" : 'X\(//\)[^/]' \| \
-	 X"$as_myself" : 'X\(//\)$' \| \
-	 X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X"$as_myself" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)[^/].*/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-  srcdir=$ac_confdir
-  if test ! -r "$srcdir/$ac_unique_file"; then
-    srcdir=..
-  fi
-else
-  ac_srcdir_defaulted=no
-fi
-if test ! -r "$srcdir/$ac_unique_file"; then
-  test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
-  as_fn_error "cannot find sources ($ac_unique_file) in $srcdir"
-fi
-ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
-ac_abs_confdir=`(
-	cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error "$ac_msg"
-	pwd)`
-# When building in place, set srcdir=.
-if test "$ac_abs_confdir" = "$ac_pwd"; then
-  srcdir=.
-fi
-# Remove unnecessary trailing slashes from srcdir.
-# Double slashes in file names in object file debugging info
-# mess up M-x gdb in Emacs.
-case $srcdir in
-*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
-esac
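-# Snapshot the user's "precious" variables (CC, CFLAGS, ...) so a later run
-# can detect when their values change and flag the cache as inconsistent.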
-for ac_var in $ac_precious_vars; do
-  eval ac_env_${ac_var}_set=\${${ac_var}+set}
-  eval ac_env_${ac_var}_value=\$${ac_var}
-  eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
-  eval ac_cv_env_${ac_var}_value=\$${ac_var}
-done
-
-#
-# Report the --help message.
-#
-if test "$ac_init_help" = "long"; then
-  # Omit some internal or obsolete options to make the list less imposing.
-  # This message is too long to be a string in the A/UX 3.1 sh.
-  cat <<_ACEOF
-\`configure' configures runAs 0.1 to adapt to many kinds of systems.
-
-Usage: $0 [OPTION]... [VAR=VALUE]...
-
-To assign environment variables (e.g., CC, CFLAGS...), specify them as
-VAR=VALUE.  See below for descriptions of some of the useful variables.
-
-Defaults for the options are specified in brackets.
-
-Configuration:
-  -h, --help              display this help and exit
-      --help=short        display options specific to this package
-      --help=recursive    display the short help of all the included packages
-  -V, --version           display version information and exit
-  -q, --quiet, --silent   do not print \`checking...' messages
-      --cache-file=FILE   cache test results in FILE [disabled]
-  -C, --config-cache      alias for \`--cache-file=config.cache'
-  -n, --no-create         do not create output files
-      --srcdir=DIR        find the sources in DIR [configure dir or \`..']
-
-Installation directories:
-  --prefix=PREFIX         install architecture-independent files in PREFIX
-                          [$ac_default_prefix]
-  --exec-prefix=EPREFIX   install architecture-dependent files in EPREFIX
-                          [PREFIX]
-
-By default, \`make install' will install all the files in
-\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc.  You can specify
-an installation prefix other than \`$ac_default_prefix' using \`--prefix',
-for instance \`--prefix=\$HOME'.
-
-For better control, use the options below.
-
-Fine tuning of the installation directories:
-  --bindir=DIR            user executables [EPREFIX/bin]
-  --sbindir=DIR           system admin executables [EPREFIX/sbin]
-  --libexecdir=DIR        program executables [EPREFIX/libexec]
-  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
-  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
-  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
-  --libdir=DIR            object code libraries [EPREFIX/lib]
-  --includedir=DIR        C header files [PREFIX/include]
-  --oldincludedir=DIR     C header files for non-gcc [/usr/include]
-  --datarootdir=DIR       read-only arch.-independent data root [PREFIX/share]
-  --datadir=DIR           read-only architecture-independent data [DATAROOTDIR]
-  --infodir=DIR           info documentation [DATAROOTDIR/info]
-  --localedir=DIR         locale-dependent data [DATAROOTDIR/locale]
-  --mandir=DIR            man documentation [DATAROOTDIR/man]
-  --docdir=DIR            documentation root [DATAROOTDIR/doc/runas]
-  --htmldir=DIR           html documentation [DOCDIR]
-  --dvidir=DIR            dvi documentation [DOCDIR]
-  --pdfdir=DIR            pdf documentation [DOCDIR]
-  --psdir=DIR             ps documentation [DOCDIR]
-_ACEOF
-
-  cat <<\_ACEOF
-_ACEOF
-fi
-
-if test -n "$ac_init_help"; then
-  case $ac_init_help in
-     short | recursive ) echo "Configuration of runAs 0.1:";;
-   esac
-  cat <<\_ACEOF
-
-Optional Packages:
-  --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
-  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
-  --with-home             path to hadoop home dir
-
-Some influential environment variables:
-  CC          C compiler command
-  CFLAGS      C compiler flags
-  LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
-              nonstandard directory <lib dir>
-  LIBS        libraries to pass to the linker, e.g. -l<library>
-  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
-              you have headers in a nonstandard directory <include dir>
-  CPP         C preprocessor
-
-Use these variables to override the choices made by `configure' or to help
-it to find libraries and programs with nonstandard names/locations.
-
-Report bugs to the package provider.
-_ACEOF
-ac_status=$?
-fi
-
-if test "$ac_init_help" = "recursive"; then
-  # If there are subdirs, report their specific --help.
-  for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
-    test -d "$ac_dir" ||
-      { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
-      continue
-    ac_builddir=.
-
-case "$ac_dir" in
-.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
-*)
-  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
-  # A ".." for each directory in $ac_dir_suffix.
-  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
-  case $ac_top_builddir_sub in
-  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
-  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
-  esac ;;
-esac
-ac_abs_top_builddir=$ac_pwd
-ac_abs_builddir=$ac_pwd$ac_dir_suffix
-# for backward compatibility:
-ac_top_builddir=$ac_top_build_prefix
-
-case $srcdir in
-  .)  # We are building in place.
-    ac_srcdir=.
-    ac_top_srcdir=$ac_top_builddir_sub
-    ac_abs_top_srcdir=$ac_pwd ;;
-  [\\/]* | ?:[\\/]* )  # Absolute name.
-    ac_srcdir=$srcdir$ac_dir_suffix;
-    ac_top_srcdir=$srcdir
-    ac_abs_top_srcdir=$srcdir ;;
-  *) # Relative name.
-    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
-    ac_top_srcdir=$ac_top_build_prefix$srcdir
-    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
-esac
-ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
-
-    cd "$ac_dir" || { ac_status=$?; continue; }
-    # Check for guested configure.
-    if test -f "$ac_srcdir/configure.gnu"; then
-      echo &&
-      $SHELL "$ac_srcdir/configure.gnu" --help=recursive
-    elif test -f "$ac_srcdir/configure"; then
-      echo &&
-      $SHELL "$ac_srcdir/configure" --help=recursive
-    else
-      $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
-    fi || ac_status=$?
-    cd "$ac_pwd" || { ac_status=$?; break; }
-  done
-fi
-
-test -n "$ac_init_help" && exit $ac_status
-if $ac_init_version; then
-  cat <<\_ACEOF
-runAs configure 0.1
-generated by GNU Autoconf 2.65
-
-Copyright (C) 2009 Free Software Foundation, Inc.
-This configure script is free software; the Free Software Foundation
-gives unlimited permission to copy, distribute and modify it.
-_ACEOF
-  exit
-fi
-
-## ------------------------ ##
-## Autoconf initialization. ##
-## ------------------------ ##
-
-# ac_fn_c_try_compile LINENO
-# --------------------------
-# Try to compile conftest.$ac_ext, and return whether this succeeded.
-ac_fn_c_try_compile ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  rm -f conftest.$ac_objext
-  if { { ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_compile") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    grep -v '^ *+' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-    mv -f conftest.er1 conftest.err
-  fi
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_retval=1
-fi
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-  as_fn_set_status $ac_retval
-
-} # ac_fn_c_try_compile
-
-# ac_fn_c_try_cpp LINENO
-# ----------------------
-# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
-ac_fn_c_try_cpp ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  if { { ac_try="$ac_cpp conftest.$ac_ext"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    grep -v '^ *+' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-    mv -f conftest.er1 conftest.err
-  fi
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } >/dev/null && {
-	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       }; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-    ac_retval=1
-fi
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-  as_fn_set_status $ac_retval
-
-} # ac_fn_c_try_cpp
-
-# ac_fn_c_try_run LINENO
-# ----------------------
-# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes
-# that executables *can* be run.
-ac_fn_c_try_run ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  if { { ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
-  { { case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_try") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; }; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: program exited with status $ac_status" >&5
-       $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-       ac_retval=$ac_status
-fi
-  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-  as_fn_set_status $ac_retval
-
-} # ac_fn_c_try_run
-
-# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES
-# -------------------------------------------------------
-# Tests whether HEADER exists, giving a warning if it cannot be compiled using
-# the include files in INCLUDES and setting the cache variable VAR
-# accordingly.
-ac_fn_c_check_header_mongrel ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-fi
-eval ac_res=\$$3
-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-else
-  # Is the header compilable?
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5
-$as_echo_n "checking $2 usability... " >&6; }
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-$4
-#include <$2>
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_header_compiler=yes
-else
-  ac_header_compiler=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5
-$as_echo "$ac_header_compiler" >&6; }
-
-# Is the header present?
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5
-$as_echo_n "checking $2 presence... " >&6; }
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <$2>
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-  ac_header_preproc=yes
-else
-  ac_header_preproc=no
-fi
-rm -f conftest.err conftest.$ac_ext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5
-$as_echo "$ac_header_preproc" >&6; }
-
-# So?  What about this header?
-case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #((
-  yes:no: )
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5
-$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;}
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
-$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
-    ;;
-  no:yes:* )
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5
-$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;}
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2:     check for missing prerequisite headers?" >&5
-$as_echo "$as_me: WARNING: $2:     check for missing prerequisite headers?" >&2;}
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5
-$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2:     section \"Present But Cannot Be Compiled\"" >&5
-$as_echo "$as_me: WARNING: $2:     section \"Present But Cannot Be Compiled\"" >&2;}
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
-$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
-    ;;
-esac
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-else
-  eval "$3=\$ac_header_compiler"
-fi
-eval ac_res=\$$3
-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-fi
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-
-} # ac_fn_c_check_header_mongrel
-
-# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES
-# -------------------------------------------------------
-# Tests whether HEADER exists and can be compiled using the include files in
-# INCLUDES, setting the cache variable VAR accordingly.
-ac_fn_c_check_header_compile ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-$4
-#include <$2>
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  eval "$3=yes"
-else
-  eval "$3=no"
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-eval ac_res=\$$3
-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-
-} # ac_fn_c_check_header_compile
-
-# ac_fn_c_check_type LINENO TYPE VAR INCLUDES
-# -------------------------------------------
-# Tests whether TYPE exists after having included INCLUDES, setting cache
-# variable VAR accordingly.
-ac_fn_c_check_type ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-else
-  eval "$3=no"
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-$4
-int
-main ()
-{
-if (sizeof ($2))
-	 return 0;
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-$4
-int
-main ()
-{
-if (sizeof (($2)))
-	    return 0;
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-
-else
-  eval "$3=yes"
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-eval ac_res=\$$3
-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-
-} # ac_fn_c_check_type
-
-# ac_fn_c_try_link LINENO
-# -----------------------
-# Try to link conftest.$ac_ext, and return whether this succeeded.
-ac_fn_c_try_link ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  rm -f conftest.$ac_objext conftest$ac_exeext
-  if { { ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    grep -v '^ *+' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-    mv -f conftest.er1 conftest.err
-  fi
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest$ac_exeext && {
-	 test "$cross_compiling" = yes ||
-	 $as_test_x conftest$ac_exeext
-       }; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_retval=1
-fi
-  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
-  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
-  # interfere with the next link command; also delete a directory that is
-  # left behind by Apple's compiler.  We do this before executing the actions.
-  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-  as_fn_set_status $ac_retval
-
-} # ac_fn_c_try_link
-
-# ac_fn_c_check_func LINENO FUNC VAR
-# ----------------------------------
-# Tests whether FUNC exists, setting the cache variable VAR accordingly
-ac_fn_c_check_func ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
-   For example, HP-UX 11i <limits.h> declares gettimeofday.  */
-#define $2 innocuous_$2
-
-/* System header to define __stub macros and hopefully few prototypes,
-    which can conflict with char $2 (); below.
-    Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-    <limits.h> exists even on freestanding compilers.  */
-
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
-
-#undef $2
-
-/* Override any GCC internal prototype to avoid an error.
-   Use char because int might match the return type of a GCC
-   builtin and then its argument prototype would still apply.  */
-#ifdef __cplusplus
-extern "C"
-#endif
-char $2 ();
-/* The GNU C library defines this for functions which it implements
-    to always fail with ENOSYS.  Some functions are actually named
-    something starting with __ and the normal name is an alias.  */
-#if defined __stub_$2 || defined __stub___$2
-choke me
-#endif
-
-int
-main ()
-{
-return $2 ();
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
-  eval "$3=yes"
-else
-  eval "$3=no"
-fi
-rm -f core conftest.err conftest.$ac_objext \
-    conftest$ac_exeext conftest.$ac_ext
-fi
-eval ac_res=\$$3
-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-
-} # ac_fn_c_check_func
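-# From here on, everything written to file descriptor 5 lands in config.log,
-# starting with the platform details and the exact invocation command line.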
-cat >config.log <<_ACEOF
-This file contains any messages produced by compilers while
-running configure, to aid debugging if configure makes a mistake.
-
-It was created by runAs $as_me 0.1, which was
-generated by GNU Autoconf 2.65.  Invocation command line was
-
-  $ $0 $@
-
-_ACEOF
-exec 5>>config.log
-{
-cat <<_ASUNAME
-## --------- ##
-## Platform. ##
-## --------- ##
-
-hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
-uname -m = `(uname -m) 2>/dev/null || echo unknown`
-uname -r = `(uname -r) 2>/dev/null || echo unknown`
-uname -s = `(uname -s) 2>/dev/null || echo unknown`
-uname -v = `(uname -v) 2>/dev/null || echo unknown`
-
-/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
-/bin/uname -X     = `(/bin/uname -X) 2>/dev/null     || echo unknown`
-
-/bin/arch              = `(/bin/arch) 2>/dev/null              || echo unknown`
-/usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null       || echo unknown`
-/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
-/usr/bin/hostinfo      = `(/usr/bin/hostinfo) 2>/dev/null      || echo unknown`
-/bin/machine           = `(/bin/machine) 2>/dev/null           || echo unknown`
-/usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null       || echo unknown`
-/bin/universe          = `(/bin/universe) 2>/dev/null          || echo unknown`
-
-_ASUNAME
-
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    $as_echo "PATH: $as_dir"
-  done
-IFS=$as_save_IFS
-
-} >&5
-
-cat >&5 <<_ACEOF
-
-
-## ----------- ##
-## Core tests. ##
-## ----------- ##
-
-_ACEOF
-
-
-# Keep a trace of the command line.
-# Strip out --no-create and --no-recursion so they do not pile up.
-# Strip out --silent because we don't want to record it for future runs.
-# Also quote any args containing shell meta-characters.
-# Make two passes to allow for proper duplicate-argument suppression.
-ac_configure_args=
-ac_configure_args0=
-ac_configure_args1=
-ac_must_keep_next=false
-for ac_pass in 1 2
-do
-  for ac_arg
-  do
-    case $ac_arg in
-    -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
-    -q | -quiet | --quiet | --quie | --qui | --qu | --q \
-    | -silent | --silent | --silen | --sile | --sil)
-      continue ;;
-    *\'*)
-      ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
-    esac
-    case $ac_pass in
-    1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
-    2)
-      as_fn_append ac_configure_args1 " '$ac_arg'"
-      if test $ac_must_keep_next = true; then
-	ac_must_keep_next=false # Got value, back to normal.
-      else
-	case $ac_arg in
-	  *=* | --config-cache | -C | -disable-* | --disable-* \
-	  | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
-	  | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
-	  | -with-* | --with-* | -without-* | --without-* | --x)
-	    case "$ac_configure_args0 " in
-	      "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
-	    esac
-	    ;;
-	  -* ) ac_must_keep_next=true ;;
-	esac
-      fi
-      as_fn_append ac_configure_args " '$ac_arg'"
-      ;;
-    esac
-  done
-done
-{ ac_configure_args0=; unset ac_configure_args0;}
-{ ac_configure_args1=; unset ac_configure_args1;}
-
-# When interrupted or exit'd, cleanup temporary files, and complete
-# config.log.  We remove comments because anyway the quotes in there
-# would cause problems or look ugly.
-# WARNING: Use '\'' to represent an apostrophe within the trap.
-# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
-trap 'exit_status=$?
-  # Save into config.log some information that might help in debugging.
-  {
-    echo
-
-    cat <<\_ASBOX
-## ---------------- ##
-## Cache variables. ##
-## ---------------- ##
-_ASBOX
-    echo
-    # The following way of writing the cache mishandles newlines in values,
-(
-  for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
-    eval ac_val=\$$ac_var
-    case $ac_val in #(
-    *${as_nl}*)
-      case $ac_var in #(
-      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
-$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
-      esac
-      case $ac_var in #(
-      _ | IFS | as_nl) ;; #(
-      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
-      *) { eval $ac_var=; unset $ac_var;} ;;
-      esac ;;
-    esac
-  done
-  (set) 2>&1 |
-    case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
-    *${as_nl}ac_space=\ *)
-      sed -n \
-	"s/'\''/'\''\\\\'\'''\''/g;
-	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
-      ;; #(
-    *)
-      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
-      ;;
-    esac |
-    sort
-)
-    echo
-
-    cat <<\_ASBOX
-## ----------------- ##
-## Output variables. ##
-## ----------------- ##
-_ASBOX
-    echo
-    for ac_var in $ac_subst_vars
-    do
-      eval ac_val=\$$ac_var
-      case $ac_val in
-      *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
-      esac
-      $as_echo "$ac_var='\''$ac_val'\''"
-    done | sort
-    echo
-
-    if test -n "$ac_subst_files"; then
-      cat <<\_ASBOX
-## ------------------- ##
-## File substitutions. ##
-## ------------------- ##
-_ASBOX
-      echo
-      for ac_var in $ac_subst_files
-      do
-	eval ac_val=\$$ac_var
-	case $ac_val in
-	*\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
-	esac
-	$as_echo "$ac_var='\''$ac_val'\''"
-      done | sort
-      echo
-    fi
-
-    if test -s confdefs.h; then
-      cat <<\_ASBOX
-## ----------- ##
-## confdefs.h. ##
-## ----------- ##
-_ASBOX
-      echo
-      cat confdefs.h
-      echo
-    fi
-    test "$ac_signal" != 0 &&
-      $as_echo "$as_me: caught signal $ac_signal"
-    $as_echo "$as_me: exit $exit_status"
-  } >&5
-  rm -f core *.core core.conftest.* &&
-    rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
-    exit $exit_status
-' 0
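-# Install the same cleanup on HUP (1), INT (2), PIPE (13) and TERM (15).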
-for ac_signal in 1 2 13 15; do
-  trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
-done
-ac_signal=0
-
-# confdefs.h avoids OS command line length limits that DEFS can exceed.
-rm -f -r conftest* confdefs.h
-
-$as_echo "/* confdefs.h */" > confdefs.h
-
-# Predefined preprocessor variables.
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_NAME "$PACKAGE_NAME"
-_ACEOF
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
-_ACEOF
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_VERSION "$PACKAGE_VERSION"
-_ACEOF
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_STRING "$PACKAGE_STRING"
-_ACEOF
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
-_ACEOF
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_URL "$PACKAGE_URL"
-_ACEOF
-
-
-# Let the site file select an alternate cache file if it wants to.
-# Prefer an explicitly selected file to automatically selected ones.
-ac_site_file1=NONE
-ac_site_file2=NONE
-if test -n "$CONFIG_SITE"; then
-  ac_site_file1=$CONFIG_SITE
-elif test "x$prefix" != xNONE; then
-  ac_site_file1=$prefix/share/config.site
-  ac_site_file2=$prefix/etc/config.site
-else
-  ac_site_file1=$ac_default_prefix/share/config.site
-  ac_site_file2=$ac_default_prefix/etc/config.site
-fi
-for ac_site_file in "$ac_site_file1" "$ac_site_file2"
-do
-  test "x$ac_site_file" = xNONE && continue
-  if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then
-    { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
-$as_echo "$as_me: loading site script $ac_site_file" >&6;}
-    sed 's/^/| /' "$ac_site_file" >&5
-    . "$ac_site_file"
-  fi
-done
-
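-# Load previously cached test results, or create an empty cache file.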
-if test -r "$cache_file"; then
-  # Some versions of bash will fail to source /dev/null (special files
-  # actually), so we avoid doing that.  DJGPP emulates it as a regular file.
-  if test /dev/null != "$cache_file" && test -f "$cache_file"; then
-    { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
-$as_echo "$as_me: loading cache $cache_file" >&6;}
-    case $cache_file in
-      [\\/]* | ?:[\\/]* ) . "$cache_file";;
-      *)                      . "./$cache_file";;
-    esac
-  fi
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
-$as_echo "$as_me: creating cache $cache_file" >&6;}
-  >$cache_file
-fi
-
-# Check that the precious variables saved in the cache have kept the same
-# value.
-ac_cache_corrupted=false
-for ac_var in $ac_precious_vars; do
-  eval ac_old_set=\$ac_cv_env_${ac_var}_set
-  eval ac_new_set=\$ac_env_${ac_var}_set
-  eval ac_old_val=\$ac_cv_env_${ac_var}_value
-  eval ac_new_val=\$ac_env_${ac_var}_value
-  case $ac_old_set,$ac_new_set in
-    set,)
-      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
-$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
-      ac_cache_corrupted=: ;;
-    ,set)
-      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
-$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
-      ac_cache_corrupted=: ;;
-    ,);;
-    *)
-      if test "x$ac_old_val" != "x$ac_new_val"; then
-	# differences in whitespace do not lead to failure.
-	ac_old_val_w=`echo x $ac_old_val`
-	ac_new_val_w=`echo x $ac_new_val`
-	if test "$ac_old_val_w" != "$ac_new_val_w"; then
-	  { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
-$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
-	  ac_cache_corrupted=:
-	else
-	  { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
-$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
-	  eval $ac_var=\$ac_old_val
-	fi
-	{ $as_echo "$as_me:${as_lineno-$LINENO}:   former value:  \`$ac_old_val'" >&5
-$as_echo "$as_me:   former value:  \`$ac_old_val'" >&2;}
-	{ $as_echo "$as_me:${as_lineno-$LINENO}:   current value: \`$ac_new_val'" >&5
-$as_echo "$as_me:   current value: \`$ac_new_val'" >&2;}
-      fi;;
-  esac
-  # Pass precious variables to config.status.
-  if test "$ac_new_set" = set; then
-    case $ac_new_val in
-    *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
-    *) ac_arg=$ac_var=$ac_new_val ;;
-    esac
-    case " $ac_configure_args " in
-      *" '$ac_arg' "*) ;; # Avoid dups.  Use of quotes ensures accuracy.
-      *) as_fn_append ac_configure_args " '$ac_arg'" ;;
-    esac
-  fi
-done
-if $ac_cache_corrupted; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-  { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
-$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
-  as_fn_error "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
-fi
-## -------------------- ##
-## Main body of script. ##
-## -------------------- ##
-
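-# Shell variables used to compile, preprocess and link C test programs.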
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-
-#changing the default prefix value to an empty string, so that the binary
-#does not get installed within the system
-
-
-#add the new argument --with-home
-
-# Check whether --with-home was given.
-if test "${with_home+set}" = set; then :
-  withval=$with_home;
-fi
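-# Example invocation (path shown is illustrative only):
-#   ./configure --with-home=/opt/hadoop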
-
-
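-# runAs.h is generated when config.status runs (by autoconf convention,
-# from a runAs.h.in template).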
-ac_config_headers="$ac_config_headers runAs.h"
-
-
-# Checks for programs.
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
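-# Locate a C compiler, trying in order: ${ac_tool_prefix}gcc, plain gcc,
-# ${ac_tool_prefix}cc, plain cc (rejecting /usr/ucb/cc), then cl.exe.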
-if test -n "$ac_tool_prefix"; then
-  # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
-set dummy ${ac_tool_prefix}gcc; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_CC="${ac_tool_prefix}gcc"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
-$as_echo "$CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
-fi
-if test -z "$ac_cv_prog_CC"; then
-  ac_ct_CC=$CC
-  # Extract the first word of "gcc", so it can be a program name with args.
-set dummy gcc; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$ac_ct_CC"; then
-  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_ac_ct_CC="gcc"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-fi
-fi
-ac_ct_CC=$ac_cv_prog_ac_ct_CC
-if test -n "$ac_ct_CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
-$as_echo "$ac_ct_CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-  if test "x$ac_ct_CC" = x; then
-    CC=""
-  else
-    case $cross_compiling:$ac_tool_warned in
-yes:)
-{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
-$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
-ac_tool_warned=yes ;;
-esac
-    CC=$ac_ct_CC
-  fi
-else
-  CC="$ac_cv_prog_CC"
-fi
-
-if test -z "$CC"; then
-          if test -n "$ac_tool_prefix"; then
-    # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
-set dummy ${ac_tool_prefix}cc; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_CC="${ac_tool_prefix}cc"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
-$as_echo "$CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
-  fi
-fi
-if test -z "$CC"; then
-  # Extract the first word of "cc", so it can be a program name with args.
-set dummy cc; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-  ac_prog_rejected=no
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
-       ac_prog_rejected=yes
-       continue
-     fi
-    ac_cv_prog_CC="cc"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-if test $ac_prog_rejected = yes; then
-  # We found a bogon in the path, so make sure we never use it.
-  set dummy $ac_cv_prog_CC
-  shift
-  if test $# != 0; then
-    # We chose a different compiler from the bogus one.
-    # However, it has the same basename, so the bogon will be chosen
-    # first if we set CC to just the basename; use the full file name.
-    shift
-    ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
-  fi
-fi
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
-$as_echo "$CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
-fi
-if test -z "$CC"; then
-  if test -n "$ac_tool_prefix"; then
-  for ac_prog in cl.exe
-  do
-    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
-set dummy $ac_tool_prefix$ac_prog; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
-$as_echo "$CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
-    test -n "$CC" && break
-  done
-fi
-if test -z "$CC"; then
-  ac_ct_CC=$CC
-  for ac_prog in cl.exe
-do
-  # Extract the first word of "$ac_prog", so it can be a program name with args.
-set dummy $ac_prog; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$ac_ct_CC"; then
-  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_ac_ct_CC="$ac_prog"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-fi
-fi
-ac_ct_CC=$ac_cv_prog_ac_ct_CC
-if test -n "$ac_ct_CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
-$as_echo "$ac_ct_CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
-  test -n "$ac_ct_CC" && break
-done
-
-  if test "x$ac_ct_CC" = x; then
-    CC=""
-  else
-    case $cross_compiling:$ac_tool_warned in
-yes:)
-{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
-$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
-ac_tool_warned=yes ;;
-esac
-    CC=$ac_ct_CC
-  fi
-fi
-
-fi
-
-
-test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "no acceptable C compiler found in \$PATH
-See \`config.log' for more details." "$LINENO" 5; }
-
-# Provide some information about the compiler.
-$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
-set X $ac_compile
-ac_compiler=$2
-for ac_option in --version -v -V -qversion; do
-  { { ac_try="$ac_compiler $ac_option >&5"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    sed '10a\
-... rest of stderr output deleted ...
-         10q' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-  fi
-  rm -f conftest.er1 conftest.err
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }
-done
-
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-ac_clean_files_save=$ac_clean_files
-ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
-# Try to create an executable without -o first, disregard a.out.
-# It will help us diagnose broken compilers, and gives us a first idea of
-# the executable suffix (exeext).
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
-$as_echo_n "checking whether the C compiler works... " >&6; }
-ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
-
-# The possible output files:
-ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
-
-ac_rmfiles=
-for ac_file in $ac_files
-do
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
-    * ) ac_rmfiles="$ac_rmfiles $ac_file";;
-  esac
-done
-rm -f $ac_rmfiles
-
-if { { ac_try="$ac_link_default"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link_default") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; then :
-  # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
-# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
-# in a Makefile.  We should not override ac_cv_exeext if it was cached,
-# so that the user can short-circuit this test for compilers unknown to
-# Autoconf.
-for ac_file in $ac_files ''
-do
-  test -f "$ac_file" || continue
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
-	;;
-    [ab].out )
-	# We found the default executable, but exeext='' is most
-	# certainly right.
-	break;;
-    *.* )
-	if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
-	then :; else
-	   ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
-	fi
-	# We set ac_cv_exeext here because the later test for it is not
-	# safe: cross compilers may not add the suffix if given an `-o'
-	# argument, so we may need to know it at that point already.
-	# Even if this section looks crufty: it has the advantage of
-	# actually working.
-	break;;
-    * )
-	break;;
-  esac
-done
-test "$ac_cv_exeext" = no && ac_cv_exeext=
-
-else
-  ac_file=''
-fi
-if test -z "$ac_file"; then :
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-$as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-{ as_fn_set_status 77
-as_fn_error "C compiler cannot create executables
-See \`config.log' for more details." "$LINENO" 5; }; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
-$as_echo_n "checking for C compiler default output file name... " >&6; }
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
-$as_echo "$ac_file" >&6; }
-ac_exeext=$ac_cv_exeext
-
-rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
-ac_clean_files=$ac_clean_files_save
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
-$as_echo_n "checking for suffix of executables... " >&6; }
-if { { ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; then :
-  # If both `conftest.exe' and `conftest' are `present' (well, observable)
-# catch `conftest.exe'.  For instance with Cygwin, `ls conftest' will
-# work properly (i.e., refer to `conftest.exe'), while it won't with
-# `rm'.
-for ac_file in conftest.exe conftest conftest.*; do
-  test -f "$ac_file" || continue
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
-    *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
-	  break;;
-    * ) break;;
-  esac
-done
-else
-  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "cannot compute suffix of executables: cannot compile and link
-See \`config.log' for more details." "$LINENO" 5; }
-fi
-rm -f conftest conftest$ac_cv_exeext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
-$as_echo "$ac_cv_exeext" >&6; }
-
-rm -f conftest.$ac_ext
-EXEEXT=$ac_cv_exeext
-ac_exeext=$EXEEXT
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <stdio.h>
-int
-main ()
-{
-FILE *f = fopen ("conftest.out", "w");
- return ferror (f) || fclose (f) != 0;
-
-  ;
-  return 0;
-}
-_ACEOF
-ac_clean_files="$ac_clean_files conftest.out"
-# Check that the compiler produces executables we can run.  If not, either
-# the compiler is broken, or we cross compile.
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
-$as_echo_n "checking whether we are cross compiling... " >&6; }
-if test "$cross_compiling" != yes; then
-  { { ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }
-  if { ac_try='./conftest$ac_cv_exeext'
-  { { case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_try") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; }; then
-    cross_compiling=no
-  else
-    if test "$cross_compiling" = maybe; then
-	cross_compiling=yes
-    else
-	{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "cannot run C compiled programs.
-If you meant to cross compile, use \`--host'.
-See \`config.log' for more details." "$LINENO" 5; }
-    fi
-  fi
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
-$as_echo "$cross_compiling" >&6; }
-
-rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
-ac_clean_files=$ac_clean_files_save
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
-$as_echo_n "checking for suffix of object files... " >&6; }
-if test "${ac_cv_objext+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.o conftest.obj
-if { { ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_compile") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; then :
-  for ac_file in conftest.o conftest.obj conftest.*; do
-  test -f "$ac_file" || continue;
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
-    *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
-       break;;
-  esac
-done
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "cannot compute suffix of object files: cannot compile
-See \`config.log' for more details." "$LINENO" 5; }
-fi
-rm -f conftest.$ac_cv_objext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
-$as_echo "$ac_cv_objext" >&6; }
-OBJEXT=$ac_cv_objext
-ac_objext=$OBJEXT
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
-$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
-if test "${ac_cv_c_compiler_gnu+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-#ifndef __GNUC__
-       choke me
-#endif
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_compiler_gnu=yes
-else
-  ac_compiler_gnu=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-ac_cv_c_compiler_gnu=$ac_compiler_gnu
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
-$as_echo "$ac_cv_c_compiler_gnu" >&6; }
-if test $ac_compiler_gnu = yes; then
-  GCC=yes
-else
-  GCC=
-fi
-ac_test_CFLAGS=${CFLAGS+set}
-ac_save_CFLAGS=$CFLAGS
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
-$as_echo_n "checking whether $CC accepts -g... " >&6; }
-if test "${ac_cv_prog_cc_g+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  ac_save_c_werror_flag=$ac_c_werror_flag
-   ac_c_werror_flag=yes
-   ac_cv_prog_cc_g=no
-   CFLAGS="-g"
-   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_prog_cc_g=yes
-else
-  CFLAGS=""
-      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-
-else
-  ac_c_werror_flag=$ac_save_c_werror_flag
-	 CFLAGS="-g"
-	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_prog_cc_g=yes
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-   ac_c_werror_flag=$ac_save_c_werror_flag
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
-$as_echo "$ac_cv_prog_cc_g" >&6; }
-if test "$ac_test_CFLAGS" = set; then
-  CFLAGS=$ac_save_CFLAGS
-elif test $ac_cv_prog_cc_g = yes; then
-  if test "$GCC" = yes; then
-    CFLAGS="-g -O2"
-  else
-    CFLAGS="-g"
-  fi
-else
-  if test "$GCC" = yes; then
-    CFLAGS="-O2"
-  else
-    CFLAGS=
-  fi
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
-$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
-if test "${ac_cv_prog_cc_c89+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  ac_cv_prog_cc_c89=no
-ac_save_CC=$CC
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <stdarg.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-/* Most of the following tests are stolen from RCS 5.7's src/conf.sh.  */
-struct buf { int x; };
-FILE * (*rcsopen) (struct buf *, struct stat *, int);
-static char *e (p, i)
-     char **p;
-     int i;
-{
-  return p[i];
-}
-static char *f (char * (*g) (char **, int), char **p, ...)
-{
-  char *s;
-  va_list v;
-  va_start (v,p);
-  s = g (p, va_arg (v,int));
-  va_end (v);
-  return s;
-}
-
-/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
-   function prototypes and stuff, but not '\xHH' hex character constants.
-   These don't provoke an error unfortunately, instead are silently treated
-   as 'x'.  The following induces an error, until -std is added to get
-   proper ANSI mode.  Curiously '\x00'!='x' always comes out true, for an
-   array size at least.  It's necessary to write '\x00'==0 to get something
-   that's true only with -std.  */
-int osf4_cc_array ['\x00' == 0 ? 1 : -1];
-
-/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
-   inside strings and character constants.  */
-#define FOO(x) 'x'
-int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
-
-int test (int i, double x);
-struct s1 {int (*f) (int a);};
-struct s2 {int (*f) (double a);};
-int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
-int argc;
-char **argv;
-int
-main ()
-{
-return f (e, argv, 0) != argv[0]  ||  f (e, argv, 1) != argv[1];
-  ;
-  return 0;
-}
-_ACEOF
-for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
-	-Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
-do
-  CC="$ac_save_CC $ac_arg"
-  if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_prog_cc_c89=$ac_arg
-fi
-rm -f core conftest.err conftest.$ac_objext
-  test "x$ac_cv_prog_cc_c89" != "xno" && break
-done
-rm -f conftest.$ac_ext
-CC=$ac_save_CC
-
-fi
-# AC_CACHE_VAL
-case "x$ac_cv_prog_cc_c89" in
-  x)
-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
-$as_echo "none needed" >&6; } ;;
-  xno)
-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
-$as_echo "unsupported" >&6; } ;;
-  *)
-    CC="$CC $ac_cv_prog_cc_c89"
-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
-$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
-esac
-if test "x$ac_cv_prog_cc_c89" != xno; then :
-
-fi
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-# Checks for libraries.
-
-# Checks for header files.
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5
-$as_echo_n "checking how to run the C preprocessor... " >&6; }
-# On Suns, sometimes $CPP names a directory.
-if test -n "$CPP" && test -d "$CPP"; then
-  CPP=
-fi
-if test -z "$CPP"; then
-  if test "${ac_cv_prog_CPP+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-      # Double quotes because CPP needs to be expanded
-    for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
-    do
-      ac_preproc_ok=false
-for ac_c_preproc_warn_flag in '' yes
-do
-  # Use a header file that comes with gcc, so configuring glibc
-  # with a fresh cross-compiler works.
-  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-  # <limits.h> exists even on freestanding compilers.
-  # On the NeXT, cc -E runs the code through the compiler's parser,
-  # not just through cpp. "Syntax error" is here to catch this case.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
-		     Syntax error
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-
-else
-  # Broken: fails on valid input.
-continue
-fi
-rm -f conftest.err conftest.$ac_ext
-
-  # OK, works on sane cases.  Now check whether nonexistent headers
-  # can be detected and how.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <ac_nonexistent.h>
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-  # Broken: success on invalid input.
-continue
-else
-  # Passes both tests.
-ac_preproc_ok=:
-break
-fi
-rm -f conftest.err conftest.$ac_ext
-
-done
-# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.err conftest.$ac_ext
-if $ac_preproc_ok; then :
-  break
-fi
-
-    done
-    ac_cv_prog_CPP=$CPP
-
-fi
-  CPP=$ac_cv_prog_CPP
-else
-  ac_cv_prog_CPP=$CPP
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5
-$as_echo "$CPP" >&6; }
-ac_preproc_ok=false
-for ac_c_preproc_warn_flag in '' yes
-do
-  # Use a header file that comes with gcc, so configuring glibc
-  # with a fresh cross-compiler works.
-  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-  # <limits.h> exists even on freestanding compilers.
-  # On the NeXT, cc -E runs the code through the compiler's parser,
-  # not just through cpp. "Syntax error" is here to catch this case.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
-		     Syntax error
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-
-else
-  # Broken: fails on valid input.
-continue
-fi
-rm -f conftest.err conftest.$ac_ext
-
-  # OK, works on sane cases.  Now check whether nonexistent headers
-  # can be detected and how.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <ac_nonexistent.h>
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-  # Broken: success on invalid input.
-continue
-else
-  # Passes both tests.
-ac_preproc_ok=:
-break
-fi
-rm -f conftest.err conftest.$ac_ext
-
-done
-# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.err conftest.$ac_ext
-if $ac_preproc_ok; then :
-
-else
-  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "C preprocessor \"$CPP\" fails sanity check
-See \`config.log' for more details." "$LINENO" 5; }
-fi
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
-$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
-if test "${ac_cv_path_GREP+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -z "$GREP"; then
-  ac_path_GREP_found=false
-  # Loop through the user's path and test for each of PROGNAME-LIST
-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_prog in grep ggrep; do
-    for ac_exec_ext in '' $ac_executable_extensions; do
-      ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
-      { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue
-# Check for GNU ac_path_GREP and select it if it is found.
-  # Check for GNU $ac_path_GREP
-case `"$ac_path_GREP" --version 2>&1` in
-*GNU*)
-  ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
-*)
-  ac_count=0
-  $as_echo_n 0123456789 >"conftest.in"
-  while :
-  do
-    cat "conftest.in" "conftest.in" >"conftest.tmp"
-    mv "conftest.tmp" "conftest.in"
-    cp "conftest.in" "conftest.nl"
-    $as_echo 'GREP' >> "conftest.nl"
-    "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
-    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
-    as_fn_arith $ac_count + 1 && ac_count=$as_val
-    if test $ac_count -gt ${ac_path_GREP_max-0}; then
-      # Best one so far, save it but keep looking for a better one
-      ac_cv_path_GREP="$ac_path_GREP"
-      ac_path_GREP_max=$ac_count
-    fi
-    # 10*(2^10) chars as input seems more than enough
-    test $ac_count -gt 10 && break
-  done
-  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
-esac
-
-      $ac_path_GREP_found && break 3
-    done
-  done
-  done
-IFS=$as_save_IFS
-  if test -z "$ac_cv_path_GREP"; then
-    as_fn_error "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
-  fi
-else
-  ac_cv_path_GREP=$GREP
-fi
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
-$as_echo "$ac_cv_path_GREP" >&6; }
- GREP="$ac_cv_path_GREP"
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
-$as_echo_n "checking for egrep... " >&6; }
-if test "${ac_cv_path_EGREP+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
-   then ac_cv_path_EGREP="$GREP -E"
-   else
-     if test -z "$EGREP"; then
-  ac_path_EGREP_found=false
-  # Loop through the user's path and test for each of PROGNAME-LIST
-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_prog in egrep; do
-    for ac_exec_ext in '' $ac_executable_extensions; do
-      ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
-      { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue
-# Check for GNU ac_path_EGREP and select it if it is found.
-  # Check for GNU $ac_path_EGREP
-case `"$ac_path_EGREP" --version 2>&1` in
-*GNU*)
-  ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
-*)
-  ac_count=0
-  $as_echo_n 0123456789 >"conftest.in"
-  while :
-  do
-    cat "conftest.in" "conftest.in" >"conftest.tmp"
-    mv "conftest.tmp" "conftest.in"
-    cp "conftest.in" "conftest.nl"
-    $as_echo 'EGREP' >> "conftest.nl"
-    "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
-    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
-    as_fn_arith $ac_count + 1 && ac_count=$as_val
-    if test $ac_count -gt ${ac_path_EGREP_max-0}; then
-      # Best one so far, save it but keep looking for a better one
-      ac_cv_path_EGREP="$ac_path_EGREP"
-      ac_path_EGREP_max=$ac_count
-    fi
-    # 10*(2^10) chars as input seems more than enough
-    test $ac_count -gt 10 && break
-  done
-  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
-esac
-
-      $ac_path_EGREP_found && break 3
-    done
-  done
-  done
-IFS=$as_save_IFS
-  if test -z "$ac_cv_path_EGREP"; then
-    as_fn_error "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
-  fi
-else
-  ac_cv_path_EGREP=$EGREP
-fi
-
-   fi
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
-$as_echo "$ac_cv_path_EGREP" >&6; }
- EGREP="$ac_cv_path_EGREP"
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
-$as_echo_n "checking for ANSI C header files... " >&6; }
-if test "${ac_cv_header_stdc+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <stdlib.h>
-#include <stdarg.h>
-#include <string.h>
-#include <float.h>
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_header_stdc=yes
-else
-  ac_cv_header_stdc=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
-if test $ac_cv_header_stdc = yes; then
-  # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <string.h>
-
-_ACEOF
-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
-  $EGREP "memchr" >/dev/null 2>&1; then :
-
-else
-  ac_cv_header_stdc=no
-fi
-rm -f conftest*
-
-fi
-
-if test $ac_cv_header_stdc = yes; then
-  # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <stdlib.h>
-
-_ACEOF
-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
-  $EGREP "free" >/dev/null 2>&1; then :
-
-else
-  ac_cv_header_stdc=no
-fi
-rm -f conftest*
-
-fi
-
-if test $ac_cv_header_stdc = yes; then
-  # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
-  if test "$cross_compiling" = yes; then :
-  :
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <ctype.h>
-#include <stdlib.h>
-#if ((' ' & 0x0FF) == 0x020)
-# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
-# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
-#else
-# define ISLOWER(c) \
-		   (('a' <= (c) && (c) <= 'i') \
-		     || ('j' <= (c) && (c) <= 'r') \
-		     || ('s' <= (c) && (c) <= 'z'))
-# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
-#endif
-
-#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
-int
-main ()
-{
-  int i;
-  for (i = 0; i < 256; i++)
-    if (XOR (islower (i), ISLOWER (i))
-	|| toupper (i) != TOUPPER (i))
-      return 2;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
-
-else
-  ac_cv_header_stdc=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
-  conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
-fi
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5
-$as_echo "$ac_cv_header_stdc" >&6; }
-if test $ac_cv_header_stdc = yes; then
-
-$as_echo "#define STDC_HEADERS 1" >>confdefs.h
-
-fi
-
-# On IRIX 5.3, sys/types and inttypes.h are conflicting.
-for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
-		  inttypes.h stdint.h unistd.h
-do :
-  as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
-ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
-"
-eval as_val=\$$as_ac_Header
-   if test "x$as_val" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-
-done
-
-
-for ac_header in stdlib.h string.h unistd.h fcntl.h
-do :
-  as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
-ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
-eval as_val=\$$as_ac_Header
-   if test "x$as_val" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-
-done
-
-
-#check for HADOOP_PREFIX
-if test "$with_home" != ""
-then
-cat >>confdefs.h <<_ACEOF
-#define HADOOP_PREFIX "$with_home"
-_ACEOF
-
-fi
-
-# Checks for typedefs, structures, and compiler characteristics.
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5
-$as_echo_n "checking for an ANSI C-conforming const... " >&6; }
-if test "${ac_cv_c_const+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-/* FIXME: Include the comments suggested by Paul. */
-#ifndef __cplusplus
-  /* Ultrix mips cc rejects this.  */
-  typedef int charset[2];
-  const charset cs;
-  /* SunOS 4.1.1 cc rejects this.  */
-  char const *const *pcpcc;
-  char **ppc;
-  /* NEC SVR4.0.2 mips cc rejects this.  */
-  struct point {int x, y;};
-  static struct point const zero = {0,0};
-  /* AIX XL C 1.02.0.0 rejects this.
-     It does not let you subtract one const X* pointer from another in
-     an arm of an if-expression whose if-part is not a constant
-     expression */
-  const char *g = "string";
-  pcpcc = &g + (g ? g-g : 0);
-  /* HPUX 7.0 cc rejects these. */
-  ++pcpcc;
-  ppc = (char**) pcpcc;
-  pcpcc = (char const *const *) ppc;
-  { /* SCO 3.2v4 cc rejects this.  */
-    char *t;
-    char const *s = 0 ? (char *) 0 : (char const *) 0;
-
-    *t++ = 0;
-    if (s) return 0;
-  }
-  { /* Someone thinks the Sun supposedly-ANSI compiler will reject this.  */
-    int x[] = {25, 17};
-    const int *foo = &x[0];
-    ++foo;
-  }
-  { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */
-    typedef const int *iptr;
-    iptr p = 0;
-    ++p;
-  }
-  { /* AIX XL C 1.02.0.0 rejects this saying
-       "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */
-    struct s { int j; const int *ap[3]; };
-    struct s *b; b->j = 5;
-  }
-  { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */
-    const int foo = 10;
-    if (!foo) return 0;
-  }
-  return !cs[0] && !zero.x;
-#endif
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_c_const=yes
-else
-  ac_cv_c_const=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5
-$as_echo "$ac_cv_c_const" >&6; }
-if test $ac_cv_c_const = no; then
-
-$as_echo "#define const /**/" >>confdefs.h
-
-fi
-
-ac_fn_c_check_type "$LINENO" "pid_t" "ac_cv_type_pid_t" "$ac_includes_default"
-if test "x$ac_cv_type_pid_t" = x""yes; then :
-
-else
-
-cat >>confdefs.h <<_ACEOF
-#define pid_t int
-_ACEOF
-
-fi
-
-ac_fn_c_check_type "$LINENO" "mode_t" "ac_cv_type_mode_t" "$ac_includes_default"
-if test "x$ac_cv_type_mode_t" = x""yes; then :
-
-else
-
-cat >>confdefs.h <<_ACEOF
-#define mode_t int
-_ACEOF
-
-fi
-
-ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default"
-if test "x$ac_cv_type_size_t" = x""yes; then :
-
-else
-
-cat >>confdefs.h <<_ACEOF
-#define size_t unsigned int
-_ACEOF
-
-fi
-
-
-# Checks for library functions.
-for ac_header in stdlib.h
-do :
-  ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default"
-if test "x$ac_cv_header_stdlib_h" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define HAVE_STDLIB_H 1
-_ACEOF
-
-fi
-
-done
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible malloc" >&5
-$as_echo_n "checking for GNU libc compatible malloc... " >&6; }
-if test "${ac_cv_func_malloc_0_nonnull+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test "$cross_compiling" = yes; then :
-  ac_cv_func_malloc_0_nonnull=no
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#if defined STDC_HEADERS || defined HAVE_STDLIB_H
-# include <stdlib.h>
-#else
-char *malloc ();
-#endif
-
-int
-main ()
-{
-return ! malloc (0);
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
-  ac_cv_func_malloc_0_nonnull=yes
-else
-  ac_cv_func_malloc_0_nonnull=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
-  conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_malloc_0_nonnull" >&5
-$as_echo "$ac_cv_func_malloc_0_nonnull" >&6; }
-if test $ac_cv_func_malloc_0_nonnull = yes; then :
-
-$as_echo "#define HAVE_MALLOC 1" >>confdefs.h
-
-else
-  $as_echo "#define HAVE_MALLOC 0" >>confdefs.h
-
-   case " $LIBOBJS " in
-  *" malloc.$ac_objext "* ) ;;
-  *) LIBOBJS="$LIBOBJS malloc.$ac_objext"
- ;;
-esac
-
-
-$as_echo "#define malloc rpl_malloc" >>confdefs.h
-
-fi
-
-
-for ac_header in stdlib.h
-do :
-  ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default"
-if test "x$ac_cv_header_stdlib_h" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define HAVE_STDLIB_H 1
-_ACEOF
-
-fi
-
-done
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible realloc" >&5
-$as_echo_n "checking for GNU libc compatible realloc... " >&6; }
-if test "${ac_cv_func_realloc_0_nonnull+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test "$cross_compiling" = yes; then :
-  ac_cv_func_realloc_0_nonnull=no
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#if defined STDC_HEADERS || defined HAVE_STDLIB_H
-# include <stdlib.h>
-#else
-char *realloc ();
-#endif
-
-int
-main ()
-{
-return ! realloc (0, 0);
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
-  ac_cv_func_realloc_0_nonnull=yes
-else
-  ac_cv_func_realloc_0_nonnull=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
-  conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_realloc_0_nonnull" >&5
-$as_echo "$ac_cv_func_realloc_0_nonnull" >&6; }
-if test $ac_cv_func_realloc_0_nonnull = yes; then :
-
-$as_echo "#define HAVE_REALLOC 1" >>confdefs.h
-
-else
-  $as_echo "#define HAVE_REALLOC 0" >>confdefs.h
-
-   case " $LIBOBJS " in
-  *" realloc.$ac_objext "* ) ;;
-  *) LIBOBJS="$LIBOBJS realloc.$ac_objext"
- ;;
-esac
-
-
-$as_echo "#define realloc rpl_realloc" >>confdefs.h
-
-fi
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for uid_t in sys/types.h" >&5
-$as_echo_n "checking for uid_t in sys/types.h... " >&6; }
-if test "${ac_cv_type_uid_t+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <sys/types.h>
-
-_ACEOF
-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
-  $EGREP "uid_t" >/dev/null 2>&1; then :
-  ac_cv_type_uid_t=yes
-else
-  ac_cv_type_uid_t=no
-fi
-rm -f conftest*
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_uid_t" >&5
-$as_echo "$ac_cv_type_uid_t" >&6; }
-if test $ac_cv_type_uid_t = no; then
-
-$as_echo "#define uid_t int" >>confdefs.h
-
-
-$as_echo "#define gid_t int" >>confdefs.h
-
-fi
-
-for ac_header in unistd.h
-do :
-  ac_fn_c_check_header_mongrel "$LINENO" "unistd.h" "ac_cv_header_unistd_h" "$ac_includes_default"
-if test "x$ac_cv_header_unistd_h" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define HAVE_UNISTD_H 1
-_ACEOF
-
-fi
-
-done
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for working chown" >&5
-$as_echo_n "checking for working chown... " >&6; }
-if test "${ac_cv_func_chown_works+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test "$cross_compiling" = yes; then :
-  ac_cv_func_chown_works=no
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-$ac_includes_default
-#include <fcntl.h>
-
-int
-main ()
-{
-  char *f = "conftest.chown";
-  struct stat before, after;
-
-  if (creat (f, 0600) < 0)
-    return 1;
-  if (stat (f, &before) < 0)
-    return 1;
-  if (chown (f, (uid_t) -1, (gid_t) -1) == -1)
-    return 1;
-  if (stat (f, &after) < 0)
-    return 1;
-  return ! (before.st_uid == after.st_uid && before.st_gid == after.st_gid);
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
-  ac_cv_func_chown_works=yes
-else
-  ac_cv_func_chown_works=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
-  conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
-rm -f conftest.chown
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_chown_works" >&5
-$as_echo "$ac_cv_func_chown_works" >&6; }
-if test $ac_cv_func_chown_works = yes; then
-
-$as_echo "#define HAVE_CHOWN 1" >>confdefs.h
-
-fi
-
-for ac_func in strerror memset mkdir rmdir strdup
-do :
-  as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
-ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
-   if test "x$as_val" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-done
-
-
-ac_config_files="$ac_config_files Makefile"
-
-cat >confcache <<\_ACEOF
-# This file is a shell script that caches the results of configure
-# tests run on this system so they can be shared between configure
-# scripts and configure runs, see configure's option --config-cache.
-# It is not useful on other systems.  If it contains results you don't
-# want to keep, you may remove or edit it.
-#
-# config.status only pays attention to the cache file if you give it
-# the --recheck option to rerun configure.
-#
-# `ac_cv_env_foo' variables (set or unset) will be overridden when
-# loading this file, other *unset* `ac_cv_foo' will be assigned the
-# following values.
-
-_ACEOF
-
-# The following way of writing the cache mishandles newlines in values,
-# but we know of no workaround that is simple, portable, and efficient.
-# So, we kill variables containing newlines.
-# Ultrix sh set writes to stderr and can't be redirected directly,
-# and sets the high bit in the cache file unless we assign to the vars.
-(
-  for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
-    eval ac_val=\$$ac_var
-    case $ac_val in #(
-    *${as_nl}*)
-      case $ac_var in #(
-      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
-$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
-      esac
-      case $ac_var in #(
-      _ | IFS | as_nl) ;; #(
-      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
-      *) { eval $ac_var=; unset $ac_var;} ;;
-      esac ;;
-    esac
-  done
-
-  (set) 2>&1 |
-    case $as_nl`(ac_space=' '; set) 2>&1` in #(
-    *${as_nl}ac_space=\ *)
-      # `set' does not quote correctly, so add quotes: double-quote
-      # substitution turns \\\\ into \\, and sed turns \\ into \.
-      sed -n \
-	"s/'/'\\\\''/g;
-	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
-      ;; #(
-    *)
-      # `set' quotes correctly as required by POSIX, so do not add quotes.
-      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
-      ;;
-    esac |
-    sort
-) |
-  sed '
-     /^ac_cv_env_/b end
-     t clear
-     :clear
-     s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
-     t end
-     s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
-     :end' >>confcache
-if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
-  if test -w "$cache_file"; then
-    test "x$cache_file" != "x/dev/null" &&
-      { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
-$as_echo "$as_me: updating cache $cache_file" >&6;}
-    cat confcache >$cache_file
-  else
-    { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
-$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
-  fi
-fi
-rm -f confcache
-
-test "x$prefix" = xNONE && prefix=$ac_default_prefix
-# Let make expand exec_prefix.
-test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
-
-DEFS=-DHAVE_CONFIG_H
-
-ac_libobjs=
-ac_ltlibobjs=
-for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
-  # 1. Remove the extension, and $U if already installed.
-  ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
-  ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
-  # 2. Prepend LIBOBJDIR.  When used with automake>=1.10 LIBOBJDIR
-  #    will be set to the directory where LIBOBJS objects are built.
-  as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
-  as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
-done
-LIBOBJS=$ac_libobjs
-
-LTLIBOBJS=$ac_ltlibobjs
-
-
-
-: ${CONFIG_STATUS=./config.status}
-ac_write_fail=0
-ac_clean_files_save=$ac_clean_files
-ac_clean_files="$ac_clean_files $CONFIG_STATUS"
-{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
-$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
-as_write_fail=0
-cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
-#! $SHELL
-# Generated by $as_me.
-# Run this file to recreate the current configuration.
-# Compiler output produced by configure, useful for debugging
-# configure, is in config.log if it exists.
-
-debug=false
-ac_cs_recheck=false
-ac_cs_silent=false
-
-SHELL=\${CONFIG_SHELL-$SHELL}
-export SHELL
-_ASEOF
-cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
-## -------------------- ##
-## M4sh Initialization. ##
-## -------------------- ##
-
-# Be more Bourne compatible
-DUALCASE=1; export DUALCASE # for MKS sh
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
-  emulate sh
-  NULLCMD=:
-  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
-  # is contrary to our usage.  Disable this feature.
-  alias -g '${1+"$@"}'='"$@"'
-  setopt NO_GLOB_SUBST
-else
-  case `(set -o) 2>/dev/null` in #(
-  *posix*) :
-    set -o posix ;; #(
-  *) :
-     ;;
-esac
-fi
-
-
-as_nl='
-'
-export as_nl
-# Printing a long string crashes Solaris 7 /usr/bin/printf.
-as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
-# Prefer a ksh shell builtin over an external printf program on Solaris,
-# but without wasting forks for bash or zsh.
-if test -z "$BASH_VERSION$ZSH_VERSION" \
-    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
-  as_echo='print -r --'
-  as_echo_n='print -rn --'
-elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
-  as_echo='printf %s\n'
-  as_echo_n='printf %s'
-else
-  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
-    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
-    as_echo_n='/usr/ucb/echo -n'
-  else
-    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
-    as_echo_n_body='eval
-      arg=$1;
-      case $arg in #(
-      *"$as_nl"*)
-	expr "X$arg" : "X\\(.*\\)$as_nl";
-	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
-      esac;
-      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
-    '
-    export as_echo_n_body
-    as_echo_n='sh -c $as_echo_n_body as_echo'
-  fi
-  export as_echo_body
-  as_echo='sh -c $as_echo_body as_echo'
-fi
-
-# The user is always right.
-if test "${PATH_SEPARATOR+set}" != set; then
-  PATH_SEPARATOR=:
-  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
-    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
-      PATH_SEPARATOR=';'
-  }
-fi
-
-
-# IFS
-# We need space, tab and new line, in precisely that order.  Quoting is
-# there to prevent editors from complaining about space-tab.
-# (If _AS_PATH_WALK were called with IFS unset, it would disable word
-# splitting by setting IFS to empty value.)
-IFS=" ""	$as_nl"
-
-# Find who we are.  Look in the path if we contain no directory separator.
-case $0 in #((
-  *[\\/]* ) as_myself=$0 ;;
-  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
-  done
-IFS=$as_save_IFS
-
-     ;;
-esac
-# We did not find ourselves, most probably we were run as `sh COMMAND'
-# in which case we are not to be found in the path.
-if test "x$as_myself" = x; then
-  as_myself=$0
-fi
-if test ! -f "$as_myself"; then
-  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
-  exit 1
-fi
-
-# Unset variables that we do not need and which cause bugs (e.g. in
-# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
-# suppresses any "Segmentation fault" message there.  '((' could
-# trigger a bug in pdksh 5.2.14.
-for as_var in BASH_ENV ENV MAIL MAILPATH
-do eval test x\${$as_var+set} = xset \
-  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
-done
-PS1='$ '
-PS2='> '
-PS4='+ '
-
-# NLS nuisances.
-LC_ALL=C
-export LC_ALL
-LANGUAGE=C
-export LANGUAGE
-
-# CDPATH.
-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
-
-
-# as_fn_error ERROR [LINENO LOG_FD]
-# ---------------------------------
-# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
-# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
-# script with status $?, using 1 if that was 0.
-as_fn_error ()
-{
-  as_status=$?; test $as_status -eq 0 && as_status=1
-  if test "$3"; then
-    as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-    $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3
-  fi
-  $as_echo "$as_me: error: $1" >&2
-  as_fn_exit $as_status
-} # as_fn_error
-
-
-# as_fn_set_status STATUS
-# -----------------------
-# Set $? to STATUS, without forking.
-as_fn_set_status ()
-{
-  return $1
-} # as_fn_set_status
-
-# as_fn_exit STATUS
-# -----------------
-# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
-as_fn_exit ()
-{
-  set +e
-  as_fn_set_status $1
-  exit $1
-} # as_fn_exit
-
-# as_fn_unset VAR
-# ---------------
-# Portably unset VAR.
-as_fn_unset ()
-{
-  { eval $1=; unset $1;}
-}
-as_unset=as_fn_unset
-# as_fn_append VAR VALUE
-# ----------------------
-# Append the text in VALUE to the end of the definition contained in VAR. Take
-# advantage of any shell optimizations that allow amortized linear growth over
-# repeated appends, instead of the typical quadratic growth present in naive
-# implementations.
-if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
-  eval 'as_fn_append ()
-  {
-    eval $1+=\$2
-  }'
-else
-  as_fn_append ()
-  {
-    eval $1=\$$1\$2
-  }
-fi # as_fn_append
-
-# as_fn_arith ARG...
-# ------------------
-# Perform arithmetic evaluation on the ARGs, and store the result in the
-# global $as_val. Take advantage of shells that can avoid forks. The arguments
-# must be portable across $(()) and expr.
-if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
-  eval 'as_fn_arith ()
-  {
-    as_val=$(( $* ))
-  }'
-else
-  as_fn_arith ()
-  {
-    as_val=`expr "$@" || test $? -eq 1`
-  }
-fi # as_fn_arith
-
-
-if expr a : '\(a\)' >/dev/null 2>&1 &&
-   test "X`expr 00001 : '.*\(...\)'`" = X001; then
-  as_expr=expr
-else
-  as_expr=false
-fi
-
-if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
-  as_basename=basename
-else
-  as_basename=false
-fi
-
-if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
-  as_dirname=dirname
-else
-  as_dirname=false
-fi
-
-as_me=`$as_basename -- "$0" ||
-$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
-	 X"$0" : 'X\(//\)$' \| \
-	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X/"$0" |
-    sed '/^.*\/\([^/][^/]*\)\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-
-# Avoid depending upon Character Ranges.
-as_cr_letters='abcdefghijklmnopqrstuvwxyz'
-as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-as_cr_Letters=$as_cr_letters$as_cr_LETTERS
-as_cr_digits='0123456789'
-as_cr_alnum=$as_cr_Letters$as_cr_digits
-
-ECHO_C= ECHO_N= ECHO_T=
-case `echo -n x` in #(((((
--n*)
-  case `echo 'xy\c'` in
-  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
-  xy)  ECHO_C='\c';;
-  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
-       ECHO_T='	';;
-  esac;;
-*)
-  ECHO_N='-n';;
-esac
-
-rm -f conf$$ conf$$.exe conf$$.file
-if test -d conf$$.dir; then
-  rm -f conf$$.dir/conf$$.file
-else
-  rm -f conf$$.dir
-  mkdir conf$$.dir 2>/dev/null
-fi
-if (echo >conf$$.file) 2>/dev/null; then
-  if ln -s conf$$.file conf$$ 2>/dev/null; then
-    as_ln_s='ln -s'
-    # ... but there are two gotchas:
-    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
-    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
-    # In both cases, we have to default to `cp -p'.
-    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
-      as_ln_s='cp -p'
-  elif ln conf$$.file conf$$ 2>/dev/null; then
-    as_ln_s=ln
-  else
-    as_ln_s='cp -p'
-  fi
-else
-  as_ln_s='cp -p'
-fi
-rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
-rmdir conf$$.dir 2>/dev/null
-
-
-# as_fn_mkdir_p
-# -------------
-# Create "$as_dir" as a directory, including parents if necessary.
-as_fn_mkdir_p ()
-{
-
-  case $as_dir in #(
-  -*) as_dir=./$as_dir;;
-  esac
-  test -d "$as_dir" || eval $as_mkdir_p || {
-    as_dirs=
-    while :; do
-      case $as_dir in #(
-      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
-      *) as_qdir=$as_dir;;
-      esac
-      as_dirs="'$as_qdir' $as_dirs"
-      as_dir=`$as_dirname -- "$as_dir" ||
-$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$as_dir" : 'X\(//\)[^/]' \| \
-	 X"$as_dir" : 'X\(//\)$' \| \
-	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X"$as_dir" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)[^/].*/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-      test -d "$as_dir" && break
-    done
-    test -z "$as_dirs" || eval "mkdir $as_dirs"
-  } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir"
-
-
-} # as_fn_mkdir_p
-if mkdir -p . 2>/dev/null; then
-  as_mkdir_p='mkdir -p "$as_dir"'
-else
-  test -d ./-p && rmdir ./-p
-  as_mkdir_p=false
-fi
-
-if test -x / >/dev/null 2>&1; then
-  as_test_x='test -x'
-else
-  if ls -dL / >/dev/null 2>&1; then
-    as_ls_L_option=L
-  else
-    as_ls_L_option=
-  fi
-  as_test_x='
-    eval sh -c '\''
-      if test -d "$1"; then
-	test -d "$1/.";
-      else
-	case $1 in #(
-	-*)set "./$1";;
-	esac;
-	case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
-	???[sx]*):;;*)false;;esac;fi
-    '\'' sh
-  '
-fi
-as_executable_p=$as_test_x
-
-# Sed expression to map a string onto a valid CPP name.
-as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
-
-# Sed expression to map a string onto a valid variable name.
-as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
-
-
-exec 6>&1
-## ----------------------------------- ##
-## Main body of $CONFIG_STATUS script. ##
-## ----------------------------------- ##
-_ASEOF
-test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-# Save the log message, to keep $0 and so on meaningful, and to
-# report actual input values of CONFIG_FILES etc. instead of their
-# values after options handling.
-ac_log="
-This file was extended by runAs $as_me 0.1, which was
-generated by GNU Autoconf 2.65.  Invocation command line was
-
-  CONFIG_FILES    = $CONFIG_FILES
-  CONFIG_HEADERS  = $CONFIG_HEADERS
-  CONFIG_LINKS    = $CONFIG_LINKS
-  CONFIG_COMMANDS = $CONFIG_COMMANDS
-  $ $0 $@
-
-on `(hostname || uname -n) 2>/dev/null | sed 1q`
-"
-
-_ACEOF
-
-case $ac_config_files in *"
-"*) set x $ac_config_files; shift; ac_config_files=$*;;
-esac
-
-case $ac_config_headers in *"
-"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
-esac
-
-
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-# Files that config.status was made for.
-config_files="$ac_config_files"
-config_headers="$ac_config_headers"
-
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-ac_cs_usage="\
-\`$as_me' instantiates files and other configuration actions
-from templates according to the current configuration.  Unless the files
-and actions are specified as TAGs, all are instantiated by default.
-
-Usage: $0 [OPTION]... [TAG]...
-
-  -h, --help       print this help, then exit
-  -V, --version    print version number and configuration settings, then exit
-      --config     print configuration, then exit
-  -q, --quiet, --silent
-                   do not print progress messages
-  -d, --debug      don't remove temporary files
-      --recheck    update $as_me by reconfiguring in the same conditions
-      --file=FILE[:TEMPLATE]
-                   instantiate the configuration file FILE
-      --header=FILE[:TEMPLATE]
-                   instantiate the configuration header FILE
-
-Configuration files:
-$config_files
-
-Configuration headers:
-$config_headers
-
-Report bugs to the package provider."
-
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
-ac_cs_version="\\
-runAs config.status 0.1
-configured by $0, generated by GNU Autoconf 2.65,
-  with options \\"\$ac_cs_config\\"
-
-Copyright (C) 2009 Free Software Foundation, Inc.
-This config.status script is free software; the Free Software Foundation
-gives unlimited permission to copy, distribute and modify it."
-
-ac_pwd='$ac_pwd'
-srcdir='$srcdir'
-test -n "\$AWK" || AWK=awk
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-# The default lists apply if the user does not specify any file.
-ac_need_defaults=:
-while test $# != 0
-do
-  case $1 in
-  --*=*)
-    ac_option=`expr "X$1" : 'X\([^=]*\)='`
-    ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
-    ac_shift=:
-    ;;
-  *)
-    ac_option=$1
-    ac_optarg=$2
-    ac_shift=shift
-    ;;
-  esac
-
-  case $ac_option in
-  # Handling of the options.
-  -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
-    ac_cs_recheck=: ;;
-  --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
-    $as_echo "$ac_cs_version"; exit ;;
-  --config | --confi | --conf | --con | --co | --c )
-    $as_echo "$ac_cs_config"; exit ;;
-  --debug | --debu | --deb | --de | --d | -d )
-    debug=: ;;
-  --file | --fil | --fi | --f )
-    $ac_shift
-    case $ac_optarg in
-    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
-    esac
-    as_fn_append CONFIG_FILES " '$ac_optarg'"
-    ac_need_defaults=false;;
-  --header | --heade | --head | --hea )
-    $ac_shift
-    case $ac_optarg in
-    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
-    esac
-    as_fn_append CONFIG_HEADERS " '$ac_optarg'"
-    ac_need_defaults=false;;
-  --he | --h)
-    # Conflict between --help and --header
-    as_fn_error "ambiguous option: \`$1'
-Try \`$0 --help' for more information.";;
-  --help | --hel | -h )
-    $as_echo "$ac_cs_usage"; exit ;;
-  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
-  | -silent | --silent | --silen | --sile | --sil | --si | --s)
-    ac_cs_silent=: ;;
-
-  # This is an error.
-  -*) as_fn_error "unrecognized option: \`$1'
-Try \`$0 --help' for more information." ;;
-
-  *) as_fn_append ac_config_targets " $1"
-     ac_need_defaults=false ;;
-
-  esac
-  shift
-done
-
-ac_configure_extra_args=
-
-if $ac_cs_silent; then
-  exec 6>/dev/null
-  ac_configure_extra_args="$ac_configure_extra_args --silent"
-fi
-
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-if \$ac_cs_recheck; then
-  set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
-  shift
-  \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
-  CONFIG_SHELL='$SHELL'
-  export CONFIG_SHELL
-  exec "\$@"
-fi
-
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-exec 5>>config.log
-{
-  echo
-  sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
-## Running $as_me. ##
-_ASBOX
-  $as_echo "$ac_log"
-} >&5
-
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-
-# Handling of arguments.
-for ac_config_target in $ac_config_targets
-do
-  case $ac_config_target in
-    "runAs.h") CONFIG_HEADERS="$CONFIG_HEADERS runAs.h" ;;
-    "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
-
-  *) as_fn_error "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
-  esac
-done
-
-
-# If the user did not use the arguments to specify the items to instantiate,
-# then the envvar interface is used.  Set only those that are not.
-# We use the long form for the default assignment because of an extremely
-# bizarre bug on SunOS 4.1.3.
-if $ac_need_defaults; then
-  test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
-  test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
-fi
-
-# Have a temporary directory for convenience.  Make it in the build tree
-# simply because there is no reason against having it here, and in addition,
-# creating and moving files from /tmp can sometimes cause problems.
-# Hook for its removal unless debugging.
-# Note that there is a small window in which the directory will not be cleaned:
-# after its creation but before its name has been assigned to `$tmp'.
-$debug ||
-{
-  tmp=
-  trap 'exit_status=$?
-  { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
-' 0
-  trap 'as_fn_exit 1' 1 2 13 15
-}
-# Create a (secure) tmp directory for tmp files.
-
-{
-  tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
-  test -n "$tmp" && test -d "$tmp"
-}  ||
-{
-  tmp=./conf$$-$RANDOM
-  (umask 077 && mkdir "$tmp")
-} || as_fn_error "cannot create a temporary directory in ." "$LINENO" 5
-
-# Set up the scripts for CONFIG_FILES section.
-# No need to generate them if there are no CONFIG_FILES.
-# This happens for instance with `./config.status config.h'.
-if test -n "$CONFIG_FILES"; then
-
-
-ac_cr=`echo X | tr X '\015'`
-# On cygwin, bash can eat \r inside `` if the user requested igncr.
-# But we know of no other shell where ac_cr would be empty at this
-# point, so we can use a bashism as a fallback.
-if test "x$ac_cr" = x; then
-  eval ac_cr=\$\'\\r\'
-fi
-ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
-if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
-  ac_cs_awk_cr='\r'
-else
-  ac_cs_awk_cr=$ac_cr
-fi
-
-echo 'BEGIN {' >"$tmp/subs1.awk" &&
-_ACEOF
-
-
-{
-  echo "cat >conf$$subs.awk <<_ACEOF" &&
-  echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
-  echo "_ACEOF"
-} >conf$$subs.sh ||
-  as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
-ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'`
-ac_delim='%!_!# '
-for ac_last_try in false false false false false :; do
-  . ./conf$$subs.sh ||
-    as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
-
-  ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
-  if test $ac_delim_n = $ac_delim_num; then
-    break
-  elif $ac_last_try; then
-    as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
-  else
-    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
-  fi
-done
-rm -f conf$$subs.sh
-
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-cat >>"\$tmp/subs1.awk" <<\\_ACAWK &&
-_ACEOF
-sed -n '
-h
-s/^/S["/; s/!.*/"]=/
-p
-g
-s/^[^!]*!//
-:repl
-t repl
-s/'"$ac_delim"'$//
-t delim
-:nl
-h
-s/\(.\{148\}\)..*/\1/
-t more1
-s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
-p
-n
-b repl
-:more1
-s/["\\]/\\&/g; s/^/"/; s/$/"\\/
-p
-g
-s/.\{148\}//
-t nl
-:delim
-h
-s/\(.\{148\}\)..*/\1/
-t more2
-s/["\\]/\\&/g; s/^/"/; s/$/"/
-p
-b
-:more2
-s/["\\]/\\&/g; s/^/"/; s/$/"\\/
-p
-g
-s/.\{148\}//
-t delim
-' <conf$$subs.awk | sed '
-/^[^""]/{
-  N
-  s/\n//
-}
-' >>$CONFIG_STATUS || ac_write_fail=1
-rm -f conf$$subs.awk
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-_ACAWK
-cat >>"\$tmp/subs1.awk" <<_ACAWK &&
-  for (key in S) S_is_set[key] = 1
-  FS = ""
-
-}
-{
-  line = $ 0
-  nfields = split(line, field, "@")
-  substed = 0
-  len = length(field[1])
-  for (i = 2; i < nfields; i++) {
-    key = field[i]
-    keylen = length(key)
-    if (S_is_set[key]) {
-      value = S[key]
-      line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
-      len += length(value) + length(field[++i])
-      substed = 1
-    } else
-      len += 1 + keylen
-  }
-
-  print line
-}
-
-_ACAWK
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
-  sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
-else
-  cat
-fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \
-  || as_fn_error "could not setup config files machinery" "$LINENO" 5
-_ACEOF
-
-# VPATH may cause trouble with some makes, so we remove $(srcdir),
-# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
-# trailing colons and then remove the whole line if VPATH becomes empty
-# (actually we leave an empty line to preserve line numbers).
-if test "x$srcdir" = x.; then
-  ac_vpsub='/^[	 ]*VPATH[	 ]*=/{
-s/:*\$(srcdir):*/:/
-s/:*\${srcdir}:*/:/
-s/:*@srcdir@:*/:/
-s/^\([^=]*=[	 ]*\):*/\1/
-s/:*$//
-s/^[^=]*=[	 ]*$//
-}'
-fi
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-fi # test -n "$CONFIG_FILES"
-
-# Set up the scripts for CONFIG_HEADERS section.
-# No need to generate them if there are no CONFIG_HEADERS.
-# This happens for instance with `./config.status Makefile'.
-if test -n "$CONFIG_HEADERS"; then
-cat >"$tmp/defines.awk" <<\_ACAWK ||
-BEGIN {
-_ACEOF
-
-# Transform confdefs.h into an awk script `defines.awk', embedded as
-# here-document in config.status, that substitutes the proper values into
-# config.h.in to produce config.h.
-
-# Create a delimiter string that does not exist in confdefs.h, to ease
-# handling of long lines.
-ac_delim='%!_!# '
-for ac_last_try in false false :; do
-  ac_t=`sed -n "/$ac_delim/p" confdefs.h`
-  if test -z "$ac_t"; then
-    break
-  elif $ac_last_try; then
-    as_fn_error "could not make $CONFIG_HEADERS" "$LINENO" 5
-  else
-    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
-  fi
-done
-
-# For the awk script, D is an array of macro values keyed by name,
-# likewise P contains macro parameters if any.  Preserve backslash
-# newline sequences.
-
-ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
-sed -n '
-s/.\{148\}/&'"$ac_delim"'/g
-t rset
-:rset
-s/^[	 ]*#[	 ]*define[	 ][	 ]*/ /
-t def
-d
-:def
-s/\\$//
-t bsnl
-s/["\\]/\\&/g
-s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
-D["\1"]=" \3"/p
-s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2"/p
-d
-:bsnl
-s/["\\]/\\&/g
-s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
-D["\1"]=" \3\\\\\\n"\\/p
-t cont
-s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
-t cont
-d
-:cont
-n
-s/.\{148\}/&'"$ac_delim"'/g
-t clear
-:clear
-s/\\$//
-t bsnlc
-s/["\\]/\\&/g; s/^/"/; s/$/"/p
-d
-:bsnlc
-s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
-b cont
-' <confdefs.h | sed '
-s/'"$ac_delim"'/"\\\
-"/g' >>$CONFIG_STATUS || ac_write_fail=1
-
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-  for (key in D) D_is_set[key] = 1
-  FS = ""
-}
-/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
-  line = \$ 0
-  split(line, arg, " ")
-  if (arg[1] == "#") {
-    defundef = arg[2]
-    mac1 = arg[3]
-  } else {
-    defundef = substr(arg[1], 2)
-    mac1 = arg[2]
-  }
-  split(mac1, mac2, "(") #)
-  macro = mac2[1]
-  prefix = substr(line, 1, index(line, defundef) - 1)
-  if (D_is_set[macro]) {
-    # Preserve the white space surrounding the "#".
-    print prefix "define", macro P[macro] D[macro]
-    next
-  } else {
-    # Replace #undef with comments.  This is necessary, for example,
-    # in the case of _POSIX_SOURCE, which is predefined and required
-    # on some systems where configure will not decide to define it.
-    if (defundef == "undef") {
-      print "/*", prefix defundef, macro, "*/"
-      next
-    }
-  }
-}
-{ print }
-_ACAWK
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-  as_fn_error "could not setup config headers machinery" "$LINENO" 5
-fi # test -n "$CONFIG_HEADERS"
-
-
-eval set X "  :F $CONFIG_FILES  :H $CONFIG_HEADERS    "
-shift
-for ac_tag
-do
-  case $ac_tag in
-  :[FHLC]) ac_mode=$ac_tag; continue;;
-  esac
-  case $ac_mode$ac_tag in
-  :[FHL]*:*);;
-  :L* | :C*:*) as_fn_error "invalid tag \`$ac_tag'" "$LINENO" 5;;
-  :[FH]-) ac_tag=-:-;;
-  :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
-  esac
-  ac_save_IFS=$IFS
-  IFS=:
-  set x $ac_tag
-  IFS=$ac_save_IFS
-  shift
-  ac_file=$1
-  shift
-
-  case $ac_mode in
-  :L) ac_source=$1;;
-  :[FH])
-    ac_file_inputs=
-    for ac_f
-    do
-      case $ac_f in
-      -) ac_f="$tmp/stdin";;
-      *) # Look for the file first in the build tree, then in the source tree
-	 # (if the path is not absolute).  The absolute path cannot be DOS-style,
-	 # because $ac_f cannot contain `:'.
-	 test -f "$ac_f" ||
-	   case $ac_f in
-	   [\\/$]*) false;;
-	   *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
-	   esac ||
-	   as_fn_error "cannot find input file: \`$ac_f'" "$LINENO" 5;;
-      esac
-      case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
-      as_fn_append ac_file_inputs " '$ac_f'"
-    done
-
-    # Let's still pretend it is `configure' which instantiates (i.e., don't
-    # use $as_me), people would be surprised to read:
-    #    /* config.h.  Generated by config.status.  */
-    configure_input='Generated from '`
-	  $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
-	`' by configure.'
-    if test x"$ac_file" != x-; then
-      configure_input="$ac_file.  $configure_input"
-      { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
-$as_echo "$as_me: creating $ac_file" >&6;}
-    fi
-    # Neutralize special characters interpreted by sed in replacement strings.
-    case $configure_input in #(
-    *\&* | *\|* | *\\* )
-       ac_sed_conf_input=`$as_echo "$configure_input" |
-       sed 's/[\\\\&|]/\\\\&/g'`;; #(
-    *) ac_sed_conf_input=$configure_input;;
-    esac
-
-    case $ac_tag in
-    *:-:* | *:-) cat >"$tmp/stdin" \
-      || as_fn_error "could not create $ac_file" "$LINENO" 5 ;;
-    esac
-    ;;
-  esac
-
-  ac_dir=`$as_dirname -- "$ac_file" ||
-$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$ac_file" : 'X\(//\)[^/]' \| \
-	 X"$ac_file" : 'X\(//\)$' \| \
-	 X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X"$ac_file" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)[^/].*/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-  as_dir="$ac_dir"; as_fn_mkdir_p
-  ac_builddir=.
-
-case "$ac_dir" in
-.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
-*)
-  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
-  # A ".." for each directory in $ac_dir_suffix.
-  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
-  case $ac_top_builddir_sub in
-  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
-  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
-  esac ;;
-esac
-ac_abs_top_builddir=$ac_pwd
-ac_abs_builddir=$ac_pwd$ac_dir_suffix
-# for backward compatibility:
-ac_top_builddir=$ac_top_build_prefix
-
-case $srcdir in
-  .)  # We are building in place.
-    ac_srcdir=.
-    ac_top_srcdir=$ac_top_builddir_sub
-    ac_abs_top_srcdir=$ac_pwd ;;
-  [\\/]* | ?:[\\/]* )  # Absolute name.
-    ac_srcdir=$srcdir$ac_dir_suffix;
-    ac_top_srcdir=$srcdir
-    ac_abs_top_srcdir=$srcdir ;;
-  *) # Relative name.
-    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
-    ac_top_srcdir=$ac_top_build_prefix$srcdir
-    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
-esac
-ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
-
-
-  case $ac_mode in
-  :F)
-  #
-  # CONFIG_FILE
-  #
-
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-# If the template does not know about datarootdir, expand it.
-# FIXME: This hack should be removed a few years after 2.60.
-ac_datarootdir_hack=; ac_datarootdir_seen=
-ac_sed_dataroot='
-/datarootdir/ {
-  p
-  q
-}
-/@datadir@/p
-/@docdir@/p
-/@infodir@/p
-/@localedir@/p
-/@mandir@/p'
-case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
-*datarootdir*) ac_datarootdir_seen=yes;;
-*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
-  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
-$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-  ac_datarootdir_hack='
-  s&@datadir@&$datadir&g
-  s&@docdir@&$docdir&g
-  s&@infodir@&$infodir&g
-  s&@localedir@&$localedir&g
-  s&@mandir@&$mandir&g
-  s&\\\${datarootdir}&$datarootdir&g' ;;
-esac
-_ACEOF
-
-# Neutralize VPATH when `$srcdir' = `.'.
-# Shell code in configure.ac might set extrasub.
-# FIXME: do we really want to maintain this feature?
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-ac_sed_extra="$ac_vpsub
-$extrasub
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-:t
-/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
-s|@configure_input@|$ac_sed_conf_input|;t t
-s&@top_builddir@&$ac_top_builddir_sub&;t t
-s&@top_build_prefix@&$ac_top_build_prefix&;t t
-s&@srcdir@&$ac_srcdir&;t t
-s&@abs_srcdir@&$ac_abs_srcdir&;t t
-s&@top_srcdir@&$ac_top_srcdir&;t t
-s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
-s&@builddir@&$ac_builddir&;t t
-s&@abs_builddir@&$ac_abs_builddir&;t t
-s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
-$ac_datarootdir_hack
-"
-eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \
-  || as_fn_error "could not create $ac_file" "$LINENO" 5
-
-test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
-  { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
-  { ac_out=`sed -n '/^[	 ]*datarootdir[	 ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
-  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
-which seems to be undefined.  Please make sure it is defined." >&5
-$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
-which seems to be undefined.  Please make sure it is defined." >&2;}
-
-  rm -f "$tmp/stdin"
-  case $ac_file in
-  -) cat "$tmp/out" && rm -f "$tmp/out";;
-  *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";;
-  esac \
-  || as_fn_error "could not create $ac_file" "$LINENO" 5
- ;;
-  :H)
-  #
-  # CONFIG_HEADER
-  #
-  if test x"$ac_file" != x-; then
-    {
-      $as_echo "/* $configure_input  */" \
-      && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs"
-    } >"$tmp/config.h" \
-      || as_fn_error "could not create $ac_file" "$LINENO" 5
-    if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then
-      { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
-$as_echo "$as_me: $ac_file is unchanged" >&6;}
-    else
-      rm -f "$ac_file"
-      mv "$tmp/config.h" "$ac_file" \
-	|| as_fn_error "could not create $ac_file" "$LINENO" 5
-    fi
-  else
-    $as_echo "/* $configure_input  */" \
-      && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \
-      || as_fn_error "could not create -" "$LINENO" 5
-  fi
- ;;
-
-
-  esac
-
-done # for ac_tag
-
-
-as_fn_exit 0
-_ACEOF
-ac_clean_files=$ac_clean_files_save
-
-test $ac_write_fail = 0 ||
-  as_fn_error "write failure creating $CONFIG_STATUS" "$LINENO" 5
-
-
-# configure is writing to config.log, and then calls config.status.
-# config.status does its own redirection, appending to config.log.
-# Unfortunately, on DOS this fails, as config.log is still kept open
-# by configure, so config.status won't be able to write to it; its
-# output is simply discarded.  So we exec the FD to /dev/null,
-# effectively closing config.log, so it can be properly (re)opened and
-# appended to by config.status.  When coming back to configure, we
-# need to make the FD available again.
-if test "$no_create" != yes; then
-  ac_cs_success=:
-  ac_config_status_args=
-  test "$silent" = yes &&
-    ac_config_status_args="$ac_config_status_args --quiet"
-  exec 5>/dev/null
-  $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
-  exec 5>>config.log
-  # Use ||, not &&, to avoid exiting from the if with $? = 1, which
-  # would make configure fail if this is the last instruction.
-  $ac_cs_success || as_fn_exit $?
-fi
-if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
-$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
-fi
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5
-$as_echo_n "checking for stdbool.h that conforms to C99... " >&6; }
-if test "${ac_cv_header_stdbool_h+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-#include <stdbool.h>
-#ifndef bool
- "error: bool is not defined"
-#endif
-#ifndef false
- "error: false is not defined"
-#endif
-#if false
- "error: false is not 0"
-#endif
-#ifndef true
- "error: true is not defined"
-#endif
-#if true != 1
- "error: true is not 1"
-#endif
-#ifndef __bool_true_false_are_defined
- "error: __bool_true_false_are_defined is not defined"
-#endif
-
-	struct s { _Bool s: 1; _Bool t; } s;
-
-	char a[true == 1 ? 1 : -1];
-	char b[false == 0 ? 1 : -1];
-	char c[__bool_true_false_are_defined == 1 ? 1 : -1];
-	char d[(bool) 0.5 == true ? 1 : -1];
-	bool e = &s;
-	char f[(_Bool) 0.0 == false ? 1 : -1];
-	char g[true];
-	char h[sizeof (_Bool)];
-	char i[sizeof s.t];
-	enum { j = false, k = true, l = false * true, m = true * 256 };
-	/* The following fails for
-	   HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */
-	_Bool n[m];
-	char o[sizeof n == m * sizeof n[0] ? 1 : -1];
-	char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1];
-#	if defined __xlc__ || defined __GNUC__
-	 /* Catch a bug in IBM AIX xlc compiler version 6.0.0.0
-	    reported by James Lemley on 2005-10-05; see
-	    http://lists.gnu.org/archive/html/bug-coreutils/2005-10/msg00086.html
-	    This test is not quite right, since xlc is allowed to
-	    reject this program, as the initializer for xlcbug is
-	    not one of the forms that C requires support for.
-	    However, doing the test right would require a runtime
-	    test, and that would make cross-compilation harder.
-	    Let us hope that IBM fixes the xlc bug, and also adds
-	    support for this kind of constant expression.  In the
-	    meantime, this test will reject xlc, which is OK, since
-	    our stdbool.h substitute should suffice.  We also test
-	    this with GCC, where it should work, to detect more
-	    quickly whether someone messes up the test in the
-	    future.  */
-	 char digs[] = "0123456789";
-	 int xlcbug = 1 / (&(digs + 5)[-2 + (bool) 1] == &digs[4] ? 1 : -1);
-#	endif
-	/* Catch a bug in an HP-UX C compiler.  See
-	   http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html
-	   http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html
-	 */
-	_Bool q = true;
-	_Bool *pq = &q;
-
-int
-main ()
-{
-
-	*pq |= q;
-	*pq |= ! q;
-	/* Refer to every declared value, to avoid compiler optimizations.  */
-	return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l
-		+ !m + !n + !o + !p + !q + !pq);
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_header_stdbool_h=yes
-else
-  ac_cv_header_stdbool_h=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5
-$as_echo "$ac_cv_header_stdbool_h" >&6; }
-ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default"
-if test "x$ac_cv_type__Bool" = x""yes; then :
-
-cat >>confdefs.h <<_ACEOF
-#define HAVE__BOOL 1
-_ACEOF
-
-
-fi
-
-if test $ac_cv_header_stdbool_h = yes; then
-
-$as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h
-
-fi
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5
-$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
-set x ${MAKE-make}
-ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
-if { as_var=ac_cv_prog_make_${ac_make}_set; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat >conftest.make <<\_ACEOF
-SHELL = /bin/sh
-all:
-	@echo '@@@%%%=$(MAKE)=@@@%%%'
-_ACEOF
-# GNU make sometimes prints "make[1]: Entering...", which would confuse us.
-case `${MAKE-make} -f conftest.make 2>/dev/null` in
-  *@@@%%%=?*=@@@%%%*)
-    eval ac_cv_prog_make_${ac_make}_set=yes;;
-  *)
-    eval ac_cv_prog_make_${ac_make}_set=no;;
-esac
-rm -f conftest.make
-fi
-if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-  SET_MAKE=
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-  SET_MAKE="MAKE=${MAKE-make}"
-fi
-

+ 0 - 65
hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure.ac

@@ -1,65 +0,0 @@
-#                                               -*- Autoconf -*-
-# Process this file with autoconf to produce a configure script.
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-AC_PREREQ(2.59)
-AC_INIT([runAs],[0.1])
-
-#Changing the default prefix value so that the binary does not
-#get installed system-wide.
-AC_PREFIX_DEFAULT(.)
-
-#Add a new argument: --with-home
-AC_ARG_WITH(home,[--with-home path to hadoop home dir])
-AC_CONFIG_SRCDIR([main.c])
-AC_CONFIG_HEADER([runAs.h])
-
-# Checks for programs.
-AC_PROG_CC
-
-# Checks for libraries.
-
-# Checks for header files.
-AC_HEADER_STDC
-AC_CHECK_HEADERS([stdlib.h string.h unistd.h fcntl.h])
-
-#check for HADOOP_PREFIX
-if test "$with_home" != ""
-then
-AC_DEFINE_UNQUOTED(HADOOP_PREFIX,"$with_home")
-fi
-
-# Checks for typedefs, structures, and compiler characteristics.
-AC_C_CONST
-AC_TYPE_PID_T
-AC_TYPE_MODE_T
-AC_TYPE_SIZE_T
-
-# Checks for library functions.
-AC_FUNC_MALLOC
-AC_FUNC_REALLOC
-AC_FUNC_CHOWN
-AC_CHECK_FUNCS([strerror memset mkdir rmdir strdup])
-
-AC_HEADER_STDBOOL
-AC_PROG_MAKE_SET
-
-AC_CONFIG_FILES([Makefile])
-AC_OUTPUT

+ 0 - 59
hadoop-common-project/hadoop-common/src/test/system/c++/runAs/main.c

@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "runAs.h"
-
-/**
- * The binary accepts a command of the following format:
- * cluster-controller user hostname hadoop-daemon.sh-command
- */
-int main(int argc, char **argv) {
-  char *user;
-  char *hostname;
-  char *command;
-  struct passwd user_detail;
-  /*
-   * Minimum number of arguments required for the binary to perform.
-   */
-  if (argc < 4) {
-    fprintf(stderr, "Invalid number of arguments passed to the binary\n");
-    return INVALID_ARGUMENT_NUMBER;
-  }
-
-  user = argv[1];
-  if (user == NULL) {
-    fprintf(stderr, "Invalid user name\n");
-    return INVALID_USER_NAME;
-  }
-
-  if (getuserdetail(user, &user_detail) != 0) {
-    fprintf(stderr, "Invalid user name\n");
-    return INVALID_USER_NAME;
-  }
-
-  if (user_detail.pw_gid == 0 || user_detail.pw_uid == 0) {
-      fprintf(stderr, "Cannot run tasks as super user\n");
-      return SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS;
-  }
-
-  hostname = argv[2];
-  command = argv[3];
-  return process_controller_command(user, hostname, command);
-}

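The main() above validates its arguments, refuses to run as the super user, and then hands off to process_controller_command(). As a minimal sketch of how a test harness might invoke the resulting runAs binary, assuming it has been built and is on the PATH (the user, host, and command values below are purely illustrative):

import java.io.IOException;

public class RunAsInvoker {
  public static void main(String[] args) throws IOException, InterruptedException {
    // Argument order follows the format documented in main.c:
    //   cluster-controller user hostname hadoop-daemon.sh-command
    ProcessBuilder pb = new ProcessBuilder(
        "runAs",                  // assumed to be on the PATH
        "hadoopuser",             // non-super user to switch to
        "datanode1.example.com",  // remote host to ssh into
        "start datanode");        // hadoop-daemon.sh command to run
    pb.inheritIO();
    int exitCode = pb.start().waitFor();
    // A non-zero exit code maps to the errorcodes enum in runAs.h.in.
    System.out.println("runAs exited with code " + exitCode);
  }
}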
+ 0 - 111
hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.c

@@ -1,111 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "runAs.h"
-
-/*
- * Populates the user details for the given user name.
- */
-int getuserdetail(char *user, struct passwd *user_detail) {
-  struct passwd *tempPwdPtr;
-  long size = sysconf(_SC_GETPW_R_SIZE_MAX);
-  if (size == -1) {
-    size = 1024; /* fall back to a sane default when the limit is indeterminate */
-  }
-  char pwdbuffer[size];
-  if ((getpwnam_r(user, user_detail, pwdbuffer, size, &tempPwdPtr)) != 0) {
-    fprintf(stderr, "Invalid user provided to getpwnam\n");
-    return -1;
-  }
-  return 0;
-}
-
-/**
- * Function to switch the user identity and set the appropriate 
- * group control as the user specified in the argument.
- */
-int switchuser(char *user) {
-  //populate the user details
-  struct passwd user_detail;
-  if ((getuserdetail(user, &user_detail)) != 0) {
-    return INVALID_USER_NAME;
-  }
-  //set the right supplementary groups for the user.
-  if (initgroups(user_detail.pw_name, user_detail.pw_gid) != 0) {
-    fprintf(stderr, "Init groups call for the user : %s failed\n",
-        user_detail.pw_name);
-    return INITGROUPS_FAILED;
-  }
-  //switch the group; setgid returns non-zero on failure.
-  if (setgid(user_detail.pw_gid) != 0) {
-    fprintf(stderr, "Setgid for the user : %s failed\n", user_detail.pw_name);
-    return SETUID_OPER_FAILED;
-  }
-  //switch the user; setuid returns non-zero on failure.
-  if (setuid(user_detail.pw_uid) != 0) {
-    fprintf(stderr, "Setuid for the user : %s failed\n", user_detail.pw_name);
-    return SETUID_OPER_FAILED;
-  }
-  //set the effective user id; seteuid returns non-zero on failure.
-  if (seteuid(user_detail.pw_uid) != 0) {
-    fprintf(stderr, "Seteuid for the user : %s failed\n", user_detail.pw_name);
-    return SETUID_OPER_FAILED;
-  }
-  return 0;
-}
-
-/*
- * Top level method which processes a cluster management
- * command.
- */
-int process_cluster_command(char * user,  char * node , char *command) {
-  char *finalcommandstr;
-  int len;
-  int errorcode = 0;
-  if (strlen(command) == 0) {
-    fprintf(stderr, "Invalid command passed\n");
-    return INVALID_COMMAND_PASSED;
-  }
-  len = STRLEN + strlen(command);
-  finalcommandstr = (char *) malloc((len + 1) * sizeof(char));
-  //snprintf needs len + 1 so the full command plus the NUL terminator fits;
-  //snprintf NUL-terminates, so no out-of-bounds write is needed afterwards.
-  snprintf(finalcommandstr, len + 1, SCRIPT_DIR_PATTERN, HADOOP_PREFIX,
-      command);
-  errorcode = switchuser(user);
-  if (errorcode != 0) {
-    fprintf(stderr, "switch user failed\n");
-    return errorcode;
-  }
-  execlp(SSH_COMMAND, SSH_COMMAND, node, finalcommandstr, NULL);
-  //execlp only returns when it fails, so reaching this point is an error.
-  fprintf(stderr, "execlp failed due to : %s\n", strerror(errno));
-  return 0;
-}
-
-/*
- * Processes a cluster controller command. This is the API exposed to
- * main for executing cluster commands.
- */
-int process_controller_command(char *user, char * node, char *command) {
-  return process_cluster_command(user, node, command);
-}

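process_cluster_command() above assembles the remote command by substituting HADOOP_PREFIX and the daemon command into SCRIPT_DIR_PATTERN and then execs ssh. A rough Java equivalent of that string assembly, handy for sanity-checking the command line the C code produces (the prefix and command values are illustrative):

public class RemoteCommandBuilder {
  // Mirrors SCRIPT_DIR_PATTERN from runAs.h.in: "%s/bin/hadoop-daemon.sh %s"
  private static final String SCRIPT_DIR_PATTERN = "%s/bin/hadoop-daemon.sh %s";

  public static String build(String hadoopPrefix, String command) {
    // snprintf in process_cluster_command() performs the same substitution in C.
    return String.format(SCRIPT_DIR_PATTERN, hadoopPrefix, command);
  }

  public static void main(String[] args) {
    // The result is passed to ssh, e.g.:
    //   ssh <host> "/opt/hadoop/bin/hadoop-daemon.sh start datanode"
    System.out.println(build("/opt/hadoop", "start datanode"));
  }
}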
+ 0 - 59
hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.h.in

@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <string.h>
-#include <errno.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <pwd.h>
-#include <assert.h>
-#include <getopt.h>
-#include <grp.h>
-
-/*
-* List of possible error codes.
-*/
-enum errorcodes {
-  INVALID_ARGUMENT_NUMBER = 1,
-  INVALID_USER_NAME, //2
-  SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS, //3
-  INITGROUPS_FAILED, //4
-  SETUID_OPER_FAILED, //5
-  INVALID_COMMAND_PASSED, //6
-};
-
-#undef HADOOP_PREFIX
-
-#define SSH_COMMAND "ssh"
-
-#define SCRIPT_DIR_PATTERN "%s/bin/hadoop-daemon.sh %s" //%s to be substituted
-
-#define STRLEN (strlen(SCRIPT_DIR_PATTERN) + strlen(HADOOP_PREFIX))
-
-/*
- * Populates the user details for the given user name.
- */
-int getuserdetail(char *user, struct passwd *user_detail);
-
-/*
- * Processes a cluster controller command. This is the API exposed to
- * main for executing cluster commands.
- */
-int process_controller_command(char *user, char *node, char *command);

+ 0 - 68
hadoop-common-project/hadoop-common/src/test/system/conf/hadoop-policy-system-test.xml

@@ -1,68 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-<!--
-  These are Herriot-specific protocols. This section shouldn't be present in
-  a production cluster configuration. This file needs to be linked up to the
-  main conf/hadoop-policy.xml in the deployment process.
--->
-  <property>
-    <name>security.daemon.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DaemonProtocol, extended by all other
-    Herriot RPC protocols.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.nn.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NNProtocol, used by the
-    Herriot AbstractDaemonCluster's implementations to connect to a remote
-    NameNode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.dn.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DNProtocol, used by the
-    Herriot AbstractDaemonCluster's implementations to connect to a remote
-    DataNode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.tt.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TTProtocol, used by the
-    Herriot AbstractDaemonCluster's implementations to connect to a remote
-    TaskTracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>

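Each description above spells out the same ACL format: a comma-separated user list and a comma-separated group list separated by a single blank, with "*" meaning all users. A small sketch of parsing such an ACL string under that stated format (this is not Hadoop's actual AccessControlList implementation):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class AclParser {
  public static void parse(String acl) {
    if ("*".equals(acl.trim())) {
      System.out.println("all users are allowed");
      return;
    }
    // User list and group list are separated by a single blank,
    // e.g. "alice,bob users,wheel".
    String[] parts = acl.trim().split(" ", 2);
    List<String> users = Arrays.asList(parts[0].split(","));
    List<String> groups = parts.length > 1
        ? Arrays.asList(parts[1].split(","))
        : Collections.<String>emptyList();
    System.out.println("users=" + users + " groups=" + groups);
  }

  public static void main(String[] args) {
    parse("alice,bob users,wheel");
    parse("*");
  }
}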
+ 0 - 599
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java

@@ -1,599 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.IOException;
-import java.util.*;
-
-import org.junit.Assert;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-
-import javax.management.*;
-import javax.management.remote.JMXConnector;
-import javax.management.remote.JMXConnectorFactory;
-import javax.management.remote.JMXServiceURL;
-
-/**
- * Abstract class which encapsulates the DaemonClient used in the
- * system tests.<br/>
- *
- * @param <PROXY> the proxy implementation of a specific Daemon
- */
-public abstract class AbstractDaemonClient<PROXY extends DaemonProtocol> {
-  private Configuration conf;
-  private Boolean jmxEnabled = null;
-  private MBeanServerConnection connection;
-  private int jmxPortNumber = -1;
-  private RemoteProcess process;
-  private boolean connected;
-
-  private static final Log LOG = LogFactory.getLog(AbstractDaemonClient.class);
-  private static final String HADOOP_JMX_DOMAIN = "Hadoop";
-  private static final String HADOOP_OPTS_ENV = "HADOOP_OPTS";
-
-  /**
-   * Create a Daemon client.<br/>
-   * 
-   * @param conf configuration to be used by the proxy to connect to the Daemon.
-   * @param process the RemoteProcess used to manage the particular daemon.
-   * 
-   * @throws IOException on RPC error
-   */
-  public AbstractDaemonClient(Configuration conf, RemoteProcess process) 
-      throws IOException {
-    this.conf = conf;
-    this.process = process;
-  }
-
-  /**
-   * Gets whether the client is connected to the Daemon.<br/>
-   * 
-   * @return true if connected.
-   */
-  public boolean isConnected() {
-    return connected;
-  }
-
-  protected void setConnected(boolean connected) {
-    this.connected = connected;
-  }
-
-  /**
-   * Create an RPC proxy to the daemon <br/>
-   * 
-   * @throws IOException on RPC error
-   */
-  public abstract void connect() throws IOException;
-
-  /**
-   * Disconnect the underlying RPC proxy to the daemon.<br/>
-   * @throws IOException in case of communication errors
-   */
-  public abstract void disconnect() throws IOException;
-
-  /**
-   * Get the proxy to connect to a particular service Daemon.<br/>
-   * 
-   * @return proxy to connect to a particular service Daemon.
-   */
-  protected abstract PROXY getProxy();
-
-  /**
-   * Gets the daemon level configuration.<br/>
-   * 
-   * @return configuration using which daemon is running
-   */
-  public Configuration getConf() {
-    return conf;
-  }
-
-  /**
-   * Gets the host on which Daemon is currently running. <br/>
-   * 
-   * @return hostname
-   */
-  public String getHostName() {
-    return process.getHostName();
-  }
-
-  /**
-   * Gets whether the Daemon is ready to accept RPC connections.<br/>
-   * 
-   * @return true if daemon is ready.
-   * @throws IOException on RPC error
-   */
-  public boolean isReady() throws IOException {
-    return getProxy().isReady();
-  }
-
-  /**
-   * Kills the Daemon process <br/>
-   * @throws IOException on RPC error
-   */
-  public void kill() throws IOException {
-    process.kill();
-  }
-
-  /**
-   * Checks if the Daemon process is alive or not <br/>
-   * @throws IOException on RPC error
-   */
-  public void ping() throws IOException {
-    getProxy().ping();
-  }
-
-  /**
-   * Start up the Daemon process. <br/>
-   * @throws IOException on RPC error
-   */
-  public void start() throws IOException {
-    process.start();
-  }
-
-  /**
-   * Get system level view of the Daemon process.
-   * 
-   * @return returns system level view of the Daemon process.
-   * 
-   * @throws IOException on RPC error. 
-   */
-  public ProcessInfo getProcessInfo() throws IOException {
-    return getProxy().getProcessInfo();
-  }
-
-  /**
-   * Abstract method to retrieve the name of a daemon-specific environment variable.
-   * @return name of the Hadoop environment variable containing the daemon options
-   */
-  abstract public String getHadoopOptsEnvName ();
-
-  /**
-   * Checks the remote daemon process info to see if certain JMX system
-   * properties are available, to determine whether the JMX service is
-   * enabled on the remote side.
-   *
-   * @return <code>boolean</code> indicating availability of remote JMX
-   * @throws IOException is thrown in case of communication errors
-   */
-  public boolean isJmxEnabled() throws IOException {
-    return isJmxEnabled(HADOOP_OPTS_ENV) ||
-        isJmxEnabled(getHadoopOptsEnvName());
-  }
-
-  /**
-   * Checks the remote daemon process info to see if certain JMX system
-   * properties are available, to determine whether the JMX service is
-   * enabled on the remote side.
-   *
-   * @param envivar name of an environment variable to be searched
-   * @return <code>boolean</code> indicating availability of remote JMX
-   * @throws IOException is thrown in case of communication errors
-   */
-  protected boolean isJmxEnabled(String envivar) throws IOException {
-    if (jmxEnabled != null) return jmxEnabled;
-    boolean ret = false;
-    String jmxRemoteString = "-Dcom.sun.management.jmxremote";
-    String hadoopOpts = getProcessInfo().getEnv().get(envivar);
-    if (hadoopOpts == null) {
-      // Guard against an unset environment variable to avoid an NPE.
-      return false;
-    }
-    LOG.debug("Looking into " + hadoopOpts + " from " + envivar);
-    List<String> options = Arrays.asList(hadoopOpts.split(" "));
-    ret = options.contains(jmxRemoteString);
-    jmxEnabled = ret;
-    return ret;
-  }
-
-  /**
-   * Checks remote daemon process info to find remote JMX server port number
-   * By default this method will look into "HADOOP_OPTS" variable only.
-   * @return port number of the remote JMX server, or -1 if it can't be found
-   * @throws IOException is thrown in case of communication errors
-   * @throws IllegalArgumentException if non-integer port is set
-   *  in the remote process info
-   */
-  public int getJmxPortNumber() throws IOException, IllegalArgumentException {
-    int portNo = getJmxPortNumber(HADOOP_OPTS_ENV);
-    return portNo != -1 ? portNo : getJmxPortNumber(getHadoopOptsEnvName());
-  }
-
-  /**
-   * Checks remote daemon process info to find remote JMX server port number
-   *
-   * @param envivar name of the env. var. to look for JMX specific settings
-   * @return port number of the remote JMX server, or -1 if it can't be found
-   * @throws IOException is thrown in case of communication errors
-   * @throws IllegalArgumentException if non-integer port is set
-   *  in the remote process info
-   */
-  protected int getJmxPortNumber(final String envivar) throws
-      IOException, IllegalArgumentException {
-    if (jmxPortNumber != -1) return jmxPortNumber;
-    String jmxPortString = "-Dcom.sun.management.jmxremote.port";
-
-    String hadoopOpts = getProcessInfo().getEnv().get(envivar);
-    if (hadoopOpts == null) {
-      return -1;
-    }
-    int portNumber = -1;
-    boolean found = false;
-    String[] options = hadoopOpts.split(" ");
-    for (String option : options) {
-      if (option.startsWith(jmxPortString)) {
-        found = true;
-        try {
-          portNumber = Integer.parseInt(option.split("=")[1]);
-        } catch (NumberFormatException e) {
-          throw new IllegalArgumentException("JMX port number isn't an integer");
-        }
-        break;
-      }
-    }
-    if (!found) {
-      // Return -1, as documented, so that callers can fall back to another
-      // environment variable instead of failing outright.
-      return -1;
-    }
-    jmxPortNumber = portNumber;
-    return jmxPortNumber;
-  }
-
-  /**
-   * Return a file status object that represents the path.
-   * @param path
-   *          given path
-   * @param local
-   *          whether the path is local or not
-   * @return a FileStatus object
-   * @throws IOException see specific implementation
-   */
-  public FileStatus getFileStatus(String path, boolean local) throws IOException {
-    return getProxy().getFileStatus(path, local);
-  }
-
-  /**
-   * Create a file with full permissions in a file system.
-   * @param path - source path where the file has to be created.
-   * @param fileName - file name
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void createFile(String path, String fileName, 
-      boolean local) throws IOException {
-    getProxy().createFile(path, fileName, null, local);
-  }
-
-  /**
-   * Create a file with given permissions in a file system.
-   * @param path - source path where the file has to be created.
-   * @param fileName - file name.
-   * @param permission - file permissions.
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void createFile(String path, String fileName, 
-     FsPermission permission,  boolean local) throws IOException {
-    getProxy().createFile(path, fileName, permission, local);
-  }
-
-  /**
-   * Create a folder with default permissions in a file system.
-   * @param path - source path where the folder has to be created.
-   * @param folderName - folder name.
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs. 
-   */
-  public void createFolder(String path, String folderName, 
-     boolean local) throws IOException {
-    getProxy().createFolder(path, folderName, null, local);
-  }
-
-  /**
-   * Create a folder with given permissions in a file system.
-   * @param path - source path where the folder has to be created.
-   * @param folderName - folder name.
-   * @param permission - folder permissions.
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void createFolder(String path, String folderName, 
-     FsPermission permission,  boolean local) throws IOException {
-    getProxy().createFolder(path, folderName, permission, local);
-  }
-
-  /**
-   * List the statuses of the files/directories in the given path if the path is
-   * a directory.
-   * 
-   * @param path
-   *          given path
-   * @param local
-   *          whether the path is local or not
-   * @return the statuses of the files/directories in the given path
-   * @throws IOException on RPC error. 
-   */
-  public FileStatus[] listStatus(String path, boolean local) 
-    throws IOException {
-    return getProxy().listStatus(path, local);
-  }
-
-  /**
-   * List the statuses of the files/directories in the given path if the path is
-   * a directory, recursively or non-recursively depending on the parameters.
-   * 
-   * @param path
-   *          given path
-   * @param local
-   *          whether the path is local or not
-   * @param recursive 
-   *          whether to recursively get the status
-   * @return the statuses of the files/directories in the given path
-   * @throws IOException is thrown on RPC error. 
-   */
-  public FileStatus[] listStatus(String path, boolean local, boolean recursive)
-    throws IOException {
-    List<FileStatus> status = new ArrayList<FileStatus>();
-    addStatus(status, path, local, recursive);
-    return status.toArray(new FileStatus[0]);
-  }
-
-  private void addStatus(List<FileStatus> status, String f, 
-      boolean local, boolean recursive) 
-    throws IOException {
-    FileStatus[] fs = listStatus(f, local);
-    if (fs != null) {
-      for (FileStatus fileStatus : fs) {
-        if (!f.equals(fileStatus.getPath().toString())) {
-          status.add(fileStatus);
-          if (recursive) {
-            addStatus(status, fileStatus.getPath().toString(), local, recursive);
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Gets the number of times FATAL log messages were logged in the Daemon logs.
-   * <br/>
-   * Pattern used for searching is FATAL. <br/>
-   * @param excludeExpList list of exceptions to exclude
-   * @return number of occurrences of fatal messages.
-   * @throws IOException in case of communication errors
-   */
-  public int getNumberOfFatalStatementsInLog(String [] excludeExpList)
-      throws IOException {
-    DaemonProtocol proxy = getProxy();
-    String pattern = "FATAL";
-    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
-  }
-
-  /**
-   * Gets the number of times ERROR log messages were logged in the Daemon logs.
-   * <br/>
-   * Pattern used for searching is ERROR. <br/>
-   * @param excludeExpList list of exceptions to exclude
-   * @return number of occurrences of error messages.
-   * @throws IOException is thrown on RPC error. 
-   */
-  public int getNumberOfErrorStatementsInLog(String[] excludeExpList) 
-      throws IOException {
-    DaemonProtocol proxy = getProxy();
-    String pattern = "ERROR";    
-    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
-  }
-
-  /**
-   * Gets the number of times WARN log messages were logged in the Daemon logs.
-   * <br/>
-   * Pattern used for searching is WARN. <br/>
-   * @param excludeExpList list of exceptions to exclude
-   * @return number of occurrences of warning messages.
-   * @throws IOException thrown on RPC error. 
-   */
-  public int getNumberOfWarnStatementsInLog(String[] excludeExpList) 
-      throws IOException {
-    DaemonProtocol proxy = getProxy();
-    String pattern = "WARN";
-    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
-  }
-
-  /**
-   * Gets the number of times the given exception was present in the log file. <br/>
-   * 
-   * @param e exception class.
-   * @param excludeExpList list of exceptions to exclude. 
-   * @return number of exceptions in log
-   * @throws IOException is thrown on RPC error. 
-   */
-  public int getNumberOfExceptionsInLog(Exception e,
-      String[] excludeExpList) throws IOException {
-    DaemonProtocol proxy = getProxy();
-    String pattern = e.getClass().getSimpleName();    
-    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
-  }
-
-  /**
-   * Gets the number of times ConcurrentModificationException is present in the log file.
-   * <br/>
-   * @param excludeExpList list of exceptions to exclude.
-   * @return number of times exception in log file.
-   * @throws IOException is thrown on RPC error. 
-   */
-  public int getNumberOfConcurrentModificationExceptionsInLog(
-      String[] excludeExpList) throws IOException {
-    return getNumberOfExceptionsInLog(new ConcurrentModificationException(),
-        excludeExpList);
-  }
-
-  private int errorCount;
-  private int fatalCount;
-  private int concurrentExceptionCount;
-
-  /**
-   * Populates the initial exception counts, which are used to assert, once a
-   * testcase is done, that no new exception occurred in the daemon while the
-   * testcase was running.
-   * @param excludeExpList list of exceptions to exclude
-   * @throws IOException is thrown on RPC error. 
-   */
-  protected void populateExceptionCount(String [] excludeExpList) 
-      throws IOException {
-    errorCount = getNumberOfErrorStatementsInLog(excludeExpList);
-    LOG.info("Number of error messages in logs : " + errorCount);
-    fatalCount = getNumberOfFatalStatementsInLog(excludeExpList);
-    LOG.info("Number of fatal statement in logs : " + fatalCount);
-    concurrentExceptionCount =
-        getNumberOfConcurrentModificationExceptionsInLog(excludeExpList);
-    LOG.info("Number of concurrent modification in logs : "
-        + concurrentExceptionCount);
-  }
-
-  /**
-   * Asserts that no new exceptions were logged into the log file.
-   * <br/>
-   * <b><i>
-   * Pre-req for the method is that populateExceptionCount() has 
-   * to be called before calling this method.</b></i>
-   * @param excludeExpList list of exceptions to exclude
-   * @throws IOException is thrown on RPC error. 
-   */
-  protected void assertNoExceptionsOccurred(String [] excludeExpList) 
-      throws IOException {
-    int newerrorCount = getNumberOfErrorStatementsInLog(excludeExpList);
-    LOG.info("Number of error messages while asserting :" + newerrorCount);
-    int newfatalCount = getNumberOfFatalStatementsInLog(excludeExpList);
-    LOG.info("Number of fatal messages while asserting : " + newfatalCount);
-    int newconcurrentExceptionCount =
-        getNumberOfConcurrentModificationExceptionsInLog(excludeExpList);
-    LOG.info("Number of concurrentmodification exception while asserting :"
-        + newconcurrentExceptionCount);
-    Assert.assertEquals(
-        "New Error Messages logged in the log file", errorCount, newerrorCount);
-    Assert.assertEquals(
-        "New Fatal messages logged in the log file", fatalCount, newfatalCount);
-    Assert.assertEquals(
-        "New ConcurrentModificationException in log file",
-        concurrentExceptionCount, newconcurrentExceptionCount);
-  }
-
-  /**
-   * Builds the JMX ObjectName from the given domain, service name, and type.
-   * @param domain JMX domain name
-   * @param serviceName name of the service where the MBean is registered (e.g. NameNode)
-   * @param typeName name of the MXBean class
-   * @return ObjectName for the requested MXBean, or <code>null</code> if one wasn't
-   *    found
-   * @throws java.io.IOException if the object name is malformed
-   */
-  protected ObjectName getJmxBeanName(String domain, String serviceName,
-                                      String typeName) throws IOException {
-    if (domain == null)
-      domain = HADOOP_JMX_DOMAIN;
-
-    ObjectName jmxBean;
-    try {
-      jmxBean = new ObjectName(domain + ":service=" + serviceName +
-        ",name=" + typeName);
-    } catch (MalformedObjectNameException e) {
-      LOG.debug(e.getStackTrace());
-      throw new IOException(e);
-    }
-    return jmxBean;
-  }
-
-  /**
-   * Create connection with the remote JMX server at given host and port
-   * @param host name of the remote JMX server host
-   * @param port port number of the remote JMX server host
-   * @return instance of MBeanServerConnection or <code>null</code> if one
-   *    hasn't been established
-   * @throws IOException in case of communication errors
-   */
-  protected MBeanServerConnection establishJmxConnection(String host, int port)
-    throws IOException {
-    if (connection != null) return connection;
-    String urlPattern = null;
-    try {
-      urlPattern = "service:jmx:rmi:///jndi/rmi://" +
-        host + ":" + port +
-        "/jmxrmi";
-      JMXServiceURL url = new JMXServiceURL(urlPattern);
-      JMXConnector connector = JMXConnectorFactory.connect(url, null);
-      connection = connector.getMBeanServerConnection();
-    } catch (java.net.MalformedURLException badURLExc) {
-      LOG.debug("bad url: " + urlPattern, badURLExc);
-      throw new IOException(badURLExc);
-    }
-    return connection;
-  }
-
-  Hashtable<String, ObjectName> jmxObjectNames =
-    new Hashtable<String, ObjectName>();
-
-  /**
-   * Implements all logic for retrieving a bean's attribute.
-   * If any initialization is needed, such as establishing the bean server
-   * connection, it is performed here.
-   * @param serviceName name of the service where MBean is registered (NameNode)
-   * @param type name of the MXBean class
-   * @param attributeName name of the attribute to be retrieved
-   * @return Object value of the attribute or <code>null</code> if not found
-   * @throws IOException is thrown in case of any errors
-   */
-  protected Object getJmxAttribute (String serviceName,
-                                    String type,
-                                    String attributeName)
-    throws IOException {
-    Object retAttribute = null;
-    String domain = null;
-    if (isJmxEnabled()) {
-      try {
-        MBeanServerConnection conn =
-          establishJmxConnection(getHostName(),
-              getJmxPortNumber(HADOOP_OPTS_ENV));
-        for (String d : conn.getDomains()) {
-          if (d != null && d.startsWith(HADOOP_JMX_DOMAIN))
-            domain = d;
-        }
-        if (!jmxObjectNames.containsKey(type))
-          jmxObjectNames.put(type, getJmxBeanName(domain, serviceName, type));
-        retAttribute =
-          conn.getAttribute(jmxObjectNames.get(type), attributeName);
-      } catch (MBeanException e) {
-        LOG.debug(e.getStackTrace());
-        throw new IOException(e);
-      } catch (AttributeNotFoundException e) {
-        LOG.warn(e.getStackTrace());
-        throw new IOException(e);
-      } catch (InstanceNotFoundException e) {
-        LOG.warn(e.getStackTrace());
-        throw new IOException(e);
-      } catch (ReflectionException e) {
-        LOG.debug(e.getStackTrace());
-        throw new IOException(e);
-      }
-    }
-    return retAttribute;
-  }
-
-  /**
-   * This method has to be implemented by appropriate concrete daemon client
-   * e.g. DNClient, NNClient, etc.
-   * Concrete implementation has to provide names of the service and bean type
-   * @param attributeName name of the attribute to be retrieved
-   * @return Object value of the given attribute
-   * @throws IOException is thrown in case of communication errors
-   */
-  public abstract Object getDaemonAttribute (String attributeName)
-    throws IOException;
-}

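The JMX helpers in AbstractDaemonClient above reduce to: build a JMX service URL from host and port, open an MBeanServerConnection, and read an attribute from an ObjectName of the form Hadoop:service=<serviceName>,name=<typeName>. A stand-alone sketch of that flow using the standard javax.management API (the host, port, and bean names below are illustrative):

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class JmxAttributeReader {
  public static void main(String[] args) throws Exception {
    // Same URL pattern as establishJmxConnection() above.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://namenode.example.com:8004/jmxrmi");
    JMXConnector connector = JMXConnectorFactory.connect(url, null);
    try {
      MBeanServerConnection conn = connector.getMBeanServerConnection();
      // Same naming scheme as getJmxBeanName(): domain:service=...,name=...
      ObjectName bean = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
      Object value = conn.getAttribute(bean, "Version");
      System.out.println("Version = " + value);
    } finally {
      connector.close();
    }
  }
}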
+ 0 - 537
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java

@@ -1,537 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.IOException;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileInputStream;
-import java.io.DataInputStream;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Iterator;
-import java.util.Enumeration;
-import java.util.Arrays;
-import java.util.Hashtable;
-import java.net.URI;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.test.system.process.ClusterProcessManager;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-
-/**
- * Abstract class which represent the cluster having multiple daemons.
- */
-@SuppressWarnings("unchecked")
-public abstract class AbstractDaemonCluster {
-
-  private static final Log LOG = LogFactory.getLog(AbstractDaemonCluster.class);
-  private String [] excludeExpList ;
-  private Configuration conf;
-  protected ClusterProcessManager clusterManager;
-  private Map<Enum<?>, List<AbstractDaemonClient>> daemons = 
-    new LinkedHashMap<Enum<?>, List<AbstractDaemonClient>>();
-  private String newConfDir = null;  
-  private static final  String CONF_HADOOP_LOCAL_DIR =
-      "test.system.hdrc.hadoop.local.confdir"; 
-  private final static Object waitLock = new Object();
-  
-  /**
-   * Constructor to create a cluster client.<br/>
-   * 
-   * @param conf
-   *          Configuration to be used while constructing the cluster.
-   * @param rcluster
-   *          process manager instance to be used for managing the daemons.
-   * 
-   * @throws IOException
-   */
-  public AbstractDaemonCluster(Configuration conf,
-      ClusterProcessManager rcluster) throws IOException {
-    this.conf = conf;
-    this.clusterManager = rcluster;
-    createAllClients();
-  }
-
-  /**
-   * The method returns the cluster manager. The system test cases require an
-   * instance of HadoopDaemonRemoteCluster to invoke certain operations on the
-   * daemon.
-   * 
-   * @return instance of clusterManager
-   */
-  public ClusterProcessManager getClusterManager() {
-    return clusterManager;
-  }
-
-  protected void createAllClients() throws IOException {
-    for (RemoteProcess p : clusterManager.getAllProcesses()) {
-      List<AbstractDaemonClient> dms = daemons.get(p.getRole());
-      if (dms == null) {
-        dms = new ArrayList<AbstractDaemonClient>();
-        daemons.put(p.getRole(), dms);
-      }
-      dms.add(createClient(p));
-    }
-  }
-  
-  /**
-   * Method to create the daemon client.<br/>
-   * 
-   * @param process
-   *          to manage the daemon.
-   * @return instance of the daemon client
-   * 
-   * @throws IOException
-   */
-  protected abstract AbstractDaemonClient<DaemonProtocol> 
-    createClient(RemoteProcess process) throws IOException;
-
-  /**
-   * Get the global cluster configuration which was used to create the 
-   * cluster. <br/>
-   * 
-   * @return global configuration of the cluster.
-   */
-  public Configuration getConf() {
-    return conf;
-  }
-
-  /**
-   * Return the client handle of all the Daemons.<br/>
-   * 
-   * @return map of role to daemon clients' list.
-   */
-  public Map<Enum<?>, List<AbstractDaemonClient>> getDaemons() {
-    return daemons;
-  }
-
-  /**
-   * Checks if the cluster is ready for testing. <br/>
-   * Algorithm for checking is as follows : <br/>
-   * <ul>
-   * <li> Wait for Daemon to come up </li>
-   * <li> Check if daemon is ready </li>
-   * <li> If one of the daemons is not ready, return false </li>
-   * </ul> 
-   * 
-   * @return true if whole cluster is ready.
-   * 
-   * @throws IOException
-   */
-  public boolean isReady() throws IOException {
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        waitForDaemon(daemon);
-        if (!daemon.isReady()) {
-          return false;
-        }
-      }
-    }
-    return true;
-  }
-
-  protected void waitForDaemon(AbstractDaemonClient d) {
-    final int TEN_SEC = 10000;
-    while(true) {
-      try {
-        LOG.info("Waiting for daemon at " + d.getHostName() + " to come up.");
-        LOG.info("Daemon might not be " +
-            "ready or the call to setReady() method hasn't been " +
-            "injected to " + d.getClass() + " ");
-        d.connect();
-        break;
-      } catch (IOException e) {
-        try {
-          Thread.sleep(TEN_SEC);
-        } catch (InterruptedException ie) {
-          // Restore the interrupt status so callers can observe it.
-          Thread.currentThread().interrupt();
-        }
-      }
-    }
-  }
-
-  /**
-   * Starts the cluster daemons.
-   * @throws IOException
-   */
-  public void start() throws IOException {
-    clusterManager.start();
-  }
-
-  /**
-   * Stops the cluster daemons.
-   * @throws IOException
-   */
-  public void stop() throws IOException {
-    clusterManager.stop();
-  }
-
-  /**
-   * Connect to daemon RPC ports.
-   * @throws IOException
-   */
-  public void connect() throws IOException {
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        daemon.connect();
-      }
-    }
-  }
-
-  /**
-   * Disconnect from daemon RPC ports.
-   * @throws IOException
-   */
-  public void disconnect() throws IOException {
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        daemon.disconnect();
-      }
-    }
-  }
-
-  /**
-   * Ping all the daemons of the cluster.
-   * @throws IOException
-   */
-  public void ping() throws IOException {
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        LOG.info("Daemon is : " + daemon.getHostName() + " pinging....");
-        daemon.ping();
-      }
-    }
-  }
-
-  /**
-   * Connect to the cluster and ensure that it is clean to run tests.
-   * @throws Exception
-   */
-  public void setUp() throws Exception {
-    while (!isReady()) {
-      Thread.sleep(1000);
-    }
-    connect();
-    ping();
-    clearAllControlActions();
-    ensureClean();
-    populateExceptionCounts();
-  }
-  
-  /**
-   * This is mainly used for the test cases to set the list of exceptions
-   * that will be excluded.
-   * @param excludeExpList list of exceptions to exclude
-   */
-  public void setExcludeExpList(String [] excludeExpList) {
-    this.excludeExpList = excludeExpList;
-  }
-  
-  public void clearAllControlActions() throws IOException {
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        LOG.info("Daemon is : " + daemon.getHostName() + " pinging....");
-        daemon.getProxy().clearActions();
-      }
-    }
-  }
-
-  /**
-   * Ensure that the cluster is clean to run tests.
-   * @throws IOException
-   */
-  public void ensureClean() throws IOException {
-  }
-
-  /**
-   * Ensure that cluster is clean. Disconnect from the RPC ports of the daemons.
-   * @throws IOException
-   */
-  public void tearDown() throws IOException {
-    ensureClean();
-    clearAllControlActions();
-    assertNoExceptionMessages();
-    disconnect();
-  }
-
-  /**
-   * Populate the exception counts in all the daemons so that they can be checked when
-   * the testcase has finished running.<br/>
-   * @throws IOException
-   */
-  protected void populateExceptionCounts() throws IOException {
-    for(List<AbstractDaemonClient> lst : daemons.values()) {
-      for(AbstractDaemonClient d : lst) {
-        d.populateExceptionCount(excludeExpList);
-      }
-    }
-  }
-
-  /**
-   * Assert no exception has been thrown during the sequence of the actions.
-   * <br/>
-   * @throws IOException
-   */
-  protected void assertNoExceptionMessages() throws IOException {
-    for(List<AbstractDaemonClient> lst : daemons.values()) {
-      for(AbstractDaemonClient d : lst) {
-        d.assertNoExceptionsOccurred(excludeExpList);
-      }
-    }
-  }
-
-  /**
-   * Get the proxy user definitions from the cluster configuration.
-   * @return ProxyUserDefinitions - proxy users data like groups and hosts.
-   * @throws Exception - if no proxy users found in config.
-   */
-  public ProxyUserDefinitions getHadoopProxyUsers() throws
-     Exception {
-    Iterator itr = conf.iterator();
-    ArrayList<String> proxyUsers = new ArrayList<String>();
-    while (itr.hasNext()) {
-      // Read each entry once; calling next() repeatedly would skip elements.
-      String entry = itr.next().toString();
-      if (entry.indexOf("hadoop.proxyuser") >= 0 &&
-          entry.indexOf("groups=") >= 0) {
-         proxyUsers.add(entry.split("\\.")[2]);
-      }
-    }
-    if (proxyUsers.size() == 0) {
-       LOG.error("No proxy users found in the configuration.");
-       throw new Exception("No proxy users found in the configuration.");
-    }
-
-    ProxyUserDefinitions pud = new ProxyUserDefinitions() {
-      @Override
-      public boolean writeToFile(URI filePath) throws IOException {
-        throw new UnsupportedOperationException("No such method exists.");
-      };
-    };
-
-    for (String userName : proxyUsers) {
-       List<String> groups = Arrays.asList(conf.get("hadoop.proxyuser." +
-           userName + ".groups").split(","));
-       List<String> hosts = Arrays.asList(conf.get("hadoop.proxyuser." +
-           userName + ".hosts").split(","));
-       ProxyUserDefinitions.GroupsAndHost definitions =
-           pud.new GroupsAndHost();
-       definitions.setGroups(groups);
-       definitions.setHosts(hosts);
-       pud.addProxyUser(userName, definitions);
-    }
-    return pud;
-  }
-  
-  /**
-   * Returns the local folder where the config file is stored temporarily
-   * while serializing the object.
-   * @return String temporary local folder path for configuration.
-   */
-  private String getHadoopLocalConfDir() {
-    String hadoopLocalConfDir = conf.get(CONF_HADOOP_LOCAL_DIR);
-    if (hadoopLocalConfDir == null || hadoopLocalConfDir.isEmpty()) {
-      LOG.error("No configuration "
-          + "for the CONF_HADOOP_LOCAL_DIR passed");
-      throw new IllegalArgumentException(
-          "No Configuration passed for hadoop conf local directory");
-    }
-    return hadoopLocalConfDir;
-  }
-
-  /**
-   * Used to restart the cluster with a new configuration at runtime.<br/>
-   * @param props attributes for new configuration.
-   * @param configFile configuration file.
-   * @throws IOException if an I/O error occurs.
-   */
-  public void restartClusterWithNewConfig(Hashtable<String,?> props, 
-      String configFile) throws IOException {
-
-    String mapredConf = null;
-    String localDirPath = null;
-    File localFolderObj = null;
-    File xmlFileObj = null;
-    String confXMLFile = null;
-    Configuration initConf = new Configuration(getConf());
-    Enumeration<String> e = props.keys();
-    while (e.hasMoreElements()) {
-      String propKey = e.nextElement();
-      Object propValue = props.get(propKey);
-      initConf.set(propKey,propValue.toString());
-    }
-
-    localDirPath = getHadoopLocalConfDir();
-    localFolderObj = new File(localDirPath);
-    if (!localFolderObj.exists()) {
-      localFolderObj.mkdir();
-    }
-    confXMLFile = localDirPath + File.separator + configFile;
-    xmlFileObj = new File(confXMLFile);
-    FileOutputStream out = new FileOutputStream(xmlFileObj);
-    try {
-      initConf.writeXml(out);
-    } finally {
-      out.close();
-    }
-    newConfDir = clusterManager.pushConfig(localDirPath);
-    stop();
-    waitForClusterToStop();
-    clusterManager.start(newConfDir);
-    waitForClusterToStart();
-    localFolderObj.delete();
-  }
-  
-  /**
-   * Used to restart the cluster with the default configuration.<br/>
-   * @throws IOException if an I/O error occurs.
-   */
-  public void restart() throws 
-      IOException {
-    stop();
-    waitForClusterToStop();
-    start();
-    waitForClusterToStart();
-    cleanupNewConf(newConfDir);
-  }
-
-  /**
-   * Used to delete the new configuration folder.
-   * @param path - configuration directory path.
-   * @throws IOException if an I/O error occurs.
-   */
-  public void cleanupNewConf(String path) throws IOException {
-    File file = new File(path);
-    file.delete();
-  }
-  
-  /**
-   * Used to wait until the cluster has stopped.<br/>
-   * @throws IOException if an I/O error occurs.
-   */
-  public void waitForClusterToStop() throws 
-      IOException {
-    List<Thread> chkDaemonStop = new ArrayList<Thread>();
-    for (List<AbstractDaemonClient> set : daemons.values()) {	  
-      for (AbstractDaemonClient daemon : set) {
-        DaemonStopThread dmStop = new DaemonStopThread(daemon);
-        chkDaemonStop.add(dmStop);
-        dmStop.start();
-      }
-    }
-
-    for (Thread daemonThread : chkDaemonStop){
-      try {
-        daemonThread.join();
-      } catch(InterruptedException intExp) {
-         LOG.warn("Interrupted while thread is joining." + intExp.getMessage());
-      }
-    }
-  }
- 
-  /**
-   * Used to wait until the cluster has started.<br/>
-   * @throws IOException if an I/O error occurs.
-   */
-  public void  waitForClusterToStart() throws 
-      IOException {
-    List<Thread> chkDaemonStart = new ArrayList<Thread>();
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        DaemonStartThread dmStart = new DaemonStartThread(daemon);
-        chkDaemonStart.add(dmStart);
-        dmStart.start();
-      }
-    }
-
-    for (Thread daemonThread : chkDaemonStart){
-      try {
-        daemonThread.join();
-      } catch(InterruptedException intExp) {
-        LOG.warn("Interrupted while thread is joining: " + intExp.getMessage());
-      }
-    }
-  }
-
-  /**
-   * Waits for the specified amount of time. An InterruptedException raised
-   * while waiting is logged rather than rethrown.
-   * @param duration time in milliseconds.
-   */
-  public void waitFor(long duration) {
-    try {
-      synchronized (waitLock) {
-        waitLock.wait(duration);
-      }
-    } catch (InterruptedException intExp) {
-      LOG.warn("Interrupted while thread is waiting: " + intExp.getMessage());
-    }
-  }
-  
-  class DaemonStartThread extends Thread {
-    private AbstractDaemonClient daemon;
-
-    public DaemonStartThread(AbstractDaemonClient daemon) {
-      this.daemon = daemon;
-    }
-
-    public void run(){
-      LOG.info("Waiting for Daemon " + daemon.getHostName() 
-          + " to come up.....");
-      while (true) { 
-        try {
-          daemon.ping();
-          LOG.info("Daemon " + daemon.getHostName() + " is up (ping succeeded).");
-          break;
-        } catch (Exception exp) {
-          if(LOG.isDebugEnabled()) {
-            LOG.debug(daemon.getHostName() + " is waiting to come up.");
-          }
-          waitFor(60000);
-        }
-      }
-    }
-  }
-  
-  class DaemonStopThread extends Thread {
-    private AbstractDaemonClient daemon;
-
-    public DaemonStopThread(AbstractDaemonClient daemon) {
-      this.daemon = daemon;
-    }
-
-    public void run() {
-      LOG.info("Waiting for Daemon " + daemon.getHostName() 
-          + " to stop.....");
-      while (true) {
-        try {
-          daemon.ping();
-          if(LOG.isDebugEnabled()) {
-            LOG.debug(daemon.getHostName() + " is still running; waiting for it to stop.");
-          }
-          waitFor(60000);
-        } catch (Exception exp) {
-          LOG.info("Daemon " + daemon.getHostName() + " has stopped.");
-          break;
-        } 
-      }
-    }
-  }
-}
-

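For reference, the removed restartClusterWithNewConfig() above overlays the supplied properties onto a Configuration, writes the result to a local XML file, and pushes that folder to the cluster before bouncing it. A minimal sketch of the serialization step, assuming only hadoop-common on the classpath; the property name and the temp-directory layout are illustrative, not taken from the test suite:

import java.io.File;
import java.io.FileOutputStream;
import java.util.Hashtable;

import org.apache.hadoop.conf.Configuration;

public class WriteConfigSketch {
  public static void main(String[] args) throws Exception {
    // Overrides a test might pass in; the key is illustrative.
    Hashtable<String, String> props = new Hashtable<String, String>();
    props.put("io.sort.mb", "200");

    // Overlay the overrides onto a fresh Configuration.
    Configuration conf = new Configuration();
    for (String key : props.keySet()) {
      conf.set(key, props.get(key));
    }

    // Serialize to <tmp>/new-conf/core-site.xml, closing the stream.
    File localDir = new File(System.getProperty("java.io.tmpdir"), "new-conf");
    localDir.mkdirs();
    FileOutputStream out =
        new FileOutputStream(new File(localDir, "core-site.xml"));
    try {
      conf.writeXml(out);
    } finally {
      out.close();
    }
  }
}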
+ 0 - 86
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ControlAction.java

@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-
-/**
- * Class to represent a control action which can be performed on a daemon.<br/>
- * 
- */
-
-public abstract class ControlAction<T extends Writable> implements Writable {
-
-  private T target;
-
-  /**
-   * Default constructor of the control action; the target is left unset. <br/>
-   */
-  public ControlAction() {
-  }
-
-  /**
-   * Constructor which sets the target of the control action. <br/>
-   * 
-   * @param target
-   *          target of the control action.
-   */
-  public ControlAction(T target) {
-    this.target = target;
-  }
-
-  /**
-   * Gets the target of the control action. <br/>
-   * 
-   * @return target of the action.
-   */
-  public T getTarget() {
-    return target;
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    target.readFields(in);
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    target.write(out);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj instanceof ControlAction) {
-      ControlAction<T> other = (ControlAction<T>) obj;
-      return (this.target.equals(other.getTarget()));
-    } else {
-      return false;
-    }
-  }
-  
-  
-  @Override
-  public String toString() {
-    return "Action Target : " + this.target;
-  }
-}

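Since ControlAction delegates readFields()/write() to its target, a concrete action usually only has to pick a target type. A hypothetical subclass keyed by an org.apache.hadoop.io.Text host name; note that the no-arg constructor must still supply a non-null target, because readFields() dereferences it:

package org.apache.hadoop.test.system; // alongside the removed classes

import org.apache.hadoop.io.Text;

// Hypothetical action targeted at a daemon host name.
public class EchoAction extends ControlAction<Text> {
  public EchoAction() {
    super(new Text()); // non-null target so readFields() can deserialize
  }

  public EchoAction(String hostName) {
    super(new Text(hostName));
  }
}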
+ 0 - 204
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java

@@ -1,204 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.ipc.VersionedProtocol;
-import org.apache.hadoop.fs.permission.FsPermission;
-
-/**
- * RPC interface of a given Daemon.
- */
-public interface DaemonProtocol extends VersionedProtocol {
-  long versionID = 1L;
-
-  /**
-   * Returns the Daemon configuration.
-   * @return Configuration
-   * @throws IOException in case of errors
-   */
-  Configuration getDaemonConf() throws IOException;
-
-  /**
-   * Check if the Daemon is alive.
-   * 
-   * @throws IOException
-   *           if Daemon is unreachable.
-   */
-  void ping() throws IOException;
-
-  /**
-   * Check if the Daemon is ready to accept RPC connections.
-   * 
-   * @return true if Daemon is ready to accept RPC connection.
-   * @throws IOException in case of errors
-   */
-  boolean isReady() throws IOException;
-
-  /**
-   * Get system level view of the Daemon process.
-   * 
-   * @return returns system level view of the Daemon process.
-   * 
-   * @throws IOException in case of errors
-   */
-  ProcessInfo getProcessInfo() throws IOException;
-  
-  /**
-   * Return a file status object that represents the path.
-   * @param path
-   *          given path
-   * @param local
-   *          whether the path is local or not
-   * @return a FileStatus object
-   * @throws FileNotFoundException when the path does not exist;
-   *         IOException see specific implementation
-   */
-  FileStatus getFileStatus(String path, boolean local) throws IOException;
-
-  /**
-   * Create a file with given permissions in a file system.
-   * @param path - source path where the file has to be created.
-   * @param fileName - file name.
-   * @param permission - file permissions.
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  void createFile(String path, String fileName, 
-      FsPermission permission, boolean local) throws IOException;
-   
-  /**
-   * Create a folder with given permissions in a file system.
-   * @param path - source path where the folder has to be created.
-   * @param folderName - folder name.
-   * @param permission - folder permissions.
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void createFolder(String path, String folderName, 
-      FsPermission permission, boolean local) throws IOException;
-  /**
-   * List the statuses of the files/directories in the given path if the path is
-   * a directory.
-   * 
-   * @param path
-   *          given path
-   * @param local
-   *          whether the path is local or not
-   * @return the statuses of the files/directories in the given path
-   * @throws IOException in case of errors
-   */
-  FileStatus[] listStatus(String path, boolean local) throws IOException;
-  
-  /**
-   * Enables a particular control action to be performed on the Daemon <br/>
-   * 
-   * @param action is a control action  to be enabled.
-   * 
-   * @throws IOException in case of errors
-   */
-  @SuppressWarnings("unchecked")
-  void sendAction(ControlAction action) throws IOException;
-  
-  /**
-   * Checks if the particular control action has been delivered to the Daemon
-   * component. <br/>
-   * 
-   * @param action to be checked.
-   * 
-   * @return true if action is still in waiting queue of 
-   *          actions to be delivered.
-   * @throws IOException in case of errors
-   */
-  @SuppressWarnings("unchecked")
-  boolean isActionPending(ControlAction action) throws IOException;
-  
-  /**
-   * Removes a particular control action from the list of the actions which the
-   * daemon maintains. <br/>
-   * <i><b>Not to be directly called by Test Case or clients.</b></i>
-   * @param action to be removed
-   * @throws IOException in case of errors
-   */
-  
-  @SuppressWarnings("unchecked")
-  void removeAction(ControlAction action) throws IOException;
-  
-  /**
-   * Clears out the list of control actions on the particular daemon.
-   * <br/>
-   * @throws IOException in case of errors
-   */
-  void clearActions() throws IOException;
-  
-  /**
-   * Gets a list of pending actions which are targeted on the specified key. 
-   * <br/>
-   * <i><b>Not to be directly used by clients</b></i>
-   * @param key target
-   * @return list of actions.
-   * @throws IOException in case of errors
-   */
-  @SuppressWarnings("unchecked")
-  ControlAction[] getActions(Writable key) throws IOException;
-
-  /**
-   * Gets the number of times a particular pattern has been found in the 
-   * daemon's log file.<br/>
-   * <b><i>Please note that the search spans all previous messages of the
-   * Daemon, so better practice is to get previous counts before an operation
-   * and then re-check whether the sequence of actions has caused any problems</i></b>
-   * @param pattern pattern to look for in the daemon's log file.
-   * @param list exceptions to ignore.
-   * @return number of times the pattern is found in the log file.
-   * @throws IOException in case of errors
-   */
-  int getNumberOfMatchesInLogFile(String pattern, String[] list) 
-      throws IOException;
-
-  /**
-   * Gets the user who started the particular daemon initially. <br/>
-   * 
-   * @return user who started the particular daemon.
-   * @throws IOException in case of errors
-   */
-  String getDaemonUser() throws IOException;
-
-  /**
-   * Suspends the process.
-   * @param pid process id.
-   * @return true if the process was suspended, otherwise false.
-   * @throws IOException if an I/O error occurs.
-   */
-  boolean suspendProcess(String pid) throws IOException;
-
-  /**
-   * Resumes the suspended process.
-   * @param pid process id.
-   * @return true if the suspended process was resumed, otherwise false.
-   * @throws IOException if an I/O error occurs.
-   */
-  boolean resumeProcess(String pid) throws IOException;
-}

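The ping()-throws-IOException contract is what the DaemonStartThread/DaemonStopThread loops earlier in this change poll on. A minimal sketch of that loop against any DaemonProtocol proxy; obtaining the proxy (e.g. over Hadoop RPC) is out of scope here:

package org.apache.hadoop.test.system; // alongside the removed classes

import java.io.IOException;

public final class DaemonWait {
  private DaemonWait() {}

  // Block until ping() stops throwing, i.e. the daemon is reachable.
  public static void waitUntilUp(DaemonProtocol daemon, long intervalMs)
      throws InterruptedException {
    while (true) {
      try {
        daemon.ping();
        return; // reachable: the daemon is up
      } catch (IOException e) {
        Thread.sleep(intervalMs); // still down; back off and retry
      }
    }
  }
}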
+ 0 - 77
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfo.java

@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.util.Map;
-
-import org.apache.hadoop.io.Writable;
-
-/**
- * Daemon system level process information.
- */
-public interface ProcessInfo extends Writable {
-  /**
-   * Get the current time in milliseconds.<br/>
-   * 
-   * @return current time on the daemon clock in milliseconds.
-   */
-  public long currentTimeMillis();
-
-  /**
-   * Get the environment that was used to start the Daemon process.<br/>
-   * 
-   * @return the environment variable list.
-   */
-  public Map<String,String> getEnv();
-
-  /**
-   * Get the System properties of the Daemon process.<br/>
-   * 
-   * @return the properties list.
-   */
-  public Map<String,String> getSystemProperties();
-
-  /**
-   * Get the number of active threads in Daemon VM.<br/>
-   * 
-   * @return number of active threads in Daemon VM.
-   */
-  public int activeThreadCount();
-
-  /**
-   * Get the maximum heap size that is configured for the Daemon VM. <br/>
-   * 
-   * @return maximum heap size.
-   */
-  public long maxMemory();
-
-  /**
-   * Get the free memory in Daemon VM.<br/>
-   * 
-   * @return free memory.
-   */
-  public long freeMemory();
-
-  /**
-   * Get the total memory in the Daemon VM. <br/>
-   * 
-   * @return total memory.
-   */
-  public long totalMemory();
-}

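On the consumer side, a test can derive used memory from the total/free pair the interface exposes. A small hypothetical helper that summarizes a ProcessInfo snapshot:

package org.apache.hadoop.test.system; // alongside the removed classes

public final class ProcessInfoReport {
  private ProcessInfoReport() {}

  // used = total - free, per the Runtime-style accounting above.
  public static String summarize(ProcessInfo info) {
    long used = info.totalMemory() - info.freeMemory();
    return String.format("used=%d free=%d max=%d threads=%d",
        used, info.freeMemory(), info.maxMemory(), info.activeThreadCount());
  }
}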
+ 0 - 159
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfoImpl.java

@@ -1,159 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-
-public class ProcessInfoImpl implements ProcessInfo {
-
-  private int threadCount;
-  private long currentTime;
-  private long freemem;
-  private long maxmem;
-  private long totmem;
-  private Map<String, String> env;
-  private Map<String, String> props;
-
-  public ProcessInfoImpl() {
-    env = new HashMap<String, String>();
-    props = new HashMap<String, String>();
-  }
-
-  /**
-   * Construct a concrete process information object. <br/>
-   * 
-   * @param threadCount
-   *          count of threads.
-   * @param currentTime current time in milliseconds.
-   * @param freemem free memory.
-   * @param maxmem maximum heap size.
-   * @param totmem total memory.
-   * @param env environment variable list.
-   * @param props system properties list.
-   */
-  public ProcessInfoImpl(int threadCount, long currentTime, long freemem,
-      long maxmem, long totmem, Map<String, String> env, 
-      Map<String, String> props) {
-    this.threadCount = threadCount;
-    this.currentTime = currentTime;
-    this.freemem = freemem;
-    this.maxmem = maxmem;
-    this.totmem = totmem;
-    this.env = env;
-    this.props = props;
-  }
-
-  @Override
-  public int activeThreadCount() {
-    return threadCount;
-  }
-
-  @Override
-  public long currentTimeMillis() {
-    return currentTime;
-  }
-
-  @Override
-  public long freeMemory() {
-    return freemem;
-  }
-
-  @Override
-  public Map<String, String> getEnv() {
-    return env;
-  }
-
-  @Override
-  public Map<String,String> getSystemProperties() {
-    return props;
-  }
-
-  @Override
-  public long maxMemory() {
-    return maxmem;
-  }
-
-  @Override
-  public long totalMemory() {
-    return totmem;
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    this.threadCount = in.readInt();
-    this.currentTime = in.readLong();
-    this.freemem = in.readLong();
-    this.maxmem = in.readLong();
-    this.totmem = in.readLong();
-    read(in, env);
-    read(in, props);
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(threadCount);
-    out.writeLong(currentTime);
-    out.writeLong(freemem);
-    out.writeLong(maxmem);
-    out.writeLong(totmem);
-    write(out, env);
-    write(out, props);
-  }
-
-  private void read(DataInput in, Map<String, String> map) throws IOException {
-    int size = in.readInt();
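-    // 'size' was written as twice the entry count (see write()); each
-    // iteration below consumes one key/value pair, stepping the index by 2.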
-    for (int i = 0; i < size; i = i + 2) {
-      String key = in.readUTF();
-      String value = in.readUTF();
-      map.put(key, value);
-    }
-  }
-
-  private void write(DataOutput out, Map<String, String> map) 
-  throws IOException {
-    int size = (map.size() * 2);
-    out.writeInt(size);
-    for (Map.Entry<String, String> entry : map.entrySet()) {
-      out.writeUTF(entry.getKey());
-      out.writeUTF(entry.getValue());
-    }
-  }
-
-  @Override
-  public String toString() {
-    StringBuffer strBuf = new StringBuffer();
-    strBuf.append(String.format("active threads : %d\n", threadCount));
-    strBuf.append(String.format("current time  : %d\n", currentTime));
-    strBuf.append(String.format("free memory  : %d\n", freemem));
-    strBuf.append(String.format("total memory  : %d\n", totmem));
-    strBuf.append(String.format("max memory  : %d\n", maxmem));
-    strBuf.append("Environment Variables : \n");
-    for (Map.Entry<String, String> entry : env.entrySet()) {
-      strBuf.append(String.format("key : %s value : %s \n", entry.getKey(),
-          entry.getValue()));
-    }
-    return strBuf.toString();
-  }
-
-}

+ 0 - 90
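On the daemon side, a getProcessInfo() implementation could fill ProcessInfoImpl straight from the local JVM. A sketch under that assumption; the Runtime/Thread mapping below is illustrative and not taken from the removed daemon code:

package org.apache.hadoop.test.system; // alongside the removed classes

import java.util.HashMap;
import java.util.Map;

public final class LocalProcessInfo {
  private LocalProcessInfo() {}

  public static ProcessInfoImpl snapshot() {
    Runtime rt = Runtime.getRuntime();
    // Copy the environment and system properties into plain maps.
    Map<String, String> env = new HashMap<String, String>(System.getenv());
    Map<String, String> props = new HashMap<String, String>();
    for (String name : System.getProperties().stringPropertyNames()) {
      props.put(name, System.getProperty(name));
    }
    return new ProcessInfoImpl(Thread.activeCount(),
        System.currentTimeMillis(), rt.freeMemory(), rt.maxMemory(),
        rt.totalMemory(), env, props);
  }
}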
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProxyUserDefinitions.java

@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.system;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.io.IOException;
-import java.net.URI;
-
-/**
- *  It's the data container which holds the host names and
- *  groups for each proxy user.
- */
-public abstract class ProxyUserDefinitions {
-
-  /**
-   *  Groups and host names container
-   */
-  public class GroupsAndHost {
-    private List<String> groups;
-    private List<String> hosts;
-    public List<String> getGroups() {
-      return groups;
-    }
-    public void setGroups(List<String> groups) {
-      this.groups = groups;
-    }
-    public List<String> getHosts() {
-      return hosts;
-    }
-    public void setHosts(List<String> hosts) {
-      this.hosts = hosts;
-    }
-  }
-
-  protected Map<String, GroupsAndHost> proxyUsers;
-  protected ProxyUserDefinitions () {
-    proxyUsers = new HashMap<String, GroupsAndHost>();
-  }
-
-  /**
-   * Add proxy user data to the container.
-   * @param userName - proxy user name.
-   * @param definitions - groups and host names.
-   */
-  public void addProxyUser (String userName, GroupsAndHost definitions) {
-    proxyUsers.put(userName, definitions);
-  }
-
-  /**
-   * Get the host names and groups for the given proxy user.
-   * @param userName - proxy user name.
-   * @return - GroupsAndHost object.
-   */
-  public GroupsAndHost getProxyUser (String userName) {
-    return proxyUsers.get(userName);
-  }
-
-  /**
-   * Get the proxy users data, which contains the host names
-   * and groups for each user.
-   * @return - the proxy users data as a hash map.
-   */
-  public Map<String, GroupsAndHost> getProxyUsers () {
-    return proxyUsers;
-  }
-
-  /**
-   * The implementation of this method has to be provided by a child of the class.
-   * @param filePath - target file location.
-   * @return - true if the definitions were written successfully.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public abstract boolean writeToFile(URI filePath) throws IOException;
-}

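A concrete subclass only has to implement writeToFile(). A hypothetical implementation that emits each user's groups and hosts as hadoop.proxyuser.* lines; the property naming and the file:-scheme URI are assumptions, not part of the removed class:

package org.apache.hadoop.test.system; // alongside the removed classes

import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import java.net.URI;
import java.util.List;
import java.util.Map;

public class SimpleProxyUserDefinitions extends ProxyUserDefinitions {
  @Override
  public boolean writeToFile(URI filePath) throws IOException {
    PrintWriter out = new PrintWriter(new File(filePath)); // expects a file: URI
    try {
      for (Map.Entry<String, GroupsAndHost> e : getProxyUsers().entrySet()) {
        GroupsAndHost gh = e.getValue();
        out.println("hadoop.proxyuser." + e.getKey() + ".groups="
            + join(gh.getGroups()));
        out.println("hadoop.proxyuser." + e.getKey() + ".hosts="
            + join(gh.getHosts()));
      }
    } finally {
      out.close();
    }
    return true;
  }

  // Comma-join without any external utility dependency.
  private static String join(List<String> items) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < items.size(); i++) {
      if (i > 0) sb.append(',');
      sb.append(items.get(i));
    }
    return sb.toString();
  }
}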
Some files were not shown because too many files changed in this diff