Browse Source

ZOOKEEPER-775. A large scale pub/sub system (Erwin, Ivan and Ben via mahadev)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/zookeeper/trunk@987314 13f79535-47bb-0310-9956-ffa450edef68
Mahadev Konar 15 years ago
parent
commit
c16f5b46bc
100 changed files with 13198 additions and 0 deletions
  1. 3 0
      CHANGES.txt
  2. 202 0
      src/contrib/hedwig/LICENSE.txt
  3. 2 0
      src/contrib/hedwig/NOTICE.txt
  4. 3 0
      src/contrib/hedwig/README
  5. 73 0
      src/contrib/hedwig/client/pom.xml
  6. 13 0
      src/contrib/hedwig/client/src/main/cpp/Makefile.am
  7. 186 0
      src/contrib/hedwig/client/src/main/cpp/aminclude.am
  8. 1252 0
      src/contrib/hedwig/client/src/main/cpp/c-doc.Doxyfile
  9. 56 0
      src/contrib/hedwig/client/src/main/cpp/config.h.in
  10. 18 0
      src/contrib/hedwig/client/src/main/cpp/configure.ac
  11. 12 0
      src/contrib/hedwig/client/src/main/cpp/hedwig-0.1.pc.in
  12. 45 0
      src/contrib/hedwig/client/src/main/cpp/inc/hedwig/callback.h
  13. 67 0
      src/contrib/hedwig/client/src/main/cpp/inc/hedwig/client.h
  14. 49 0
      src/contrib/hedwig/client/src/main/cpp/inc/hedwig/exceptions.h
  15. 59 0
      src/contrib/hedwig/client/src/main/cpp/inc/hedwig/publish.h
  16. 50 0
      src/contrib/hedwig/client/src/main/cpp/inc/hedwig/subscribe.h
  17. 14 0
      src/contrib/hedwig/client/src/main/cpp/lib/Makefile.am
  18. 436 0
      src/contrib/hedwig/client/src/main/cpp/lib/channel.cpp
  19. 105 0
      src/contrib/hedwig/client/src/main/cpp/lib/channel.h
  20. 47 0
      src/contrib/hedwig/client/src/main/cpp/lib/client.cpp
  21. 505 0
      src/contrib/hedwig/client/src/main/cpp/lib/clientimpl.cpp
  22. 131 0
      src/contrib/hedwig/client/src/main/cpp/lib/clientimpl.h
  23. 95 0
      src/contrib/hedwig/client/src/main/cpp/lib/data.h
  24. 27 0
      src/contrib/hedwig/client/src/main/cpp/lib/exceptions.cpp
  25. 87 0
      src/contrib/hedwig/client/src/main/cpp/lib/publisherimpl.cpp
  26. 54 0
      src/contrib/hedwig/client/src/main/cpp/lib/publisherimpl.h
  27. 387 0
      src/contrib/hedwig/client/src/main/cpp/lib/subscriberimpl.cpp
  28. 149 0
      src/contrib/hedwig/client/src/main/cpp/lib/subscriberimpl.h
  29. 233 0
      src/contrib/hedwig/client/src/main/cpp/lib/util.cpp
  30. 122 0
      src/contrib/hedwig/client/src/main/cpp/lib/util.h
  31. 49 0
      src/contrib/hedwig/client/src/main/cpp/log4cpp.conf
  32. 533 0
      src/contrib/hedwig/client/src/main/cpp/m4/ax_doxygen.m4
  33. 6 0
      src/contrib/hedwig/client/src/main/cpp/test/Makefile.am
  34. 64 0
      src/contrib/hedwig/client/src/main/cpp/test/main.cpp
  35. 268 0
      src/contrib/hedwig/client/src/main/cpp/test/publishtest.cpp
  36. 47 0
      src/contrib/hedwig/client/src/main/cpp/test/pubsubdatatest.cpp
  37. 322 0
      src/contrib/hedwig/client/src/main/cpp/test/pubsubtest.cpp
  38. 175 0
      src/contrib/hedwig/client/src/main/cpp/test/servercontrol.cpp
  39. 64 0
      src/contrib/hedwig/client/src/main/cpp/test/servercontrol.h
  40. 222 0
      src/contrib/hedwig/client/src/main/cpp/test/subscribetest.cpp
  41. 21 0
      src/contrib/hedwig/client/src/main/cpp/test/test.sh
  42. 76 0
      src/contrib/hedwig/client/src/main/cpp/test/util.h
  43. 90 0
      src/contrib/hedwig/client/src/main/cpp/test/utiltest.cpp
  44. 48 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/api/MessageHandler.java
  45. 63 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/api/Publisher.java
  46. 237 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/api/Subscriber.java
  47. 133 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/benchmark/BenchmarkPublisher.java
  48. 136 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/benchmark/BenchmarkSubscriber.java
  49. 176 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/benchmark/BenchmarkUtils.java
  50. 46 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/benchmark/BenchmarkWorker.java
  51. 127 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/benchmark/HedwigBenchmark.java
  52. 148 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/conf/ClientConfiguration.java
  53. 58 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/data/MessageConsumeData.java
  54. 149 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/data/PubSubData.java
  55. 74 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/data/TopicSubscriber.java
  56. 37 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/exceptions/InvalidSubscriberIdException.java
  57. 38 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/exceptions/ServerRedirectLoopException.java
  58. 39 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/exceptions/TooManyServerRedirectsException.java
  59. 95 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/handlers/MessageConsumeCallback.java
  60. 87 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/handlers/PubSubCallback.java
  61. 70 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/handlers/PublishResponseHandler.java
  62. 113 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/handlers/SubscribeReconnectCallback.java
  63. 329 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/handlers/SubscribeResponseHandler.java
  64. 83 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/handlers/UnsubscribeResponseHandler.java
  65. 58 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/netty/ClientChannelPipelineFactory.java
  66. 122 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/netty/ConnectCallback.java
  67. 359 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/netty/HedwigClient.java
  68. 224 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/netty/HedwigPublisher.java
  69. 585 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/netty/HedwigSubscriber.java
  70. 365 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/netty/ResponseHandler.java
  71. 98 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/netty/WriteCallback.java
  72. 41 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/ssl/SslClientContextFactory.java
  73. 65 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/ssl/SslContextFactory.java
  74. 45 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/conf/AbstractConfiguration.java
  75. 47 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/Callback.java
  76. 185 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/CallbackUtils.java
  77. 49 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/ConcurrencyUtils.java
  78. 50 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/Either.java
  79. 97 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/FileUtils.java
  80. 138 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/HedwigSocketAddress.java
  81. 43 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/Option.java
  82. 42 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/Pair.java
  83. 56 0
      src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/PathUtils.java
  84. 32 0
      src/contrib/hedwig/client/src/main/resources/log4j.properties
  85. 51 0
      src/contrib/hedwig/client/src/test/java/org/apache/hedwig/client/AppTest.java
  86. 41 0
      src/contrib/hedwig/client/src/test/java/org/apache/hedwig/util/TestFileUtils.java
  87. 104 0
      src/contrib/hedwig/client/src/test/java/org/apache/hedwig/util/TestHedwigSocketAddress.java
  88. 54 0
      src/contrib/hedwig/client/src/test/java/org/apache/hedwig/util/TestPathUtils.java
  89. 7 0
      src/contrib/hedwig/conf/hw_client_sample.conf
  90. 10 0
      src/contrib/hedwig/conf/hw_server_sample.conf
  91. 146 0
      src/contrib/hedwig/doc/build.txt
  92. 338 0
      src/contrib/hedwig/doc/dev.txt
  93. 17 0
      src/contrib/hedwig/doc/doc.txt
  94. 252 0
      src/contrib/hedwig/doc/user.txt
  95. 286 0
      src/contrib/hedwig/formatter.xml
  96. 68 0
      src/contrib/hedwig/pom.xml
  97. 26 0
      src/contrib/hedwig/protocol/Makefile
  98. 77 0
      src/contrib/hedwig/protocol/pom.xml
  99. 162 0
      src/contrib/hedwig/protocol/src/main/java/org/apache/hedwig/exceptions/PubSubException.java
  100. 153 0
      src/contrib/hedwig/protocol/src/main/java/org/apache/hedwig/protoextensions/MessageIdUtils.java

+ 3 - 0
CHANGES.txt

@@ -130,6 +130,9 @@ NEW FEATURES:
   ZOOKEEPER-808. Web-based Administrative Interface
   (Andrei Savu via phunt)
 
+  ZOOKEEPER-775. A large scale pub/sub system (Erwin, Ivan and Ben via
+  mahadev)
+
 Release 3.3.0 - 2010-03-24
 
 Non-backward compatible changes:

+ 202 - 0
src/contrib/hedwig/LICENSE.txt

@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 2 - 0
src/contrib/hedwig/NOTICE.txt

@@ -0,0 +1,2 @@
+Copyright (c) 2010 Yahoo! Inc.  All rights reserved.
+

+ 3 - 0
src/contrib/hedwig/README

@@ -0,0 +1,3 @@
+Hedwig is a large scale pub/sub system built on top of ZooKeeper and BookKeeper.
+
+For documentation on building, setting up, and using Hedwig see the `doc` directory.

+ 73 - 0
src/contrib/hedwig/client/pom.xml

@@ -0,0 +1,73 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hedwig</groupId>
+    <artifactId>hedwig</artifactId>
+    <version>1.0-SNAPSHOT</version>
+  </parent>
+  <properties>
+      <mainclass>org.apache.hedwig.client.App</mainclass>
+  </properties>
+  <groupId>org.apache.hedwig</groupId>
+  <artifactId>client</artifactId>
+  <packaging>jar</packaging>
+  <version>1.0-SNAPSHOT</version>
+  <name>client</name>
+  <url>http://maven.apache.org</url>
+  <dependencies>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>4.8.1</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hedwig</groupId>
+      <artifactId>protocol</artifactId>
+      <version>1.0-SNAPSHOT</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <version>1.2.14</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.jboss.netty</groupId>
+      <artifactId>netty</artifactId>
+      <version>3.1.2.GA</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-configuration</groupId>
+      <artifactId>commons-configuration</artifactId>
+      <version>1.6</version>
+    </dependency>
+  </dependencies>
+  <repositories>
+    <repository>
+      <id>jboss.release</id>
+      <name>JBoss releases</name>
+      <url>http://repository.jboss.org/maven2</url>
+    </repository>
+  </repositories>
+</project>

+ 13 - 0
src/contrib/hedwig/client/src/main/cpp/Makefile.am

@@ -0,0 +1,13 @@
+ACLOCAL_AMFLAGS = -I m4
+
+SUBDIRS = lib test
+
+library_includedir=$(includedir)/hedwig-0.1/hedwig
+library_include_HEADERS = inc/hedwig/callback.h inc/hedwig/client.h inc/hedwig/exceptions.h inc/hedwig/publish.h inc/hedwig/subscribe.h 
+
+pkgconfigdir = $(libdir)/pkgconfig
+nodist_pkgconfig_DATA = hedwig-0.1.pc
+
+include aminclude.am
+
+EXTRA_DIST = $(DX_CONFIG) doc/html

+ 186 - 0
src/contrib/hedwig/client/src/main/cpp/aminclude.am

@@ -0,0 +1,186 @@
+# Copyright (C) 2004 Oren Ben-Kiki
+# This file is distributed under the same terms as the Automake macro files.
+
+# Generate automatic documentation using Doxygen. Goals and variables values
+# are controlled by the various DX_COND_??? conditionals set by autoconf.
+#
+# The provided goals are:
+# doxygen-doc: Generate all doxygen documentation.
+# doxygen-run: Run doxygen, which will generate some of the documentation
+#              (HTML, CHM, CHI, MAN, RTF, XML) but will not do the post
+#              processing required for the rest of it (PS, PDF, and some MAN).
+# doxygen-man: Rename some doxygen generated man pages.
+# doxygen-ps: Generate doxygen PostScript documentation.
+# doxygen-pdf: Generate doxygen PDF documentation.
+#
+# Note that by default these are not integrated into the automake goals. If
+# doxygen is used to generate man pages, you can achieve this integration by
+# setting man3_MANS to the list of man pages generated and then adding the
+# dependency:
+#
+#   $(man3_MANS): doxygen-doc
+#
+# This will cause make to run doxygen and generate all the documentation.
+#
+# The following variable is intended for use in Makefile.am:
+#
+# DX_CLEANFILES = everything to clean.
+#
+# This is usually added to MOSTLYCLEANFILES.
+
+## --------------------------------- ##
+## Format-independent Doxygen rules. ##
+## --------------------------------- ##
+
+if DX_COND_doc
+
+## ------------------------------- ##
+## Rules specific for HTML output. ##
+## ------------------------------- ##
+
+if DX_COND_html
+
+DX_CLEAN_HTML = @DX_DOCDIR@/html
+
+endif DX_COND_html
+
+## ------------------------------ ##
+## Rules specific for CHM output. ##
+## ------------------------------ ##
+
+if DX_COND_chm
+
+DX_CLEAN_CHM = @DX_DOCDIR@/chm
+
+if DX_COND_chi
+
+DX_CLEAN_CHI = @DX_DOCDIR@/@PACKAGE@.chi
+
+endif DX_COND_chi
+
+endif DX_COND_chm
+
+## ------------------------------ ##
+## Rules specific for MAN output. ##
+## ------------------------------ ##
+
+if DX_COND_man
+
+DX_CLEAN_MAN = @DX_DOCDIR@/man
+
+endif DX_COND_man
+
+## ------------------------------ ##
+## Rules specific for RTF output. ##
+## ------------------------------ ##
+
+if DX_COND_rtf
+
+DX_CLEAN_RTF = @DX_DOCDIR@/rtf
+
+endif DX_COND_rtf
+
+## ------------------------------ ##
+## Rules specific for XML output. ##
+## ------------------------------ ##
+
+if DX_COND_xml
+
+DX_CLEAN_XML = @DX_DOCDIR@/xml
+
+endif DX_COND_xml
+
+## ----------------------------- ##
+## Rules specific for PS output. ##
+## ----------------------------- ##
+
+if DX_COND_ps
+
+DX_CLEAN_PS = @DX_DOCDIR@/@PACKAGE@.ps
+
+DX_PS_GOAL = doxygen-ps
+
+doxygen-ps: @DX_DOCDIR@/@PACKAGE@.ps
+
+@DX_DOCDIR@/@PACKAGE@.ps: @DX_DOCDIR@/@PACKAGE@.tag
+	cd @DX_DOCDIR@/latex; \
+	rm -f *.aux *.toc *.idx *.ind *.ilg *.log *.out; \
+	$(DX_LATEX) refman.tex; \
+	$(MAKEINDEX_PATH) refman.idx; \
+	$(DX_LATEX) refman.tex; \
+	countdown=5; \
+	while $(DX_EGREP) 'Rerun (LaTeX|to get cross-references right)' \
+	                  refman.log > /dev/null 2>&1 \
+	   && test $$countdown -gt 0; do \
+	    $(DX_LATEX) refman.tex; \
+	    countdown=`expr $$countdown - 1`; \
+	done; \
+	$(DX_DVIPS) -o ../@PACKAGE@.ps refman.dvi
+
+endif DX_COND_ps
+
+## ------------------------------ ##
+## Rules specific for PDF output. ##
+## ------------------------------ ##
+
+if DX_COND_pdf
+
+DX_CLEAN_PDF = @DX_DOCDIR@/@PACKAGE@.pdf
+
+DX_PDF_GOAL = doxygen-pdf
+
+doxygen-pdf: @DX_DOCDIR@/@PACKAGE@.pdf
+
+@DX_DOCDIR@/@PACKAGE@.pdf: @DX_DOCDIR@/@PACKAGE@.tag
+	cd @DX_DOCDIR@/latex; \
+	rm -f *.aux *.toc *.idx *.ind *.ilg *.log *.out; \
+	$(DX_PDFLATEX) refman.tex; \
+	$(DX_MAKEINDEX) refman.idx; \
+	$(DX_PDFLATEX) refman.tex; \
+	countdown=5; \
+	while $(DX_EGREP) 'Rerun (LaTeX|to get cross-references right)' \
+	                  refman.log > /dev/null 2>&1 \
+	   && test $$countdown -gt 0; do \
+	    $(DX_PDFLATEX) refman.tex; \
+	    countdown=`expr $$countdown - 1`; \
+	done; \
+	mv refman.pdf ../@PACKAGE@.pdf
+
+endif DX_COND_pdf
+
+## ------------------------------------------------- ##
+## Rules specific for LaTeX (shared for PS and PDF). ##
+## ------------------------------------------------- ##
+
+if DX_COND_latex
+
+DX_CLEAN_LATEX = @DX_DOCDIR@/latex
+
+endif DX_COND_latex
+
+.PHONY: doxygen-run doxygen-doc $(DX_PS_GOAL) $(DX_PDF_GOAL)
+
+.INTERMEDIATE: doxygen-run $(DX_PS_GOAL) $(DX_PDF_GOAL)
+
+doxygen-run: @DX_DOCDIR@/@PACKAGE@.tag
+
+doxygen-doc: doxygen-run $(DX_PS_GOAL) $(DX_PDF_GOAL)
+
+@DX_DOCDIR@/@PACKAGE@.tag: $(DX_CONFIG) $(pkginclude_HEADERS)
+	rm -rf @DX_DOCDIR@
+	$(DX_ENV) $(DX_DOXYGEN) $(srcdir)/$(DX_CONFIG)
+
+DX_CLEANFILES = \
+    @DX_DOCDIR@/@PACKAGE@.tag \
+    -r \
+    $(DX_CLEAN_HTML) \
+    $(DX_CLEAN_CHM) \
+    $(DX_CLEAN_CHI) \
+    $(DX_CLEAN_MAN) \
+    $(DX_CLEAN_RTF) \
+    $(DX_CLEAN_XML) \
+    $(DX_CLEAN_PS) \
+    $(DX_CLEAN_PDF) \
+    $(DX_CLEAN_LATEX)
+
+endif DX_COND_doc

+ 1252 - 0
src/contrib/hedwig/client/src/main/cpp/c-doc.Doxyfile

@@ -0,0 +1,1252 @@
+# Doxyfile 1.4.7
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+#       TAG = value [value, ...]
+# For lists items can also be appended using:
+#       TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded 
+# by quotes) that should identify the project.
+
+PROJECT_NAME = $(PROJECT)-$(VERSION)
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. 
+# This could be handy for archiving the generated documentation or 
+# if some version control system is used.
+
+PROJECT_NUMBER = 
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) 
+# base path where the generated documentation will be put. 
+# If a relative path is entered, it will be relative to the location 
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = $(DOCDIR)
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 
+# 4096 sub-directories (in 2 levels) under the output directory of each output 
+# format and will distribute the generated files over these directories. 
+# Enabling this option can be useful when feeding doxygen a huge amount of 
+# source files, where putting all generated files in the same directory would 
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all 
+# documentation generated by doxygen is written. Doxygen will use this 
+# information to generate all constant output in the proper language. 
+# The default language is English, other supported languages are: 
+# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, 
+# Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese, 
+# Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian, 
+# Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, 
+# Swedish, and Ukrainian.
+
+OUTPUT_LANGUAGE = English
+
+# This tag can be used to specify the encoding used in the generated output. 
+# The encoding is not always determined by the language that is chosen, 
+# but also whether or not the output is meant for Windows or non-Windows users. 
+# In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES 
+# forces the Windows encoding (this is the default for the Windows binary), 
+# whereas setting the tag to NO uses a Unix-style encoding (the default for 
+# all platforms other than Windows).
+
+USE_WINDOWS_ENCODING = NO
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will 
+# include brief member descriptions after the members that are listed in 
+# the file and class documentation (similar to JavaDoc). 
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend 
+# the brief description of a member or function before the detailed description. 
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the 
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator 
+# that is used to form the text in various listings. Each string 
+# in this list, if found as the leading text of the brief description, will be 
+# stripped from the text and the result after processing the whole list, is 
+# used as the annotated text. Otherwise, the brief description is used as-is. 
+# If left blank, the following values are used ("$name" is automatically 
+# replaced with the name of the entity): "The $name class" "The $name widget" 
+# "The $name file" "is" "provides" "specifies" "contains" 
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF = 
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then 
+# Doxygen will generate a detailed section even if there is only a brief 
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all 
+# inherited members of a class in the documentation of that class as if those 
+# members were ordinary class members. Constructors, destructors and assignment 
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full 
+# path before files name in the file list and in the header files. If set 
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag 
+# can be used to strip a user-defined part of the path. Stripping is 
+# only done if one of the specified strings matches the left-hand part of 
+# the path. The tag can be used to show relative paths in the file list. 
+# If left blank the directory from which doxygen is run is used as the 
+# path to strip.
+
+STRIP_FROM_PATH = 
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of 
+# the path mentioned in the documentation of a class, which tells 
+# the reader which header file to include in order to use a class. 
+# If left blank only the name of the header file containing the class 
+# definition is used. Otherwise one should specify the include paths that 
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH = 
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter 
+# (but less readable) file names. This can be useful if your file system 
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen 
+# will interpret the first line (until the first dot) of a JavaDoc-style 
+# comment as the brief description. If set to NO, the JavaDoc 
+# comments will behave just like the Qt-style comments (thus requiring an 
+# explicit @brief command for a brief description).
+
+JAVADOC_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen 
+# treat a multi-line C++ special comment block (i.e. a block of //! or /// 
+# comments) as a brief description. This used to be the default behaviour. 
+# The new default is to treat a multi-line C++ comment block as a detailed 
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the DETAILS_AT_TOP tag is set to YES then Doxygen 
+# will output the detailed description near the top, like JavaDoc.
+# If set to NO, the detailed description appears after the member 
+# documentation.
+
+DETAILS_AT_TOP = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented 
+# member inherits the documentation from any documented member that it 
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce 
+# a new page for each member. If set to NO, the documentation of a member will 
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. 
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts 
+# as commands in the documentation. An alias has the form "name=value". 
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to 
+# put the command \sideeffect (or @sideeffect) in the documentation, which 
+# will result in a user-defined paragraph with heading "Side Effects:". 
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES = 
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C 
+# sources only. Doxygen will then generate output that is more tailored for C. 
+# For instance, some of the names that are used will be different. The list 
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java 
+# sources only. Doxygen will then generate output that is more tailored for Java. 
+# For instance, namespaces will be presented as packages, qualified scopes 
+# will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to 
+# include (a tag file for) the STL sources as input, then you should 
+# set this tag to YES in order to let doxygen match functions declarations and 
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. 
+# func(std::string) {}). This also make the inheritance and collaboration 
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC 
+# tag is set to YES, then doxygen will reuse the documentation of the first 
+# member in the group (if any) for the other members of the group. By default 
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of 
+# the same type (for instance a group of public functions) to be put as a 
+# subgroup of that type (e.g. under the Public Functions section). Set it to 
+# NO to prevent subgrouping. Alternatively, this can be done per class using 
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in 
+# documentation are documented, even if no documentation was available. 
+# Private class members and static file members will be hidden unless 
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class 
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file 
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) 
+# defined locally in source files will be included in the documentation. 
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local 
+# methods, which are defined in the implementation section but not in 
+# the interface are included in the documentation. 
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all 
+# undocumented members of documented classes, files or namespaces. 
+# If set to NO (the default) these members will be included in the 
+# various overviews, but no documentation section is generated. 
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all 
+# undocumented classes that are normally visible in the class hierarchy. 
+# If set to NO (the default) these classes will be included in the various 
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all 
+# friend (class|struct|union) declarations. 
+# If set to NO (the default) these declarations will be included in the 
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any 
+# documentation blocks found inside the body of a function. 
+# If set to NO (the default) these blocks will be appended to the 
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation 
+# that is typed after a \internal command is included. If the tag is set 
+# to NO (the default) then the documentation will be excluded. 
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate 
+# file names in lower-case letters. If set to YES upper-case letters are also 
+# allowed. This is useful if you have classes or files whose names only differ 
+# in case and if your file system supports case sensitive file names. Windows 
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen 
+# will show members with their full class and namespace scopes in the 
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen 
+# will put a list of the files that are included by a file in the documentation 
+# of that file.
+
+SHOW_INCLUDE_FILES = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] 
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen 
+# will sort the (detailed) documentation of file and class members 
+# alphabetically by member name. If set to NO the members will appear in 
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the 
+# brief documentation of file, namespace and class members alphabetically 
+# by member name. If set to NO (the default) the members will appear in 
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be 
+# sorted by fully-qualified names, including namespaces. If set to 
+# NO (the default), the class list will be sorted only by class name, 
+# not including the namespace part. 
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the 
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or 
+# disable (NO) the todo list. This list is created by putting \todo 
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or 
+# disable (NO) the test list. This list is created by putting \test 
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or 
+# disable (NO) the bug list. This list is created by putting \bug 
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or 
+# disable (NO) the deprecated list. This list is created by putting 
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST = YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional 
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS = 
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines 
+# the initial value of a variable or define consists of for it to appear in 
+# the documentation. If the initializer consists of more lines than specified 
+# here it will be hidden. Use a value of 0 to hide initializers completely. 
+# The appearance of the initializer of individual variables and defines in the 
+# documentation can be controlled using \showinitializer or \hideinitializer 
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated 
+# at the bottom of the documentation of classes and structs. If set to YES the 
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories 
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy 
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that 
+# doxygen should invoke to get the current version for each file (typically from the 
+# version control system). Doxygen will invoke the program by executing (via 
+# popen()) the command <command> <input-file>, where <command> is the value of 
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file 
+# provided by doxygen. Whatever the program writes to standard output 
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER = 
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated 
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are 
+# generated by doxygen. Possible values are YES and NO. If left blank 
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings 
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will 
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for 
+# potential errors in the documentation, such as not documenting some 
+# parameters in a documented function, or documenting parameters that 
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for 
+# functions that are documented, but have no documentation for their parameters 
+# or return value. If set to NO (the default) doxygen will only warn about 
+# wrong or incomplete parameter documentation, but not about the absence of 
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that 
+# doxygen can produce. The string should contain the $file, $line, and $text 
+# tags, which will be replaced by the file and line number from which the 
+# warning originated and the warning text. Optionally the format may contain 
+# $version, which will be replaced by the version of the file (if it could 
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning 
+# and error messages should be written. If left blank the output is written 
+# to stderr.
+
+WARN_LOGFILE = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain 
+# documented source files. You may enter file names like "myfile.cpp" or 
+# directories like "/usr/src/myproject". Separate the files or directories 
+# with spaces.
+
+INPUT = inc/ lib/
+
+# If the value of the INPUT tag contains directories, you can use the 
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp 
+# and *.h) to filter out the source-files in the directories. If left 
+# blank the following patterns are tested: 
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx 
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py
+
+FILE_PATTERNS = 
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories 
+# should be searched for input files as well. Possible values are YES and NO. 
+# If left blank NO is used.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should 
+# be excluded from the INPUT source files. This way you can easily exclude a 
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE = 
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or 
+# directories that are symbolic links (a Unix filesystem feature) are excluded 
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the 
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude 
+# certain files from those directories. Note that the wildcards are matched 
+# against the file with absolute path, so to exclude all test directories 
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS = 
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or 
+# directories that contain example code fragments that are included (see 
+# the \include command).
+
+EXAMPLE_PATH = 
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the 
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp 
+# and *.h) to filter out the source-files in the directories. If left 
+# blank all files are included.
+
+EXAMPLE_PATTERNS = 
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be 
+# searched for input files to be used with the \include or \dontinclude 
+# commands irrespective of the value of the RECURSIVE tag. 
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or 
+# directories that contain image that are included in the documentation (see 
+# the \image command).
+
+IMAGE_PATH = 
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should 
+# invoke to filter for each input file. Doxygen will invoke the filter program 
+# by executing (via popen()) the command <filter> <input-file>, where <filter> 
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an 
+# input file. Doxygen will then use the output that the filter program writes 
+# to standard output.  If FILTER_PATTERNS is specified, this tag will be 
+# ignored.
+
+INPUT_FILTER = 
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern 
+# basis.  Doxygen will compare the file name with each pattern and apply the 
+# filter if there is a match.  The filters are a list of the form: 
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further 
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER 
+# is applied to all files.
+
+FILTER_PATTERNS = 
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using 
+# INPUT_FILTER) will be used to filter the input files when producing source 
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will 
+# be generated. Documented entities will be cross-referenced with these sources. 
+# Note: To get rid of all source code in the generated output, make sure also 
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body 
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct 
+# doxygen to hide any special comment blocks from generated source code 
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES (the default) 
+# then for each documented function all documented 
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES (the default) 
+# then for each documented function all documented entities 
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.  Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code 
+# will point to the HTML generated by the htags(1) tool instead of doxygen 
+# built-in source browser. The htags tool is part of GNU's global source 
+# tagging system (see http://www.gnu.org/software/global/global.html). You 
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen 
+# will generate a verbatim copy of the header file for each class for 
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index 
+# of all compounds will be generated. Enable this if the project 
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then 
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns 
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all 
+# classes will be put under the same header in the alphabetical index. 
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that 
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will 
+# generate HTML output.
+
+GENERATE_HTML = $(GENERATE_HTML)
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for 
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank 
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for 
+# each generated HTML page. If it is left blank doxygen will generate a 
+# standard header.
+
+HTML_HEADER = 
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for 
+# each generated HTML page. If it is left blank doxygen will generate a 
+# standard footer.
+
+HTML_FOOTER = 
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading 
+# style sheet that is used by each HTML page. It can be used to 
+# fine-tune the look of the HTML output. If the tag is left blank doxygen 
+# will generate a default style sheet. Note that doxygen will try to copy 
+# the style sheet file to the HTML output directory, so don't put your own 
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET = 
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, 
+# files or namespaces will be aligned in HTML using tables. If set to 
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files 
+# will be generated that can be used as input for tools like the 
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) 
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = $(GENERATE_HTMLHELP)
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can 
+# be used to specify the file name of the resulting .chm file. You 
+# can add a path in front of the file if the result should not be 
+# written to the html output directory.
+
+CHM_FILE = ../$(PROJECT).chm
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can 
+# be used to specify the location (absolute path including file name) of 
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run 
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION = $(HHC_PATH)
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag 
+# controls if a separate .chi index file is generated (YES) or that 
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = $(GENERATE_CHI)
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag 
+# controls whether a binary table of contents is generated (YES) or a 
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members 
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at 
+# top of each HTML page. The value NO (the default) enables the index and 
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20]) 
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that 
+# is generated for HTML Help). For this to work a browser that supports 
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, 
+# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are 
+# probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be 
+# used to set the initial width (in pixels) of the frame in which the tree 
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will 
+# generate Latex output.
+
+GENERATE_LATEX = $(GENERATE_LATEX)
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be 
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to 
+# generate index for LaTeX. If left blank `makeindex' will be used as the 
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact 
+# LaTeX documents. This may be useful for small projects and may help to 
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used 
+# by the printer. Possible values are: a4, a4wide, letter, legal and 
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = $(PAPER_SIZE)
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX 
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES = 
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for 
+# the generated latex document. The header should contain everything until 
+# the first chapter. If it is left blank doxygen will generate a 
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER = 
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated 
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will 
+# contain links (just like the HTML output) instead of page references 
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of 
+# plain latex in the generated Makefile. Set this option to YES to get a 
+# higher quality PDF documentation.
+
+USE_PDFLATEX = $(GENERATE_PDF)
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. 
+# command to the generated LaTeX files. This will instruct LaTeX to keep 
+# running if errors occur, instead of asking the user for help. 
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not 
+# include the index chapters (such as File Index, Compound Index, etc.) 
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output 
+# The RTF output is optimized for Word 97 and may not look very pretty with 
+# other RTF readers or editors.
+
+GENERATE_RTF = $(GENERATE_RTF)
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact 
+# RTF documents. This may be useful for small projects and may help to 
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated 
+# will contain hyperlink fields. The RTF file will 
+# contain links (just like the HTML output) instead of page references. 
+# This makes the output suitable for online browsing using WORD or other 
+# programs which support those fields. 
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's 
+# config file, i.e. a series of assignments. You only have to provide 
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE = 
+
+# Set optional variables used in the generation of an rtf document. 
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will 
+# generate man pages
+
+GENERATE_MAN = $(GENERATE_MAN)
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to 
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output, 
+# then it will generate one additional man file for each entity 
+# documented in the real man page(s). These additional files 
+# only source the real man page, but without them the man command 
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will 
+# generate an XML file that captures the structure of 
+# the code including all documentation.
+
+GENERATE_XML = $(GENERATE_XML)
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema, 
+# which can be used by a validating XML parser to check the 
+# syntax of the XML files.
+
+XML_SCHEMA = 
+
+# The XML_DTD tag can be used to specify an XML DTD, 
+# which can be used by a validating XML parser to check the 
+# syntax of the XML files.
+
+XML_DTD = 
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will 
+# dump the program listings (including syntax highlighting 
+# and cross-referencing information) to the XML output. Note that 
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will 
+# generate an AutoGen Definitions (see autogen.sf.net) file 
+# that captures the structure of the code including all 
+# documentation. Note that this feature is still experimental 
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will 
+# generate a Perl module file that captures the structure of 
+# the code including all documentation. Note that this 
+# feature is still experimental and incomplete at the 
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate 
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able 
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be 
+# nicely formatted so it can be parsed by a human reader.  This is useful 
+# if you want to understand what is going on.  On the other hand, if this 
+# tag is set to NO the size of the Perl module output will be much smaller 
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file 
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. 
+# This is useful so different doxyrules.make files included by the same 
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX = 
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor   
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will 
+# evaluate all C-preprocessor directives found in the sources and include 
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro 
+# names in the source code. If set to NO (the default) only conditional 
+# compilation will be performed. Macro expansion can be done in a controlled 
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES 
+# then the macro expansion is limited to the macros specified with the 
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files 
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that 
+# contain include files that are not input files but should be processed by 
+# the preprocessor.
+
+INCLUDE_PATH = 
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard 
+# patterns (like *.h and *.hpp) to filter out the header-files in the 
+# directories. If left blank, the patterns specified with FILE_PATTERNS will 
+# be used.
+
+INCLUDE_FILE_PATTERNS = 
+
+# The PREDEFINED tag can be used to specify one or more macro names that 
+# are defined before the preprocessor is started (similar to the -D option of 
+# gcc). The argument of the tag is a list of macros of the form: name 
+# or name=definition (no spaces). If the definition and the = are 
+# omitted =1 is assumed. To prevent a macro definition from being 
+# undefined via #undef or recursively expanded use the := operator 
+# instead of the = operator.
+
+PREDEFINED = 
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then 
+# this tag can be used to specify a list of macro names that should be expanded. 
+# The macro definition that is found in the sources will be used. 
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED = 
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then 
+# doxygen's preprocessor will remove all function-like macros that are alone 
+# on a line, have an all uppercase name, and do not end with a semicolon. Such 
+# function macros are typically used for boiler-plate code, and will confuse 
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references   
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles. 
+# Optionally an initial location of the external documentation 
+# can be added for each tagfile. The format of a tag file without 
+# this location is as follows: 
+#   TAGFILES = file1 file2 ... 
+# Adding location for the tag files is done as follows: 
+#   TAGFILES = file1=loc1 "file2 = loc2" ... 
+# where "loc1" and "loc2" can be relative or absolute paths or 
+# URLs. If a location is present for each tag, the installdox tool 
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen 
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES = 
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create 
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE = $(DOCDIR)/$(PROJECT).tag
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed 
+# in the class index. If set to NO only the inherited external classes 
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed 
+# in the modules index. If set to NO, only the current project's groups will 
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script 
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool   
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will 
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base 
+# or super classes. Setting the tag to NO turns the diagrams off. Note that 
+# this option is superseded by the HAVE_DOT option below. This is only a 
+# fallback. It is recommended to install and use dot, since it yields more 
+# powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# If set to YES, the inheritance and collaboration graphs will hide 
+# inheritance and usage relations if the target is undocumented 
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is 
+# available from the path. This tool is part of Graphviz, a graph visualization 
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section 
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = $(HAVE_DOT)
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for each documented class showing the direct and 
+# indirect inheritance relations. Setting this tag to YES will force the 
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for each documented class showing the direct and 
+# indirect implementation dependencies (inheritance, containment, and 
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and 
+# collaboration diagrams in a style similar to the OMG's Unified Modeling 
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the 
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT 
+# tags are set to YES then doxygen will generate a graph for each documented 
+# file showing the direct and indirect include dependencies of the file with 
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and 
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each 
+# documented header file showing the documented files that directly or 
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will 
+# generate a call dependency graph for every global function or class method. 
+# Note that enabling this option will significantly increase the time of a run. 
+# So in most cases it will be better to enable call graphs for selected 
+# functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then doxygen will 
+# generate a caller dependency graph for every global function or class method. 
+# Note that enabling this option will significantly increase the time of a run. 
+# So in most cases it will be better to enable caller graphs for selected 
+# functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen 
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES 
+# then doxygen will show the dependencies a directory has on other directories 
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images 
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be 
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH = $(DOT_PATH)
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that 
+# contain dot files that are included in the documentation (see the 
+# \dotfile command).
+
+DOTFILE_DIRS = 
+
+# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width 
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than 
+# this value, doxygen will try to truncate the graph, so that it fits within 
+# the specified constraint. Beware that most browsers cannot cope with very 
+# large images.
+
+MAX_DOT_GRAPH_WIDTH = 1024
+
+# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allows height 
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than 
+# this value, doxygen will try to truncate the graph, so that it fits within 
+# the specified constraint. Beware that most browsers cannot cope with very 
+# large images.
+
+MAX_DOT_GRAPH_HEIGHT = 1024
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the 
+# graphs generated by dot. A depth value of 3 means that only nodes reachable 
+# from the root by following a path via at most 3 edges will be shown. Nodes 
+# that lay further from the root node will be omitted. Note that setting this 
+# option to 1 or 2 may greatly reduce the computation time needed for large 
+# code bases. Also note that a graph may be further truncated if the graph's 
+# image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH 
+# and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default), 
+# the graph is not depth-constrained.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent 
+# background. This is disabled by default, which results in a white background. 
+# Warning: Depending on the platform used, enabling this option may lead to 
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to 
+# read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output 
+# files in one run (i.e. multiple -o and -T options on the command line). This 
+# makes dot run faster, but since only newer versions of dot (>1.8.10) 
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will 
+# generate a legend page explaining the meaning of the various boxes and 
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will 
+# remove the intermediate dot files that are used to generate 
+# the various graphs.
+
+DOT_CLEANUP = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine   
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be 
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE = NO

+ 56 - 0
src/contrib/hedwig/client/src/main/cpp/config.h.in

@@ -0,0 +1,56 @@
+/* config.h.in.  Generated from configure.ac by autoheader.  */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#undef HAVE_DLFCN_H
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#undef HAVE_INTTYPES_H
+
+/* Define to 1 if you have the <memory.h> header file. */
+#undef HAVE_MEMORY_H
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#undef HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#undef HAVE_STDLIB_H
+
+/* Define to 1 if you have the <strings.h> header file. */
+#undef HAVE_STRINGS_H
+
+/* Define to 1 if you have the <string.h> header file. */
+#undef HAVE_STRING_H
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#undef HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#undef HAVE_SYS_TYPES_H
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#undef HAVE_UNISTD_H
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+   */
+#undef LT_OBJDIR
+
+/* Define to the address where bug reports for this package should be sent. */
+#undef PACKAGE_BUGREPORT
+
+/* Define to the full name of this package. */
+#undef PACKAGE_NAME
+
+/* Define to the full name and version of this package. */
+#undef PACKAGE_STRING
+
+/* Define to the one symbol short name of this package. */
+#undef PACKAGE_TARNAME
+
+/* Define to the home page for this package. */
+#undef PACKAGE_URL
+
+/* Define to the version of this package. */
+#undef PACKAGE_VERSION
+
+/* Define to 1 if you have the ANSI C header files. */
+#undef STDC_HEADERS

+ 18 - 0
src/contrib/hedwig/client/src/main/cpp/configure.ac

@@ -0,0 +1,18 @@
+AC_INIT([Hedwig C++ Client], [0.1], [zookeeper-dev@hadoop.apache.org], [hedwig-cpp], [http://hadoop.apache.org/zookeeper//])
+
+AC_PREREQ([2.59])
+AM_INIT_AUTOMAKE([1.10 no-define foreign])
+AC_CONFIG_HEADERS([config.h])
+AC_PROG_CXX
+AC_CONFIG_FILES([Makefile lib/Makefile test/Makefile hedwig-0.1.pc])
+AC_PROG_LIBTOOL
+AC_CONFIG_MACRO_DIR([m4])
+PKG_CHECK_MODULES([DEPS], [log4cpp >= 0.23 protobuf >= 2.3.0 cppunit])
+
+DX_HTML_FEATURE(ON)
+DX_INIT_DOXYGEN(hedwig-c++, c-doc.Doxyfile, doc)
+
+CXXFLAGS="$CXXFLAGS -fno-inline"
+
+AC_OUTPUT
+

+ 12 - 0
src/contrib/hedwig/client/src/main/cpp/hedwig-0.1.pc.in

@@ -0,0 +1,12 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: hedwig
+Description: Hedwig C++ client library.
+Requires: 
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -lhedwig01
+Cflags: -I${includedir}/hedwig-0.1 -I${libdir}/hedwig-0.1/include
+

+ 45 - 0
src/contrib/hedwig/client/src/main/cpp/inc/hedwig/callback.h

@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HEDWIG_CALLBACK_H
+#define HEDWIG_CALLBACK_H
+
+#include <string>
+#include <hedwig/exceptions.h>
+#include <hedwig/protocol.h>
+#include <tr1/memory>
+
+namespace Hedwig {
+  /**
+     Callback interface used to report the outcome of an asynchronous
+     client operation. Exactly one of the two methods is expected to be
+     invoked once the operation finishes.
+  */
+  class OperationCallback {
+  public:
+    /** Invoked when the operation completed successfully. */
+    virtual void operationComplete() = 0;
+    /** Invoked when the operation failed; exception describes the cause. */
+    virtual void operationFailed(const std::exception& exception) = 0;
+    
+    virtual ~OperationCallback() {};
+  };
+  /** Shared-ownership handle for passing callbacks between threads. */
+  typedef std::tr1::shared_ptr<OperationCallback> OperationCallbackPtr;
+
+  /**
+     Callback interface through which delivered messages are handed to the
+     application. The implementation should invoke the supplied callback
+     once it has finished processing the message.
+  */
+  class MessageHandlerCallback {
+  public:
+    /**
+       Handle one message delivered for (topic, subscriberId).
+       @param callback invoked by the handler to signal success or failure
+              of processing this message.
+    */
+    virtual void consume(const std::string& topic, const std::string& subscriberId, const Message& msg, OperationCallbackPtr& callback) = 0;
+    
+    virtual ~MessageHandlerCallback() {};
+  };
+  /** Shared-ownership handle for message handlers. */
+  typedef std::tr1::shared_ptr<MessageHandlerCallback> MessageHandlerCallbackPtr;
+}
+
+#endif

+ 67 - 0
src/contrib/hedwig/client/src/main/cpp/inc/hedwig/client.h

@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HEDWIG_CLIENT_H
+#define HEDWIG_CLIENT_H
+
+#include <string>
+#include <tr1/memory>
+
+#include <hedwig/subscribe.h>
+#include <hedwig/publish.h>
+#include <hedwig/exceptions.h>
+#include <boost/noncopyable.hpp>
+
+namespace Hedwig {
+
+  class ClientImpl;
+  typedef std::tr1::shared_ptr<ClientImpl> ClientImplPtr;
+
+  /**
+     Client configuration. Applications may subclass this to override the
+     defaults used when constructing a Client.
+  */
+  class Configuration {
+  public:
+    Configuration() {};
+
+    // Address of the default Hedwig server to connect to.
+    // NOTE(review): expected address format (e.g. "host:port") is not
+    // visible here — confirm against the implementation.
+    virtual const std::string& getDefaultServer() const;    
+  };
+
+  /** 
+      Main Hedwig client class. This class is used to acquire an instance of the Subscriber or Publisher.
+      Non-copyable; the underlying implementation is shared via clientimpl.
+  */
+  class Client : private boost::noncopyable {
+  public: 
+    /** Create a client configured by conf. */
+    Client(const Configuration& conf);
+
+    /**
+       Retrieve the subscriber object
+    */
+    Subscriber& getSubscriber();
+
+    /**
+       Retrieve the publisher object
+    */
+    Publisher& getPublisher();
+
+    ~Client();
+
+  private:
+    // Shared pointer to the implementation (pimpl idiom).
+    ClientImplPtr clientimpl;
+  };
+
+ 
+};
+
+#endif

+ 49 - 0
src/contrib/hedwig/client/src/main/cpp/inc/hedwig/exceptions.h

@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HEDWIG_EXCEPTION_H
+#define HEDWIG_EXCEPTION_H
+
+#include <exception>
+
+namespace Hedwig {
+
+  /** Root of the Hedwig client exception hierarchy. */
+  class ClientException : public std::exception { };
+
+  // General client/transport failures.
+  class ServiceDownException : public ClientException {};
+  class CannotConnectException : public ClientException {};
+  class UnexpectedResponseException : public ClientException {};
+  class OomException : public ClientException {};
+  class UnknownRequestException : public ClientException {};
+  class InvalidRedirectException : public ClientException {};
+
+  /** Errors raised by publish operations. */
+  class PublisherException : public ClientException { };
+  
+
+  /** Errors raised by subscribe/unsubscribe operations. */
+  class SubscriberException : public ClientException { };
+  class AlreadySubscribedException : public SubscriberException {};
+  class NotSubscribedException : public SubscriberException {};
+
+  /** Errors caused by bad configuration values. */
+  class ConfigurationException : public ClientException { };
+  class InvalidPortException : public ConfigurationException {};
+  // NOTE(review): grouped with the configuration errors but derives from
+  // ClientException, not ConfigurationException — confirm this is intended.
+  class HostResolutionException : public ClientException {};
+  
+  /** Operation attempted in a state that does not permit it. */
+  class InvalidStateException : public ClientException {};
+  class ShuttingDownException : public InvalidStateException {};
+};
+
+#endif

+ 59 - 0
src/contrib/hedwig/client/src/main/cpp/inc/hedwig/publish.h

@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HEDWIG_PUBLISH_H
+#define HEDWIG_PUBLISH_H
+
+#include <string>
+
+#include <hedwig/exceptions.h>
+#include <hedwig/callback.h>
+#include <hedwig/protocol.h>
+#include <boost/noncopyable.hpp>
+
+namespace Hedwig {
+
+  /**
+     Interface for publishing to a hedwig instance.
+     Non-copyable; obtain an instance via Client::getPublisher().
+  */
+  class Publisher : private boost::noncopyable {
+  public:
+    /**
+       Publish message for topic, and block until we receive an ACK response from the hedwig server.
+       
+       @param topic Topic to publish to.
+       @param message Data to publish for topic.
+    */
+    virtual void publish(const std::string& topic, const std::string& message) = 0;
+    
+    /** 
+	Asynchronously publish message for topic. 
+	
+	@code
+	OperationCallbackPtr callback(new MyCallback());
+	pub.asyncPublish(topic, message, callback);
+	@endcode
+
+	@param topic Topic to publish to.
+	@param message Data to publish to topic
+	@param callback Callback which will be used to report success or failure. Success is only reported once the server replies with an ACK response to the publication.
+    */
+    virtual void asyncPublish(const std::string& topic, const std::string& message, const OperationCallbackPtr& callback) = 0;
+  };
+};
+
+#endif

+ 50 - 0
src/contrib/hedwig/client/src/main/cpp/inc/hedwig/subscribe.h

@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HEDWIG_SUBSCRIBE_H
+#define HEDWIG_SUBSCRIBE_H
+
+#include <string>
+
+#include <hedwig/exceptions.h>
+#include <hedwig/callback.h>
+#include <hedwig/protocol.h>
+#include <boost/noncopyable.hpp>
+
+namespace Hedwig {
+
+  /**
+     Interface for subscribing to a hedwig instance. 
+     Non-copyable; obtain an instance via Client::getSubscriber().
+     All operations are keyed by (topic, subscriberId).
+  */
+  class Subscriber : private boost::noncopyable {
+  public:
+    /** Subscribe to topic, blocking until the server responds.
+        mode selects whether to create a new subscription or attach to
+        an existing one (see SubscribeRequest::CreateOrAttach). */
+    virtual void subscribe(const std::string& topic, const std::string& subscriberId, const SubscribeRequest::CreateOrAttach mode) = 0;
+    /** Asynchronous variant of subscribe; outcome reported via callback. */
+    virtual void asyncSubscribe(const std::string& topic, const std::string& subscriberId, const SubscribeRequest::CreateOrAttach mode, const OperationCallbackPtr& callback) = 0;
+    
+    /** Remove the subscription, blocking until the server responds. */
+    virtual void unsubscribe(const std::string& topic, const std::string& subscriberId) = 0;
+    /** Asynchronous variant of unsubscribe; outcome reported via callback. */
+    virtual void asyncUnsubscribe(const std::string& topic, const std::string& subscriberId, const OperationCallbackPtr& callback) = 0;  
+
+    // Presumably acknowledges consumption up to messageSeqId so the server
+    // can release the message — confirm against the server protocol.
+    virtual void consume(const std::string& topic, const std::string& subscriberId, const MessageSeqId& messageSeqId) = 0;
+
+    /** Begin delivering messages for the subscription to callback. */
+    virtual void startDelivery(const std::string& topic, const std::string& subscriberId, const MessageHandlerCallbackPtr& callback) = 0;
+    /** Stop delivering messages for the subscription. */
+    virtual void stopDelivery(const std::string& topic, const std::string& subscriberId) = 0;
+
+    /** Close the subscription locally without removing it on the server. */
+    virtual void closeSubscription(const std::string& topic, const std::string& subscriberId) = 0;
+  };
+};
+
+#endif

+ 14 - 0
src/contrib/hedwig/client/src/main/cpp/lib/Makefile.am

@@ -0,0 +1,14 @@
+# Path to the shared protobuf definition from which the C++ protocol
+# sources are generated.
+PROTODEF = ../../../../../protocol/src/main/protobuf/PubSubProtocol.proto
+
+lib_LTLIBRARIES = libhedwig01.la
+libhedwig01_la_SOURCES = protocol.cpp channel.cpp client.cpp util.cpp exceptions.cpp clientimpl.cpp publisherimpl.cpp subscriberimpl.cpp
+libhedwig01_la_CPPFLAGS = -I../inc $(DEPS_CFLAGS)
+libhedwig01_la_LIBADD = $(DEPS_LIBS) 
+libhedwig01_la_LDFLAGS = -no-undefined
+
+# Regenerate protocol.cpp and the public protocol.h header from the .proto
+# file; the sed rewrites the generated #include to the installed header name.
+protocol.cpp: $(PROTODEF)
+	protoc --cpp_out=. -I`dirname $(PROTODEF)` $(PROTODEF)
+	mv PubSubProtocol.pb.cc protocol.cpp
+	sed -i "s/PubSubProtocol.pb.h/hedwig\/protocol.h/" protocol.cpp
+	mv PubSubProtocol.pb.h ../inc/hedwig/protocol.h
+

+ 436 - 0
src/contrib/hedwig/client/src/main/cpp/lib/channel.cpp

@@ -0,0 +1,436 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <netinet/tcp.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <vector>
+#include <utility>
+#include <deque>
+#include "channel.h"
+#include "util.h"
+#include "clientimpl.h"
+
+#include <log4cpp/Category.hh>
+
+static log4cpp::Category &LOG = log4cpp::Category::getInstance("hedwig."__FILE__);
+
+const int MAX_MESSAGE_SIZE = 2*1024*1024; // 2 Meg
+
+using namespace Hedwig;
+
+namespace Hedwig {
+
+  // Base class wrapping a pthread. Subclasses implement entryPoint();
+  // run() starts the thread, kill() cancels and joins it.
+  class RunnableThread {  
+  public:
+    RunnableThread(DuplexChannel& channel, const ChannelHandlerPtr& handler);
+    virtual ~RunnableThread();
+    virtual void entryPoint() = 0;
+    
+    void run();
+    virtual void kill();
+    
+  protected:
+    DuplexChannel& channel;
+    ChannelHandlerPtr handler;
+    pthread_t thread;
+    pthread_attr_t attr;
+  };
+  
+  // (request, completion-callback) pair queued for the writer thread.
+  typedef std::pair<const PubSubRequest*, OperationCallbackPtr> RequestPair;
+
+  // Wait condition that fires when the request queue is non-empty or the
+  // condition has been killed (so a dying writer can wake up and exit).
+  class PacketsAvailableCondition : public WaitConditionBase {
+  public:
+    PacketsAvailableCondition(std::deque<RequestPair>& queue) : queue(queue), dead(false) {
+    }
+
+    // NOTE(review): the destructor calls wait(); presumably to let any
+    // in-flight waiter drain before destruction -- confirm intent.
+    ~PacketsAvailableCondition() { wait(); }
+
+    bool isTrue() { return dead || !queue.empty(); }
+    void kill() { dead = true; }
+
+  private:
+    std::deque<RequestPair>& queue;
+    bool dead;
+  };
+
+  // Drains queued requests and writes them to the socket (see channel.cpp).
+  class WriteThread : public RunnableThread {
+  public: 
+    WriteThread(DuplexChannel& channel, int socketfd, const ChannelHandlerPtr& handler);
+    
+    void entryPoint();
+    void writeRequest(const PubSubRequest& m, const OperationCallbackPtr& callback);
+    virtual void kill();
+
+    ~WriteThread();
+    
+  private:
+    int socketfd;
+
+    PacketsAvailableCondition packetsAvailableWaitCondition;
+    Mutex queueMutex;
+    std::deque<RequestPair> requestQueue;
+    bool dead;
+  };
+  
+  // Parses length-prefixed PubSubResponses off the socket (see channel.cpp).
+  class ReadThread : public RunnableThread {
+  public:
+    ReadThread(DuplexChannel& channel, int socketfd, const ChannelHandlerPtr& handler);
+    
+    void entryPoint();
+    
+    ~ReadThread();
+    
+  private:    
+    int socketfd;
+  };
+}
+
+/**
+   Construct a channel to 'addr'. No network activity happens here;
+   connect() must be called before the channel can carry requests.
+*/
+DuplexChannel::DuplexChannel(const HostAddress& addr, const Configuration& cfg, const ChannelHandlerPtr& handler)
+  : address(addr), handler(handler), writer(NULL), reader(NULL), socketfd(-1), state(UNINITIALISED), txnid2data_lock()
+{
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Creating DuplexChannel(" << this << ")";
+  }
+}
+
+/**
+   Open a TCP connection to the channel's host, disable Nagle
+   (TCP_NODELAY), start the reader and writer threads, then notify the
+   handler via channelConnected().
+
+   Throws CannotCreateSocketException, CannotConnectException or
+   ChannelSetupException on failure.
+   NOTE(review): on the failure paths the fd is closed but the 'socketfd'
+   member keeps the stale descriptor value; a later kill() would
+   shutdown()/close() an already-closed fd -- confirm this is intended.
+*/
+void DuplexChannel::connect() {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "DuplexChannel(" << this << ")::connect " << address.getAddressString();
+  }
+
+
+  socketfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
+  
+  if (-1 == socketfd) {
+    LOG.errorStream() << "DuplexChannel(" << this << ") Unable to create socket";
+
+    throw CannotCreateSocketException();
+  }
+
+  if (-1 == ::connect(socketfd, (const struct sockaddr *)&(address.socketAddress()), sizeof(struct sockaddr_in))) {
+    LOG.errorStream() << "DuplexChannel(" << this << ") Could not connect socket";
+    close(socketfd);
+
+    throw CannotConnectException();
+  }
+
+
+  int flag = 1;
+  int res = 0;
+  if ((res = setsockopt(socketfd, IPPROTO_TCP, TCP_NODELAY, &flag, sizeof(int))) != 0){
+    close(socketfd);
+    LOG.errorStream() << "Error setting nodelay on (" << this << ") " << res;
+    throw ChannelSetupException();
+  }
+
+  reader = new ReadThread(*this, socketfd, handler);
+  writer = new WriteThread(*this, socketfd, handler);
+
+  reader->run();
+  writer->run();
+
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "DuplexChannel(" << this << ")::connect successful. Notifying handler.";
+  }    
+  state = CONNECTED;
+  handler->channelConnected(this);
+}
+
+/** Host this channel is (or will be) connected to. */
+const HostAddress& DuplexChannel::getHostAddress() const {
+  return address;
+}
+
+/**
+   Tear the channel down: mark it DEAD, shut the socket down to unblock the
+   reader/writer threads, kill and delete both threads, close the fd, and
+   drop the handler reference. Only a CONNECTED channel performs the
+   teardown (guarded by destruction_lock), so repeated calls are harmless.
+*/
+void DuplexChannel::kill() {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Killing duplex channel (" << this << ")";
+  }    
+
+  destruction_lock.lock();
+  if (state == CONNECTED) {
+    state = DEAD;
+    
+    destruction_lock.unlock();
+    
+    if (socketfd != -1) {
+      shutdown(socketfd, SHUT_RDWR);
+    }
+    
+    if (writer) {
+      writer->kill();
+      delete writer;
+    }
+    if (reader) {
+      reader->kill();
+      delete reader;
+    }
+    if (socketfd != -1) {
+      close(socketfd);
+    }
+  } else {
+    destruction_lock.unlock();
+  }
+  handler = ChannelHandlerPtr(); // clear the handler in case it ever referenced the channel
+}
+
+/**
+   Fail every transaction still outstanding on this channel, then tear the
+   channel down via kill().
+*/
+DuplexChannel::~DuplexChannel() {
+  /** If we are going away, fail all transactions that haven't been completed */
+  failAllTransactions();
+  kill();
+
+
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Destroying DuplexChannel(" << this << ")";
+  }    
+}
+
+/**
+   Hand a request to the writer thread. If the channel is not CONNECTED the
+   callback is failed immediately and the request is NOT queued.
+*/
+void DuplexChannel::writeRequest(const PubSubRequest& m, const OperationCallbackPtr& callback) {
+  if (state != CONNECTED) {
+    LOG.errorStream() << "Tried to write transaction [" << m.txnid() << "] to a channel [" << this << "] which is " << (state == DEAD ? "DEAD" : "UNINITIALISED");
+    callback->operationFailed(UninitialisedChannelException());
+    // fix: previously execution fell through to writer->writeRequest(),
+    // dereferencing a writer that is NULL on an UNINITIALISED channel.
+    return;
+  }
+			      
+  writer->writeRequest(m, callback);
+}
+
+/**
+   Store the transaction data for a request so the response carrying the
+   same txnid can later be routed back to its callback.
+*/
+void DuplexChannel::storeTransaction(const PubSubDataPtr& data) {
+  txnid2data_lock.lock();
+  txnid2data[data->getTxnId()] = data;
+  txnid2data_lock.unlock();
+}
+
+/**
+   Give the transaction back to the caller, removing it from the map.
+   Returns a null PubSubDataPtr if there is no transaction with this id.
+   (fix: the previous operator[] lookup default-inserted an empty entry on
+   a miss before erasing it again; find() avoids the churn.)
+*/
+PubSubDataPtr DuplexChannel::retrieveTransaction(long txnid) {
+  txnid2data_lock.lock();
+  PubSubDataPtr data;
+  TransactionMap::iterator iter = txnid2data.find(txnid);
+  if (iter != txnid2data.end()) {
+    data = iter->second;
+    txnid2data.erase(iter);
+  }
+  txnid2data_lock.unlock();
+  return data;
+}
+
+/**
+   Fail every outstanding transaction with ChannelDiedException and clear
+   the transaction map. Called from the destructor before kill().
+*/
+void DuplexChannel::failAllTransactions() {
+  txnid2data_lock.lock();
+  for (TransactionMap::iterator iter = txnid2data.begin(); iter != txnid2data.end(); ++iter) {
+    PubSubDataPtr& data = (*iter).second;
+    data->getCallback()->operationFailed(ChannelDiedException());
+  }
+  txnid2data.clear();
+  txnid2data_lock.unlock();
+}
+
+/** 
+Entry point for pthread initialisation. 'obj' is the RunnableThread that
+owns the pthread; we simply forward into its virtual entryPoint().
+*/
+void* ThreadEntryPoint(void *obj) {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Thread entered (" << obj << ")";
+  }
+
+  RunnableThread* thread = (RunnableThread*) obj;
+  thread->entryPoint();
+  // fix: a void* function that flows off its end is undefined behaviour;
+  // the thread's exit value is never inspected, so NULL is fine.
+  return NULL;
+}
+ 
+RunnableThread::RunnableThread(DuplexChannel& channel, const ChannelHandlerPtr& handler) 
+  : channel(channel), handler(handler)
+{
+  //  pthread_cond_init(&deathlock, NULL);
+}
+
+/**
+   Start the pthread running ThreadEntryPoint(this). On failure the error
+   is reported through the handler as ChannelThreadException rather than
+   thrown.
+*/
+void RunnableThread::run() {
+  int ret;
+
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Running thread (" << this << ")";
+  }    
+  
+  pthread_attr_init(&attr);
+  ret = pthread_create(&thread, &attr, ThreadEntryPoint, this);
+  if (ret != 0) {
+    LOG.errorStream() << "Error creating thread (" << this << "). Notifying handler.";
+    handler->exceptionOccurred(&channel, ChannelThreadException());
+  }
+}
+
+/**
+   Cancel and join the pthread, then release the thread attributes.
+   NOTE(review): cancellation takes effect at the thread's next
+   cancellation point; the thread may be mid-operation when cancelled.
+*/
+void RunnableThread::kill() {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Killing thread (" << this << ")";
+  }    
+
+  pthread_cancel(thread);
+  pthread_join(thread, NULL);
+
+  pthread_attr_destroy(&attr);
+}
+
+RunnableThread::~RunnableThread() {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Deleting thread (" << this << ")";
+  }    
+}
+/**
+Writer thread: drains the request queue and writes each PubSubRequest to
+the socket, length-prefixed.
+*/
+WriteThread::WriteThread(DuplexChannel& channel, int socketfd, const ChannelHandlerPtr& handler) 
+  : RunnableThread(channel, handler), socketfd(socketfd), packetsAvailableWaitCondition(requestQueue), queueMutex(), dead(false) {
+  
+}
+
+// Queue a request for the writer thread and wake it. The caller retains
+// ownership of 'm' (only a pointer is queued); it must stay alive until
+// the callback fires.
+void WriteThread::writeRequest(const PubSubRequest& m, const OperationCallbackPtr& callback) {
+  #warning "you should validate these inputs"
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Adding message to queue " << &m;
+  }
+  packetsAvailableWaitCondition.lock();
+  queueMutex.lock();
+  requestQueue.push_back(RequestPair(&m, callback));
+  queueMutex.unlock();;
+
+  packetsAvailableWaitCondition.signalAndUnlock();
+}
+  
+/**
+   Writer loop: block until requests are queued (or kill() flags death),
+   then write each queued request to the socket as a 4-byte network-order
+   length prefix followed by the serialised message. On a write error the
+   pending callback is failed, the channel is killed and the handler is
+   told the channel disconnected.
+*/
+void WriteThread::entryPoint() {
+  while (true) {
+    packetsAvailableWaitCondition.wait();
+
+    if (dead) {
+      if (LOG.isDebugEnabled()) {
+	LOG.debugStream() << "returning from thread " << this;
+      }
+      return;
+    }
+    // NOTE(review): empty() is read without holding queueMutex; safe only
+    // if this thread is the sole consumer of the queue -- confirm.
+    while (!requestQueue.empty()) { 
+      queueMutex.lock();;
+      RequestPair currentRequest = requestQueue.front();;
+      requestQueue.pop_front();
+      queueMutex.unlock();
+      if (LOG.isDebugEnabled()) {
+	LOG.debugStream() << "Writing message to socket " << currentRequest.first;
+      }
+      
+      uint32_t size = htonl(currentRequest.first->ByteSize());
+      // NOTE(review): this write()'s return value is unchecked, and the
+      // errno test below assumes errno was zero before these calls.
+      write(socketfd, &size, sizeof(size));
+      
+      bool res = currentRequest.first->SerializeToFileDescriptor(socketfd);
+      
+      if (!res || errno != 0) {
+	LOG.errorStream() << "Error writing to socket (" << this << ") errno(" << errno << ") res(" << res << "). Disconnected.";
+	ChannelWriteException e;
+	
+	currentRequest.second->operationFailed(e);
+	channel.kill(); // make sure it's dead
+	handler->channelDisconnected(&channel, e);
+	
+	return;
+      } else {
+	currentRequest.second->operationComplete();
+      }
+    }  
+  }
+}
+
+// Flag the writer dead and wake it so entryPoint() can return, then
+// cancel/join the pthread via RunnableThread::kill().
+void WriteThread::kill() {
+  dead = true;
+  packetsAvailableWaitCondition.lock();
+  packetsAvailableWaitCondition.kill();
+  packetsAvailableWaitCondition.signalAndUnlock();
+  
+  RunnableThread::kill();
+}
+
+WriteThread::~WriteThread() {
+  // NOTE(review): unlocking a mutex this thread may not hold is undefined
+  // behaviour for pthread mutexes -- confirm this unlock is needed at all.
+  queueMutex.unlock();
+}
+
+/**
+Reader Thread: parses length-prefixed PubSubResponse messages off the
+socket and hands them to the channel handler.
+*/
+
+ReadThread::ReadThread(DuplexChannel& channel, int socketfd, const ChannelHandlerPtr& handler) 
+  : RunnableThread(channel, handler), socketfd(socketfd) {
+}
+  
+/**
+   Read loop: each wire packet is a 4-byte network-order length prefix
+   followed by a serialised PubSubResponse of that many bytes. The loop
+   runs until a read/alloc/parse error, then kills the channel and tells
+   the handler it disconnected.
+*/
+void ReadThread::entryPoint() {
+  PubSubResponse* response = new PubSubResponse();
+  uint8_t* dataarray = NULL; // input buffer, grown on demand and reused across packets
+  uint32_t currentbufsize = 0; // current capacity of dataarray
+  
+  while (true) {
+    uint32_t size = 0;
+    int bytesread = 0;
+
+    bytesread = read(socketfd, &size, sizeof(size));
+    size = ntohl(size);
+    if (LOG.isDebugEnabled()) {
+      LOG.debugStream() << "Start reading packet of size: " << size;
+    }
+    if (bytesread < 1 || size > MAX_MESSAGE_SIZE) {
+      LOG.errorStream() << "Zero read from socket or unreasonable size read, size(" << size << ") errno(" << errno << ") " << strerror(errno);
+      channel.kill(); // make sure it's dead
+      handler->channelDisconnected(&channel, ChannelReadException());
+      break;
+    }
+
+    if (currentbufsize < size) {
+      // fix: grow through a temporary so a failed realloc doesn't leak the
+      // old buffer, and remember the new capacity -- previously
+      // currentbufsize was never updated, forcing a realloc every packet.
+      uint8_t* newarray = (uint8_t*)realloc(dataarray, size);
+      if (newarray != NULL) {
+        dataarray = newarray;
+        currentbufsize = size;
+      } else {
+        free(dataarray);
+        dataarray = NULL;
+        currentbufsize = 0;
+      }
+    }
+    if (dataarray == NULL) {
+      LOG.errorStream() << "Error allocating input buffer of size " << size << " errno(" << errno << ") " << strerror(errno);
+      channel.kill(); // make sure it's dead
+      handler->channelDisconnected(&channel, ChannelReadException());
+      
+      break;
+    }
+    
+    memset(dataarray, 0, size);
+    // NOTE(review): a single read() may return fewer than 'size' bytes on
+    // a stream socket; a short read is treated as an error below rather
+    // than retried -- confirm this is acceptable in practice.
+    bytesread = read(socketfd, dataarray, size);
+    bool res = response->ParseFromArray(dataarray, size);
+
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debugStream() << "Packet read ";
+    }
+    
+    // fix: compare as signed so a read() error (-1) is caught -- previously
+    // the int was converted to unsigned and -1 compared greater than size.
+    // Parentheses make the intended && / || grouping explicit.
+    if ((!res && errno != 0) || bytesread < (int)size) {
+      LOG.errorStream() << "Error reading from socket (" << this << ") errno(" << errno << ") res(" << res << "). Disconnected.";
+      channel.kill(); // make sure it's dead
+      handler->channelDisconnected(&channel, ChannelReadException());
+
+      break;
+    } else {
+      handler->messageReceived(&channel, *response);
+    }
+  }
+  free(dataarray);
+  delete response;
+}
+
+// Nothing to release here; the read buffer is freed at the end of entryPoint().
+ReadThread::~ReadThread() {
+}

+ 105 - 0
src/contrib/hedwig/client/src/main/cpp/lib/channel.h

@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HEDWIG_CHANNEL_H
+#define HEDWIG_CHANNEL_H
+
+#include <hedwig/protocol.h>
+#include <hedwig/callback.h>
+#include <hedwig/client.h>
+#include "util.h"
+#include "data.h"
+#include <tr1/memory>
+#include <tr1/unordered_map>
+
+namespace Hedwig {
+  // Exception family for channel failures; all derive from
+  // ChannelException so callers can catch the whole group at once.
+  class ChannelException : public std::exception { };
+  class UninitialisedChannelException : public ChannelException {};
+
+  class ChannelConnectException : public ChannelException {};
+  class CannotCreateSocketException : public ChannelConnectException {};
+  class ChannelSetupException : public ChannelConnectException {};
+
+  class ChannelDiedException : public ChannelException {};
+
+  class ChannelWriteException : public ChannelException {};
+  class ChannelReadException : public ChannelException {};
+  class ChannelThreadException : public ChannelException {};
+
+
+  // Callback interface through which a DuplexChannel reports connection
+  // events and inbound messages.
+  // NOTE(review): DuplexChannel is used below before any declaration in
+  // this header; presumably a forward declaration comes from an included
+  // header (hedwig/callback.h) -- confirm.
+  class ChannelHandler {
+  public:
+    virtual void messageReceived(DuplexChannel* channel, const PubSubResponse& m) = 0;
+    virtual void channelConnected(DuplexChannel* channel) = 0;
+
+    virtual void channelDisconnected(DuplexChannel* channel, const std::exception& e) = 0;
+    virtual void exceptionOccurred(DuplexChannel* channel, const std::exception& e) = 0;
+
+    virtual ~ChannelHandler() {}
+  };
+  typedef std::tr1::shared_ptr<ChannelHandler> ChannelHandlerPtr;
+
+  class WriteThread;
+  class ReadThread;
+
+  // A bidirectional TCP channel to a hub, with dedicated reader/writer
+  // threads and a map of in-flight transactions keyed by txnid.
+  // NOTE(review): kill() is virtual but the destructor is not; deleting a
+  // subclass through a DuplexChannel* would be undefined -- confirm the
+  // class is never subclassed.
+  class DuplexChannel {
+  public:
+    DuplexChannel(const HostAddress& addr, const Configuration& cfg, const ChannelHandlerPtr& handler);
+
+    // Open the socket and start the I/O threads (see channel.cpp).
+    void connect();
+
+    void writeRequest(const PubSubRequest& m, const OperationCallbackPtr& callback);
+    
+    const HostAddress& getHostAddress() const;
+
+    // Track / retrieve / fail in-flight transactions by txnid.
+    void storeTransaction(const PubSubDataPtr& data);
+    PubSubDataPtr retrieveTransaction(long txnid);
+    void failAllTransactions();
+    
+    virtual void kill();
+
+    ~DuplexChannel();
+  private:
+    HostAddress address;
+    ChannelHandlerPtr handler;
+    // NOTE(review): members initialise in declaration order, which differs
+    // from the constructor's initialiser-list order (-Wreorder).
+    int socketfd;
+    WriteThread *writer;
+    ReadThread *reader;
+    
+    enum State { UNINITIALISED, CONNECTED, DEAD };
+    State state;
+    
+    typedef std::tr1::unordered_map<long, PubSubDataPtr> TransactionMap;
+    TransactionMap txnid2data;
+    Mutex txnid2data_lock;
+    Mutex destruction_lock;
+  };
+  
+  typedef std::tr1::shared_ptr<DuplexChannel> DuplexChannelPtr;
+};
+
+namespace std 
+{
+  namespace tr1 
+  {
+  // Hash specialisation so DuplexChannelPtr can key unordered containers;
+  // defined in util.cpp
+  template <> struct hash<Hedwig::DuplexChannelPtr> : public unary_function<Hedwig::DuplexChannelPtr, size_t> {
+    size_t operator()(const Hedwig::DuplexChannelPtr& channel) const;
+  };
+  }
+};
+#endif

+ 47 - 0
src/contrib/hedwig/client/src/main/cpp/lib/client.cpp

@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <hedwig/client.h>
+#include <memory>
+
+#include "clientimpl.h"
+
+using namespace Hedwig;
+
+// Fallback hub address used when the configuration names no server.
+const std::string DEFAULT_SERVER = "localhost:4080";
+const std::string& Configuration::getDefaultServer() const {
+  return DEFAULT_SERVER;
+}
+
+// Client is a thin facade over ClientImpl, which manages its own lifetime
+// through a self shared_ptr (see ClientImpl::Create / Destroy).
+Client::Client(const Configuration& conf) {
+  clientimpl = ClientImpl::Create( conf );
+}
+
+Subscriber& Client::getSubscriber() {
+  return clientimpl->getSubscriber();
+}
+
+Publisher& Client::getPublisher() {
+  return clientimpl->getPublisher();
+}
+
+Client::~Client() {
+  clientimpl->Destroy();
+}
+
+

+ 505 - 0
src/contrib/hedwig/client/src/main/cpp/lib/clientimpl.cpp

@@ -0,0 +1,505 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "clientimpl.h"
+#include "channel.h"
+#include "publisherimpl.h"
+#include "subscriberimpl.h"
+#include <log4cpp/Category.hh>
+
+static log4cpp::Category &LOG = log4cpp::Category::getInstance("hedwig."__FILE__);
+
+using namespace Hedwig;
+
+
+// Record success and wake the thread blocked on this wait condition.
+void SyncOperationCallback::operationComplete() {
+  lock();
+  response = SUCCESS;
+  signalAndUnlock();
+}
+
+// Map the failure's dynamic type onto a response code and wake the waiter.
+// typeid comparison matches the exact type only; any derived or unlisted
+// exception type falls into UNKNOWN.
+void SyncOperationCallback::operationFailed(const std::exception& exception) {
+  lock();
+  if (typeid(exception) == typeid(ChannelConnectException)) {
+    response = NOCONNECT;
+  } else if (typeid(exception) == typeid(ServiceDownException)) {
+    response = SERVICEDOWN;
+  } else if (typeid(exception) == typeid(AlreadySubscribedException)) {
+    response = ALREADY_SUBSCRIBED;
+  } else if (typeid(exception) == typeid(NotSubscribedException)) {
+    response = NOT_SUBSCRIBED;
+  } else {
+    response = UNKNOWN;
+  }
+  signalAndUnlock();
+}
+    
+// Wait condition: true once an outcome (success or failure) is recorded.
+bool SyncOperationCallback::isTrue() {
+  return response != PENDING;
+}
+
+/**
+   Rethrow the failure recorded by operationFailed() as the matching typed
+   exception; returns normally only on SUCCESS.
+   (fix: removed the unreachable 'break' statements that followed each
+   'throw'.)
+*/
+void SyncOperationCallback::throwExceptionIfNeeded() {
+  switch (response) {
+  case SUCCESS:
+    break;
+  case NOCONNECT:
+    throw CannotConnectException();
+  case SERVICEDOWN:
+    throw ServiceDownException();
+  case ALREADY_SUBSCRIBED:
+    throw AlreadySubscribedException();
+  case NOT_SUBSCRIBED:
+    throw NotSubscribedException();
+  default:
+    throw ClientException();
+  }
+}
+
+HedwigClientChannelHandler::HedwigClientChannelHandler(ClientImplPtr& client) 
+  : client(client){
+}
+
+// Route an inbound response to the publisher/subscriber impl that owns the
+// originating transaction, or into the redirect logic for
+// NOT_RESPONSIBLE_FOR_TOPIC responses.
+void HedwigClientChannelHandler::messageReceived(DuplexChannel* channel, const PubSubResponse& m) {
+  LOG.debugStream() << "Message received";
+  if (m.has_message()) {
+    LOG.errorStream() << "Subscription response, ignore for now";
+    return;
+  }
+  
+  long txnid = m.txnid(); // NOTE(review): unused local
+  PubSubDataPtr data = channel->retrieveTransaction(m.txnid()); 
+  /* retrieveTransaction removed the entry from the channel's map; this
+     shared_ptr is now the only routing handle for the transaction */
+
+  // NOTE(review): comparing a tr1::shared_ptr against NULL relies on an
+  // implementation-provided conversion -- confirm it compiles portably.
+  if (data == NULL) {
+    LOG.errorStream() << "Transaction " << m.txnid() << " doesn't exist in channel " << channel;
+    return;
+  }
+
+  // The server told us another hub owns this topic; retry against it.
+  if (m.statuscode() == NOT_RESPONSIBLE_FOR_TOPIC) {
+    client->redirectRequest(channel, data, m);
+    return;
+  }
+
+  switch (data->getType()) {
+  case PUBLISH:
+    client->getPublisherImpl().messageHandler(m, data);
+    break;
+  case SUBSCRIBE:
+  case UNSUBSCRIBE:
+    client->getSubscriberImpl().messageHandler(m, data);
+    break;
+  default:
+    LOG.errorStream() << "Unimplemented request type " << data->getType();
+    break;
+  }
+}
+
+
+void HedwigClientChannelHandler::channelConnected(DuplexChannel* channel) {
+  // do nothing 
+}
+
+// A channel died: tell the client so it stops handing the channel out.
+void HedwigClientChannelHandler::channelDisconnected(DuplexChannel* channel, const std::exception& e) {
+  LOG.errorStream() << "Channel disconnected";
+
+  client->channelDied(channel);
+}
+
+// Non-fatal channel error; logged only.
+void HedwigClientChannelHandler::exceptionOccurred(DuplexChannel* channel, const std::exception& e) {
+  LOG.errorStream() << "Exception occurred" << e.what();
+}
+
+// Mutex-protected, monotonically increasing transaction-id source.
+ClientTxnCounter::ClientTxnCounter() : counter(0) 
+{
+}
+
+ClientTxnCounter::~ClientTxnCounter() {
+}
+
+/**
+Increment the transaction counter and return the new value.
+
+@returns the next transaction id
+*/
+long ClientTxnCounter::next() {  // would be nice to remove lock from here, look more into it
+  mutex.lock();
+  long next= ++counter; 
+  mutex.unlock();
+  return next;
+}
+
+
+
+// Build the PubSubData describing a PUBLISH of 'body' to 'topic'.
+PubSubDataPtr PubSubData::forPublishRequest(long txnid, const std::string& topic, const std::string& body, const OperationCallbackPtr& callback) {
+  PubSubDataPtr ptr(new PubSubData());
+  ptr->type = PUBLISH;
+  ptr->txnid = txnid;
+  ptr->topic = topic;
+  ptr->body = body;
+  ptr->callback = callback;
+  return ptr;
+}
+
+// Build the PubSubData describing a SUBSCRIBE; 'mode' selects create vs attach.
+PubSubDataPtr PubSubData::forSubscribeRequest(long txnid, const std::string& subscriberid, const std::string& topic, const OperationCallbackPtr& callback, SubscribeRequest::CreateOrAttach mode) {
+  PubSubDataPtr ptr(new PubSubData());
+  ptr->type = SUBSCRIBE;
+  ptr->txnid = txnid;
+  ptr->subscriberid = subscriberid;
+  ptr->topic = topic;
+  ptr->callback = callback;
+  ptr->mode = mode;
+  return ptr;  
+}
+
+// Build the PubSubData describing an UNSUBSCRIBE.
+PubSubDataPtr PubSubData::forUnsubscribeRequest(long txnid, const std::string& subscriberid, const std::string& topic, const OperationCallbackPtr& callback) {
+  PubSubDataPtr ptr(new PubSubData());
+  ptr->type = UNSUBSCRIBE;
+  ptr->txnid = txnid;
+  ptr->subscriberid = subscriberid;
+  ptr->topic = topic;
+  ptr->callback = callback;
+  return ptr;  
+}
+
+// Build the PubSubData describing a CONSUME of messages up to 'msgid'.
+// Note: no callback is attached for consume requests.
+PubSubDataPtr PubSubData::forConsumeRequest(long txnid, const std::string& subscriberid, const std::string& topic, const MessageSeqId msgid) {
+  PubSubDataPtr ptr(new PubSubData());
+  ptr->type = CONSUME;
+  ptr->txnid = txnid;
+  ptr->subscriberid = subscriberid;
+  ptr->topic = topic;
+  ptr->msgid = msgid;
+  return ptr;  
+}
+
+// fix: shouldClaim was left uninitialised; getRequest() reads it via
+// set_shouldclaim(), so default it to false here.
+PubSubData::PubSubData() : request(NULL), shouldClaim(false) {  
+}
+
+PubSubData::~PubSubData() {
+  if (request != NULL) {
+    delete request;
+  }
+}
+
+// Simple accessors for the fields set by the forXxxRequest factories.
+OperationType PubSubData::getType() const {
+  return type;
+}
+
+long PubSubData::getTxnId() const {
+  return txnid;
+}
+
+const std::string& PubSubData::getTopic() const {
+  return topic;
+}
+
+const std::string& PubSubData::getBody() const {
+  return body;
+}
+
+/**
+   Build (or rebuild) the protobuf request for this operation. Any
+   previously built request is deleted first, so the returned reference is
+   invalidated by the next call to getRequest() and by the destructor.
+   Throws UnknownRequestException for an unrecognised operation type.
+*/
+const PubSubRequest& PubSubData::getRequest() {
+  if (request != NULL) {
+    delete request;
+    request = NULL;
+  }
+  request = new Hedwig::PubSubRequest();
+  request->set_protocolversion(Hedwig::VERSION_ONE);
+  request->set_type(type);
+  request->set_txnid(txnid);
+  // NOTE(review): shouldClaim is only assigned via setShouldClaim(); if
+  // that was never called this reads whatever the constructor left it as
+  // -- confirm it is initialised.
+  request->set_shouldclaim(shouldClaim);
+  request->set_topic(topic);
+    
+  if (type == PUBLISH) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debugStream() << "Creating publish request";
+    }
+    Hedwig::PublishRequest* pubreq = request->mutable_publishrequest();
+    Hedwig::Message* msg = pubreq->mutable_msg();
+    msg->set_body(body);
+  } else if (type == SUBSCRIBE) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debugStream() << "Creating subscribe request";
+    }
+
+    Hedwig::SubscribeRequest* subreq = request->mutable_subscriberequest();
+    subreq->set_subscriberid(subscriberid);
+    subreq->set_createorattach(mode);
+  } else if (type == CONSUME) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debugStream() << "Creating consume request";
+    }
+
+    Hedwig::ConsumeRequest* conreq = request->mutable_consumerequest();
+    conreq->set_subscriberid(subscriberid);
+    conreq->mutable_msgid()->CopyFrom(msgid);
+  } else if (type == UNSUBSCRIBE) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debugStream() << "Creating unsubscribe request";
+    }
+    
+    Hedwig::UnsubscribeRequest* unsubreq = request->mutable_unsubscriberequest();
+    unsubreq->set_subscriberid(subscriberid);    
+  } else {
+    LOG.errorStream() << "Tried to create a request message for the wrong type [" << type << "]";
+    throw UnknownRequestException();
+  }
+
+
+
+  return *request;
+}
+
+/**
+   Record whether the request should claim ownership of the topic when it
+   is reissued after a redirect (see ClientImpl::redirectRequest).
+*/
+void PubSubData::setShouldClaim(bool shouldClaim) {
+  // fix: the parameter shadowed the member, so the original
+  // 'shouldClaim = shouldClaim' was a self-assignment no-op.
+  this->shouldClaim = shouldClaim;
+}
+
+// Track the hubs already tried for this request, so redirect loops can be
+// detected (see ClientImpl::redirectRequest).
+void PubSubData::addTriedServer(HostAddress& h) {
+  triedservers.insert(h);
+}
+
+bool PubSubData::hasTriedServer(HostAddress& h) {
+  return triedservers.count(h) > 0;
+}
+
+void PubSubData::clearTriedServers() {
+  triedservers.clear();
+}
+
+OperationCallbackPtr& PubSubData::getCallback() {
+  return callback;
+}
+
+void PubSubData::setCallback(const OperationCallbackPtr& callback) {
+  this->callback = callback;
+}
+
+const std::string& PubSubData::getSubscriberId() const {
+  return subscriberid;
+}
+
+// Create-or-attach mode for subscribe requests.
+SubscribeRequest::CreateOrAttach PubSubData::getMode() const {
+  return mode;
+}
+
+/**
+   Create a ClientImpl on the heap and return its self shared_ptr. The
+   object keeps itself alive through 'selfptr' until Destroy() resets it.
+*/
+ClientImplPtr& ClientImpl::Create(const Configuration& conf) {
+  ClientImpl* impl = new ClientImpl(conf);
+    if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Creating Clientimpl " << impl;
+  }
+
+  return impl->selfptr;
+}
+
+/**
+   Shut the client down: kill every open channel, delete the subscriber and
+   publisher, and clear the self pointer (which may delete this object once
+   no other holders remain).
+*/
+void ClientImpl::Destroy() {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "destroying Clientimpl " << this;
+  }
+  allchannels_lock.lock();
+
+  shuttingDownFlag = true;
+  for (ChannelMap::iterator iter = allchannels.begin(); iter != allchannels.end(); ++iter ) {
+    (*iter).second->kill();
+  }  
+  allchannels.clear();
+  allchannels_lock.unlock();
+  /* destruction of the maps will clean up any items they hold */
+  
+  if (subscriber != NULL) {
+    delete subscriber;
+    subscriber = NULL;
+  }
+  if (publisher != NULL) {
+    delete publisher;
+    publisher = NULL;
+  }
+
+  selfptr = ClientImplPtr(); // clear the self pointer
+}
+
+// Subscriber/publisher are created lazily; see getSubscriberImpl/getPublisherImpl.
+ClientImpl::ClientImpl(const Configuration& conf) 
+  : selfptr(this), conf(conf), subscriber(NULL), publisher(NULL), counterobj(), shuttingDownFlag(false)
+{
+}
+
+// Public-interface views of the lazily created implementation objects.
+Subscriber& ClientImpl::getSubscriber() {
+  return getSubscriberImpl();
+}
+
+Publisher& ClientImpl::getPublisher() {
+  return getPublisherImpl();
+}
+    
+/**
+   Lazily create the SubscriberImpl. Always takes the creation lock:
+   the original double-checked locking read 'subscriber' outside the lock
+   without any memory barrier, which is a data race.
+*/
+SubscriberImpl& ClientImpl::getSubscriberImpl() {
+  subscribercreate_lock.lock();
+  if (subscriber == NULL) {
+    subscriber = new SubscriberImpl(selfptr);
+  }
+  subscribercreate_lock.unlock();
+  return *subscriber;
+}
+
+/**
+   Lazily create the PublisherImpl (same locking rationale as
+   getSubscriberImpl).
+*/
+PublisherImpl& ClientImpl::getPublisherImpl() {
+  publishercreate_lock.lock();
+  if (publisher == NULL) {
+    publisher = new PublisherImpl(selfptr);
+  }
+  publishercreate_lock.unlock();
+  return *publisher;
+}
+
+// Transaction-id source shared by publisher and subscriber.
+ClientTxnCounter& ClientImpl::counter() {
+  return counterobj;
+}
+
+/**
+   Handle a NOT_RESPONSIBLE_FOR_TOPIC response: remember the hub we just
+   tried, fail the operation if the redirect target was already tried
+   (redirect loop), otherwise record the new owner and re-issue the request
+   against it. Subscribes get a dedicated channel/handler; publish and
+   unsubscribe reuse the per-host channel.
+*/
+void ClientImpl::redirectRequest(DuplexChannel* channel, PubSubDataPtr& data, const PubSubResponse& response) {
+  HostAddress oldhost = channel->getHostAddress();
+  data->addTriedServer(oldhost);
+  
+  HostAddress h = HostAddress::fromString(response.statusmsg());
+  if (data->hasTriedServer(h)) {
+    LOG.errorStream() << "We've been told to try request [" << data->getTxnId() << "] with [" << h.getAddressString()<< "] by " << channel->getHostAddress().getAddressString() << " but we've already tried that. Failing operation";
+    data->getCallback()->operationFailed(InvalidRedirectException());
+    return;
+  }
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "We've been told  [" << data->getTopic() << "] is on [" << h.getAddressString() << "] by [" << oldhost.getAddressString() << "]. Redirecting request " << data->getTxnId();
+  }
+  data->setShouldClaim(true);
+
+  setHostForTopic(data->getTopic(), h);
+  DuplexChannelPtr newchannel;
+  try {
+    if (data->getType() == SUBSCRIBE) {
+      SubscriberClientChannelHandlerPtr handler(new SubscriberClientChannelHandler(selfptr, this->getSubscriberImpl(), data));
+      ChannelHandlerPtr basehandler = handler;
+      
+      newchannel = createChannelForTopic(data->getTopic(), basehandler);
+      handler->setChannel(newchannel);
+      
+      getSubscriberImpl().doSubscribe(newchannel, data, handler);
+    } else {
+      newchannel = getChannelForTopic(data->getTopic());
+      
+      if (data->getType() == PUBLISH) {
+	getPublisherImpl().doPublish(newchannel, data);
+      } else {
+	getSubscriberImpl().doUnsubscribe(newchannel, data);
+      }
+    }
+  } catch (ShuttingDownException& e) {
+    return; // no point in redirecting if we're shutting down
+  }
+}
+
+ClientImpl::~ClientImpl() {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "deleting Clientimpl " << this;
+  }
+}
+
+/**
+   Create and connect a fresh channel to the hub currently recorded for
+   'topic' (or to the configured default server), registering it in
+   allchannels. Throws ShuttingDownException if the client is going away.
+   NOTE(review): topic2host[topic] is read here without topic2host_lock,
+   and operator[] inserts a default entry on a miss -- confirm
+   thread-safety.
+*/
+DuplexChannelPtr ClientImpl::createChannelForTopic(const std::string& topic, ChannelHandlerPtr& handler) {
+  // get the host address
+  // create a channel to the host
+  HostAddress addr = topic2host[topic];
+  if (addr.isNullHost()) {
+    addr = HostAddress::fromString(conf.getDefaultServer());
+  }
+
+  DuplexChannelPtr channel(new DuplexChannel(addr, conf, handler));
+  channel->connect();
+
+  allchannels_lock.lock();
+  if (shuttingDownFlag) {
+    channel->kill();
+    allchannels_lock.unlock();
+    throw ShuttingDownException();
+  }
+  allchannels[channel.get()] = channel;
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "(create) All channels size: " << allchannels.size();
+  }
+  allchannels_lock.unlock();
+
+  return channel;
+}
+
+/**
+   Return the shared channel for the hub owning 'topic', creating one if
+   none exists yet.
+   NOTE(review): topic2host/host2channel are read via operator[] here
+   without their locks (and operator[] inserts on a miss) -- confirm
+   thread-safety.
+*/
+DuplexChannelPtr ClientImpl::getChannelForTopic(const std::string& topic) {
+  HostAddress addr = topic2host[topic];
+  DuplexChannelPtr channel = host2channel[addr];
+
+  if (channel.get() == 0 || addr.isNullHost()) {
+    ChannelHandlerPtr handler(new HedwigClientChannelHandler(selfptr));
+    channel = createChannelForTopic(topic, handler);
+    host2channel_lock.lock();
+    host2channel[addr] = channel;
+    host2channel_lock.unlock();
+    return channel;
+  }
+
+  return channel;
+}
+
+// Record which hub currently owns a topic.
+void ClientImpl::setHostForTopic(const std::string& topic, const HostAddress& host) {
+  topic2host_lock.lock();
+  topic2host[topic] = host;
+  topic2host_lock.unlock();
+}
+
+bool ClientImpl::shuttingDown() const {
+  return shuttingDownFlag;
+}
+
+/**
+   A channel has just died. Remove it so we never give it to any other publisher or subscriber.
+   
+   This does not delete the channel. Some publishers or subscribers will still hold it and will be errored
+   when they try to do anything with it. 
+*/
+void ClientImpl::channelDied(DuplexChannel* channel) {
+  if (shuttingDownFlag) {
+    return;
+  }
+
+  host2topics_lock.lock();
+  host2channel_lock.lock();
+  topic2host_lock.lock();
+  allchannels_lock.lock();
+  // get host
+  HostAddress addr = channel->getHostAddress();
+  
+  // fix: drop only the topic mappings owned by this host. The original
+  // loop ran from find(addr) to end(), which also erased topic2host
+  // entries belonging to unrelated hosts stored after addr in the map.
+  std::pair<Host2TopicsMap::iterator, Host2TopicsMap::iterator> range = host2topics.equal_range(addr);
+  for (Host2TopicsMap::iterator iter = range.first; iter != range.second; ++iter) {
+    topic2host.erase((*iter).second);
+  }
+  host2topics.erase(addr);
+  host2channel.erase(addr);
+
+  allchannels.erase(channel); // channel should be deleted here
+
+  allchannels_lock.unlock();
+  host2topics_lock.unlock();
+  host2channel_lock.unlock();
+  topic2host_lock.unlock();
+}

+ 131 - 0
src/contrib/hedwig/client/src/main/cpp/lib/clientimpl.h

@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HEDWIG_CLIENT_IMPL_H
+#define HEDWIG_CLIENT_IMPL_H
+
+#include <hedwig/client.h>
+#include <hedwig/protocol.h>
+
+#include <tr1/unordered_map>
+#include <list>
+#include "util.h"
+#include <pthread.h>
+#include "channel.h"
+#include "data.h"
+
+namespace Hedwig {
+  /**
+     Adapter that lets a synchronous caller block on an asynchronous
+     operation: wait() until one of the callbacks fires, then call
+     throwExceptionIfNeeded() to rethrow any failure as a typed exception.
+  */
+  class SyncOperationCallback : public OperationCallback, public WaitConditionBase {
+  public:
+    SyncOperationCallback() : response(PENDING) {}
+    virtual void operationComplete();
+    virtual void operationFailed(const std::exception& exception);
+    
+    virtual bool isTrue();
+
+    // Rethrows the recorded failure as the matching exception; no-op on SUCCESS.
+    void throwExceptionIfNeeded();
+    
+  private:
+    // Outcome recorded by the callbacks above; PENDING until one fires.
+    enum { 
+      PENDING, 
+      SUCCESS,
+      NOCONNECT,
+      SERVICEDOWN,
+      NOT_SUBSCRIBED,
+      ALREADY_SUBSCRIBED,
+      UNKNOWN
+    } response;
+  };
+
+  /**
+     Base channel handler for client connections: dispatches server responses
+     and reacts to connect/disconnect/error events on a DuplexChannel.
+  */
+  class HedwigClientChannelHandler : public ChannelHandler {
+  public:
+    HedwigClientChannelHandler(ClientImplPtr& client);
+    
+    virtual void messageReceived(DuplexChannel* channel, const PubSubResponse& m);
+    virtual void channelConnected(DuplexChannel* channel);
+    virtual void channelDisconnected(DuplexChannel* channel, const std::exception& e);
+    virtual void exceptionOccurred(DuplexChannel* channel, const std::exception& e);
+    
+  protected:
+    ClientImplPtr client;
+  };
+  
+  class PublisherImpl;
+  class SubscriberImpl;
+  
+  /**
+     Implementation of the hedwig client. This class takes care of globals such as the topic->host map and the transaction id counter.
+  */
+  class ClientImpl {
+  public:
+    static ClientImplPtr& Create(const Configuration& conf);
+    void Destroy();
+
+    Subscriber& getSubscriber();
+    Publisher& getPublisher();
+
+    // Shared transaction-id counter used by both publisher and subscriber.
+    ClientTxnCounter& counter();
+
+    void redirectRequest(DuplexChannel* channel, PubSubDataPtr& data, const PubSubResponse& response);
+
+    const HostAddress& getHostForTopic(const std::string& topic);
+
+    DuplexChannelPtr createChannelForTopic(const std::string& topic, ChannelHandlerPtr& handler);
+    DuplexChannelPtr getChannelForTopic(const std::string& topic);
+    
+    void setHostForTopic(const std::string& topic, const HostAddress& host);
+
+    void setChannelForHost(const HostAddress& address, DuplexChannel* channel);
+    // Drop all bookkeeping for a dead channel (see clientimpl.cpp).
+    void channelDied(DuplexChannel* channel);
+    bool shuttingDown() const;
+    
+    SubscriberImpl& getSubscriberImpl();
+    PublisherImpl& getPublisherImpl();
+
+    ~ClientImpl();
+  private:
+    ClientImpl(const Configuration& conf);
+
+    // Self-reference handed to channel handlers so they can call back in.
+    ClientImplPtr selfptr;
+
+    const Configuration& conf;
+    PublisherImpl* publisher;
+    SubscriberImpl* subscriber;
+    ClientTxnCounter counterobj;
+
+
+    // host -> topics served by that host (multimap: one host, many topics).
+    typedef std::tr1::unordered_multimap<HostAddress, std::string> Host2TopicsMap;
+    Host2TopicsMap host2topics;
+    Mutex host2topics_lock;
+
+    // host -> live channel to that host.
+    std::tr1::unordered_map<HostAddress, DuplexChannelPtr> host2channel;
+    Mutex host2channel_lock;
+    // topic -> host currently believed to own it.
+    std::tr1::unordered_map<std::string, HostAddress> topic2host;
+    Mutex topic2host_lock;
+
+    Mutex publishercreate_lock;
+    Mutex subscribercreate_lock;
+
+    // Owning registry of every channel the client has created; the shared_ptr
+    // value keeps the channel alive while registered.
+    typedef std::tr1::unordered_map<DuplexChannel*, DuplexChannelPtr> ChannelMap;
+    ChannelMap allchannels;
+    Mutex allchannels_lock;
+
+    bool shuttingDownFlag;
+  };
+};
+#endif

+ 95 - 0
src/contrib/hedwig/client/src/main/cpp/lib/data.h

@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DATA_H
+#define DATA_H
+
+#include <hedwig/protocol.h>
+#include <hedwig/callback.h>
+
+#include <pthread.h>
+#include <tr1/unordered_set>
+#include "util.h"
+
+namespace Hedwig {
+  /**
+     Simple counter for transaction ids from the client
+  */
+  class ClientTxnCounter {
+  public:
+    ClientTxnCounter();
+    ~ClientTxnCounter();
+    // Returns the next transaction id; safe to call from multiple threads
+    // (guarded by the internal mutex).
+    long next();
+    
+  private:
+    long counter;
+    Mutex mutex;
+  };
+
+  class PubSubData;
+  typedef std::tr1::shared_ptr<PubSubData> PubSubDataPtr;
+
+  /**
+     Data structure to hold information about requests and build request messages.
+     Used to store requests which may need to be resent to another server. 
+   */
+  class PubSubData {
+  public:
+    // Factory methods — one per request type; each builds an instance
+    // holding everything needed to (re)construct the wire request.
+    // to be used for publish
+    static PubSubDataPtr forPublishRequest(long txnid, const std::string& topic, const std::string& body, const OperationCallbackPtr& callback);
+    static PubSubDataPtr forSubscribeRequest(long txnid, const std::string& subscriberid, const std::string& topic, const OperationCallbackPtr& callback, SubscribeRequest::CreateOrAttach mode);
+    static PubSubDataPtr forUnsubscribeRequest(long txnid, const std::string& subscriberid, const std::string& topic, const OperationCallbackPtr& callback);
+    static PubSubDataPtr forConsumeRequest(long txnid, const std::string& subscriberid, const std::string& topic, const MessageSeqId msgid);
+
+    ~PubSubData();
+
+    OperationType getType() const;
+    long getTxnId() const;
+    const std::string& getSubscriberId() const;
+    const std::string& getTopic() const;
+    const std::string& getBody() const;
+
+    void setShouldClaim(bool shouldClaim);
+
+    // Builds (lazily) and returns the protobuf request for this operation.
+    const PubSubRequest& getRequest();
+    void setCallback(const OperationCallbackPtr& callback);
+    OperationCallbackPtr& getCallback();
+    SubscribeRequest::CreateOrAttach getMode() const;
+
+    // Track servers already tried for this request, so redirect loops can
+    // be detected when the request is resent.
+    void addTriedServer(HostAddress& h);
+    bool hasTriedServer(HostAddress& h);
+    void clearTriedServers();
+  private:
+    PubSubData();
+    PubSubRequest* request;
+    
+    OperationType type;
+    long txnid;
+    std::string subscriberid;
+    std::string topic;
+    std::string body;
+    bool shouldClaim;
+    OperationCallbackPtr callback;
+    SubscribeRequest::CreateOrAttach mode;
+    MessageSeqId msgid;
+    std::tr1::unordered_set<HostAddress> triedservers;
+  };
+  
+
+};
+#endif

+ 27 - 0
src/contrib/hedwig/client/src/main/cpp/lib/exceptions.cpp

@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <hedwig/exceptions.h>
+#include <stdlib.h>
+#include <string.h>
+
+using namespace Hedwig;
+
+
+
+  

+ 87 - 0
src/contrib/hedwig/client/src/main/cpp/lib/publisherimpl.cpp

@@ -0,0 +1,87 @@
+ /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "publisherimpl.h"
+#include "channel.h"
+
+#include <log4cpp/Category.hh>
+
+static log4cpp::Category &LOG = log4cpp::Category::getInstance("hedwig."__FILE__);
+
+using namespace Hedwig;
+
+PublishWriteCallback::PublishWriteCallback(ClientImplPtr& client, const PubSubDataPtr& data) : client(client), data(data) {}
+
+// Called once the publish request has been written to the channel; the
+// server's response is handled separately via PublisherImpl::messageHandler.
+void PublishWriteCallback::operationComplete() {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Successfully wrote transaction: " << data->getTxnId();
+  }
+}
+
+// Called when the write itself fails (e.g. broken channel).
+void PublishWriteCallback::operationFailed(const std::exception& exception) {
+  LOG.errorStream() << "Error writing to publisher " << exception.what();
+  
+  //remove txn from channel pending list
+  #warning "Actually do something here"
+}
+
+PublisherImpl::PublisherImpl(ClientImplPtr& client) 
+  : client(client) {
+}
+
+// Synchronous publish: wraps asyncPublish with a SyncOperationCallback,
+// blocks until the operation completes, then rethrows any failure.
+void PublisherImpl::publish(const std::string& topic, const std::string& message) {
+  SyncOperationCallback* cb = new SyncOperationCallback();
+  OperationCallbackPtr callback(cb);
+  asyncPublish(topic, message, callback);
+  cb->wait();
+  
+  cb->throwExceptionIfNeeded();  
+}
+
+// Asynchronous publish: builds the request under a fresh transaction id and
+// writes it on the channel currently associated with the topic.
+void PublisherImpl::asyncPublish(const std::string& topic, const std::string& message, const OperationCallbackPtr& callback) {
+  DuplexChannelPtr channel = client->getChannelForTopic(topic);
+
+  // use release after callback to release the channel after the callback is called
+  PubSubDataPtr data = PubSubData::forPublishRequest(client->counter().next(), topic, message, callback);
+  
+  doPublish(channel, data);
+}
+
+// Common publish path: register the transaction on the channel (so the
+// response can later be matched back to it) and write the request.
+void PublisherImpl::doPublish(const DuplexChannelPtr& channel, const PubSubDataPtr& data) {
+  channel->storeTransaction(data);
+  
+  OperationCallbackPtr writecb(new PublishWriteCallback(client, data));
+  // Guarded like every other debug log in this file, so stream formatting
+  // is not paid for when debug logging is disabled.
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "dopublish";
+  }
+  channel->writeRequest(data->getRequest(), writecb);
+}
+
+// Dispatches the server's response for a publish transaction to that
+// transaction's callback.
+void PublisherImpl::messageHandler(const PubSubResponse& m, const PubSubDataPtr& txn) {
+  switch (m.statuscode()) {
+  case SUCCESS:
+    txn->getCallback()->operationComplete();
+    break;
+  case SERVICE_DOWN:
+    // (fixed typo: "responsed" -> "responded")
+    LOG.errorStream() << "Server responded with SERVICE_DOWN for " << txn->getTxnId();
+    txn->getCallback()->operationFailed(ServiceDownException());
+    break;
+  default:
+    LOG.errorStream() << "Unexpected response " << m.statuscode() << " for " << txn->getTxnId();
+    txn->getCallback()->operationFailed(UnexpectedResponseException());
+    break;
+  }
+}

+ 54 - 0
src/contrib/hedwig/client/src/main/cpp/lib/publisherimpl.h

@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef PUBLISHER_IMPL_H
+#define PUBLISHER_IMPL_H
+
+#include <hedwig/publish.h>
+#include <hedwig/callback.h>
+#include "clientimpl.h"
+
+namespace Hedwig {
+  // Write-completion callback for publish requests: logs success/failure of
+  // the channel write (the server's response is handled by messageHandler).
+  class PublishWriteCallback : public OperationCallback {
+  public:
+    PublishWriteCallback(ClientImplPtr& client, const PubSubDataPtr& data);
+
+    void operationComplete();
+    void operationFailed(const std::exception& exception);
+  private:
+    ClientImplPtr client;
+    PubSubDataPtr data;
+  };
+
+  // Publisher implementation: resolves the topic to a channel and writes
+  // publish requests, both synchronously and asynchronously.
+  class PublisherImpl : public Publisher {
+  public:
+    PublisherImpl(ClientImplPtr& client);
+
+    void publish(const std::string& topic, const std::string& message);
+    void asyncPublish(const std::string& topic, const std::string& message, const OperationCallbackPtr& callback);
+    
+    // Invoked by the channel handler with the server's response for a txn.
+    void messageHandler(const PubSubResponse& m, const PubSubDataPtr& txn);
+
+    void doPublish(const DuplexChannelPtr& channel, const PubSubDataPtr& data);
+
+  private:
+    ClientImplPtr client;
+  };
+
+};
+
+#endif

+ 387 - 0
src/contrib/hedwig/client/src/main/cpp/lib/subscriberimpl.cpp

@@ -0,0 +1,387 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "subscriberimpl.h"
+#include "util.h"
+#include "channel.h"
+
+#include <log4cpp/Category.hh>
+
+static log4cpp::Category &LOG = log4cpp::Category::getInstance("hedwig."__FILE__);
+const int SUBSCRIBER_RECONNECT_TIME = 3000; // 3 seconds
+using namespace Hedwig;
+
+SubscriberWriteCallback::SubscriberWriteCallback(ClientImplPtr& client, const PubSubDataPtr& data) : client(client), data(data) {}
+
+// Called once the subscribe request has been written to the channel.
+void SubscriberWriteCallback::operationComplete() {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Successfully wrote subscribe transaction: " << data->getTxnId();
+  }
+}
+
+void SubscriberWriteCallback::operationFailed(const std::exception& exception) {
+  LOG.errorStream() << "Error writing to publisher " << exception.what();
+  
+  //remove txn from channel pending list
+  #warning "Actually do something here"
+}
+
+UnsubscribeWriteCallback::UnsubscribeWriteCallback(ClientImplPtr& client, const PubSubDataPtr& data) : client(client), data(data) {}
+
+// Intentionally empty: the server's unsubscribe response (not the write)
+// drives the caller-visible callback.
+void UnsubscribeWriteCallback::operationComplete() {
+  
+}
+
+void UnsubscribeWriteCallback::operationFailed(const std::exception& exception) {
+  #warning "Actually do something here"
+}
+  
+ConsumeWriteCallback::ConsumeWriteCallback(const PubSubDataPtr& data) 
+  : data(data) {
+}
+
+ConsumeWriteCallback::~ConsumeWriteCallback() {
+}
+
+// Consume acks are fire-and-forget: success is merely logged.
+void ConsumeWriteCallback::operationComplete() {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Successfully wrote consume transaction: " << data->getTxnId();
+  }
+}
+
+// A failed consume write is logged but not retried here.
+void ConsumeWriteCallback::operationFailed(const std::exception& exception) {
+  LOG.errorStream() << "Error writing consume transaction: " << data->getTxnId() << " error: " << exception.what();
+}
+
+SubscriberConsumeCallback::SubscriberConsumeCallback(ClientImplPtr& client, const std::string& topic, const std::string& subscriberid, const MessageSeqId& msgid) 
+  : client(client), topic(topic), subscriberid(subscriberid), msgid(msgid)
+{
+}
+
+// Message delivery succeeded: acknowledge (consume) the message back to the
+// server so it can advance this subscriber's pointer.
+void SubscriberConsumeCallback::operationComplete() {
+  // Normal-path trace; was logged at error level, which polluted error logs.
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "ConsumeCallback::operationComplete";
+  }
+  client->getSubscriber().consume(topic, subscriberid, msgid);
+}
+
+// Delivery failed: the message is not acked, so the server will redeliver.
+void SubscriberConsumeCallback::operationFailed(const std::exception& exception) {
+  LOG.errorStream() << "ConsumeCallback::operationFailed";
+}
+
+SubscriberReconnectCallback::SubscriberReconnectCallback(ClientImplPtr& client, const PubSubDataPtr& origData) 
+  : client(client), origData(origData) {
+}
+
+// NOTE(review): the outcome of the automatic resubscription is silently
+// dropped here — confirm whether the application should be notified.
+void SubscriberReconnectCallback::operationComplete() {
+}
+
+void SubscriberReconnectCallback::operationFailed(const std::exception& exception) {
+  
+}
+
+// Per-subscription channel handler: owns the delivery queue and the channel
+// for one (topic, subscriberId) pair.
+SubscriberClientChannelHandler::SubscriberClientChannelHandler(ClientImplPtr& client, SubscriberImpl& subscriber, const PubSubDataPtr& data)
+  : HedwigClientChannelHandler(client), subscriber(subscriber), origData(data), closed(false)  {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Creating SubscriberClientChannelHandler " << this;
+  }
+}
+
+SubscriberClientChannelHandler::~SubscriberClientChannelHandler() {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Cleaning up SubscriberClientChannelHandler " << this;
+  }
+}
+
+// Delivered messages either go straight to the registered handler, or are
+// buffered in `queue` until startDelivery() installs one.  Responses that
+// carry no message are transaction replies and go to the base handler.
+void SubscriberClientChannelHandler::messageReceived(DuplexChannel* channel, const PubSubResponse& m) {
+  if (m.has_message()) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debugStream() << "Message received (topic:" << origData->getTopic() << ", subscriberId:" << origData->getSubscriberId() << ")";
+    }
+
+    if (this->handler.get()) {
+      OperationCallbackPtr callback(new SubscriberConsumeCallback(client, origData->getTopic(), origData->getSubscriberId(), m.message().msgid()));
+      this->handler->consume(origData->getTopic(), origData->getSubscriberId(), m.message(), callback);
+    } else {
+      LOG.debugStream() << "putting in queue";
+      queue.push_back(m.message());
+    }
+  } else {
+    HedwigClientChannelHandler::messageReceived(channel, m);
+  }
+}
+
+// Mark the subscription closed and tear down its channel; a closed handler
+// will not attempt reconnection on disconnect.
+void SubscriberClientChannelHandler::close() {
+  closed = true;
+  if (channel) {
+    channel->kill();
+  }
+}
+
+// The subscription's channel dropped: unless the subscription or the client
+// is shutting down, build a new channel/handler pair, hand over any queued
+// delivery state, and resubscribe.
+void SubscriberClientChannelHandler::channelDisconnected(DuplexChannel* channel, const std::exception& e) {
+  // has subscription been closed
+  if (closed) {
+    return;
+  }
+
+  // Clean up the channel from all maps
+  client->channelDied(channel);
+  if (client->shuttingDown()) {
+    return;
+  }
+  
+  // setup pubsub data for reconnection attempt
+  origData->clearTriedServers();
+  OperationCallbackPtr newcallback(new SubscriberReconnectCallback(client, origData));
+  origData->setCallback(newcallback);
+
+  // Create a new handler for the new channel
+  SubscriberClientChannelHandlerPtr handler(new SubscriberClientChannelHandler(client, subscriber, origData));  
+  ChannelHandlerPtr baseptr = handler;
+  // if there is an error createing the channel, sleep for SUBSCRIBER_RECONNECT_TIME and try again
+  DuplexChannelPtr newchannel;
+  while (true) {
+    try {
+      newchannel = client->createChannelForTopic(origData->getTopic(), baseptr);
+      handler->setChannel(newchannel);
+      break;
+    } catch (ShuttingDownException& e) {
+      LOG.errorStream() << "Shutting down, don't try to reconnect";
+      return; 
+    } catch (ChannelException& e) {
+      LOG.errorStream() << "Couldn't acquire channel, sleeping for " << SUBSCRIBER_RECONNECT_TIME << " before trying again";
+      // SUBSCRIBER_RECONNECT_TIME is in milliseconds but usleep() takes
+      // microseconds; without the conversion this slept 3ms, not 3s.
+      usleep(SUBSCRIBER_RECONNECT_TIME * 1000);
+    }
+  } 
+  handoverDelivery(handler.get());
+  
+  // remove record of the failed channel from the subscriber
+  subscriber.closeSubscription(origData->getTopic(), origData->getSubscriberId());
+
+  // subscriber
+  subscriber.doSubscribe(newchannel, origData, handler);
+}
+
+// Install the application's message handler and drain any messages that were
+// queued while no handler was registered.
+void SubscriberClientChannelHandler::startDelivery(const MessageHandlerCallbackPtr& handler) {
+  this->handler = handler;
+  
+  while (!queue.empty()) {    
+    LOG.debugStream() << "Taking from queue";
+    Message m = queue.front();
+    queue.pop_front();
+
+    OperationCallbackPtr callback(new SubscriberConsumeCallback(client, origData->getTopic(), origData->getSubscriberId(), m.msgid()));
+
+    this->handler->consume(origData->getTopic(), origData->getSubscriberId(), m, callback);
+  }
+}
+
+// Detach the handler; subsequent messages are buffered in the queue.
+void SubscriberClientChannelHandler::stopDelivery() {
+  this->handler = MessageHandlerCallbackPtr();
+}
+
+
+// Move the application's handler from this (old) channel handler to the
+// replacement created after a reconnect; queued messages stay with `this`
+// until the new handler's startDelivery drains its own queue.
+void SubscriberClientChannelHandler::handoverDelivery(SubscriberClientChannelHandler* newHandler) {
+  LOG.debugStream() << "Messages in queue " << queue.size();
+  MessageHandlerCallbackPtr handler = this->handler;
+  stopDelivery(); // resets old handler
+  newHandler->startDelivery(handler);
+}
+
+void SubscriberClientChannelHandler::setChannel(const DuplexChannelPtr& channel) {
+  this->channel = channel;
+}
+
+DuplexChannelPtr& SubscriberClientChannelHandler::getChannel() {
+  return channel;
+}
+
+SubscriberImpl::SubscriberImpl(ClientImplPtr& client) 
+  : client(client) 
+{
+}
+
+SubscriberImpl::~SubscriberImpl() 
+{
+  LOG.debugStream() << "deleting subscriber" << this;
+}
+
+
+// Synchronous subscribe: wraps asyncSubscribe with a SyncOperationCallback,
+// blocks until completion, then rethrows any failure.
+void SubscriberImpl::subscribe(const std::string& topic, const std::string& subscriberId, const SubscribeRequest::CreateOrAttach mode) {
+  SyncOperationCallback* cb = new SyncOperationCallback();
+  OperationCallbackPtr callback(cb);
+  asyncSubscribe(topic, subscriberId, mode, callback);
+  cb->wait();
+  
+  cb->throwExceptionIfNeeded();  
+}
+
+// Asynchronous subscribe: each subscription gets its own channel and its own
+// SubscriberClientChannelHandler (which also handles redelivery/reconnect).
+void SubscriberImpl::asyncSubscribe(const std::string& topic, const std::string& subscriberId, const SubscribeRequest::CreateOrAttach mode, const OperationCallbackPtr& callback) {
+  PubSubDataPtr data = PubSubData::forSubscribeRequest(client->counter().next(), subscriberId, topic, callback, mode);
+
+  SubscriberClientChannelHandlerPtr handler(new SubscriberClientChannelHandler(client, *this, data));  
+  ChannelHandlerPtr baseptr = handler;
+  DuplexChannelPtr channel = client->createChannelForTopic(topic, baseptr);
+  
+  handler->setChannel(channel);
+
+  doSubscribe(channel, data, handler);
+}
+
+// Writes the subscribe request and records the handler for this
+// (topic, subscriberId) pair, handing over delivery from any handler it
+// replaces.
+void SubscriberImpl::doSubscribe(const DuplexChannelPtr& channel, const PubSubDataPtr& data, const SubscriberClientChannelHandlerPtr& handler) {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "doSubscribe";
+  }
+  channel->storeTransaction(data);
+
+  OperationCallbackPtr writecb(new SubscriberWriteCallback(client, data));
+  channel->writeRequest(data->getRequest(), writecb);
+
+  topicsubscriber2handler_lock.lock();
+  TopicSubscriber t(data->getTopic(), data->getSubscriberId());
+  SubscriberClientChannelHandlerPtr oldhandler = topicsubscriber2handler[t];
+  // .get() comparison is the null-check idiom used throughout this file.
+  if (oldhandler.get() != 0) {
+    oldhandler->handoverDelivery(handler.get());
+  }
+  topicsubscriber2handler[t] = handler;
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Set topic subscriber for topic(" << data->getTopic() << ") subscriberId(" << data->getSubscriberId() << ") to " << handler.get() << " topicsubscriber2topic(" << &topicsubscriber2handler << ")";
+  }
+  topicsubscriber2handler_lock.unlock();
+}
+
+// Synchronous unsubscribe: blocks on asyncUnsubscribe and rethrows failures.
+void SubscriberImpl::unsubscribe(const std::string& topic, const std::string& subscriberId) {
+  SyncOperationCallback* cb = new SyncOperationCallback();
+  OperationCallbackPtr callback(cb);
+  asyncUnsubscribe(topic, subscriberId, callback);
+  cb->wait();
+  
+  cb->throwExceptionIfNeeded();
+}
+
+// Asynchronous unsubscribe: first tears down local delivery for the pair,
+// then sends the unsubscribe request on the topic's channel.
+void SubscriberImpl::asyncUnsubscribe(const std::string& topic, const std::string& subscriberId, const OperationCallbackPtr& callback) {
+  closeSubscription(topic, subscriberId);
+
+  PubSubDataPtr data = PubSubData::forUnsubscribeRequest(client->counter().next(), subscriberId, topic, callback);
+  
+  DuplexChannelPtr channel = client->getChannelForTopic(topic);
+  if (channel.get() == 0) {
+    LOG.errorStream() << "Trying to unsubscribe from (" << topic << ", " << subscriberId << ") but channel is dead";
+    callback->operationFailed(InvalidStateException());
+    return;
+  }
+  
+  doUnsubscribe(channel, data);  
+}
+
+// Registers the transaction and writes the unsubscribe request.
+void SubscriberImpl::doUnsubscribe(const DuplexChannelPtr& channel, const PubSubDataPtr& data) {
+  channel->storeTransaction(data);
+  OperationCallbackPtr writecb(new UnsubscribeWriteCallback(client, data));
+  channel->writeRequest(data->getRequest(), writecb);
+}
+
+// Acknowledge (consume) a delivered message back to the server for the given
+// (topic, subscriberId) pair.  A missing handler or channel is logged and the
+// ack is dropped; the server will simply redeliver.
+void SubscriberImpl::consume(const std::string& topic, const std::string& subscriberId, const MessageSeqId& messageSeqId) {
+  TopicSubscriber t(topic, subscriberId);
+
+  topicsubscriber2handler_lock.lock();
+  SubscriberClientChannelHandlerPtr handler = topicsubscriber2handler[t];
+  topicsubscriber2handler_lock.unlock();
+
+  if (handler.get() == 0) {
+    LOG.errorStream() << "Cannot consume. Bad handler for topic(" << topic << ") subscriberId(" << subscriberId << ") topicsubscriber2topic(" << &topicsubscriber2handler << ")";
+    return;
+  }
+
+  DuplexChannelPtr channel = handler->getChannel();
+  if (channel.get() == 0) {
+    LOG.errorStream() << "Trying to consume a message on a topic/subscriber pair that don't have a channel. Something fishy going on. Topic: " << topic << " SubscriberId: " << subscriberId << " MessageSeqId: " << messageSeqId.localcomponent();
+    // Must bail out here: falling through would dereference the null channel.
+    return;
+  }
+  
+  PubSubDataPtr data = PubSubData::forConsumeRequest(client->counter().next(), subscriberId, topic, messageSeqId);  
+  OperationCallbackPtr writecb(new ConsumeWriteCallback(data));
+  channel->writeRequest(data->getRequest(), writecb);
+}
+
+// Begin delivering messages for the pair to the given application callback.
+void SubscriberImpl::startDelivery(const std::string& topic, const std::string& subscriberId, const MessageHandlerCallbackPtr& callback) {
+  TopicSubscriber t(topic, subscriberId);
+
+  topicsubscriber2handler_lock.lock();
+  SubscriberClientChannelHandlerPtr handler = topicsubscriber2handler[t];
+  topicsubscriber2handler_lock.unlock();
+
+  if (handler.get() == 0) {
+    LOG.errorStream() << "Trying to start deliver on a non existant handler topic = " << topic << ", subscriber = " << subscriberId;
+    // Must return: the code below would dereference the null handler.
+    return;
+  }
+  handler->startDelivery(callback);
+}
+
+// Stop delivering messages for the pair; further messages are queued.
+void SubscriberImpl::stopDelivery(const std::string& topic, const std::string& subscriberId) {
+  TopicSubscriber t(topic, subscriberId);
+
+  topicsubscriber2handler_lock.lock();
+  SubscriberClientChannelHandlerPtr handler = topicsubscriber2handler[t];
+  topicsubscriber2handler_lock.unlock();
+
+  if (handler.get() == 0) {
+    // (fixed copy-pasted message that said "start deliver")
+    LOG.errorStream() << "Trying to stop delivery on a non existant handler topic = " << topic << ", subscriber = " << subscriberId;
+    // Must return: the code below would dereference the null handler.
+    return;
+  }
+  handler->stopDelivery();
+}
+
+// Remove the handler for the pair from the map and close it (which kills its
+// channel and suppresses reconnection).
+void SubscriberImpl::closeSubscription(const std::string& topic, const std::string& subscriberId) {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "closeSubscription (" << topic << ",  " << subscriberId << ")";
+  }
+  TopicSubscriber t(topic, subscriberId);
+
+  topicsubscriber2handler_lock.lock();
+  SubscriberClientChannelHandlerPtr handler = topicsubscriber2handler[t];
+  topicsubscriber2handler.erase(t);
+  topicsubscriber2handler_lock.unlock();
+  if (handler) {
+    handler->close();
+  }
+}
+
+/**
+   takes ownership of txn
+
+   Dispatches the server's response for a subscribe/unsubscribe/consume
+   transaction to that transaction's callback, mapping status codes to
+   the corresponding client exceptions.
+*/
+void SubscriberImpl::messageHandler(const PubSubResponse& m, const PubSubDataPtr& txn) {
+  if (!txn.get()) {
+    LOG.errorStream() << "Invalid transaction";
+    return;
+  }
+
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "message received with status " << m.statuscode();
+  }
+  switch (m.statuscode()) {
+  case SUCCESS:
+    txn->getCallback()->operationComplete();
+    break;
+  case SERVICE_DOWN:
+    txn->getCallback()->operationFailed(ServiceDownException());
+    break;
+  case CLIENT_ALREADY_SUBSCRIBED:
+  case TOPIC_BUSY:
+    txn->getCallback()->operationFailed(AlreadySubscribedException());
+    break;
+  case CLIENT_NOT_SUBSCRIBED:
+    txn->getCallback()->operationFailed(NotSubscribedException());
+    break;
+  default:
+    txn->getCallback()->operationFailed(UnexpectedResponseException());
+    break;
+  }
+}

+ 149 - 0
src/contrib/hedwig/client/src/main/cpp/lib/subscriberimpl.h

@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef SUBSCRIBE_IMPL_H
+#define SUBSCRIBE_IMPL_H
+
+#include <hedwig/subscribe.h>
+#include <hedwig/callback.h>
+#include "clientimpl.h"
+#include <utility>
+#include <tr1/memory>
+#include <deque>
+
+namespace Hedwig {
+  // Write-completion callback for subscribe requests.
+  class SubscriberWriteCallback : public OperationCallback {
+  public:
+    SubscriberWriteCallback(ClientImplPtr& client, const PubSubDataPtr& data);
+
+    void operationComplete();
+    void operationFailed(const std::exception& exception);
+  private:
+    ClientImplPtr client;
+    PubSubDataPtr data;
+  };
+  
+  // Write-completion callback for unsubscribe requests.
+  class UnsubscribeWriteCallback : public OperationCallback {
+  public:
+    UnsubscribeWriteCallback(ClientImplPtr& client, const PubSubDataPtr& data);
+
+    void operationComplete();
+    void operationFailed(const std::exception& exception);
+  private:
+    ClientImplPtr client;
+    PubSubDataPtr data;
+  };
+
+  // Write-completion callback for consume (ack) requests; fire-and-forget.
+  class ConsumeWriteCallback : public OperationCallback {
+  public:
+    ConsumeWriteCallback(const PubSubDataPtr& data);
+    ~ConsumeWriteCallback();
+
+    void operationComplete();
+    void operationFailed(const std::exception& exception);
+  private:
+    PubSubDataPtr data;
+    };
+
+  // Callback attached to a subscription that is being re-established after
+  // its channel died.
+  class SubscriberReconnectCallback : public OperationCallback {
+  public: 
+    SubscriberReconnectCallback(ClientImplPtr& client, const PubSubDataPtr& origData);
+
+    void operationComplete();
+    void operationFailed(const std::exception& exception);
+  private:
+    ClientImplPtr client;
+    PubSubDataPtr origData;
+  };
+
+  class SubscriberClientChannelHandler;
+  typedef std::tr1::shared_ptr<SubscriberClientChannelHandler> SubscriberClientChannelHandlerPtr;
+
+  // After a message is handed to the application, this callback sends the
+  // consume ack on success (failure means the server will redeliver).
+  class SubscriberConsumeCallback : public OperationCallback {
+  public: 
+    SubscriberConsumeCallback(ClientImplPtr& client, const std::string& topic, const std::string& subscriberid, const MessageSeqId& msgid);
+
+    void operationComplete();
+    void operationFailed(const std::exception& exception);
+  private:
+    ClientImplPtr client;
+    const std::string topic;
+    const std::string subscriberid;
+    MessageSeqId msgid;
+  };
+
+  // Channel handler owned by one (topic, subscriberId) subscription: buffers
+  // messages until delivery starts, and resubscribes on channel death.
+  class SubscriberClientChannelHandler : public HedwigClientChannelHandler {
+  public: 
+    SubscriberClientChannelHandler(ClientImplPtr& client, SubscriberImpl& subscriber, const PubSubDataPtr& data);
+    ~SubscriberClientChannelHandler();
+
+    void messageReceived(DuplexChannel* channel, const PubSubResponse& m);
+    void channelDisconnected(DuplexChannel* channel, const std::exception& e);
+
+    void startDelivery(const MessageHandlerCallbackPtr& handler);
+    void stopDelivery();
+
+    // Transfer the application's handler to a replacement created on reconnect.
+    void handoverDelivery(SubscriberClientChannelHandler* newHandler);
+
+    void setChannel(const DuplexChannelPtr& channel);
+    DuplexChannelPtr& getChannel();
+
+    void close();
+  private:
+
+    SubscriberImpl& subscriber;
+#warning "put some limit on this to stop it growing forever"
+    // Messages received while no application handler is registered.
+    std::deque<Message> queue;
+    MessageHandlerCallbackPtr handler;
+    PubSubDataPtr origData;
+    DuplexChannelPtr channel;
+    bool closed;
+  };
+
+  // Subscriber implementation: one channel handler per active subscription,
+  // tracked in topicsubscriber2handler.
+  class SubscriberImpl : public Subscriber {
+  public:
+    SubscriberImpl(ClientImplPtr& client);
+    ~SubscriberImpl();
+
+    void subscribe(const std::string& topic, const std::string& subscriberId, const SubscribeRequest::CreateOrAttach mode);
+    void asyncSubscribe(const std::string& topic, const std::string& subscriberId, const SubscribeRequest::CreateOrAttach mode, const OperationCallbackPtr& callback);
+    
+    void unsubscribe(const std::string& topic, const std::string& subscriberId);
+    void asyncUnsubscribe(const std::string& topic, const std::string& subscriberId, const OperationCallbackPtr& callback);
+
+    void consume(const std::string& topic, const std::string& subscriberId, const MessageSeqId& messageSeqId);
+
+    void startDelivery(const std::string& topic, const std::string& subscriberId, const MessageHandlerCallbackPtr& callback);
+    void stopDelivery(const std::string& topic, const std::string& subscriberId);
+
+    void closeSubscription(const std::string& topic, const std::string& subscriberId);
+
+    void messageHandler(const PubSubResponse& m, const PubSubDataPtr& txn);
+
+    void doSubscribe(const DuplexChannelPtr& channel, const PubSubDataPtr& data, const SubscriberClientChannelHandlerPtr& handler);
+    void doUnsubscribe(const DuplexChannelPtr& channel, const PubSubDataPtr& data);
+
+  private:
+    ClientImplPtr client;
+    
+    // (topic, subscriberId) -> handler for that subscription's channel.
+    std::tr1::unordered_map<TopicSubscriber, SubscriberClientChannelHandlerPtr> topicsubscriber2handler;
+    Mutex topicsubscriber2handler_lock;
+  };
+
+};
+
+#endif

+ 233 - 0
src/contrib/hedwig/client/src/main/cpp/lib/util.cpp

@@ -0,0 +1,233 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <string>
+
+#include <netdb.h>
+#include <errno.h>
+#include "util.h"
+#include "channel.h"
+#include <log4cpp/Category.hh>
+#include <sys/types.h>
+#include <sys/socket.h>
+
+static log4cpp::Category &LOG = log4cpp::Category::getInstance("hedwig."__FILE__);
+
+using namespace Hedwig;
+
+#define MAX_HOSTNAME_LENGTH 256
+const std::string UNITIALISED_HOST("UNINITIALISED HOST");
+
+const int DEFAULT_PORT = 4080;
+const int DEFAULT_SSL_PORT = 9876;
+
+// Default-constructed address is the "null host": not initialised, with a
+// zeroed sockaddr_in until fromString()/parse_string() fills it in.
+HostAddress::HostAddress() : initialised(false), address_str() {
+  memset(&socket_addr, 0, sizeof(struct sockaddr_in));
+}
+
+HostAddress::~HostAddress() {
+}
+
+// True until fromString() has successfully resolved this address.
+bool HostAddress::isNullHost() const {
+  return !initialised;
+}
+
+// Addresses compare equal on resolved IP and port only; the original
+// address string is deliberately ignored.
+bool HostAddress::operator==(const HostAddress& other) const {
+  return (other.ip() == ip() && other.port() == port());
+}
+
+// Return the host string this address was built from, or a fixed
+// placeholder when the address was never initialised.
+const std::string& HostAddress::getAddressString() const {
+  if (isNullHost()) {
+    return UNITIALISED_HOST;
+  }
+  return address_str;
+}
+   
+// Resolved IPv4 address in host byte order.
+uint32_t HostAddress::ip() const {
+  return ntohl(socket_addr.sin_addr.s_addr);
+}
+
+// Port in host byte order.
+uint16_t HostAddress::port() const {
+  return ntohs(socket_addr.sin_port);
+}
+
+const struct sockaddr_in& HostAddress::socketAddress() const {
+  return socket_addr;
+}
+
+
+// Parse address_str of the form "host[:port[:sslport]]" and resolve the
+// host to an IPv4 socket address stored in socket_addr.
+// Throws OomException, InvalidPortException or HostResolutionException.
+void HostAddress::parse_string() {
+  char* url = strdup(address_str.c_str());
+
+  if (url == NULL) {
+    LOG.errorStream() << "You seems to be out of memory";
+    throw OomException();
+  }
+  int port = DEFAULT_PORT;
+  // NOTE(review): sslport is validated here but never stored anywhere —
+  // presumably SSL support is still to come; confirm before relying on it.
+  int sslport = DEFAULT_SSL_PORT;
+
+  char *colon = strchr(url, ':');
+  if (colon) {
+    *colon = 0;
+    colon++;
+    
+    char* sslcolon = strchr(colon, ':');
+    if (sslcolon) {
+      *sslcolon = 0;
+      sslcolon++;
+      
+      // strtol yields 0 for unparseable input; port 0 is invalid anyway.
+      sslport = strtol(sslcolon, NULL, 10);
+      if (sslport == 0) {
+	LOG.errorStream() << "Invalid SSL port given: [" << sslcolon << "]";
+	free((void*)url);
+	throw InvalidPortException();
+      }
+    }
+    
+    port = strtol(colon, NULL, 10);
+    if (port == 0) {
+      LOG.errorStream() << "Invalid port given: [" << colon << "]";
+      free((void*)url);
+      throw InvalidPortException();
+    }
+  }
+
+  int err = 0;
+  
+  struct addrinfo *addr;
+  struct addrinfo hints;
+
+  memset(&hints, 0, sizeof(struct addrinfo));
+  hints.ai_family = AF_INET;
+
+  err = getaddrinfo(url, NULL, &hints, &addr);
+  if (err != 0) {
+    // getaddrinfo error codes must be rendered with gai_strerror();
+    // hstrerror() interprets gethostbyname's h_errno values instead.
+    LOG.errorStream() << "Couldn't resolve host [" << url << "]:" << gai_strerror(err);
+    free((void*)url);
+    throw HostResolutionException();
+  }
+
+  sockaddr_in* sa_ptr = (sockaddr_in*)addr->ai_addr;
+  socket_addr = *sa_ptr;
+  socket_addr.sin_port = htons(port); 
+
+  free((void*)url);
+  // The addrinfo chain is allocated by getaddrinfo and must be released
+  // with freeaddrinfo(); plain free() only frees the first node and leaks
+  // ai_addr/ai_canonname and the rest of the list.
+  freeaddrinfo(addr);
+}
+
+// Factory: build and resolve a HostAddress from "host[:port[:sslport]]".
+// Only marked initialised once parse_string() has succeeded.
+HostAddress HostAddress::fromString(std::string str) {
+  HostAddress addr;
+  addr.address_str = str;
+  addr.parse_string();
+  addr.initialised = true;
+  return addr;
+}
+
+// Condition-variable wrapper: subclasses supply isTrue(); wait() blocks
+// until it holds. Mutex and condition are created together here.
+WaitConditionBase::WaitConditionBase() {
+  pthread_mutex_init(&mutex, NULL);
+  pthread_cond_init(&cond, NULL);  
+}
+
+WaitConditionBase::~WaitConditionBase() {
+  pthread_mutex_destroy(&mutex);
+  pthread_cond_destroy(&cond);
+}
+    
+// Block the caller until isTrue() returns true. The predicate is
+// re-checked in a loop, as required to cope with spurious wakeups from
+// pthread_cond_wait.
+void WaitConditionBase::wait() {
+  pthread_mutex_lock(&mutex);
+  while (!isTrue()) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debugStream() << "wait: condition is false for " << this;
+    }
+
+    // Atomically releases the mutex while waiting, re-acquires on wakeup.
+    pthread_cond_wait(&cond, &mutex); 
+  }
+  pthread_mutex_unlock(&mutex);
+}
+
+// Acquire the mutex protecting the condition's state before mutating
+// whatever isTrue() examines.
+void WaitConditionBase::lock() {
+  pthread_mutex_lock(&mutex);
+}
+
+// Wake one waiter and release the mutex. Caller must hold the mutex (via
+// lock()) and should have made isTrue() become true before calling.
+void WaitConditionBase::signalAndUnlock() {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "signal: signal " << this;
+  }
+  
+  // pthread_cond_signal wakes at most one waiting thread.
+  pthread_cond_signal(&cond);
+  
+  pthread_mutex_unlock(&mutex);
+}
+
+// Thin pthread mutex wrapper. Init/destroy failures are logged only,
+// not thrown — callers cannot observe a failed construction.
+Mutex::Mutex() {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Creating mutex " << this;
+  }
+  int error = pthread_mutex_init(&mutex, NULL);
+  if (error != 0) {
+    LOG.errorStream() << "Error initiating mutex " << error;
+  }
+}
+
+Mutex::~Mutex() {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Destroying mutex " << this;
+  }
+
+  int error = pthread_mutex_destroy(&mutex);
+  if (error != 0) {
+    LOG.errorStream() << "Error destroying mutex " << this << " " << error;
+  }
+}
+
+// Blocking acquire. Errors from pthread_mutex_lock (e.g. EDEADLK) are
+// logged but otherwise ignored.
+void Mutex::lock() {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Locking mutex " << this;
+  }
+    
+  int error = pthread_mutex_lock(&mutex);
+  if (error != 0) {
+    LOG.errorStream() << "Error locking mutex " << this << " " << error;
+  }
+}
+
+// Release. Errors (e.g. EPERM when not the owner) are logged only.
+void Mutex::unlock() {
+  if (LOG.isDebugEnabled()) {
+    LOG.debugStream() << "Unlocking mutex " << this;
+  }
+
+  int error = pthread_mutex_unlock(&mutex);
+  if (error != 0) {
+    LOG.errorStream() << "Error unlocking mutex " << this << " " << error;
+  }
+}
+
+std::size_t std::tr1::hash<HostAddress>::operator()(const HostAddress& address) const {
+  // Pack the low 16 bits of the IP and the 16-bit port into one value.
+  // Combining with '&' (as previously written) always yielded 0, since the
+  // shifted IP and the port occupy disjoint bit ranges — every address
+  // hashed to the same bucket. '|' performs the packing the header's
+  // comment describes.
+  return (address.ip() << 16) | (address.port());
+}
+
+// Identity hash: the pointer value itself is the hash.
+std::size_t std::tr1::hash<DuplexChannel*>::operator()(const DuplexChannel* channel) const {
+  return reinterpret_cast<std::size_t>(channel);
+}
+
+// Hash a (topic, subscriberId) pair by hashing the concatenated strings.
+std::size_t std::tr1::hash<TopicSubscriber>::operator()(const TopicSubscriber& topicsub) const {
+  return std::tr1::hash<std::string>()(topicsub.first + topicsub.second);
+}
+

+ 122 - 0
src/contrib/hedwig/client/src/main/cpp/lib/util.h

@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HEDWIG_UTIL_H
+#define HEDWIG_UTIL_H
+
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <hedwig/exceptions.h>
+#include <hedwig/callback.h>
+#include <list>
+#include <iostream>
+#include <utility>
+#include <tr1/functional>
+#include <semaphore.h>
+#include <pthread.h>
+
+namespace Hedwig {
+  typedef std::pair<const std::string, const std::string> TopicSubscriber;
+
+  /**
+     Representation of a hosts address
+  */
+  class HostAddress {
+  public:
+    // Constructs the "null host": isNullHost() is true until an address
+    // is produced via fromString().
+    HostAddress();
+    ~HostAddress();
+
+    // Equality is on resolved IP and port, not on the address string.
+    bool operator==(const HostAddress& other) const;
+    
+    bool isNullHost() const;
+    const std::string& getAddressString() const;
+    // IP and port in host byte order.
+    uint32_t ip() const;
+    uint16_t port() const;
+    const sockaddr_in& socketAddress() const;
+
+    // Parse and resolve "host[:port[:sslport]]"; throws on bad input.
+    static HostAddress fromString(std::string host);
+
+  private:
+
+    void parse_string();
+    
+    bool initialised;
+    std::string address_str;
+    struct sockaddr_in socket_addr;
+  };
+
+  class DuplexChannel;  
+  
+  /**
+     Thin non-recursive pthread mutex wrapper. Lock/unlock errors are
+     logged rather than thrown. Not copyable in practice — copying would
+     duplicate the underlying pthread_mutex_t.
+  */
+  class Mutex {
+  public:
+    Mutex();
+    ~Mutex();
+    
+    void lock();
+    void unlock();
+  private:
+    pthread_mutex_t mutex;
+  };
+
+  /**
+     Condition-variable helper. Subclasses implement isTrue(); a waiter
+     calls wait() (which loops over the predicate, so spurious wakeups are
+     handled), while the signalling thread calls lock(), makes the
+     predicate true, then signalAndUnlock().
+  */
+  class WaitConditionBase {
+  public:
+    WaitConditionBase();
+    virtual ~WaitConditionBase();
+    
+    void wait(); 
+    void lock();
+    void signalAndUnlock();
+
+    // Predicate checked under the mutex; must be side-effect free.
+    virtual bool isTrue() = 0;
+  private:
+
+    pthread_mutex_t mutex;
+    pthread_cond_t cond;    
+  };
+
+};
+
+namespace std 
+{
+  namespace tr1 
+  {
+  /**
+     Hash a host address. Takes the least significant 16-bits of the address and the 16-bits of the
+     port and packs them into one 32-bit number. While collisons are theoretically very possible, they
+     shouldn't happen as the hedwig servers should be in the same subnet.
+  */
+  template <> struct hash<Hedwig::HostAddress> : public unary_function<Hedwig::HostAddress, size_t> {
+    size_t operator()(const Hedwig::HostAddress& address) const;
+  };
+
+  /**
+     Hash a channel pointer, just returns the pointer.
+  */
+  template <> struct hash<Hedwig::DuplexChannel*> : public unary_function<Hedwig::DuplexChannel*, size_t> {
+    size_t operator()(const Hedwig::DuplexChannel* channel) const;
+  };
+
+  /**
+     Hash a (topic, subscriberId) pair, by hashing the concatenation of
+     the topic and subscriber id strings.
+  */
+  template <> struct hash<Hedwig::TopicSubscriber> : public unary_function<Hedwig::TopicSubscriber, size_t> {
+    size_t operator()(const Hedwig::TopicSubscriber& topicsub) const;
+  };
+  }
+}
+#endif

+ 49 - 0
src/contrib/hedwig/client/src/main/cpp/log4cpp.conf

@@ -0,0 +1,49 @@
+#
+# 
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# 
+#
+
+log4j.appender.rootAppender=org.apache.log4j.ConsoleAppender
+log4j.appender.rootAppender.layout=org.apache.log4j.BasicLayout
+
+#log4j.appender.hedwig=org.apache.log4j.RollingFileAppender
+log4j.appender.hedwig=org.apache.log4j.ConsoleAppender
+#log4j.appender.hedwig.fileName=./testLog.log
+log4j.appender.hedwig.layout=org.apache.log4j.PatternLayout
+# NOTE: ConversionPattern is assigned twice for this appender; the later
+# assignment (%.5m%n) wins. The detailed pattern is kept commented out so
+# it can be re-enabled easily.
+#log4j.appender.hedwig.layout.ConversionPattern=[%d{%H:%M:%S.%l}] %t %c %p - %m%n
+log4j.appender.hedwig.layout=org.apache.log4j.PatternLayout
+log4j.appender.hedwig.layout.ConversionPattern=%.5m%n
+
+log4j.appender.hedwigtest=org.apache.log4j.ConsoleAppender
+#log4j.appender.hedwig.fileName=./testLog.log
+log4j.appender.hedwigtest.layout=org.apache.log4j.PatternLayout
+log4j.appender.hedwigtest.layout.ConversionPattern=[%d{%H:%M:%S.%l}] %c %p - %m%n
+log4j.appender.hedwigtest.layout=org.apache.log4j.PatternLayout
+log4j.appender.hedwigtest.layout.ConversionPattern=%.5m%n
+
+# category
+log4j.category.hedwig=DEBUG, hedwig
+log4j.rootCategory=DEBUG
+
+log4j.category.hedwig.channel=ERROR
+log4j.category.hedwig.util=ERROR
+log4j.category.hedwigtest.servercontrol=ERROR
+
+log4j.category.hedwigtest=DEBUG, hedwigtest
+log4j.rootCategory=DEBUG

+ 533 - 0
src/contrib/hedwig/client/src/main/cpp/m4/ax_doxygen.m4

@@ -0,0 +1,533 @@
+# ===========================================================================
+#      http://www.gnu.org/software/autoconf-archive/ax_prog_doxygen.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+#   DX_INIT_DOXYGEN(PROJECT-NAME, DOXYFILE-PATH, [OUTPUT-DIR])
+#   DX_DOXYGEN_FEATURE(ON|OFF)
+#   DX_DOT_FEATURE(ON|OFF)
+#   DX_HTML_FEATURE(ON|OFF)
+#   DX_CHM_FEATURE(ON|OFF)
+#   DX_CHI_FEATURE(ON|OFF)
+#   DX_MAN_FEATURE(ON|OFF)
+#   DX_RTF_FEATURE(ON|OFF)
+#   DX_XML_FEATURE(ON|OFF)
+#   DX_PDF_FEATURE(ON|OFF)
+#   DX_PS_FEATURE(ON|OFF)
+#
+# DESCRIPTION
+#
+#   The DX_*_FEATURE macros control the default setting for the given
+#   Doxygen feature. Supported features are 'DOXYGEN' itself, 'DOT' for
+#   generating graphics, 'HTML' for plain HTML, 'CHM' for compressed HTML
+#   help (for MS users), 'CHI' for generating a seperate .chi file by the
+#   .chm file, and 'MAN', 'RTF', 'XML', 'PDF' and 'PS' for the appropriate
+#   output formats. The environment variable DOXYGEN_PAPER_SIZE may be
+#   specified to override the default 'a4wide' paper size.
+#
+#   By default, HTML, PDF and PS documentation is generated as this seems to
+#   be the most popular and portable combination. MAN pages created by
+#   Doxygen are usually problematic, though by picking an appropriate subset
+#   and doing some massaging they might be better than nothing. CHM and RTF
+#   are specific for MS (note that you can't generate both HTML and CHM at
+#   the same time). The XML is rather useless unless you apply specialized
+#   post-processing to it.
+#
+#   The macros mainly control the default state of the feature. The use can
+#   override the default by specifying --enable or --disable. The macros
+#   ensure that contradictory flags are not given (e.g.,
+#   --enable-doxygen-html and --enable-doxygen-chm,
+#   --enable-doxygen-anything with --disable-doxygen, etc.) Finally, each
+#   feature will be automatically disabled (with a warning) if the required
+#   programs are missing.
+#
+#   Once all the feature defaults have been specified, call DX_INIT_DOXYGEN
+#   with the following parameters: a one-word name for the project for use
+#   as a filename base etc., an optional configuration file name (the
+#   default is 'Doxyfile', the same as Doxygen's default), and an optional
+#   output directory name (the default is 'doxygen-doc').
+#
+#   Automake Support
+#
+#   The following is a template aminclude.am file for use with Automake.
+#   Make targets and variables values are controlled by the various
+#   DX_COND_* conditionals set by autoconf.
+#
+#   The provided targets are:
+#
+#     doxygen-doc: Generate all doxygen documentation.
+#
+#     doxygen-run: Run doxygen, which will generate some of the
+#                  documentation (HTML, CHM, CHI, MAN, RTF, XML)
+#                  but will not do the post processing required
+#                  for the rest of it (PS, PDF, and some MAN).
+#
+#     doxygen-man: Rename some doxygen generated man pages.
+#
+#     doxygen-ps:  Generate doxygen PostScript documentation.
+#
+#     doxygen-pdf: Generate doxygen PDF documentation.
+#
+#   Note that by default these are not integrated into the automake targets.
+#   If doxygen is used to generate man pages, you can achieve this
+#   integration by setting man3_MANS to the list of man pages generated and
+#   then adding the dependency:
+#
+#     $(man3_MANS): doxygen-doc
+#
+#   This will cause make to run doxygen and generate all the documentation.
+#
+#   The following variable is intended for use in Makefile.am:
+#
+#     DX_CLEANFILES = everything to clean.
+#
+#   Then add this variable to MOSTLYCLEANFILES.
+#
+#     ----- begin aminclude.am -------------------------------------
+#
+#     ## --------------------------------- ##
+#     ## Format-independent Doxygen rules. ##
+#     ## --------------------------------- ##
+#
+#     if DX_COND_doc
+#
+#     ## ------------------------------- ##
+#     ## Rules specific for HTML output. ##
+#     ## ------------------------------- ##
+#
+#     if DX_COND_html
+#
+#     DX_CLEAN_HTML = @DX_DOCDIR@/html
+#
+#     endif DX_COND_html
+#
+#     ## ------------------------------ ##
+#     ## Rules specific for CHM output. ##
+#     ## ------------------------------ ##
+#
+#     if DX_COND_chm
+#
+#     DX_CLEAN_CHM = @DX_DOCDIR@/chm
+#
+#     if DX_COND_chi
+#
+#     DX_CLEAN_CHI = @DX_DOCDIR@/@PACKAGE@.chi
+#
+#     endif DX_COND_chi
+#
+#     endif DX_COND_chm
+#
+#     ## ------------------------------ ##
+#     ## Rules specific for MAN output. ##
+#     ## ------------------------------ ##
+#
+#     if DX_COND_man
+#
+#     DX_CLEAN_MAN = @DX_DOCDIR@/man
+#
+#     endif DX_COND_man
+#
+#     ## ------------------------------ ##
+#     ## Rules specific for RTF output. ##
+#     ## ------------------------------ ##
+#
+#     if DX_COND_rtf
+#
+#     DX_CLEAN_RTF = @DX_DOCDIR@/rtf
+#
+#     endif DX_COND_rtf
+#
+#     ## ------------------------------ ##
+#     ## Rules specific for XML output. ##
+#     ## ------------------------------ ##
+#
+#     if DX_COND_xml
+#
+#     DX_CLEAN_XML = @DX_DOCDIR@/xml
+#
+#     endif DX_COND_xml
+#
+#     ## ----------------------------- ##
+#     ## Rules specific for PS output. ##
+#     ## ----------------------------- ##
+#
+#     if DX_COND_ps
+#
+#     DX_CLEAN_PS = @DX_DOCDIR@/@PACKAGE@.ps
+#
+#     DX_PS_GOAL = doxygen-ps
+#
+#     doxygen-ps: @DX_DOCDIR@/@PACKAGE@.ps
+#
+#     @DX_DOCDIR@/@PACKAGE@.ps: @DX_DOCDIR@/@PACKAGE@.tag
+#         cd @DX_DOCDIR@/latex; \
+#         rm -f *.aux *.toc *.idx *.ind *.ilg *.log *.out; \
+#         $(DX_LATEX) refman.tex; \
+#         $(MAKEINDEX_PATH) refman.idx; \
+#         $(DX_LATEX) refman.tex; \
+#         countdown=5; \
+#         while $(DX_EGREP) 'Rerun (LaTeX|to get cross-references right)' \
+#                           refman.log > /dev/null 2>&1 \
+#            && test $$countdown -gt 0; do \
+#             $(DX_LATEX) refman.tex; \
+#             countdown=`expr $$countdown - 1`; \
+#         done; \
+#         $(DX_DVIPS) -o ../@PACKAGE@.ps refman.dvi
+#
+#     endif DX_COND_ps
+#
+#     ## ------------------------------ ##
+#     ## Rules specific for PDF output. ##
+#     ## ------------------------------ ##
+#
+#     if DX_COND_pdf
+#
+#     DX_CLEAN_PDF = @DX_DOCDIR@/@PACKAGE@.pdf
+#
+#     DX_PDF_GOAL = doxygen-pdf
+#
+#     doxygen-pdf: @DX_DOCDIR@/@PACKAGE@.pdf
+#
+#     @DX_DOCDIR@/@PACKAGE@.pdf: @DX_DOCDIR@/@PACKAGE@.tag
+#         cd @DX_DOCDIR@/latex; \
+#         rm -f *.aux *.toc *.idx *.ind *.ilg *.log *.out; \
+#         $(DX_PDFLATEX) refman.tex; \
+#         $(DX_MAKEINDEX) refman.idx; \
+#         $(DX_PDFLATEX) refman.tex; \
+#         countdown=5; \
+#         while $(DX_EGREP) 'Rerun (LaTeX|to get cross-references right)' \
+#                           refman.log > /dev/null 2>&1 \
+#            && test $$countdown -gt 0; do \
+#             $(DX_PDFLATEX) refman.tex; \
+#             countdown=`expr $$countdown - 1`; \
+#         done; \
+#         mv refman.pdf ../@PACKAGE@.pdf
+#
+#     endif DX_COND_pdf
+#
+#     ## ------------------------------------------------- ##
+#     ## Rules specific for LaTeX (shared for PS and PDF). ##
+#     ## ------------------------------------------------- ##
+#
+#     if DX_COND_latex
+#
+#     DX_CLEAN_LATEX = @DX_DOCDIR@/latex
+#
+#     endif DX_COND_latex
+#
+#     .PHONY: doxygen-run doxygen-doc $(DX_PS_GOAL) $(DX_PDF_GOAL)
+#
+#     .INTERMEDIATE: doxygen-run $(DX_PS_GOAL) $(DX_PDF_GOAL)
+#
+#     doxygen-run: @DX_DOCDIR@/@PACKAGE@.tag
+#
+#     doxygen-doc: doxygen-run $(DX_PS_GOAL) $(DX_PDF_GOAL)
+#
+#     @DX_DOCDIR@/@PACKAGE@.tag: $(DX_CONFIG) $(pkginclude_HEADERS)
+#         rm -rf @DX_DOCDIR@
+#         $(DX_ENV) $(DX_DOXYGEN) $(srcdir)/$(DX_CONFIG)
+#
+#     DX_CLEANFILES = \
+#         @DX_DOCDIR@/@PACKAGE@.tag \
+#         -r \
+#         $(DX_CLEAN_HTML) \
+#         $(DX_CLEAN_CHM) \
+#         $(DX_CLEAN_CHI) \
+#         $(DX_CLEAN_MAN) \
+#         $(DX_CLEAN_RTF) \
+#         $(DX_CLEAN_XML) \
+#         $(DX_CLEAN_PS) \
+#         $(DX_CLEAN_PDF) \
+#         $(DX_CLEAN_LATEX)
+#
+#     endif DX_COND_doc
+#
+#     ----- end aminclude.am ---------------------------------------
+#
+# LICENSE
+#
+#   Copyright (c) 2009 Oren Ben-Kiki <oren@ben-kiki.org>
+#
+#   Copying and distribution of this file, with or without modification, are
+#   permitted in any medium without royalty provided the copyright notice
+#   and this notice are preserved. This file is offered as-is, without any
+#   warranty.
+
+#serial 10
+
+## ----------##
+## Defaults. ##
+## ----------##
+
+DX_ENV=""
+AC_DEFUN([DX_FEATURE_doc],  ON)
+AC_DEFUN([DX_FEATURE_dot],  ON)
+AC_DEFUN([DX_FEATURE_man],  OFF)
+AC_DEFUN([DX_FEATURE_html], ON)
+AC_DEFUN([DX_FEATURE_chm],  OFF)
+AC_DEFUN([DX_FEATURE_chi],  OFF)
+AC_DEFUN([DX_FEATURE_rtf],  OFF)
+AC_DEFUN([DX_FEATURE_xml],  OFF)
+AC_DEFUN([DX_FEATURE_pdf],  ON)
+AC_DEFUN([DX_FEATURE_ps],   ON)
+
+## --------------- ##
+## Private macros. ##
+## --------------- ##
+
+# DX_ENV_APPEND(VARIABLE, VALUE)
+# ------------------------------
+# Append VARIABLE="VALUE" to DX_ENV for invoking doxygen.
+AC_DEFUN([DX_ENV_APPEND], [AC_SUBST([DX_ENV], ["$DX_ENV $1='$2'"])])
+
+# DX_DIRNAME_EXPR
+# ---------------
+# Expand into a shell expression prints the directory part of a path.
+AC_DEFUN([DX_DIRNAME_EXPR],
+         [[expr ".$1" : '\(\.\)[^/]*$' \| "x$1" : 'x\(.*\)/[^/]*$']])
+
+# DX_IF_FEATURE(FEATURE, IF-ON, IF-OFF)
+# -------------------------------------
+# Expands according to the M4 (static) status of the feature.
+AC_DEFUN([DX_IF_FEATURE], [ifelse(DX_FEATURE_$1, ON, [$2], [$3])])
+
+# DX_REQUIRE_PROG(VARIABLE, PROGRAM)
+# ----------------------------------
+# Require the specified program to be found for the DX_CURRENT_FEATURE to work.
+AC_DEFUN([DX_REQUIRE_PROG], [
+AC_PATH_TOOL([$1], [$2])
+if test "$DX_FLAG_[]DX_CURRENT_FEATURE$$1" = 1; then
+    AC_MSG_WARN([$2 not found - will not DX_CURRENT_DESCRIPTION])
+    AC_SUBST(DX_FLAG_[]DX_CURRENT_FEATURE, 0)
+fi
+])
+
+# DX_TEST_FEATURE(FEATURE)
+# ------------------------
+# Expand to a shell expression testing whether the feature is active.
+AC_DEFUN([DX_TEST_FEATURE], [test "$DX_FLAG_$1" = 1])
+
+# DX_CHECK_DEPEND(REQUIRED_FEATURE, REQUIRED_STATE)
+# -------------------------------------------------
+# Verify that a required features has the right state before trying to turn on
+# the DX_CURRENT_FEATURE.
+AC_DEFUN([DX_CHECK_DEPEND], [
+test "$DX_FLAG_$1" = "$2" \
+|| AC_MSG_ERROR([doxygen-DX_CURRENT_FEATURE ifelse([$2], 1,
+                            requires, contradicts) doxygen-DX_CURRENT_FEATURE])
+])
+
+# DX_CLEAR_DEPEND(FEATURE, REQUIRED_FEATURE, REQUIRED_STATE)
+# ----------------------------------------------------------
+# Turn off the DX_CURRENT_FEATURE if the required feature is off.
+AC_DEFUN([DX_CLEAR_DEPEND], [
+test "$DX_FLAG_$1" = "$2" || AC_SUBST(DX_FLAG_[]DX_CURRENT_FEATURE, 0)
+])
+
+# DX_FEATURE_ARG(FEATURE, DESCRIPTION,
+#                CHECK_DEPEND, CLEAR_DEPEND,
+#                REQUIRE, DO-IF-ON, DO-IF-OFF)
+# --------------------------------------------
+# Parse the command-line option controlling a feature. CHECK_DEPEND is called
+# if the user explicitly turns the feature on (and invokes DX_CHECK_DEPEND),
+# otherwise CLEAR_DEPEND is called to turn off the default state if a required
+# feature is disabled (using DX_CLEAR_DEPEND). REQUIRE performs additional
+# requirement tests (DX_REQUIRE_PROG). Finally, an automake flag is set and
+# DO-IF-ON or DO-IF-OFF are called according to the final state of the feature.
+AC_DEFUN([DX_ARG_ABLE], [
+    AC_DEFUN([DX_CURRENT_FEATURE], [$1])
+    AC_DEFUN([DX_CURRENT_DESCRIPTION], [$2])
+    AC_ARG_ENABLE(doxygen-$1,
+                  [AS_HELP_STRING(DX_IF_FEATURE([$1], [--disable-doxygen-$1],
+                                                      [--enable-doxygen-$1]),
+                                  DX_IF_FEATURE([$1], [don't $2], [$2]))],
+                  [
+case "$enableval" in
+#(
+y|Y|yes|Yes|YES)
+    AC_SUBST([DX_FLAG_$1], 1)
+    $3
+;; #(
+n|N|no|No|NO)
+    AC_SUBST([DX_FLAG_$1], 0)
+;; #(
+*)
+    AC_MSG_ERROR([invalid value '$enableval' given to doxygen-$1])
+;;
+esac
+], [
+AC_SUBST([DX_FLAG_$1], [DX_IF_FEATURE([$1], 1, 0)])
+$4
+])
+if DX_TEST_FEATURE([$1]); then
+    $5
+    :
+fi
+if DX_TEST_FEATURE([$1]); then
+    AM_CONDITIONAL(DX_COND_$1, :)
+    $6
+    :
+else
+    AM_CONDITIONAL(DX_COND_$1, false)
+    $7
+    :
+fi
+])
+
+## -------------- ##
+## Public macros. ##
+## -------------- ##
+
+# DX_XXX_FEATURE(DEFAULT_STATE)
+# -----------------------------
+AC_DEFUN([DX_DOXYGEN_FEATURE], [AC_DEFUN([DX_FEATURE_doc],  [$1])])
+AC_DEFUN([DX_MAN_FEATURE],     [AC_DEFUN([DX_FEATURE_man],  [$1])])
+AC_DEFUN([DX_HTML_FEATURE],    [AC_DEFUN([DX_FEATURE_html], [$1])])
+AC_DEFUN([DX_CHM_FEATURE],     [AC_DEFUN([DX_FEATURE_chm],  [$1])])
+AC_DEFUN([DX_CHI_FEATURE],     [AC_DEFUN([DX_FEATURE_chi],  [$1])])
+AC_DEFUN([DX_RTF_FEATURE],     [AC_DEFUN([DX_FEATURE_rtf],  [$1])])
+AC_DEFUN([DX_XML_FEATURE],     [AC_DEFUN([DX_FEATURE_xml],  [$1])])
+AC_DEFUN([DX_PDF_FEATURE],     [AC_DEFUN([DX_FEATURE_pdf],  [$1])])
+AC_DEFUN([DX_PS_FEATURE],      [AC_DEFUN([DX_FEATURE_ps],   [$1])])
+
+# DX_INIT_DOXYGEN(PROJECT, [CONFIG-FILE], [OUTPUT-DOC-DIR])
+# ---------------------------------------------------------
+# PROJECT also serves as the base name for the documentation files.
+# The default CONFIG-FILE is "Doxyfile" and OUTPUT-DOC-DIR is "doxygen-doc".
+AC_DEFUN([DX_INIT_DOXYGEN], [
+
+# Files:
+AC_SUBST([DX_PROJECT], [$1])
+AC_SUBST([DX_CONFIG], [ifelse([$2], [], Doxyfile, [$2])])
+AC_SUBST([DX_DOCDIR], [ifelse([$3], [], doxygen-doc, [$3])])
+
+# Environment variables used inside doxygen.cfg:
+DX_ENV_APPEND(SRCDIR, $srcdir)
+DX_ENV_APPEND(PROJECT, $DX_PROJECT)
+DX_ENV_APPEND(DOCDIR, $DX_DOCDIR)
+DX_ENV_APPEND(VERSION, $PACKAGE_VERSION)
+
+# Doxygen itself:
+DX_ARG_ABLE(doc, [generate any doxygen documentation],
+            [],
+            [],
+            [DX_REQUIRE_PROG([DX_DOXYGEN], doxygen)
+             DX_REQUIRE_PROG([DX_PERL], perl)],
+            [DX_ENV_APPEND(PERL_PATH, $DX_PERL)])
+
+# Dot for graphics:
+DX_ARG_ABLE(dot, [generate graphics for doxygen documentation],
+            [DX_CHECK_DEPEND(doc, 1)],
+            [DX_CLEAR_DEPEND(doc, 1)],
+            [DX_REQUIRE_PROG([DX_DOT], dot)],
+            [DX_ENV_APPEND(HAVE_DOT, YES)
+             DX_ENV_APPEND(DOT_PATH, [`DX_DIRNAME_EXPR($DX_DOT)`])],
+            [DX_ENV_APPEND(HAVE_DOT, NO)])
+
+# Man pages generation:
+DX_ARG_ABLE(man, [generate doxygen manual pages],
+            [DX_CHECK_DEPEND(doc, 1)],
+            [DX_CLEAR_DEPEND(doc, 1)],
+            [],
+            [DX_ENV_APPEND(GENERATE_MAN, YES)],
+            [DX_ENV_APPEND(GENERATE_MAN, NO)])
+
+# RTF file generation:
+DX_ARG_ABLE(rtf, [generate doxygen RTF documentation],
+            [DX_CHECK_DEPEND(doc, 1)],
+            [DX_CLEAR_DEPEND(doc, 1)],
+            [],
+            [DX_ENV_APPEND(GENERATE_RTF, YES)],
+            [DX_ENV_APPEND(GENERATE_RTF, NO)])
+
+# XML file generation:
+DX_ARG_ABLE(xml, [generate doxygen XML documentation],
+            [DX_CHECK_DEPEND(doc, 1)],
+            [DX_CLEAR_DEPEND(doc, 1)],
+            [],
+            [DX_ENV_APPEND(GENERATE_XML, YES)],
+            [DX_ENV_APPEND(GENERATE_XML, NO)])
+
+# (Compressed) HTML help generation:
+DX_ARG_ABLE(chm, [generate doxygen compressed HTML help documentation],
+            [DX_CHECK_DEPEND(doc, 1)],
+            [DX_CLEAR_DEPEND(doc, 1)],
+            [DX_REQUIRE_PROG([DX_HHC], hhc)],
+            [DX_ENV_APPEND(HHC_PATH, $DX_HHC)
+             DX_ENV_APPEND(GENERATE_HTML, YES)
+             DX_ENV_APPEND(GENERATE_HTMLHELP, YES)],
+            [DX_ENV_APPEND(GENERATE_HTMLHELP, NO)])
+
+# Seperate CHI file generation.
+DX_ARG_ABLE(chi, [generate doxygen seperate compressed HTML help index file],
+            [DX_CHECK_DEPEND(chm, 1)],
+            [DX_CLEAR_DEPEND(chm, 1)],
+            [],
+            [DX_ENV_APPEND(GENERATE_CHI, YES)],
+            [DX_ENV_APPEND(GENERATE_CHI, NO)])
+
+# Plain HTML pages generation:
+DX_ARG_ABLE(html, [generate doxygen plain HTML documentation],
+            [DX_CHECK_DEPEND(doc, 1) DX_CHECK_DEPEND(chm, 0)],
+            [DX_CLEAR_DEPEND(doc, 1) DX_CLEAR_DEPEND(chm, 0)],
+            [],
+            [DX_ENV_APPEND(GENERATE_HTML, YES)],
+            [DX_TEST_FEATURE(chm) || DX_ENV_APPEND(GENERATE_HTML, NO)])
+
+# PostScript file generation:
+DX_ARG_ABLE(ps, [generate doxygen PostScript documentation],
+            [DX_CHECK_DEPEND(doc, 1)],
+            [DX_CLEAR_DEPEND(doc, 1)],
+            [DX_REQUIRE_PROG([DX_LATEX], latex)
+             DX_REQUIRE_PROG([DX_MAKEINDEX], makeindex)
+             DX_REQUIRE_PROG([DX_DVIPS], dvips)
+             DX_REQUIRE_PROG([DX_EGREP], egrep)])
+
+# PDF file generation:
+DX_ARG_ABLE(pdf, [generate doxygen PDF documentation],
+            [DX_CHECK_DEPEND(doc, 1)],
+            [DX_CLEAR_DEPEND(doc, 1)],
+            [DX_REQUIRE_PROG([DX_PDFLATEX], pdflatex)
+             DX_REQUIRE_PROG([DX_MAKEINDEX], makeindex)
+             DX_REQUIRE_PROG([DX_EGREP], egrep)])
+
+# LaTeX generation for PS and/or PDF:
+if DX_TEST_FEATURE(ps) || DX_TEST_FEATURE(pdf); then
+    AM_CONDITIONAL(DX_COND_latex, :)
+    DX_ENV_APPEND(GENERATE_LATEX, YES)
+else
+    AM_CONDITIONAL(DX_COND_latex, false)
+    DX_ENV_APPEND(GENERATE_LATEX, NO)
+fi
+
+# Paper size for PS and/or PDF:
+AC_ARG_VAR(DOXYGEN_PAPER_SIZE,
+           [a4wide (default), a4, letter, legal or executive])
+case "$DOXYGEN_PAPER_SIZE" in
+#(
+"")
+    AC_SUBST(DOXYGEN_PAPER_SIZE, "")
+;; #(
+a4wide|a4|letter|legal|executive)
+    DX_ENV_APPEND(PAPER_SIZE, $DOXYGEN_PAPER_SIZE)
+;; #(
+*)
+    AC_MSG_ERROR([unknown DOXYGEN_PAPER_SIZE='$DOXYGEN_PAPER_SIZE'])
+;;
+esac
+
+#For debugging:
+#echo DX_FLAG_doc=$DX_FLAG_doc
+#echo DX_FLAG_dot=$DX_FLAG_dot
+#echo DX_FLAG_man=$DX_FLAG_man
+#echo DX_FLAG_html=$DX_FLAG_html
+#echo DX_FLAG_chm=$DX_FLAG_chm
+#echo DX_FLAG_chi=$DX_FLAG_chi
+#echo DX_FLAG_rtf=$DX_FLAG_rtf
+#echo DX_FLAG_xml=$DX_FLAG_xml
+#echo DX_FLAG_pdf=$DX_FLAG_pdf
+#echo DX_FLAG_ps=$DX_FLAG_ps
+#echo DX_ENV=$DX_ENV
+])

+ 6 - 0
src/contrib/hedwig/client/src/main/cpp/test/Makefile.am

@@ -0,0 +1,6 @@
+bin_PROGRAMS = hedwigtest
+hedwigtest_SOURCES = main.cpp utiltest.cpp pubsubdatatest.cpp publishtest.cpp subscribetest.cpp servercontrol.cpp pubsubtest.cpp
+hedwigtest_CPPFLAGS = -I../inc $(DEPS_CFLAGS)
+hedwigtest_LDADD = $(DEPS_LIBS) -L../lib -lhedwig01
+hedwigtest_LDFLAGS = -no-undefined
+

+ 64 - 0
src/contrib/hedwig/client/src/main/cpp/test/main.cpp

@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "../lib/channel.h"
+#include "../lib/util.h"
+#include <hedwig/protocol.h>
+#include <hedwig/callback.h>
+#include <iostream>
+#include <log4cpp/PropertyConfigurator.hh>
+#include <log4cpp/Category.hh>
+#include "servercontrol.h"
+
+#include <cppunit/extensions/TestFactoryRegistry.h>
+#include <cppunit/ui/text/TextTestRunner.h>
+
+/**
+ * Test driver. argv[1] optionally names a CppUnit registry to run
+ * (otherwise all suites are run); argv[2] optionally narrows execution to
+ * a single test path within it.
+ */
+int main( int argc, char **argv)
+{
+  // Configure logging first; a configuration failure is non-fatal and the
+  // tests proceed with log4cpp defaults.
+  try {
+    log4cpp::PropertyConfigurator::configure("../log4cpp.conf");
+  } catch (log4cpp::ConfigureFailure &e) {
+    std::cerr << "log4cpp configuration failure while loading : " << e.what() << std::endl;
+  } catch (std::exception &e) {
+    std::cerr << "exception caught while configuring log4cpp via : " << e.what() << std::endl;
+  } catch (...) {
+    // Fixed previously garbled message ("... log4cpp vi'.").
+    std::cerr << "unknown exception while configuring log4cpp." << std::endl;
+  }
+  std::string testPath = (argc > 2) ? std::string(argv[2]) : "";
+
+  CppUnit::TextTestRunner runner;
+
+  if (argc > 1) {
+    CppUnit::TestFactoryRegistry &registry = CppUnit::TestFactoryRegistry::getRegistry(argv[1]);
+    
+    runner.addTest( registry.makeTest() );
+  } else {
+    CppUnit::TestFactoryRegistry &registry = CppUnit::TestFactoryRegistry::getRegistry("*");
+    registry.addRegistry("Util");
+    registry.addRegistry("Subscribe");
+    registry.addRegistry("Publish"); 
+    registry.addRegistry("PubSub");
+    
+    runner.addTest( registry.makeTest() );
+  }
+  int ret =  runner.run(testPath);
+  // Release protobuf's global allocations so leak checkers stay quiet.
+  google::protobuf::ShutdownProtobufLibrary();
+  
+  log4cpp::Category::shutdown();
+  
+  return ret;
+}

+ 268 - 0
src/contrib/hedwig/client/src/main/cpp/test/publishtest.cpp

@@ -0,0 +1,268 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <cppunit/Test.h>
+#include <cppunit/TestSuite.h>
+#include <cppunit/extensions/HelperMacros.h>
+
+#include "../lib/clientimpl.h"
+#include <hedwig/exceptions.h>
+#include <hedwig/callback.h>
+#include <stdexcept>
+#include <pthread.h>
+
+#include <log4cpp/Category.hh>
+
+#include "servercontrol.h"
+#include "util.h"
+
+static log4cpp::Category &LOG = log4cpp::Category::getInstance("hedwigtest."__FILE__);
+
+using namespace CppUnit;
+
+class PublishTestSuite : public CppUnit::TestFixture {
+private:
+  HedwigTest::ServerControl* control;
+  HedwigTest::TestServerPtr zk;
+  HedwigTest::TestServerPtr bk1;
+  HedwigTest::TestServerPtr bk2;
+  HedwigTest::TestServerPtr bk3;
+  HedwigTest::TestServerPtr hw1;
+  HedwigTest::TestServerPtr hw2;
+
+  CPPUNIT_TEST_SUITE( PublishTestSuite );
+  CPPUNIT_TEST(testSyncPublish);
+  CPPUNIT_TEST(testAsyncPublish);
+  CPPUNIT_TEST(testMultipleAsyncPublish);
+  //  CPPUNIT_TEST(simplePublish);
+  //CPPUNIT_TEST(simplePublishAndSubscribe);
+  //CPPUNIT_TEST(publishAndSubscribeWithRedirect);
+  CPPUNIT_TEST_SUITE_END();
+
+public:
+  PublishTestSuite() {
+
+  }
+
+  ~PublishTestSuite() {
+
+  }
+
+  void setUp()
+  {
+    control = new HedwigTest::ServerControl(HedwigTest::DEFAULT_CONTROLSERVER_PORT);
+    zk = control->startZookeeperServer(12345);
+    bk1 = control->startBookieServer(12346, zk);
+    bk2 = control->startBookieServer(12347, zk);
+    bk3 = control->startBookieServer(12348, zk);
+    
+    std::string region("testRegion");
+    hw1 = control->startPubSubServer(12349, region, zk);
+    hw2 = control->startPubSubServer(12350, region, zk);
+  }
+  
+  void tearDown() 
+  {
+    hw2->kill();
+    hw1->kill();
+    
+    bk1->kill();
+    bk2->kill();
+    bk3->kill();
+    
+    zk->kill();
+    delete control;
+  }
+
+  void testSyncPublish() {
+    Hedwig::Configuration* conf = new TestServerConfiguration(hw1);
+    
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    Hedwig::Publisher& pub = client->getPublisher();
+    
+    pub.publish("testTopic", "testMessage 1");
+    
+    delete client;
+    delete conf;
+  }
+
+  void testAsyncPublish() {
+    SimpleWaitCondition* cond = new SimpleWaitCondition();
+
+    Hedwig::Configuration* conf = new TestServerConfiguration(hw1);
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    Hedwig::Publisher& pub = client->getPublisher();
+    
+    Hedwig::OperationCallbackPtr testcb(new TestCallback(cond));
+    pub.asyncPublish("testTopic", "async test message", testcb);
+    
+    cond->wait();
+    delete cond;
+    delete client;
+    delete conf;
+  }
+
+  void testMultipleAsyncPublish() {
+    SimpleWaitCondition* cond1 = new SimpleWaitCondition();
+    SimpleWaitCondition* cond2 = new SimpleWaitCondition();
+    SimpleWaitCondition* cond3 = new SimpleWaitCondition();
+
+    Hedwig::Configuration* conf = new TestServerConfiguration(hw1);
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    Hedwig::Publisher& pub = client->getPublisher();
+   
+    Hedwig::OperationCallbackPtr testcb1(new TestCallback(cond1));
+    Hedwig::OperationCallbackPtr testcb2(new TestCallback(cond2));
+    Hedwig::OperationCallbackPtr testcb3(new TestCallback(cond3));
+
+    pub.asyncPublish("testTopic", "async test message #1", testcb1);
+    pub.asyncPublish("testTopic", "async test message #2", testcb2);
+    pub.asyncPublish("testTopic", "async test message #3", testcb3);
+    
+    cond3->wait();
+    cond2->wait();
+    cond1->wait();
+
+    delete cond3; delete cond2; delete cond1;
+    delete client;
+    delete conf;
+  }
+  /*  void simplePublish() {
+    LOG.debugStream() << ">>> simplePublish";
+    SimpleWaitCondition* cond = new SimpleWaitCondition();
+
+    Hedwig::Configuration* conf = new Configuration1();
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    Hedwig::Publisher& pub = client->getPublisher();
+    
+    Hedwig::OperationCallbackPtr testcb(new TestCallback(cond));
+    pub.asyncPublish("foobar", "barfoo", testcb);
+    
+    LOG.debugStream() << "wait for response";
+    cond->wait();
+    delete cond;
+    LOG.debugStream() << "got response";
+    
+
+    delete client;
+    delete conf;
+    LOG.debugStream() << "<<< simplePublish";
+  }
+
+  class MyMessageHandler : public Hedwig::MessageHandlerCallback {
+  public:
+    MyMessageHandler(SimpleWaitCondition* cond) : cond(cond) {}
+
+    void consume(const std::string& topic, const std::string& subscriberId, const Hedwig::Message& msg, Hedwig::OperationCallbackPtr& callback) {
+      LOG.debugStream() << "Topic: " << topic << "  subscriberId: " << subscriberId;
+      LOG.debugStream() << " Message: " << msg.body();
+      
+      callback->operationComplete();
+      cond->setTrue();
+      cond->signal();
+    }
+  private:
+    SimpleWaitCondition* cond;
+    };*/
+  /*
+  void simplePublishAndSubscribe() {
+    SimpleWaitCondition* cond1 = new SimpleWaitCondition();
+    SimpleWaitCondition* cond2 = new SimpleWaitCondition();
+    SimpleWaitCondition* cond3 = new SimpleWaitCondition();
+
+    Hedwig::Configuration* conf = new Configuration1();
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    Hedwig::Publisher& pub = client->getPublisher();
+    Hedwig::Subscriber& sub = client->getSubscriber();
+    
+    std::string topic("foobar");
+    std::string sid("mysubscriber");
+    Hedwig::OperationCallbackPtr testcb1(new TestCallback(cond1));
+    sub.asyncSubscribe(topic, sid, Hedwig::SubscribeRequest::CREATE_OR_ATTACH, testcb1);
+    Hedwig::MessageHandlerCallbackPtr messagecb(new MyMessageHandler(cond2));
+    sub.startDelivery(topic, sid, messagecb);
+    cond1->wait();
+    
+    Hedwig::OperationCallbackPtr testcb2(new TestCallback(cond3));
+    pub.asyncPublish("foobar", "barfoo", testcb2);
+    cond3->wait();
+    cond2->wait();
+
+    delete cond1;
+    delete cond3;
+    delete cond2;
+
+    delete client;
+    delete conf;
+  }
+
+  void publishAndSubscribeWithRedirect() {
+    SimpleWaitCondition* cond1 = new SimpleWaitCondition();
+    SimpleWaitCondition* cond2 = new SimpleWaitCondition();
+    SimpleWaitCondition* cond3 = new SimpleWaitCondition();
+    SimpleWaitCondition* cond4 = new SimpleWaitCondition();
+
+    Hedwig::Configuration* publishconf = new Configuration1();
+    Hedwig::Configuration* subscribeconf = new Configuration2();
+
+    Hedwig::Client* publishclient = new Hedwig::Client(*publishconf);
+    Hedwig::Publisher& pub = publishclient->getPublisher();
+
+    Hedwig::Client* subscribeclient = new Hedwig::Client(*subscribeconf);
+    Hedwig::Subscriber& sub = subscribeclient->getSubscriber();
+    
+    LOG.debugStream() << "publishing";
+    Hedwig::OperationCallbackPtr testcb2(new TestCallback(cond3));
+    pub.asyncPublish("foobar", "barfoo", testcb2);
+    cond3->wait();
+    
+    LOG.debugStream() << "Subscribing";
+    std::string topic("foobar");
+    std::string sid("mysubscriber");
+    Hedwig::OperationCallbackPtr testcb1(new TestCallback(cond1));
+    sub.asyncSubscribe(topic, sid, Hedwig::SubscribeRequest::CREATE_OR_ATTACH, testcb1);
+    LOG.debugStream() << "Starting delivery";
+    Hedwig::MessageHandlerCallbackPtr messagecb(new MyMessageHandler(cond2));
+    sub.startDelivery(topic, sid, messagecb);
+
+    LOG.debugStream() << "Subscribe wait";
+    cond1->wait();
+
+    Hedwig::OperationCallbackPtr testcb3(new TestCallback(cond4));
+    pub.asyncPublish("foobar", "barfoo", testcb3);
+    cond4->wait();
+
+
+    LOG.debugStream() << "Delivery wait";
+
+    cond2->wait();
+
+    sub.stopDelivery(topic, sid);
+
+    delete cond1;
+    delete cond3;
+    delete cond2;
+    delete cond4;
+
+    delete subscribeclient;
+    delete publishclient;
+    delete publishconf;
+    delete subscribeconf;
+    }*/
+};
+
+CPPUNIT_TEST_SUITE_NAMED_REGISTRATION( PublishTestSuite, "Publish");

+ 47 - 0
src/contrib/hedwig/client/src/main/cpp/test/pubsubdatatest.cpp

@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <cppunit/Test.h>
+#include <cppunit/TestSuite.h>
+#include <cppunit/extensions/HelperMacros.h>
+
+#include "../lib/clientimpl.h"
+#include <hedwig/exceptions.h>
+#include <stdexcept>
+
+using namespace CppUnit;
+
+class PubSubDataTestSuite : public CppUnit::TestFixture {
+  CPPUNIT_TEST_SUITE( PubSubDataTestSuite );
+  CPPUNIT_TEST(createPubSubData);
+  CPPUNIT_TEST_SUITE_END();
+
+public:
+  void setUp()
+  {
+  }
+
+  void tearDown() 
+  {
+  }
+
+  void createPubSubData() {
+    
+  }
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION( PubSubDataTestSuite );

+ 322 - 0
src/contrib/hedwig/client/src/main/cpp/test/pubsubtest.cpp

@@ -0,0 +1,322 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <cppunit/Test.h>
+#include <cppunit/TestSuite.h>
+#include <cppunit/extensions/HelperMacros.h>
+
+#include "../lib/clientimpl.h"
+#include <hedwig/exceptions.h>
+#include <hedwig/callback.h>
+#include <stdexcept>
+#include <pthread.h>
+
+#include <log4cpp/Category.hh>
+
+#include "servercontrol.h"
+#include "util.h"
+
+static log4cpp::Category &LOG = log4cpp::Category::getInstance("hedwigtest."__FILE__);
+
+class PubSubTestSuite : public CppUnit::TestFixture {
+private:
+  HedwigTest::ServerControl* control;
+  HedwigTest::TestServerPtr zk;
+  HedwigTest::TestServerPtr bk1;
+  HedwigTest::TestServerPtr bk2;
+  HedwigTest::TestServerPtr bk3;
+  HedwigTest::TestServerPtr hw1;
+
+			       
+  CPPUNIT_TEST_SUITE( PubSubTestSuite );
+  CPPUNIT_TEST(testPubSubContinuousOverClose);
+  //  CPPUNIT_TEST(testPubSubContinuousOverServerDown);
+  CPPUNIT_TEST(testMultiTopic);
+  CPPUNIT_TEST(testMultiTopicMultiSubscriber);
+  CPPUNIT_TEST_SUITE_END();
+
+public:
+  PubSubTestSuite() {
+    
+  }
+
+  ~PubSubTestSuite() {
+  }
+
+  void setUp()
+  {
+    control = new HedwigTest::ServerControl(HedwigTest::DEFAULT_CONTROLSERVER_PORT);
+    zk = control->startZookeeperServer(12345);
+    bk1 = control->startBookieServer(12346, zk);
+    bk2 = control->startBookieServer(12347, zk);
+    bk3 = control->startBookieServer(12348, zk);
+    
+    std::string region("testRegion");
+    hw1 = control->startPubSubServer(12349, region, zk);
+  }
+  
+  void tearDown() 
+  {
+    try {
+      hw1->kill();
+    
+      bk1->kill();
+      bk2->kill();
+      bk3->kill();
+      
+      zk->kill();
+    } catch (std::exception& e) {
+      // don't allow an exception to break everything, we're going deleting the control no matter what
+    }
+    delete control;
+  }
+
+  class MyMessageHandlerCallback : public Hedwig::MessageHandlerCallback {
+  public:
+    MyMessageHandlerCallback(const std::string& topic, const std::string& subscriberId) : messagesReceived(0), topic(topic), subscriberId(subscriberId) {
+      
+    }
+
+    virtual void consume(const std::string& topic, const std::string& subscriberId, const Hedwig::Message& msg, Hedwig::OperationCallbackPtr& callback) {
+      if (topic == this->topic && subscriberId == this->subscriberId) {
+	mutex.lock();
+	messagesReceived++;
+	lastMessage = msg.body();
+	callback->operationComplete();
+	mutex.unlock();
+      }
+    }
+    
+    std::string getLastMessage() {
+      mutex.lock();
+      std::string s = lastMessage;
+      mutex.unlock();
+      return s;
+    }
+
+    int numMessagesReceived() {
+      mutex.lock();
+      int i = messagesReceived;
+      mutex.unlock();
+      return i;
+    }    
+    
+  protected:
+    Hedwig::Mutex mutex;
+    int messagesReceived;
+    std::string lastMessage;
+    std::string topic;
+    std::string subscriberId;
+  };
+ 
+  void testPubSubContinuousOverClose() {
+    std::string topic = "pubSubTopic";
+    std::string sid = "MySubscriberid-1";
+
+    Hedwig::Configuration* conf = new TestServerConfiguration(hw1);
+    std::auto_ptr<Hedwig::Configuration> confptr(conf);
+    
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    std::auto_ptr<Hedwig::Client> clientptr(client);
+
+    Hedwig::Subscriber& sub = client->getSubscriber();
+    Hedwig::Publisher& pub = client->getPublisher();
+
+    sub.subscribe(topic, sid, Hedwig::SubscribeRequest::CREATE_OR_ATTACH);
+    MyMessageHandlerCallback* cb = new MyMessageHandlerCallback(topic, sid);
+    Hedwig::MessageHandlerCallbackPtr handler(cb);
+
+    sub.startDelivery(topic, sid, handler);
+    pub.publish(topic, "Test Message 1");
+    bool pass = false;
+    for (int i = 0; i < 10; i++) {
+      sleep(3);
+      if (cb->numMessagesReceived() > 0) {
+	if (cb->getLastMessage() == "Test Message 1") {
+	  pass = true;
+	  break;
+	}
+      }
+    }
+    CPPUNIT_ASSERT(pass);
+    sub.closeSubscription(topic, sid);
+
+    pub.publish(topic, "Test Message 2");
+    
+    sub.subscribe(topic, sid, Hedwig::SubscribeRequest::CREATE_OR_ATTACH);
+    sub.startDelivery(topic, sid, handler);
+    pass = false;
+    for (int i = 0; i < 10; i++) {
+      sleep(3);
+      if (cb->numMessagesReceived() > 0) {
+	if (cb->getLastMessage() == "Test Message 2") {
+	  pass = true;
+	  break;
+	}
+      }
+    }
+    CPPUNIT_ASSERT(pass);
+  }
+
+  /*  void testPubSubContinuousOverServerDown() {
+    std::string topic = "pubSubTopic";
+    std::string sid = "MySubscriberid-1";
+
+    Hedwig::Configuration* conf = new TestServerConfiguration(hw1);
+    std::auto_ptr<Hedwig::Configuration> confptr(conf);
+    
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    std::auto_ptr<Hedwig::Client> clientptr(client);
+
+    Hedwig::Subscriber& sub = client->getSubscriber();
+    Hedwig::Publisher& pub = client->getPublisher();
+
+    sub.subscribe(topic, sid, Hedwig::SubscribeRequest::CREATE_OR_ATTACH);
+    MyMessageHandlerCallback* cb = new MyMessageHandlerCallback(topic, sid);
+    Hedwig::MessageHandlerCallbackPtr handler(cb);
+
+    sub.startDelivery(topic, sid, handler);
+    pub.publish(topic, "Test Message 1");
+    bool pass = false;
+    for (int i = 0; i < 10; i++) {
+      sleep(3);
+      if (cb->numMessagesReceived() > 0) {
+	if (cb->getLastMessage() == "Test Message 1") {
+	  pass = true;
+	  break;
+	}
+      }
+    }
+    CPPUNIT_ASSERT(pass);
+    sub.closeSubscription(topic, sid);
+
+    pub.publish(topic, "Test Message 2");
+    
+    sub.subscribe(topic, sid, Hedwig::SubscribeRequest::CREATE_OR_ATTACH);
+    sub.startDelivery(topic, sid, handler);
+    pass = false;
+    for (int i = 0; i < 10; i++) {
+      sleep(3);
+      if (cb->numMessagesReceived() > 0) {
+	if (cb->getLastMessage() == "Test Message 2") {
+	  pass = true;
+	  break;
+	}
+      }
+    }
+    CPPUNIT_ASSERT(pass);
+    }*/
+
+  void testMultiTopic() {
+    std::string topicA = "pubSubTopicA";
+    std::string topicB = "pubSubTopicB";
+    std::string sid = "MySubscriberid-3";
+
+    Hedwig::Configuration* conf = new TestServerConfiguration(hw1);
+    std::auto_ptr<Hedwig::Configuration> confptr(conf);
+    
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    std::auto_ptr<Hedwig::Client> clientptr(client);
+
+    Hedwig::Subscriber& sub = client->getSubscriber();
+    Hedwig::Publisher& pub = client->getPublisher();
+
+    sub.subscribe(topicA, sid, Hedwig::SubscribeRequest::CREATE_OR_ATTACH);
+    sub.subscribe(topicB, sid, Hedwig::SubscribeRequest::CREATE_OR_ATTACH);
+   
+    MyMessageHandlerCallback* cbA = new MyMessageHandlerCallback(topicA, sid);
+    Hedwig::MessageHandlerCallbackPtr handlerA(cbA);
+    sub.startDelivery(topicA, sid, handlerA);
+
+    MyMessageHandlerCallback* cbB = new MyMessageHandlerCallback(topicB, sid);
+    Hedwig::MessageHandlerCallbackPtr handlerB(cbB);
+    sub.startDelivery(topicB, sid, handlerB);
+
+    pub.publish(topicA, "Test Message A");
+    pub.publish(topicB, "Test Message B");
+    int passA = false, passB = false;
+    
+    for (int i = 0; i < 10; i++) {
+      sleep(3);
+      if (cbA->numMessagesReceived() > 0) {
+	if (cbA->getLastMessage() == "Test Message A") {
+	  passA = true;
+	}
+      }
+      if (cbB->numMessagesReceived() > 0) {
+	if (cbB->getLastMessage() == "Test Message B") {
+	  passB = true;
+	}
+      }
+      if (passA && passB) {
+	break;
+      }
+    }
+    CPPUNIT_ASSERT(passA && passB);
+  }
+
+  void testMultiTopicMultiSubscriber() {
+    std::string topicA = "pubSubTopicA";
+    std::string topicB = "pubSubTopicB";
+    std::string sidA = "MySubscriberid-4";
+    std::string sidB = "MySubscriberid-5";
+
+    Hedwig::Configuration* conf = new TestServerConfiguration(hw1);
+    std::auto_ptr<Hedwig::Configuration> confptr(conf);
+    
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    std::auto_ptr<Hedwig::Client> clientptr(client);
+
+    Hedwig::Subscriber& sub = client->getSubscriber();
+    Hedwig::Publisher& pub = client->getPublisher();
+
+    sub.subscribe(topicA, sidA, Hedwig::SubscribeRequest::CREATE_OR_ATTACH);
+    sub.subscribe(topicB, sidB, Hedwig::SubscribeRequest::CREATE_OR_ATTACH);
+   
+    MyMessageHandlerCallback* cbA = new MyMessageHandlerCallback(topicA, sidA);
+    Hedwig::MessageHandlerCallbackPtr handlerA(cbA);
+    sub.startDelivery(topicA, sidA, handlerA);
+
+    MyMessageHandlerCallback* cbB = new MyMessageHandlerCallback(topicB, sidB);
+    Hedwig::MessageHandlerCallbackPtr handlerB(cbB);
+    sub.startDelivery(topicB, sidB, handlerB);
+
+    pub.publish(topicA, "Test Message A");
+    pub.publish(topicB, "Test Message B");
+    int passA = false, passB = false;
+    
+    for (int i = 0; i < 10; i++) {
+      sleep(3);
+      if (cbA->numMessagesReceived() > 0) {
+	if (cbA->getLastMessage() == "Test Message A") {
+	  passA = true;
+	}
+      }
+      if (cbB->numMessagesReceived() > 0) {
+	if (cbB->getLastMessage() == "Test Message B") {
+	  passB = true;
+	}
+      }
+      if (passA && passB) {
+	break;
+      }
+    }
+    CPPUNIT_ASSERT(passA && passB);
+  }
+};
+
+CPPUNIT_TEST_SUITE_NAMED_REGISTRATION( PubSubTestSuite, "PubSub" );

+ 175 - 0
src/contrib/hedwig/client/src/main/cpp/test/servercontrol.cpp

@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <netinet/tcp.h>
+
+#include <string>
+#include <string.h>
+#include <stdlib.h>
+#include "servercontrol.h"
+
+#include <log4cpp/Category.hh>
+#include <sstream>   
+
+static log4cpp::Category &LOG = log4cpp::Category::getInstance("hedwigtest."__FILE__);
+
+using namespace HedwigTest;
+
+const int MAX_COMMAND_LN = 256;
+
+class TestServerImpl : public TestServer {
+public:
+  TestServerImpl(std::string& address, ServerControl& sc);
+  ~TestServerImpl();
+  void kill();
+  std::string& getAddress();
+
+private:
+  std::string address;
+  ServerControl& sc;
+};
+
+TestServerImpl::TestServerImpl(std::string& address, ServerControl& sc) : address(address), sc(sc)  {
+}
+
+TestServerImpl::~TestServerImpl() {
+}
+
+void TestServerImpl::kill() {
+  std::ostringstream sstr;
+  sstr << "KILL " << address << std::endl;
+  ServerControl::ServerResponse resp = sc.requestResponse(sstr.str());
+  if (resp.status != "OK") {
+    LOG.errorStream() << "Error killing Server " << resp.message;
+    throw ErrorKillingServerException();
+  }
+}
+
+std::string& TestServerImpl::getAddress() {
+  return address;
+}
+ 
+ServerControl::ServerControl(int port) {
+  socketfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
+  
+  if (-1 == socketfd) {
+    LOG.errorStream() << "Couldn't create socket";
+    throw CantConnectToServerControlDaemonException();
+  }
+
+  sockaddr_in addr;
+  addr.sin_family = AF_INET; 
+  addr.sin_port = htons(port);
+  addr.sin_addr.s_addr = inet_addr("127.0.0.1"); 
+    
+  if (-1 == ::connect(socketfd, (const sockaddr *)&addr, sizeof(struct sockaddr))) {
+    LOG.errorStream() << "Couldn't connect socket";
+    close(socketfd);
+    throw CantConnectToServerControlDaemonException();
+  }
+}
+
+ServerControl::~ServerControl() {
+  close(socketfd);
+}
+  
+
+ServerControl::ServerResponse ServerControl::requestResponse(std::string request) {
+  socketlock.lock();
+  char response[MAX_COMMAND_LN];
+
+  LOG.debugStream() << "REQ: " << request.c_str() << " " << request.length();
+  send(socketfd, request.c_str(), request.length(), 0);
+  
+  memset(response, 0, MAX_COMMAND_LN);
+  recv(socketfd, response, MAX_COMMAND_LN, 0);
+  LOG.debugStream() << "RESP: " << response;
+
+  socketlock.unlock();
+
+  char* space = strchr(response, ' ');
+  if (space == NULL) {
+    throw InvalidServerControlDaemonResponseException();
+  }
+  char* status = response;
+  *space = 0;
+  
+  char* message = space+1;
+  char* cr = strchr(message, '\n');
+  if (cr != NULL) {
+    *cr = 0;
+  }
+  if (strlen(message) < 1) {
+    throw InvalidServerControlDaemonResponseException();
+  }
+  LOG.debugStream() << "$" << message << "$";
+  ServerControl::ServerResponse resp = { std::string(status), std::string(message) };
+  return resp;
+}
+  
+TestServerPtr ServerControl::startZookeeperServer(int port) {  
+  std::ostringstream sstr;
+  sstr << "START ZOOKEEPER " << port << std::endl;
+
+  std::string req(sstr.str());
+  LOG.debugStream() << req;
+
+  ServerControl::ServerResponse resp = requestResponse(req);
+  if (resp.status == "OK") {
+    return TestServerPtr(new TestServerImpl(resp.message, *this));
+  } else {
+    LOG.errorStream() << "Error creating zookeeper on port " << port << " " << resp.message;
+    throw ErrorCreatingServerException();
+  }
+}
+
+TestServerPtr ServerControl::startBookieServer(int port, TestServerPtr& zookeeperServer) {
+  std::ostringstream sstr;
+  sstr << "START BOOKKEEPER " << port << " " << zookeeperServer->getAddress() << std::endl;
+
+  std::string req(sstr.str());
+  LOG.debugStream() << req;
+
+  ServerControl::ServerResponse resp = requestResponse(req);
+  if (resp.status == "OK") {
+    return TestServerPtr(new TestServerImpl(resp.message, *this));
+  } else {
+    LOG.errorStream() << "Error creating bookkeeper on port " << port << " " << resp.message;
+    throw ErrorCreatingServerException();
+  }
+}
+
+TestServerPtr ServerControl::startPubSubServer(int port, std::string& region, TestServerPtr& zookeeperServer) {
+  std::ostringstream sstr;
+  sstr << "START HEDWIG " << port << " " << region << " " << zookeeperServer->getAddress() << std::endl;
+
+  std::string req(sstr.str());
+  LOG.debugStream() << req;
+
+  ServerControl::ServerResponse resp = requestResponse(req);
+  if (resp.status == "OK") {
+    return TestServerPtr(new TestServerImpl(resp.message, *this));
+  } else {
+    LOG.errorStream() << "Error creating hedwig on port " << port << " " << resp.message;
+    throw ErrorCreatingServerException();
+  }
+}

+ 64 - 0
src/contrib/hedwig/client/src/main/cpp/test/servercontrol.h

@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SERVERCONTROL_H
+#define SERVERCONTROL_H
+
+#include <tr1/memory>
+#include <exception>
+#include "../lib/util.h"
+
+namespace HedwigTest {
+    const int DEFAULT_CONTROLSERVER_PORT = 5672;
+
+  class TestException : public std::exception {};
+  class CantConnectToServerControlDaemonException : public TestException {};
+  class InvalidServerControlDaemonResponseException : public TestException {};
+  class ErrorCreatingServerException : public TestException {};
+  class ErrorKillingServerException : public TestException {};
+
+  class TestServer {
+  public:
+    virtual void kill() = 0;
+    virtual std::string& getAddress() = 0;
+  };
+  
+  typedef std::tr1::shared_ptr<TestServer> TestServerPtr;
+
+  class ServerControl {
+  public:
+    ServerControl(int port);
+    ~ServerControl();
+    
+    TestServerPtr startZookeeperServer(int port);
+    TestServerPtr startBookieServer(int port, TestServerPtr& zookeeperServer);
+    TestServerPtr startPubSubServer(int port, std::string& region, TestServerPtr& zookeeperServer);
+    
+    struct ServerResponse {
+      std::string status;
+      std::string message; 
+    };
+    ServerResponse requestResponse(std::string request);
+
+  public:
+    int socketfd;
+    Hedwig::Mutex socketlock;
+  };
+};
+
+#endif

+ 222 - 0
src/contrib/hedwig/client/src/main/cpp/test/subscribetest.cpp

@@ -0,0 +1,222 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <cppunit/Test.h>
+#include <cppunit/TestSuite.h>
+#include <cppunit/extensions/HelperMacros.h>
+
+#include "../lib/clientimpl.h"
+#include <hedwig/exceptions.h>
+#include <hedwig/callback.h>
+#include <stdexcept>
+#include <pthread.h>
+
+#include <log4cpp/Category.hh>
+
+#include "servercontrol.h"
+#include "util.h"
+
+static log4cpp::Category &LOG = log4cpp::Category::getInstance("hedwigtest."__FILE__);
+
+class SubscribeTestSuite : public CppUnit::TestFixture {
+private:
+  HedwigTest::ServerControl* control;
+  HedwigTest::TestServerPtr zk;
+  HedwigTest::TestServerPtr bk1;
+  HedwigTest::TestServerPtr bk2;
+  HedwigTest::TestServerPtr bk3;
+  HedwigTest::TestServerPtr hw1;
+  HedwigTest::TestServerPtr hw2;
+
+			       
+  CPPUNIT_TEST_SUITE( SubscribeTestSuite );
+  CPPUNIT_TEST(testSyncSubscribe);
+  CPPUNIT_TEST(testSyncSubscribeAttach);
+  CPPUNIT_TEST(testAsyncSubscribe);
+  CPPUNIT_TEST(testAsyncSubcribeAndUnsubscribe);
+  CPPUNIT_TEST(testAsyncSubcribeAndSyncUnsubscribe);
+  CPPUNIT_TEST(testAsyncSubcribeCloseSubscriptionAndThenResubscribe);
+  CPPUNIT_TEST(testUnsubscribeWithoutSubscribe);
+  CPPUNIT_TEST(testSubscribeTwice);      
+  CPPUNIT_TEST_SUITE_END();
+
+public:
+  SubscribeTestSuite() {
+    
+  }
+
+  ~SubscribeTestSuite() {
+  }
+
+  void setUp()
+  {
+    control = new HedwigTest::ServerControl(HedwigTest::DEFAULT_CONTROLSERVER_PORT);
+    zk = control->startZookeeperServer(12345);
+    bk1 = control->startBookieServer(12346, zk);
+    bk2 = control->startBookieServer(12347, zk);
+    bk3 = control->startBookieServer(12348, zk);
+    
+    std::string region("testRegion");
+    hw1 = control->startPubSubServer(12349, region, zk);
+    hw2 = control->startPubSubServer(12350, region, zk);
+  }
+  
+  void tearDown() 
+  {
+    try {
+      hw1->kill();
+    
+      bk1->kill();
+      bk2->kill();
+      bk3->kill();
+      
+      zk->kill();
+    } catch (std::exception& e) {
+      // don't allow an exception to break everything, we're going deleting the control no matter what
+    }
+    delete control;
+  }
+
+  void testSyncSubscribe() {
+    Hedwig::Configuration* conf = new TestServerConfiguration(hw1);
+    std::auto_ptr<Hedwig::Configuration> confptr(conf);
+    
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    std::auto_ptr<Hedwig::Client> clientptr(client);
+
+    Hedwig::Subscriber& sub = client->getSubscriber();
+    
+    sub.subscribe("testTopic", "mySubscriberId-1", Hedwig::SubscribeRequest::CREATE_OR_ATTACH);
+  }
+
+  void testSyncSubscribeAttach() {
+    Hedwig::Configuration* conf = new TestServerConfiguration(hw1);
+    std::auto_ptr<Hedwig::Configuration> confptr(conf);
+
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    std::auto_ptr<Hedwig::Client> clientptr(client);
+
+    Hedwig::Subscriber& sub = client->getSubscriber();
+    
+    CPPUNIT_ASSERT_THROW(sub.subscribe("iAmATopicWhoDoesNotExist", "mySubscriberId-2", Hedwig::SubscribeRequest::ATTACH), Hedwig::ClientException);
+  }
+
+  // Async subscribe: blocks until the callback fires.
+  // NOTE(review): TestCallback (util.h) signals the condition on BOTH
+  // operationComplete and operationFailed, so this only checks completion,
+  // not success -- confirm whether that is intended.
+  void testAsyncSubscribe() {
+    SimpleWaitCondition* cond1 = new SimpleWaitCondition();
+    std::auto_ptr<SimpleWaitCondition> cond1ptr(cond1);
+
+    Hedwig::Configuration* conf = new TestServerConfiguration(hw1);
+    std::auto_ptr<Hedwig::Configuration> confptr(conf);
+
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    std::auto_ptr<Hedwig::Client> clientptr(client);
+
+    Hedwig::Subscriber& sub = client->getSubscriber();
+   
+    Hedwig::OperationCallbackPtr testcb1(new TestCallback(cond1));
+
+    sub.asyncSubscribe("testTopic", "mySubscriberId-3", Hedwig::SubscribeRequest::CREATE_OR_ATTACH, testcb1);
+    
+    cond1->wait();
+  }
+  
+  // Async subscribe followed by async unsubscribe; each step waits on its
+  // own condition before the next one starts.
+  void testAsyncSubcribeAndUnsubscribe() {
+    SimpleWaitCondition* cond1 = new SimpleWaitCondition();
+    std::auto_ptr<SimpleWaitCondition> cond1ptr(cond1);
+    SimpleWaitCondition* cond2 = new SimpleWaitCondition();
+    std::auto_ptr<SimpleWaitCondition> cond2ptr(cond2);
+
+    Hedwig::Configuration* conf = new TestServerConfiguration(hw1);
+    std::auto_ptr<Hedwig::Configuration> confptr(conf);
+
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    std::auto_ptr<Hedwig::Client> clientptr(client);
+
+    Hedwig::Subscriber& sub = client->getSubscriber();
+   
+    Hedwig::OperationCallbackPtr testcb1(new TestCallback(cond1));
+    Hedwig::OperationCallbackPtr testcb2(new TestCallback(cond2));
+
+    sub.asyncSubscribe("testTopic", "mySubscriberId-4", Hedwig::SubscribeRequest::CREATE_OR_ATTACH, testcb1);
+    cond1->wait();
+    
+    sub.asyncUnsubscribe("testTopic", "mySubscriberId-4", testcb2);
+    cond2->wait();
+  }
+
+  // Async subscribe, then a synchronous unsubscribe of the same
+  // topic/subscriber pair must succeed.
+  void testAsyncSubcribeAndSyncUnsubscribe() {
+    SimpleWaitCondition* cond1 = new SimpleWaitCondition();
+    std::auto_ptr<SimpleWaitCondition> cond1ptr(cond1);
+
+    Hedwig::Configuration* conf = new TestServerConfiguration(hw1);
+    std::auto_ptr<Hedwig::Configuration> confptr(conf);
+
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    std::auto_ptr<Hedwig::Client> clientptr(client);
+
+    Hedwig::Subscriber& sub = client->getSubscriber();
+   
+    Hedwig::OperationCallbackPtr testcb1(new TestCallback(cond1));
+    
+    sub.asyncSubscribe("testTopic", "mySubscriberId-5", Hedwig::SubscribeRequest::CREATE_OR_ATTACH, testcb1);
+    cond1->wait();
+
+    sub.unsubscribe("testTopic", "mySubscriberId-5");
+  }
+
+  // closeSubscription only drops client-side state, so a subsequent
+  // subscribe (and final unsubscribe) on the same id must still succeed.
+  void testAsyncSubcribeCloseSubscriptionAndThenResubscribe() {
+    Hedwig::Configuration* conf = new TestServerConfiguration(hw1);
+    std::auto_ptr<Hedwig::Configuration> confptr(conf);
+
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    std::auto_ptr<Hedwig::Client> clientptr(client);
+
+    Hedwig::Subscriber& sub = client->getSubscriber();
+   
+    sub.subscribe("testTopic", "mySubscriberId-6", Hedwig::SubscribeRequest::CREATE_OR_ATTACH);
+    sub.closeSubscription("testTopic", "mySubscriberId-6");
+    sub.subscribe("testTopic", "mySubscriberId-6", Hedwig::SubscribeRequest::CREATE_OR_ATTACH);
+    sub.unsubscribe("testTopic", "mySubscriberId-6");
+  }
+
+  // Unsubscribing an id that never subscribed must raise
+  // NotSubscribedException.
+  void testUnsubscribeWithoutSubscribe() {
+    Hedwig::Configuration* conf = new TestServerConfiguration(hw1);
+    std::auto_ptr<Hedwig::Configuration> confptr(conf);
+    
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    std::auto_ptr<Hedwig::Client> clientptr(client);
+
+    Hedwig::Subscriber& sub = client->getSubscriber();
+    
+    CPPUNIT_ASSERT_THROW(sub.unsubscribe("testTopic", "mySubscriberId-7"), Hedwig::NotSubscribedException);
+  }
+
+  // A second subscribe on an already-subscribed topic/id must raise
+  // AlreadySubscribedException.
+  void testSubscribeTwice() {
+    Hedwig::Configuration* conf = new TestServerConfiguration(hw1);
+    std::auto_ptr<Hedwig::Configuration> confptr(conf);
+    
+    Hedwig::Client* client = new Hedwig::Client(*conf);
+    std::auto_ptr<Hedwig::Client> clientptr(client);
+
+    Hedwig::Subscriber& sub = client->getSubscriber();
+    
+    sub.subscribe("testTopic", "mySubscriberId-8", Hedwig::SubscribeRequest::CREATE_OR_ATTACH);
+    CPPUNIT_ASSERT_THROW(sub.subscribe("testTopic", "mySubscriberId-8", Hedwig::SubscribeRequest::CREATE_OR_ATTACH), Hedwig::AlreadySubscribedException);
+  }
+};
+
+CPPUNIT_TEST_SUITE_NAMED_REGISTRATION( SubscribeTestSuite, "Subscribe" );

+ 21 - 0
src/contrib/hedwig/client/src/main/cpp/test/test.sh

@@ -0,0 +1,21 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#   
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Runs the native hedwigtest binary. NOTE(review): both paths below are
+# machine-specific (32-bit Sun JVM location, $HOME/src/hedwig layout,
+# hard-coded dependency versions) and will need adapting per environment.
+export LD_LIBRARY_PATH=/usr/lib/jvm/java-6-sun/jre/lib/i386/server/:/usr/lib/jvm/java-6-sun/jre/lib/i386/
+export CLASSPATH=$HOME/src/hedwig/server/target/test-classes:$HOME/src/hedwig/server/lib/bookkeeper-SNAPSHOT.jar:$HOME/src/hedwig/server/lib/zookeeper-SNAPSHOT.jar:$HOME/src/hedwig/server/target/classes:$HOME/src/hedwig/protocol/target/classes:$HOME/src/hedwig/client/target/classes:$HOME/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:$HOME/.m2/repository/org/jboss/netty/netty/3.1.2.GA/netty-3.1.2.GA.jar:$HOME/.m2/repository/commons-lang/commons-lang/2.4/commons-lang-2.4.jar:$HOME/.m2/repository/commons-collections/commons-collections/3.2.1/commons-collections-3.2.1.jar:$HOME/.m2/repository/commons-logging/commons-logging/1.1.1/commons-logging-1.1.1.jar:$HOME/.m2/repository/com/google/protobuf/protobuf-java/2.3.0/protobuf-java-2.3.0.jar:$HOME/.m2/repository/log4j/log4j/1.2.14/log4j-1.2.14.jar:$HOME/src/hedwig/client/target/classes/
+
+./hedwigtest

+ 76 - 0
src/contrib/hedwig/client/src/main/cpp/test/util.h

@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "../lib/clientimpl.h"
+#include <hedwig/exceptions.h>
+#include <hedwig/callback.h>
+#include <stdexcept>
+#include <pthread.h>
+
+static log4cpp::Category &UTILLOG = log4cpp::Category::getInstance("hedwigtest."__FILE__);
+
+
+// Minimal one-shot wait condition backed by a plain bool flag.
+// NOTE(review): `flag` has no synchronisation of its own; callers are
+// expected to use the base class's lock()/signalAndUnlock() -- confirm.
+// NOTE(review): the destructor calls wait(), so destroying an unsignalled
+// condition blocks forever by design -- verify this is intended.
+class SimpleWaitCondition : public Hedwig::WaitConditionBase {
+public:
+  SimpleWaitCondition() : flag(false) {};
+  ~SimpleWaitCondition() { wait(); }
+
+  void setTrue() { UTILLOG.debugStream() << "Setting flag " << &flag << " to true"; flag=true; UTILLOG.debugStream() << "Flag now " << flag; }
+  bool isTrue() {
+    UTILLOG.debugStream() << &flag << " isTrue? " << flag;
+    return flag;
+  }
+private:
+  bool flag;
+};
+
+// Operation callback that releases the given wait condition when the
+// operation finishes. Success and failure are treated identically (both
+// set the flag), so tests using it can only observe completion, not
+// outcome; failures are merely logged.
+class TestCallback : public Hedwig::OperationCallback {
+public:
+  TestCallback(SimpleWaitCondition* cond) 
+    : cond(cond) {
+  }
+
+  virtual void operationComplete() {
+    UTILLOG.debugStream() << "operationComplete";
+    cond->lock();
+    cond->setTrue();
+    cond->signalAndUnlock();
+  }
+  
+  virtual void operationFailed(const std::exception& exception) {
+    UTILLOG.debugStream() << "operationFailed: " << exception.what();
+    cond->lock();
+    cond->setTrue();
+    cond->signalAndUnlock();
+  }    
+private:
+  SimpleWaitCondition *cond;  // not owned; must outlive this callback
+};
+
+
+// Configuration pointing the client at a single test server. The server's
+// address is captured once at construction time, so a later address change
+// on the server would not be reflected here.
+class TestServerConfiguration : public Hedwig::Configuration {
+public:
+  TestServerConfiguration(HedwigTest::TestServerPtr& server) : server(server), address(server->getAddress()) {}
+  
+  virtual const std::string& getDefaultServer() const {
+    return address;
+  }
+  
+private:
+  HedwigTest::TestServerPtr server;   // kept alive so `address` stays meaningful
+  const std::string address;          // cached copy of server->getAddress()
+};

+ 90 - 0
src/contrib/hedwig/client/src/main/cpp/test/utiltest.cpp

@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <cppunit/Test.h>
+#include <cppunit/TestSuite.h>
+#include <cppunit/extensions/HelperMacros.h>
+
+#include "../lib/util.h"
+#include <hedwig/exceptions.h>
+#include <stdexcept>
+
+using namespace CppUnit;
+
+// Tests for Hedwig::HostAddress::fromString parsing: bare host, host:port,
+// host:port:sslport, and a range of malformed inputs mapped to specific
+// exception types.
+class UtilTestSuite : public CppUnit::TestFixture {
+  CPPUNIT_TEST_SUITE( UtilTestSuite );
+  CPPUNIT_TEST(testHostAddress);
+  CPPUNIT_TEST_SUITE_END();
+
+public:
+  void setUp()
+  {
+  }
+
+  void tearDown() 
+  {
+  }
+
+  void testHostAddress() {
+    // good address (no ports) -- these assertions imply 4080 is the
+    // default port when none is given
+    Hedwig::HostAddress a1 = Hedwig::HostAddress::fromString("www.yahoo.com");
+    CPPUNIT_ASSERT(a1.port() == 4080);
+
+    // good address with ip (no ports); 127.0.0.1 packs to (127<<24)|1
+    Hedwig::HostAddress a2 = Hedwig::HostAddress::fromString("127.0.0.1");
+    CPPUNIT_ASSERT(a2.port() == 4080);
+    CPPUNIT_ASSERT(a2.ip() == ((127 << 24) | 1));
+
+    // good address
+    Hedwig::HostAddress a3 = Hedwig::HostAddress::fromString("www.yahoo.com:80");
+    CPPUNIT_ASSERT(a3.port() == 80);
+
+    // good address with ip
+    Hedwig::HostAddress a4 = Hedwig::HostAddress::fromString("127.0.0.1:80");
+    CPPUNIT_ASSERT(a4.port() == 80);
+    CPPUNIT_ASSERT(a4.ip() == ((127 << 24) | 1));
+
+    // good address (with ssl); only the plain port is asserted here
+    Hedwig::HostAddress a5 = Hedwig::HostAddress::fromString("www.yahoo.com:80:443");
+    CPPUNIT_ASSERT(a5.port() == 80);
+
+    // good address with ip
+    Hedwig::HostAddress a6 = Hedwig::HostAddress::fromString("127.0.0.1:80:443");
+    CPPUNIT_ASSERT(a6.port() == 80);
+    CPPUNIT_ASSERT(a6.ip() == ((127 << 24) | 1));
+
+    // nothing
+    CPPUNIT_ASSERT_THROW(Hedwig::HostAddress::fromString(""), Hedwig::HostResolutionException);
+    
+    // nothing but colons
+    CPPUNIT_ASSERT_THROW(Hedwig::HostAddress::fromString("::::::::::::::::"), Hedwig::ConfigurationException);
+    
+    // only port number
+    CPPUNIT_ASSERT_THROW(Hedwig::HostAddress::fromString(":80"), Hedwig::HostResolutionException);
+ 
+    // text after colon (isn't supported)
+    CPPUNIT_ASSERT_THROW(Hedwig::HostAddress::fromString("www.yahoo.com:http"), Hedwig::ConfigurationException);
+    
+    // invalid hostname
+    CPPUNIT_ASSERT_THROW(Hedwig::HostAddress::fromString("com.oohay.www:80"), Hedwig::HostResolutionException);
+    
+    // null
+    CPPUNIT_ASSERT_THROW(Hedwig::HostAddress::fromString(NULL), std::logic_error);
+  }
+};
+
+CPPUNIT_TEST_SUITE_NAMED_REGISTRATION( UtilTestSuite, "Util" );

+ 48 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/api/MessageHandler.java

@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.api;
+
+import com.google.protobuf.ByteString;
+import org.apache.hedwig.protocol.PubSubProtocol.Message;
+import org.apache.hedwig.util.Callback;
+
+/**
+ * Interface to define the client handler logic to consume messages it is
+ * subscribed to.
+ * 
+ */
+public interface MessageHandler {
+
+    /**
+     * Consumes a single message that has been delivered for a subscription
+     * this handler is attached to.
+     * 
+     * @param topic
+     *            The topic name where the message came from.
+     * @param subscriberId
+     *            ID of the subscriber.
+     * @param msg
+     *            The message object to consume.
+     * @param callback
+     *            Callback to invoke when the message consumption has been done.
+     * @param context
+     *            Calling context that the Callback needs since this is done
+     *            asynchronously.
+     */
+    public void consume(ByteString topic, ByteString subscriberId, Message msg, Callback<Void> callback, Object context);
+
+}

+ 63 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/api/Publisher.java

@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.api;
+
+import com.google.protobuf.ByteString;
+import org.apache.hedwig.exceptions.PubSubException.CouldNotConnectException;
+import org.apache.hedwig.exceptions.PubSubException.ServiceDownException;
+import org.apache.hedwig.protocol.PubSubProtocol.Message;
+import org.apache.hedwig.util.Callback;
+
+/**
+ * Interface to define the client Publisher API.
+ * 
+ */
+public interface Publisher {
+
+    /**
+     * Publishes a message on the given topic, blocking until the publish has
+     * been acknowledged or fails.
+     * 
+     * @param topic
+     *            Topic name to publish on
+     * @param msg
+     *            Message object to serialize and publish
+     * @throws CouldNotConnectException
+     *             If we are not able to connect to the server host
+     * @throws ServiceDownException
+     *             If we are unable to publish the message to the topic.
+     */
+    public void publish(ByteString topic, Message msg) throws CouldNotConnectException, ServiceDownException;
+
+    /**
+     * Publishes a message asynchronously on the given topic.
+     * 
+     * @param topic
+     *            Topic name to publish on
+     * @param msg
+     *            Message object to serialize and publish
+     * @param callback
+     *            Callback to invoke when the publish to the server has actually
+     *            gone through. This will have to deal with error conditions on
+     *            the async publish request.
+     * @param context
+     *            Calling context that the Callback needs since this is done
+     *            asynchronously.
+     */
+    public void asyncPublish(ByteString topic, Message msg, Callback<Void> callback, Object context);
+
+}

+ 237 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/api/Subscriber.java

@@ -0,0 +1,237 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.api;
+
+import java.util.List;
+
+import com.google.protobuf.ByteString;
+import org.apache.hedwig.client.exceptions.InvalidSubscriberIdException;
+import org.apache.hedwig.exceptions.PubSubException.ClientAlreadySubscribedException;
+import org.apache.hedwig.exceptions.PubSubException.ClientNotSubscribedException;
+import org.apache.hedwig.exceptions.PubSubException.CouldNotConnectException;
+import org.apache.hedwig.exceptions.PubSubException.ServiceDownException;
+import org.apache.hedwig.protocol.PubSubProtocol.MessageSeqId;
+import org.apache.hedwig.protocol.PubSubProtocol.SubscribeRequest.CreateOrAttach;
+import org.apache.hedwig.util.Callback;
+
+/**
+ * Interface to define the client Subscriber API.
+ * 
+ */
+public interface Subscriber {
+
+    /**
+     * Subscribe to the given topic for the inputted subscriberId.
+     * 
+     * @param topic
+     *            Topic name of the subscription
+     * @param subscriberId
+     *            ID of the subscriber
+     * @param mode
+     *            Whether to prohibit, tolerate, or require an existing
+     *            subscription.
+     * @throws CouldNotConnectException
+     *             If we are not able to connect to the server host
+     * @throws ClientAlreadySubscribedException
+     *             If client is already subscribed to the topic
+     * @throws ServiceDownException
+     *             If unable to subscribe to topic
+     * @throws InvalidSubscriberIdException
+     *             If the subscriberId is not valid. We may want to set aside
+     *             certain formats of subscriberId's for different purposes.
+     *             e.g. local vs. hub subscriber
+     */
+    public void subscribe(ByteString topic, ByteString subscriberId, CreateOrAttach mode)
+            throws CouldNotConnectException, ClientAlreadySubscribedException, ServiceDownException,
+            InvalidSubscriberIdException;
+
+    /**
+     * Subscribe to the given topic asynchronously for the inputted
+     * subscriberId. As in the synchronous variant, the mode controls whether
+     * an existing subscription is prohibited, tolerated, or required.
+     * 
+     * @param topic
+     *            Topic name of the subscription
+     * @param subscriberId
+     *            ID of the subscriber
+     * @param mode
+     *            Whether to prohibit, tolerate, or require an existing
+     *            subscription.
+     * @param callback
+     *            Callback to invoke when the subscribe request to the server
+     *            has actually gone through. This will have to deal with error
+     *            conditions on the async subscribe request.
+     * @param context
+     *            Calling context that the Callback needs since this is done
+     *            asynchronously.
+     */
+    public void asyncSubscribe(ByteString topic, ByteString subscriberId, CreateOrAttach mode, Callback<Void> callback,
+            Object context);
+
+    /**
+     * Unsubscribe from a topic that the subscriberId user has previously
+     * subscribed to.
+     * 
+     * @param topic
+     *            Topic name of the subscription
+     * @param subscriberId
+     *            ID of the subscriber
+     * @throws CouldNotConnectException
+     *             If we are not able to connect to the server host
+     * @throws ClientNotSubscribedException
+     *             If the client is not currently subscribed to the topic
+     * @throws ServiceDownException
+     *             If the server was down and unable to complete the request
+     * @throws InvalidSubscriberIdException
+     *             If the subscriberId is not valid. We may want to set aside
+     *             certain formats of subscriberId's for different purposes.
+     *             e.g. local vs. hub subscriber
+     */
+    public void unsubscribe(ByteString topic, ByteString subscriberId) throws CouldNotConnectException,
+            ClientNotSubscribedException, ServiceDownException, InvalidSubscriberIdException;
+
+    /**
+     * Unsubscribe from a topic asynchronously that the subscriberId user has
+     * previously subscribed to.
+     * 
+     * @param topic
+     *            Topic name of the subscription
+     * @param subscriberId
+     *            ID of the subscriber
+     * @param callback
+     *            Callback to invoke when the unsubscribe request to the server
+     *            has actually gone through. This will have to deal with error
+     *            conditions on the async unsubscribe request.
+     * @param context
+     *            Calling context that the Callback needs since this is done
+     *            asynchronously.
+     */
+    public void asyncUnsubscribe(ByteString topic, ByteString subscriberId, Callback<Void> callback, Object context);
+
+    /**
+     * Manually send a consume message to the server for the given inputs.
+     * 
+     * @param topic
+     *            Topic name of the subscription
+     * @param subscriberId
+     *            ID of the subscriber
+     * @param messageSeqId
+     *            Message Sequence ID for the latest message that the client app
+     *            has successfully consumed. All messages up to that point will
+     *            also be considered as consumed.            
+     * @throws ClientNotSubscribedException
+     *             If the client is not currently subscribed to the topic based
+     *             on the client's local state.
+     */
+    public void consume(ByteString topic, ByteString subscriberId, MessageSeqId messageSeqId)
+            throws ClientNotSubscribedException;
+
+    /**
+     * Checks if the subscriberId client is currently subscribed to the given
+     * topic.
+     * 
+     * @param topic
+     *            Topic name of the subscription.
+     * @param subscriberId
+     *            ID of the subscriber
+     * @throws CouldNotConnectException
+     *             If we are not able to connect to the server host
+     * @throws ServiceDownException
+     *             If there is an error checking the server if the client has a
+     *             subscription
+     * @return Boolean indicating if the client has a subscription or not.
+     */
+    public boolean hasSubscription(ByteString topic, ByteString subscriberId) throws CouldNotConnectException,
+            ServiceDownException;
+
+    /**
+     * Fills the input List with the subscriptions this subscriberId client is
+     * subscribed to.
+     * 
+     * @param subscriberId
+     *            ID of the subscriber
+     * @return List filled with subscription name (topic) strings.
+     * @throws CouldNotConnectException
+     *             If we are not able to connect to the server host
+     * @throws ServiceDownException
+     *             If there is an error retrieving the list of topics
+     */
+    public List<ByteString> getSubscriptionList(ByteString subscriberId) throws CouldNotConnectException,
+            ServiceDownException;
+
+    /**
+     * Begin delivery of messages from the server to us for this topic and
+     * subscriberId.
+     * 
+     * @param topic
+     *            Topic name of the subscription
+     * @param subscriberId
+     *            ID of the subscriber
+     * @param messageHandler
+     *            Message Handler that will consume the subscribed messages
+     * @throws ClientNotSubscribedException
+     *             If the client is not currently subscribed to the topic
+     */
+    public void startDelivery(ByteString topic, ByteString subscriberId, MessageHandler messageHandler)
+            throws ClientNotSubscribedException;
+
+    /**
+     * Stop delivery of messages for this topic and subscriberId.
+     * 
+     * @param topic
+     *            Topic name of the subscription
+     * @param subscriberId
+     *            ID of the subscriber
+     * @throws ClientNotSubscribedException
+     *             If the client is not currently subscribed to the topic
+     */
+    public void stopDelivery(ByteString topic, ByteString subscriberId) throws ClientNotSubscribedException;
+
+    /**
+     * Closes all of the client side cached data for this subscription without
+     * actually sending an unsubscribe request to the server. This will close
+     * the subscribe channel synchronously (if it exists) for the topic.
+     * 
+     * @param topic
+     *            Topic name of the subscription
+     * @param subscriberId
+     *            ID of the subscriber
+     * @throws ServiceDownException
+     *             If the subscribe channel was not able to be closed
+     *             successfully
+     */
+    public void closeSubscription(ByteString topic, ByteString subscriberId) throws ServiceDownException;
+
+    /**
+     * Closes all of the client side cached data for this subscription without
+     * actually sending an unsubscribe request to the server. This will close
+     * the subscribe channel asynchronously (if it exists) for the topic.
+     * 
+     * @param topic
+     *            Topic name of the subscription
+     * @param subscriberId
+     *            ID of the subscriber
+     * @param callback
+     *            Callback to invoke when the subscribe channel has been closed.
+     * @param context
+     *            Calling context that the Callback needs since this is done
+     *            asynchronously.
+     */
+    public void asyncCloseSubscription(ByteString topic, ByteString subscriberId, Callback<Void> callback,
+            Object context);
+
+}

+ 133 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/benchmark/BenchmarkPublisher.java

@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.benchmark;
+
+import com.google.protobuf.ByteString;
+import org.apache.hedwig.client.api.MessageHandler;
+import org.apache.hedwig.client.api.Publisher;
+import org.apache.hedwig.client.api.Subscriber;
+import org.apache.hedwig.client.benchmark.BenchmarkUtils.BenchmarkCallback;
+import org.apache.hedwig.client.benchmark.BenchmarkUtils.ThroughputLatencyAggregator;
+import org.apache.hedwig.protocol.PubSubProtocol.Message;
+import org.apache.hedwig.protocol.PubSubProtocol.SubscribeRequest.CreateOrAttach;
+import org.apache.hedwig.util.Callback;
+
+/**
+ * Benchmark worker that publishes messages to a partitioned set of topics,
+ * optionally rate-limited, and reports throughput/latency via
+ * ThroughputLatencyAggregator.
+ */
+public class BenchmarkPublisher extends BenchmarkWorker {
+    Publisher publisher;
+    Subscriber subscriber;
+    int msgSize;    // payload size in bytes (one 'a' character per byte)
+    int nParallel;  // max publishes in flight, passed to the aggregator
+    double rate;    // target publishes/sec for THIS worker (global rate split below)
+
+    public BenchmarkPublisher(int numTopics, int numMessages, int numRegions, int startTopicLabel, int partitionIndex,
+            int numPartitions, Publisher publisher, Subscriber subscriber, int msgSize, int nParallel, int rate) {
+        super(numTopics, numMessages, numRegions, startTopicLabel, partitionIndex, numPartitions);
+        this.publisher = publisher;
+        this.msgSize = msgSize;
+        this.subscriber = subscriber;
+        this.nParallel = nParallel;
+
+        // Split the global rate across regions and partitions; "+ 0.0"
+        // forces floating-point division of the int arguments.
+        this.rate = rate / (numRegions * numPartitions + 0.0);
+    }
+
+    /**
+     * Warms up the path with nWarmup publishes to a per-partition warmup
+     * topic (subscribed with a no-op handler so deliveries are consumed).
+     * NOTE(review): queue.take() > 0 is treated as "some publishes failed"
+     * -- presumably the aggregator enqueues a failure count; confirm.
+     */
+    public void warmup(int nWarmup) throws Exception {
+        ByteString topic = ByteString.copyFromUtf8("warmup" + partitionIndex);
+        ByteString subId = ByteString.copyFromUtf8("sub");
+        subscriber.subscribe(topic, subId, CreateOrAttach.CREATE_OR_ATTACH);
+
+        subscriber.startDelivery(topic, subId, new MessageHandler() {
+            @Override
+            public void consume(ByteString topic, ByteString subscriberId, Message msg, Callback<Void> callback,
+                    Object context) {
+                // noop
+                callback.operationFinished(context, null);
+            }
+        });
+
+        // picking constants arbitarily for warmup phase
+        ThroughputLatencyAggregator agg = new ThroughputLatencyAggregator("acked pubs", nWarmup, 100);
+        Message msg = getMsg(1024);
+        for (int i = 0; i < nWarmup; i++) {
+            publisher.asyncPublish(topic, msg, new BenchmarkCallback(agg), null);
+        }
+
+        if (agg.tpAgg.queue.take() > 0) {
+            throw new RuntimeException("Warmup publishes failed!");
+        }
+
+    }
+
+    /** Builds a Message whose body is `size` repetitions of 'a'. */
+    public Message getMsg(int size) {
+        StringBuilder sb = new StringBuilder();
+        for (int i = 0; i < size; i++) {
+            sb.append('a');
+        }
+        final ByteString body = ByteString.copyFromUtf8(sb.toString());
+        Message msg = Message.newBuilder().setBody(body).build();
+        return msg;
+    }
+
+    /**
+     * Publishes one synchronous warmup message per owned topic, then
+     * round-robins async publishes over the owned topics until this
+     * worker's share of numMessages is reached, sleeping as needed to
+     * honour the per-worker rate (rate <= 0 disables throttling).
+     */
+    public Void call() throws Exception {
+        Message msg = getMsg(msgSize);
+
+        // Single warmup for every topic
+        int myPublishCount = 0;
+        for (int i = 0; i < numTopics; i++) {
+            if (!HedwigBenchmark.amIResponsibleForTopic(startTopicLabel + i, partitionIndex, numPartitions)){
+                continue;
+            }
+            ByteString topic = ByteString.copyFromUtf8(HedwigBenchmark.TOPIC_PREFIX + (startTopicLabel + i));
+            publisher.publish(topic, msg);
+            myPublishCount++;
+        }
+
+        long startTime = System.currentTimeMillis();
+        // This worker's quota, minus the warmup publishes already sent.
+        int myPublishLimit = numMessages / numRegions / numPartitions - myPublishCount;
+        myPublishCount = 0;
+        ThroughputLatencyAggregator agg = new ThroughputLatencyAggregator("acked pubs", myPublishLimit, nParallel);
+
+        int topicLabel = 0;
+
+        while (myPublishCount < myPublishLimit) {
+            int topicNum = startTopicLabel + topicLabel;
+            topicLabel = (topicLabel + 1) % numTopics;
+
+            if (!HedwigBenchmark.amIResponsibleForTopic(topicNum, partitionIndex, numPartitions)) {
+                continue;
+            }
+
+            ByteString topic = ByteString.copyFromUtf8(HedwigBenchmark.TOPIC_PREFIX + topicNum);
+
+            if (rate > 0) {
+                // Sleep until this publish's scheduled time under the target rate.
+                long delay = startTime + (long) (1000 * myPublishCount / rate) - System.currentTimeMillis();
+                if (delay > 0)
+                    Thread.sleep(delay);
+            }
+            publisher.asyncPublish(topic, msg, new BenchmarkCallback(agg), null);
+            myPublishCount++;
+        }
+
+        System.out.println("Finished unacked pubs: tput = " + BenchmarkUtils.calcTp(myPublishLimit, startTime)
+                + " ops/s");
+        // Wait till the benchmark test has completed 
+        agg.tpAgg.queue.take();
+        System.out.println(agg.summarize(startTime));
+        return null;
+    }
+
+}

+ 136 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/benchmark/BenchmarkSubscriber.java

@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.benchmark;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Callable;
+
+import org.apache.log4j.Logger;
+
+import com.google.protobuf.ByteString;
+import org.apache.hedwig.client.api.MessageHandler;
+import org.apache.hedwig.client.api.Subscriber;
+import org.apache.hedwig.client.benchmark.BenchmarkUtils.BenchmarkCallback;
+import org.apache.hedwig.client.benchmark.BenchmarkUtils.ThroughputAggregator;
+import org.apache.hedwig.client.benchmark.BenchmarkUtils.ThroughputLatencyAggregator;
+import org.apache.hedwig.protocol.PubSubProtocol.Message;
+import org.apache.hedwig.protocol.PubSubProtocol.RegionSpecificSeqId;
+import org.apache.hedwig.protocol.PubSubProtocol.SubscribeRequest.CreateOrAttach;
+import org.apache.hedwig.util.Callback;
+
+public class BenchmarkSubscriber extends BenchmarkWorker implements Callable<Void>{
+    static final Logger logger = Logger.getLogger(BenchmarkSubscriber.class);
+    Subscriber subscriber;
+    ByteString subId;
+    
+
+    public BenchmarkSubscriber(int numTopics, int numMessages, int numRegions,
+            int startTopicLabel, int partitionIndex, int numPartitions, Subscriber subscriber, ByteString subId) {
+        super(numTopics, numMessages, numRegions, startTopicLabel, partitionIndex, numPartitions);
+        this.subscriber = subscriber;
+        this.subId = subId;        
+    }
+
+    public void warmup(int numWarmup) throws InterruptedException {
+        /*
+         * multiplying the number of ops by numParitions because we end up
+         * skipping many because of the partitioning logic
+         */
+        multiSub("warmup", "warmup", 0, numWarmup, numWarmup * numPartitions);
+    }
+
+    public Void call() throws Exception {
+
+        final ThroughputAggregator agg = new ThroughputAggregator("recvs", numMessages);
+        final Map<String, Long> lastSeqIdSeenMap = new HashMap<String, Long>();
+
+        for (int i = startTopicLabel; i < startTopicLabel + numTopics; i++) {
+
+            if (!HedwigBenchmark.amIResponsibleForTopic(i, partitionIndex, numPartitions)) {
+                continue;
+            }
+
+            final String topic = HedwigBenchmark.TOPIC_PREFIX + i;
+
+            subscriber.subscribe(ByteString.copyFromUtf8(topic), subId, CreateOrAttach.CREATE_OR_ATTACH);
+            subscriber.startDelivery(ByteString.copyFromUtf8(topic), subId, new MessageHandler() {
+
+                @Override
+                public void consume(ByteString thisTopic, ByteString subscriberId, Message msg,
+                        Callback<Void> callback, Object context) {
+                    if (logger.isDebugEnabled())
+                        logger.debug("Got message from src-region: " + msg.getSrcRegion() + " with seq-id: "
+                                + msg.getMsgId());
+
+                    String mapKey = topic + msg.getSrcRegion().toStringUtf8();
+                    Long lastSeqIdSeen = lastSeqIdSeenMap.get(mapKey);
+                    if (lastSeqIdSeen == null) {
+                        lastSeqIdSeen = (long) 0;
+                    }
+
+                    if (getSrcSeqId(msg) <= lastSeqIdSeen) {
+                        logger.info("Redelivery of message, src-region: " + msg.getSrcRegion() + "seq-id: "
+                                + msg.getMsgId());
+                    } else {
+                        agg.ding(false);
+                    }
+
+                    callback.operationFinished(context, null);
+                }
+            });
+        }
+        System.out.println("Finished subscribing to topics and now waiting for messages to come in...");
+        // Wait till the benchmark test has completed
+        agg.queue.take();            
+        System.out.println(agg.summarize(agg.earliest.get()));
+        return null;
+    }
+
+    long getSrcSeqId(Message msg) {
+        if (msg.getMsgId().getRemoteComponentsCount() == 0) {
+            return msg.getMsgId().getLocalComponent();
+        }
+
+        for (RegionSpecificSeqId rseqId : msg.getMsgId().getRemoteComponentsList()) {
+            if (rseqId.getRegion().equals(msg.getSrcRegion()))
+                return rseqId.getSeqId();
+        }
+
+        return msg.getMsgId().getLocalComponent();
+    }
+
+    void multiSub(String label, String topicPrefix, int start, final int npar, final int count)
+            throws InterruptedException {
+        long startTime = System.currentTimeMillis();
+        ThroughputLatencyAggregator agg = new ThroughputLatencyAggregator(label, count / numPartitions, npar);
+        int end = start + count;
+        for (int i = start; i < end; ++i) {
+            if (!HedwigBenchmark.amIResponsibleForTopic(i, partitionIndex, numPartitions)){
+                continue;
+            }
+            subscriber.asyncSubscribe(ByteString.copyFromUtf8(topicPrefix + i), subId, CreateOrAttach.CREATE_OR_ATTACH,
+                    new BenchmarkCallback(agg), null);
+        }
+        // Wait till the benchmark test has completed
+        agg.tpAgg.queue.take();
+        if (count > 1)
+            System.out.println(agg.summarize(startTime));
+    }
+
+}

+ 176 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/benchmark/BenchmarkUtils.java

@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.benchmark;
+
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.log4j.Logger;
+
+import org.apache.hedwig.exceptions.PubSubException;
+import org.apache.hedwig.util.Callback;
+
+/**
+ * Helpers shared by the benchmark drivers: throughput and latency
+ * aggregation, plus a callback that feeds async-operation results into an
+ * aggregator.
+ */
+public class BenchmarkUtils {
+    static final Logger logger = Logger.getLogger(BenchmarkUtils.class);
+
+    /** Throughput in ops/s for {@code count} ops started at {@code startTime} (epoch ms). */
+    public static double calcTp(final int count, long startTime) {
+        return 1000. * count / (System.currentTimeMillis() - startTime);
+    }
+
+    /**
+     * Stats aggregator for callback (round-trip) operations. Measures both
+     * throughput and latency.
+     */
+    public static class ThroughputLatencyAggregator {
+        int numBuckets;
+        final ThroughputAggregator tpAgg;
+        // Bounds the number of operations allowed in flight at once.
+        final Semaphore outstanding;
+        // Sum of all reported latencies (ms); used for the average in summarize().
+        final AtomicLong sum = new AtomicLong();
+
+        // bucket[i] is count of number of operations that took >= i ms and <
+        // (i+1) ms; the last bucket absorbs everything slower.
+        final AtomicLong[] latencyBuckets;
+
+        public ThroughputLatencyAggregator(String label, int count, int limit) throws InterruptedException {
+            numBuckets = Integer.getInteger("numBuckets", 101);
+            latencyBuckets = new AtomicLong[numBuckets];
+            tpAgg = new ThroughputAggregator(label, count);
+            outstanding = new Semaphore(limit);
+            for (int i = 0; i < numBuckets; i++) {
+                latencyBuckets[i] = new AtomicLong();
+            }
+        }
+
+        /** Records one operation's round-trip latency in milliseconds. */
+        public void reportLatency(long latency) {
+            sum.addAndGet(latency);
+
+            int bucketIndex;
+            if (latency >= numBuckets) {
+                // Clamp slow operations into the final (overflow) bucket.
+                bucketIndex = numBuckets - 1;
+            } else {
+                bucketIndex = (int) latency;
+            }
+            latencyBuckets[bucketIndex].incrementAndGet();
+        }
+
+        /** Smallest latency (ms, as bucket index) covering at least the given percentile. */
+        private String getPercentile(double percentile) {
+            int numInliersNeeded = (int) (percentile / 100 * tpAgg.count);
+            int numInliersFound = 0;
+            for (int i = 0; i < numBuckets - 1; i++) {
+                numInliersFound += latencyBuckets[i].intValue();
+                if (numInliersFound > numInliersNeeded) {
+                    return i + "";
+                }
+            }
+            return " >= " + (numBuckets - 1);
+        }
+
+        public String summarize(long startTime) {
+            double percentile = Double.parseDouble(System.getProperty("percentile", "99.9"));
+            // Guard the average against division by zero when no ops were requested.
+            long avgLatency = tpAgg.count > 0 ? sum.get() / tpAgg.count : 0;
+            return tpAgg.summarize(startTime) + ", avg latency = " + avgLatency + ", " + percentile
+                    + "%ile latency = " + getPercentile(percentile);
+        }
+    }
+
+    /**
+     * Stats aggregator for non-callback (single-shot) operations. Measures just
+     * throughput.
+     */
+    public static class ThroughputAggregator {
+        final String label;
+        // Total number of operations expected; completion is signalled on the
+        // queue once this many dings have arrived.
+        final int count;
+        final AtomicInteger done = new AtomicInteger();
+        // Timestamp of the first completed operation (0 until one completes).
+        final AtomicLong earliest = new AtomicLong();
+        final AtomicInteger numFailed = new AtomicInteger();
+        // Receives the final failure count once all operations are done.
+        final LinkedBlockingQueue<Integer> queue = new LinkedBlockingQueue<Integer>();
+
+        public ThroughputAggregator(final String label, final int count) {
+            this.label = label;
+            this.count = count;
+            if (count == 0)
+                queue.add(0);
+            // Optionally print progress once a second when -Dprogress=true.
+            if (Boolean.getBoolean("progress")) {
+                new Thread(new Runnable() {
+                    @Override
+                    public void run() {
+                        try {
+                            for (int doneSnap = 0, prev = 0; doneSnap < count; prev = doneSnap, doneSnap = done.get()) {
+                                if (doneSnap > prev) {
+                                    System.out.println(label + " progress: " + doneSnap + " of " + count);
+                                }
+                                Thread.sleep(1000);
+                            }
+                        } catch (Exception ex) {
+                            throw new RuntimeException(ex);
+                        }
+                    }
+                }).start();
+            }
+        }
+
+        /** Records one completed operation; {@code failed} marks it unsuccessful. */
+        public void ding(boolean failed) {
+            int snapDone = done.incrementAndGet();
+            earliest.compareAndSet(0, System.currentTimeMillis());
+            if (failed)
+                numFailed.incrementAndGet();
+            if (logger.isDebugEnabled())
+                logger.debug(label + " " + (failed ? "failed" : "succeeded") + ", done so far = " + snapDone);
+            if (snapDone == count) {
+                queue.add(numFailed.get());
+            }
+        }
+
+        public String summarize(long startTime) {
+            return "Finished " + label + ": count = " + done.get() + ", tput = " + calcTp(count, startTime)
+                    + " ops/s, numFailed = " + numFailed;
+        }
+    }
+
+    /**
+     * Callback that times one async operation and reports the result to a
+     * ThroughputLatencyAggregator. Construction blocks on the aggregator's
+     * outstanding-operations semaphore.
+     */
+    public static class BenchmarkCallback implements Callback<Void> {
+
+        final ThroughputLatencyAggregator agg;
+        final long startTime;
+
+        public BenchmarkCallback(ThroughputLatencyAggregator agg) throws InterruptedException {
+            this.agg = agg;
+            agg.outstanding.acquire();
+            // Must set the start time *after* acquiring the outstanding
+            // semaphore, so queueing delay is not counted as latency.
+            startTime = System.currentTimeMillis();
+        }
+
+        private void finish(boolean failed) {
+            agg.reportLatency(System.currentTimeMillis() - startTime);
+            agg.tpAgg.ding(failed);
+            agg.outstanding.release();
+        }
+
+        @Override
+        public void operationFinished(Object ctx, Void resultOfOperation) {
+            finish(false);
+        }
+
+        @Override
+        public void operationFailed(Object ctx, PubSubException exception) {
+            finish(true);
+        }
+    }
+
+}

+ 46 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/benchmark/BenchmarkWorker.java

@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.benchmark;
+
+/**
+ * Base class carrying the workload parameters shared by the publisher and
+ * subscriber benchmark workers. Construction validates that the requested
+ * load divides evenly across regions, topics and partitions.
+ */
+public class BenchmarkWorker {
+    int numTopics;
+    int numMessages;
+    int numRegions;
+    int startTopicLabel;
+    int partitionIndex;
+    int numPartitions;
+
+    public BenchmarkWorker(int numTopics, int numMessages, int numRegions,
+            int startTopicLabel, int partitionIndex, int numPartitions) {
+        // Reject inconsistent workload splits up front, before recording anything.
+        if (numMessages % (numTopics * numRegions) != 0) {
+            throw new RuntimeException("Number of messages not equally divisible among regions and topics");
+        }
+        if (numTopics % numPartitions != 0) {
+            throw new RuntimeException("Number of topics not equally divisible among partitions");
+        }
+
+        this.numTopics = numTopics;
+        this.numMessages = numMessages;
+        this.numRegions = numRegions;
+        this.startTopicLabel = startTopicLabel;
+        this.partitionIndex = partitionIndex;
+        this.numPartitions = numPartitions;
+    }
+}

+ 127 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/benchmark/HedwigBenchmark.java

@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.benchmark;
+
+import java.io.File;
+import java.util.concurrent.Callable;
+
+import org.apache.commons.configuration.ConfigurationException;
+import org.apache.log4j.Logger;
+import org.jboss.netty.logging.InternalLoggerFactory;
+import org.jboss.netty.logging.Log4JLoggerFactory;
+
+import com.google.protobuf.ByteString;
+import org.apache.hedwig.client.conf.ClientConfiguration;
+import org.apache.hedwig.client.netty.HedwigClient;
+import org.apache.hedwig.client.netty.HedwigPublisher;
+import org.apache.hedwig.client.netty.HedwigSubscriber;
+
+/**
+ * Command-line entry point for the Hedwig client benchmarks. The program to
+ * run ("pub", "sub" or "recv") and all workload knobs are taken from system
+ * properties read in call(); an optional first argument names a client
+ * configuration file.
+ */
+public class HedwigBenchmark implements Callable<Void> {
+    protected static final Logger logger = Logger.getLogger(HedwigBenchmark.class);
+
+    // Every benchmark topic is named TOPIC_PREFIX + <topic number>.
+    static final String TOPIC_PREFIX = "topic-";
+
+    private final HedwigClient client;
+    private final HedwigPublisher publisher;
+    private final HedwigSubscriber subscriber;
+
+    public HedwigBenchmark(ClientConfiguration cfg) {
+        client = new HedwigClient(cfg);
+        publisher = client.getPublisher();
+        subscriber = client.getSubscriber();
+    }
+
+    // Topics are statically partitioned across workers: worker partitionIndex
+    // owns every topic whose number is congruent to it modulo numPartitions.
+    static boolean amIResponsibleForTopic(int topicNum, int partitionIndex, int numPartitions) {
+        return topicNum % numPartitions == partitionIndex;
+    }
+
+    @Override
+    public Void call() throws Exception {
+
+        //
+        // Parameters.
+        //
+
+        // What program to run: pub, sub (subscription benchmark), recv.
+        final String mode = System.getProperty("mode","");
+
+        // Number of requests to make (publishes or subscribes).
+        int numTopics = Integer.getInteger("nTopics", 50);
+        int numMessages = Integer.getInteger("nMsgs", 1000);
+        int numRegions = Integer.getInteger("nRegions", 1);
+        int startTopicLabel = Integer.getInteger("startTopicLabel", 0);
+        int partitionIndex = Integer.getInteger("partitionIndex", 0);
+        int numPartitions = Integer.getInteger("nPartitions", 1);
+
+        int replicaIndex = Integer.getInteger("replicaIndex", 0);
+
+        // rate: offered publish load in msgs/second (0 = unthrottled);
+        // npar: max outstanding async requests; msgSize: payload bytes.
+        int rate = Integer.getInteger("rate", 0);
+        int nParallel = Integer.getInteger("npar", 100);
+        int msgSize = Integer.getInteger("msgSize", 1024);
+
+        // Number of warmup subscriptions to make.
+        final int nWarmups = Integer.getInteger("nwarmups", 1000);
+
+        if (mode.equals("sub")) {
+            // Subscription-throughput benchmark: single region, no partitioning.
+            BenchmarkSubscriber benchmarkSub = new BenchmarkSubscriber(numTopics, 0, 1, startTopicLabel, 0, 1,
+                    subscriber, ByteString.copyFromUtf8("mySub"));
+
+            benchmarkSub.warmup(nWarmups);
+            benchmarkSub.call();
+
+        } else if (mode.equals("recv")) {
+
+            // Message-receive benchmark; each replica uses its own subscriber id.
+            BenchmarkSubscriber benchmarkSub = new BenchmarkSubscriber(numTopics, numMessages, numRegions,
+                    startTopicLabel, partitionIndex, numPartitions, subscriber, ByteString.copyFromUtf8("sub-"
+                            + replicaIndex));
+
+            benchmarkSub.call();
+
+        } else if (mode.equals("pub")) {
+            // Offered load in msgs/second.
+            BenchmarkPublisher benchmarkPub = new BenchmarkPublisher(numTopics, numMessages, numRegions,
+                    startTopicLabel, partitionIndex, numPartitions, publisher, subscriber, msgSize, nParallel, rate);
+            benchmarkPub.warmup(nWarmups);
+            benchmarkPub.call();
+            
+        } else {
+            throw new Exception("unknown mode: " + mode);
+        }
+
+        return null;
+    }
+
+    public static void main(String[] args) throws Exception {
+        ClientConfiguration cfg = new ClientConfiguration();
+        if (args.length > 0) {
+            String confFile = args[0];
+            try {
+                cfg.loadConf(new File(confFile).toURI().toURL());
+            } catch (ConfigurationException e) {
+                throw new RuntimeException(e);
+            }
+        }
+
+        // Route Netty's internal logging through log4j.
+        InternalLoggerFactory.setDefaultFactory(new Log4JLoggerFactory());
+
+        HedwigBenchmark app = new HedwigBenchmark(cfg);
+        app.call();
+        System.exit(0);
+    }
+
+}

+ 148 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/conf/ClientConfiguration.java

@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.conf;
+
+import java.net.InetSocketAddress;
+
+import org.apache.commons.configuration.ConfigurationException;
+import org.apache.log4j.Logger;
+
+import org.apache.hedwig.conf.AbstractConfiguration;
+import org.apache.hedwig.util.HedwigSocketAddress;
+
+/**
+ * Client-side configuration for the Hedwig client library. Each getter reads
+ * a named key from the underlying configuration (see AbstractConfiguration),
+ * falling back to the documented default when the key is absent.
+ */
+public class ClientConfiguration extends AbstractConfiguration {
+    Logger logger = Logger.getLogger(ClientConfiguration.class);
+
+    // Protected member variables for configuration parameter names
+    protected static final String DEFAULT_SERVER_HOST = "default_server_host";
+    protected static final String MAX_MESSAGE_SIZE = "max_message_size";
+    protected static final String MAX_SERVER_REDIRECTS = "max_server_redirects";
+    protected static final String AUTO_SEND_CONSUME_MESSAGE_ENABLED = "auto_send_consume_message_enabled";
+    protected static final String CONSUMED_MESSAGES_BUFFER_SIZE = "consumed_messages_buffer_size";
+    protected static final String MESSAGE_CONSUME_RETRY_WAIT_TIME = "message_consume_retry_wait_time";
+    protected static final String SUBSCRIBE_RECONNECT_RETRY_WAIT_TIME = "subscribe_reconnect_retry_wait_time";
+    protected static final String MAX_OUTSTANDING_MESSAGES = "max_outstanding_messages";
+    protected static final String SERVER_ACK_RESPONSE_TIMEOUT = "server_ack_response_timeout";
+    protected static final String TIMEOUT_THREAD_RUN_INTERVAL = "timeout_thread_run_interval";
+    protected static final String SSL_ENABLED = "ssl_enabled";
+
+    // Singletons we want to instantiate only once per ClientConfiguration
+    protected HedwigSocketAddress myDefaultServerAddress = null;
+
+    // Getters for the various Client Configuration parameters.
+    // This should point to the default server host, or the VIP fronting all of
+    // the server hubs. This will return the HedwigSocketAddress which
+    // encapsulates both the regular and SSL port connection to the server host.
+    // NOTE(review): the default "localhost:4080:9876" is presumably in
+    // host:port:sslPort form — confirm against HedwigSocketAddress parsing.
+    protected HedwigSocketAddress getDefaultServerHedwigSocketAddress() {
+        if (myDefaultServerAddress == null)
+            myDefaultServerAddress = new HedwigSocketAddress(conf.getString(DEFAULT_SERVER_HOST, "localhost:4080:9876"));
+        return myDefaultServerAddress;
+    }
+
+    // This will get the default server InetSocketAddress based on if SSL is
+    // enabled or not.
+    public InetSocketAddress getDefaultServerHost() {
+        if (isSSLEnabled())
+            return getDefaultServerHedwigSocketAddress().getSSLSocketAddress();
+        else
+            return getDefaultServerHedwigSocketAddress().getSocketAddress();
+    }
+
+    // Maximum message size in bytes; defaults to 2 MB.
+    public int getMaximumMessageSize() {
+        return conf.getInt(MAX_MESSAGE_SIZE, 2 * 1024 * 1024);
+    }
+
+    // This parameter is for setting the maximum number of server redirects to
+    // allow before we consider it as an error condition. This is to stop
+    // infinite redirect loops in case there is a problem with the hub servers
+    // topic mastership.
+    public int getMaximumServerRedirects() {
+        return conf.getInt(MAX_SERVER_REDIRECTS, 2);
+    }
+
+    // This parameter is a boolean flag indicating if the client library should
+    // automatically send the consume message to the server based on the
+    // configured amount of messages consumed by the client app. The client app
+    // could choose to override this behavior and instead, manually send the
+    // consume message to the server via the client library using its own 
+    // logic and policy.
+    public boolean isAutoSendConsumeMessageEnabled() {
+        return conf.getBoolean(AUTO_SEND_CONSUME_MESSAGE_ENABLED, true);
+    }
+
+    // This parameter is to set how many consumed messages we'll buffer up
+    // before we send the Consume message to the server indicating that all
+    // of the messages up to that point have been successfully consumed by
+    // the client.
+    public int getConsumedMessagesBufferSize() {
+        return conf.getInt(CONSUMED_MESSAGES_BUFFER_SIZE, 5);
+    }
+
+    // This parameter is used to determine how long we wait before retrying the
+    // client app's MessageHandler to consume a subscribed messages sent to us
+    // from the server. The time to wait is in milliseconds.
+    public long getMessageConsumeRetryWaitTime() {
+        return conf.getLong(MESSAGE_CONSUME_RETRY_WAIT_TIME, 10000);
+    }
+
+    // This parameter is used to determine how long we wait before retrying the
+    // Subscribe Reconnect request. This is done when the connection to a server
+    // disconnects and we attempt to connect to it. We'll keep on trying but
+    // in case the server(s) is down for a longer time, we want to throttle
+    // how often we do the subscribe reconnect request. The time to wait is in
+    // milliseconds.
+    public long getSubscribeReconnectRetryWaitTime() {
+        return conf.getLong(SUBSCRIBE_RECONNECT_RETRY_WAIT_TIME, 10000);
+    }
+
+    // This parameter is for setting the maximum number of outstanding messages
+    // the client app can be consuming at a time for topic subscription before
+    // we throttle things and stop reading from the Netty Channel.
+    public int getMaximumOutstandingMessages() {
+        return conf.getInt(MAX_OUTSTANDING_MESSAGES, 10);
+    }
+
+    // This parameter is used to determine how long we wait (in milliseconds)
+    // before we time out outstanding PubSubRequests that were written to the
+    // server successfully but haven't yet received the ack response.
+    public long getServerAckResponseTimeout() {
+        return conf.getLong(SERVER_ACK_RESPONSE_TIMEOUT, 30000);
+    }
+
+    // This parameter is used to determine how often we run the server ack
+    // response timeout cleaner thread (in milliseconds).
+    public long getTimeoutThreadRunInterval() {
+        return conf.getLong(TIMEOUT_THREAD_RUN_INTERVAL, 60000);
+    }
+
+    // This parameter is a boolean flag indicating if communication with the
+    // server should be done via SSL for encryption. This is needed for
+    // cross-colo hub clients listening to non-local servers.
+    public boolean isSSLEnabled() {
+        return conf.getBoolean(SSL_ENABLED, false);
+    }
+
+    // Validate that the configuration properties are valid.
+    // Currently only checks that an SSL port is present when SSL is enabled.
+    public void validate() throws ConfigurationException {
+        if (isSSLEnabled() && getDefaultServerHedwigSocketAddress().getSSLSocketAddress() == null) {
+            throw new ConfigurationException("SSL is enabled but a default server SSL port not given!");
+        }
+        // Add other validation checks here
+    }
+
+}

+ 58 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/data/MessageConsumeData.java

@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.data;
+
+import com.google.protobuf.ByteString;
+import org.apache.hedwig.protocol.PubSubProtocol.Message;
+
+/**
+ * Wrapper class to store all of the data points needed to encapsulate Message
+ * Consumption in the Subscribe flow for consuming a message sent from the
+ * server for a given TopicSubscriber. This will be used as the Context in the
+ * VoidCallback for the MessageHandlers once they've completed consuming the
+ * message.
+ * 
+ */
+/**
+ * Immutable holder for the data points describing one message-consumption
+ * event in the Subscribe flow: the topic, the subscriber id, and the Message
+ * the server delivered. Instances are passed as the Context object to the
+ * VoidCallback invoked when a MessageHandler finishes consuming the message.
+ */
+public class MessageConsumeData {
+
+    // All fields are set once at construction time and never mutated.
+    public final ByteString topic;
+    public final ByteString subscriberId;
+    // This is the Message sent from the server for Subscribes for consumption
+    // by the client.
+    public final Message msg;
+
+    public MessageConsumeData(final ByteString topic, final ByteString subscriberId, final Message msg) {
+        this.topic = topic;
+        this.subscriberId = subscriberId;
+        this.msg = msg;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        if (topic != null) {
+            sb.append("Topic: ").append(topic.toStringUtf8());
+        }
+        if (subscriberId != null) {
+            sb.append(PubSubData.COMMA).append("SubscriberId: ").append(subscriberId.toStringUtf8());
+        }
+        if (msg != null) {
+            sb.append(PubSubData.COMMA).append("Message: ").append(msg);
+        }
+        return sb.toString();
+    }
+}

+ 149 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/data/PubSubData.java

@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.data;
+
+import java.util.List;
+
+import com.google.protobuf.ByteString;
+import org.apache.hedwig.protocol.PubSubProtocol.Message;
+import org.apache.hedwig.protocol.PubSubProtocol.OperationType;
+import org.apache.hedwig.protocol.PubSubProtocol.SubscribeRequest.CreateOrAttach;
+import org.apache.hedwig.util.Callback;
+
+/**
+ * Wrapper class to store all of the data points needed to encapsulate all
+ * PubSub type of request operations the client will do. This includes knowing
+ * all of the information needed if we need to redo the publish/subscribe
+ * request in case of a server redirect. This will be used for all sync/async
+ * calls, and for all the known types of request messages to send to the server
+ * hubs: Publish, Subscribe, Unsubscribe, and Consume.
+ * 
+ */
+public class PubSubData {
+    // Static string constants
+    protected static final String COMMA = ", ";
+
+    // Member variables needed during object construction time.
+    public final ByteString topic;
+    public final Message msg;
+    public final ByteString subscriberId;
+    // Enum to indicate what type of operation this PubSub request data object
+    // is for.
+    public final OperationType operationType;
+    // Enum for subscribe requests to indicate if this is a CREATE, ATTACH, or
+    // CREATE_OR_ATTACH subscription request. For non-subscribe requests,
+    // this will be null.
+    public final CreateOrAttach createOrAttach;
+    // These two variables are not final since we might override them
+    // in the case of a Subscribe reconnect.
+    public Callback<Void> callback;
+    public Object context;
+
+    // Member variables used after object has been constructed.
+    // List of all servers we've sent the PubSubRequest to successfully.
+    // This is to keep track of redirected servers that responded back to us.
+    public List<ByteString> triedServers;
+    // List of all servers that we've tried to connect or write to but
+    // was unsuccessful. We'll retry sending the PubSubRequest but will
+    // quit if we're trying to connect or write to a server that we've
+    // attempted to previously.
+    public List<ByteString> connectFailedServers;
+    public List<ByteString> writeFailedServers;
+    // Boolean to the hub server indicating if it should claim ownership
+    // of the topic the PubSubRequest is for. This is mainly used after
+    // a server redirect. Defaults to false.
+    public boolean shouldClaim = false;
+    // TxnID for the PubSubData if it was sent as a PubSubRequest to the hub
+    // server. This is used in the WriteCallback in case of failure. We want
+    // to remove it from the ResponseHandler.txn2PubSubData map since the
+    // failed PubSubRequest will not get an ack response from the server.
+    // This is set later in the PubSub flows only when we write the actual
+    // request. Therefore it is not an argument in the constructor.
+    public long txnId;
+    // Time in milliseconds using the System.currentTimeMillis() call when the
+    // PubSubRequest was written on the netty Channel to the server.
+    public long requestWriteTime;
+    // For synchronous calls, this variable is used to know when the background
+    // async process for it has completed, set in the VoidCallback.
+    public boolean isDone = false;
+
+    // Constructor for all types of PubSub request data to send to the server
+    public PubSubData(final ByteString topic, final Message msg, final ByteString subscriberId,
+            final OperationType operationType, final CreateOrAttach createOrAttach, final Callback<Void> callback,
+            final Object context) {
+        this.topic = topic;
+        this.msg = msg;
+        this.subscriberId = subscriberId;
+        this.operationType = operationType;
+        this.createOrAttach = createOrAttach;
+        this.callback = callback;
+        this.context = context;
+    }
+
+    // Clear all of the stored servers we've contacted or attempted to in this
+    // request.
+    public void clearServersList() {
+        if (triedServers != null)
+            triedServers.clear();
+        if (connectFailedServers != null)
+            connectFailedServers.clear();
+        if (writeFailedServers != null)
+            writeFailedServers.clear();
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        if (topic != null)
+            sb.append("Topic: " + topic.toStringUtf8());
+        if (msg != null)
+            sb.append(COMMA).append("Message: " + msg);
+        if (subscriberId != null)
+            sb.append(COMMA).append("SubscriberId: " + subscriberId.toStringUtf8());
+        if (operationType != null)
+            sb.append(COMMA).append("Operation Type: " + operationType.toString());
+        if (createOrAttach != null)
+            sb.append(COMMA).append("Create Or Attach: " + createOrAttach.toString());
+        if (triedServers != null && triedServers.size() > 0) {
+            sb.append(COMMA).append("Tried Servers: ");
+            for (ByteString triedServer : triedServers) {
+                sb.append(triedServer.toStringUtf8()).append(COMMA);
+            }
+        }
+        if (connectFailedServers != null && connectFailedServers.size() > 0) {
+            sb.append(COMMA).append("Connect Failed Servers: ");
+            for (ByteString connectFailedServer : connectFailedServers) {
+                sb.append(connectFailedServer.toStringUtf8()).append(COMMA);
+            }
+        }
+        if (writeFailedServers != null && writeFailedServers.size() > 0) {
+            sb.append(COMMA).append("Write Failed Servers: ");
+            for (ByteString writeFailedServer : writeFailedServers) {
+                sb.append(writeFailedServer.toStringUtf8()).append(COMMA);
+            }
+        }
+        sb.append(COMMA).append("Should Claim: " + shouldClaim);
+        if (txnId != 0)
+            sb.append(COMMA).append("TxnID: " + txnId);
+        if (requestWriteTime != 0)
+            sb.append(COMMA).append("Request Write Time: " + requestWriteTime);
+        sb.append(COMMA).append("Is Done: " + isDone);
+        return sb.toString();
+    }
+
+}

+ 74 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/data/TopicSubscriber.java

@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.data;
+
+import org.apache.commons.lang.builder.HashCodeBuilder;
+
+import com.google.protobuf.ByteString;
+
+/**
+ * Wrapper class object for the Topic + SubscriberId combination. Since the
+ * Subscribe flows always use the Topic + SubscriberId as the logical entity,
+ * we'll create a simple class to encapsulate that.
+ * 
+ */
+public class TopicSubscriber {
+    private final ByteString topic;
+    private final ByteString subscriberId;
+    private final int hashCode;
+
+    public TopicSubscriber(final ByteString topic, final ByteString subscriberId) {
+        this.topic = topic;
+        this.subscriberId = subscriberId;
+        hashCode = new HashCodeBuilder().append(topic).append(subscriberId).toHashCode();
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (o == this)
+            return true;
+        if (!(o instanceof TopicSubscriber))
+            return false;
+        final TopicSubscriber obj = (TopicSubscriber) o;
+        return topic.equals(obj.topic) && subscriberId.equals(obj.subscriberId);
+    }
+
+    @Override
+    public int hashCode() {
+        return hashCode;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        if (topic != null)
+            sb.append("Topic: " + topic.toStringUtf8());
+        if (subscriberId != null)
+            sb.append(PubSubData.COMMA).append("SubscriberId: " + subscriberId.toStringUtf8());
+        return sb.toString();
+    }
+    
+    public ByteString getTopic() {
+        return topic;
+    }
+    
+    public ByteString getSubscriberId() {
+        return subscriberId;
+    }
+
+}

+ 37 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/exceptions/InvalidSubscriberIdException.java

@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.exceptions;
+
/**
 * Client-side Hedwig exception raised during subscribe-type operations when a
 * subscriberId does not conform to the expected format. The format currently
 * distinguishes local subscribers from hub subscribers.
 */
public class InvalidSubscriberIdException extends Exception {

    private static final long serialVersionUID = 873259807218723523L;

    /**
     * @param message description of the invalid subscriberId
     * @param t underlying cause
     */
    public InvalidSubscriberIdException(String message, Throwable t) {
        super(message, t);
    }

    /**
     * @param message description of the invalid subscriberId
     */
    public InvalidSubscriberIdException(String message) {
        super(message);
    }

}

+ 38 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/exceptions/ServerRedirectLoopException.java

@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.exceptions;
+
/**
 * Client-side Hedwig exception thrown when a PubSubRequest is redirected to a
 * server that the request has already been sent to. The client checks for
 * this condition to break what would otherwise be a cyclical redirect loop
 * and surfaces it to the caller via this exception.
 */
public class ServerRedirectLoopException extends Exception {

    private static final long serialVersionUID = 98723508723152897L;

    /**
     * @param message description of the redirect loop
     * @param t underlying cause
     */
    public ServerRedirectLoopException(String message, Throwable t) {
        super(message, t);
    }

    /**
     * @param message description of the redirect loop
     */
    public ServerRedirectLoopException(String message) {
        super(message);
    }

}

+ 39 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/exceptions/TooManyServerRedirectsException.java

@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.exceptions;
+
/**
 * Client-side Hedwig exception thrown when a publish/subscribe call has been
 * redirected too many times while searching for the topic master. Only a
 * configured number of redirects is allowed; once exceeded, the operation
 * fails with this exception.
 */
public class TooManyServerRedirectsException extends Exception {

    private static final long serialVersionUID = 2341192937965635310L;

    /**
     * @param message description of the redirect limit breach
     * @param t underlying cause
     */
    public TooManyServerRedirectsException(String message, Throwable t) {
        super(message, t);
    }

    /**
     * @param message description of the redirect limit breach
     */
    public TooManyServerRedirectsException(String message) {
        super(message);
    }

}

+ 95 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/handlers/MessageConsumeCallback.java

@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.handlers;
+
+import java.util.TimerTask;
+
+import org.apache.log4j.Logger;
+import org.jboss.netty.channel.Channel;
+
+import org.apache.hedwig.client.data.MessageConsumeData;
+import org.apache.hedwig.client.data.TopicSubscriber;
+import org.apache.hedwig.client.netty.HedwigClient;
+import org.apache.hedwig.exceptions.PubSubException;
+import org.apache.hedwig.util.Callback;
+
/**
 * This is the Callback used by the MessageHandlers on the client app when
 * they've finished consuming a subscription message sent from the server
 * asynchronously. This callback back to the client libs will be stateless so we
 * can use a singleton for the class. The object context used should be the
 * MessageConsumeData type. That will contain all of the information needed to
 * call the message consume logic in the client lib ResponseHandler.
 * 
 */
// NOTE(review): the javadoc above says "stateless ... singleton", but this
// class holds a per-HedwigClient reference — confirm whether one instance per
// client is the intended design.
public class MessageConsumeCallback implements Callback<Void> {

    private static Logger logger = Logger.getLogger(MessageConsumeCallback.class);

    private final HedwigClient client;

    public MessageConsumeCallback(HedwigClient client) {
        this.client = client;
    }

    /**
     * TimerTask that re-drives the async consume of a message through the
     * SubscribeResponseHandler bound to the topic's current channel. Scheduled
     * by operationFailed after a configured wait.
     */
    class MessageConsumeRetryTask extends TimerTask {
        private final MessageConsumeData messageConsumeData;
        private final TopicSubscriber topicSubscriber;

        public MessageConsumeRetryTask(MessageConsumeData messageConsumeData, TopicSubscriber topicSubscriber) {
            this.messageConsumeData = messageConsumeData;
            this.topicSubscriber = topicSubscriber;
        }

        @Override
        public void run() {
            // Try to consume the message again
            // The channel is re-looked-up at retry time rather than captured,
            // so a reconnected channel for the topic is picked up.
            Channel topicSubscriberChannel = client.getSubscriber().getChannelForTopic(topicSubscriber);
            HedwigClient.getResponseHandlerFromChannel(topicSubscriberChannel).getSubscribeResponseHandler()
                    .asyncMessageConsume(messageConsumeData.msg);
        }
    }

    /**
     * Invoked when the client app's MessageHandler consumed the message
     * successfully; notifies the SubscribeResponseHandler on the topic's
     * channel so the consume can be acknowledged. ctx must be a
     * MessageConsumeData.
     */
    public void operationFinished(Object ctx, Void resultOfOperation) {
        MessageConsumeData messageConsumeData = (MessageConsumeData) ctx;
        TopicSubscriber topicSubscriber = new TopicSubscriber(messageConsumeData.topic, messageConsumeData.subscriberId);
        // Message has been successfully consumed by the client app so callback
        // to the ResponseHandler indicating that the message is consumed.
        Channel topicSubscriberChannel = client.getSubscriber().getChannelForTopic(topicSubscriber);
        HedwigClient.getResponseHandlerFromChannel(topicSubscriberChannel).getSubscribeResponseHandler()
                .messageConsumed(messageConsumeData.msg);
    }

    /**
     * Invoked when consumption failed; schedules a MessageConsumeRetryTask on
     * the client timer after the configured retry wait. ctx must be a
     * MessageConsumeData.
     */
    public void operationFailed(Object ctx, PubSubException exception) {
        // Message has NOT been successfully consumed by the client app so
        // callback to the ResponseHandler to try the async MessageHandler
        // Consume logic again.
        MessageConsumeData messageConsumeData = (MessageConsumeData) ctx;
        TopicSubscriber topicSubscriber = new TopicSubscriber(messageConsumeData.topic, messageConsumeData.subscriberId);
        logger.error("Message was not consumed successfully by client MessageHandler: " + messageConsumeData);

        // Sleep a pre-configured amount of time (in milliseconds) before we
        // do the retry. In the future, we can have more dynamic logic on
        // what duration to sleep based on how many times we've retried, or
        // perhaps what the last amount of time we slept was. We could stick
        // some of this meta-data into the MessageConsumeData when we retry.
        client.getClientTimer().schedule(new MessageConsumeRetryTask(messageConsumeData, topicSubscriber),
                client.getConfiguration().getMessageConsumeRetryWaitTime());
    }

}

+ 87 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/handlers/PubSubCallback.java

@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.handlers;
+
+import org.apache.log4j.Logger;
+
+import org.apache.hedwig.client.data.PubSubData;
+import org.apache.hedwig.exceptions.PubSubException;
+import org.apache.hedwig.util.Callback;
+
+/**
+ * This class is used when we are doing synchronous type of operations. All
+ * underlying client ops in Hedwig are async so this is just a way to make the
+ * async calls synchronous.
+ * 
+ */
+public class PubSubCallback implements Callback<Void> {
+
+    private static Logger logger = Logger.getLogger(PubSubCallback.class);
+
+    // Private member variables
+    private PubSubData pubSubData;
+    // Boolean indicator to see if the sync PubSub call was successful or not.
+    private boolean isCallSuccessful;
+    // For sync callbacks, we'd like to know what the PubSubException is thrown
+    // on failure. This is so we can have a handle to the exception and rethrow
+    // it later.
+    private PubSubException failureException;
+
+    // Constructor
+    public PubSubCallback(PubSubData pubSubData) {
+        this.pubSubData = pubSubData;
+    }
+
+    public void operationFinished(Object ctx, Void resultOfOperation) {
+        if (logger.isDebugEnabled())
+            logger.debug("PubSub call succeeded for pubSubData: " + pubSubData);
+        // Wake up the main sync PubSub thread that is waiting for us to
+        // complete.
+        synchronized (pubSubData) {
+            isCallSuccessful = true;
+            pubSubData.isDone = true;
+            pubSubData.notify();
+        }
+    }
+
+    public void operationFailed(Object ctx, PubSubException exception) {
+        if (logger.isDebugEnabled())
+            logger.debug("PubSub call failed with exception: " + exception + ", pubSubData: " + pubSubData);
+        // Wake up the main sync PubSub thread that is waiting for us to
+        // complete.
+        synchronized (pubSubData) {
+            isCallSuccessful = false;
+            failureException = exception;
+            pubSubData.isDone = true;
+            pubSubData.notify();
+        }
+    }
+
+    // Public getter to determine if the PubSub callback is successful or not
+    // based on the PubSub ack response from the server.
+    public boolean getIsCallSuccessful() {
+        return isCallSuccessful;
+    }
+
+    // Public getter to retrieve what the PubSubException was that occurred when
+    // the operation failed.
+    public PubSubException getFailureException() {
+        return failureException;
+    }
+
+}

+ 70 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/handlers/PublishResponseHandler.java

@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.handlers;
+
+import org.apache.log4j.Logger;
+import org.jboss.netty.channel.Channel;
+
+import org.apache.hedwig.client.data.PubSubData;
+import org.apache.hedwig.client.netty.HedwigClient;
+import org.apache.hedwig.client.netty.ResponseHandler;
+import org.apache.hedwig.exceptions.PubSubException.ServiceDownException;
+import org.apache.hedwig.protocol.PubSubProtocol.PubSubResponse;
+
/**
 * Handles Publish responses from the hub server, completing the original
 * request's callback or re-driving the request on a redirect.
 */
public class PublishResponseHandler {

    private static Logger logger = Logger.getLogger(PublishResponseHandler.class);

    // Owning handler; used to replay the request on a redirect response.
    private final ResponseHandler responseHandler;

    public PublishResponseHandler(ResponseHandler responseHandler) {
        this.responseHandler = responseHandler;
    }

    // Main method to handle Publish Response messages from the server.
    /**
     * Dispatches on the response status code: SUCCESS completes the callback,
     * SERVICE_DOWN fails it, NOT_RESPONSIBLE_FOR_TOPIC reposts the original
     * request via the ResponseHandler's redirect logic, and every other code
     * is treated as an error and fails the callback.
     *
     * @param response the PubSubResponse received from the server
     * @param pubSubData the original request data whose callback is completed
     * @param channel the channel the response arrived on (used for logging
     *            and for the redirect repost)
     * @throws Exception propagated from the redirect handling
     */
    public void handlePublishResponse(PubSubResponse response, PubSubData pubSubData, Channel channel) throws Exception {
        if (logger.isDebugEnabled())
            logger.debug("Handling a Publish response: " + response + ", pubSubData: " + pubSubData + ", host: "
                    + HedwigClient.getHostFromChannel(channel));
        switch (response.getStatusCode()) {
        case SUCCESS:
            // Response was success so invoke the callback's operationFinished
            // method.
            pubSubData.callback.operationFinished(pubSubData.context, null);
            break;
        case SERVICE_DOWN:
            // Response was service down failure so just invoke the callback's
            // operationFailed method.
            pubSubData.callback.operationFailed(pubSubData.context, new ServiceDownException(
                    "Server responded with a SERVICE_DOWN status"));
            break;
        case NOT_RESPONSIBLE_FOR_TOPIC:
            // Redirect response so we'll need to repost the original Publish
            // Request
            responseHandler.handleRedirectResponse(response, pubSubData, channel);
            break;
        default:
            // Consider all other status codes as errors, operation failed
            // cases.
            logger.error("Unexpected error response from server for PubSubResponse: " + response);
            pubSubData.callback.operationFailed(pubSubData.context, new ServiceDownException(
                    "Server responded with a status code of: " + response.getStatusCode()));
            break;
        }
    }
}

+ 113 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/handlers/SubscribeReconnectCallback.java

@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.handlers;
+
+import java.util.TimerTask;
+
+import org.apache.log4j.Logger;
+
+import org.apache.hedwig.client.api.MessageHandler;
+import org.apache.hedwig.client.conf.ClientConfiguration;
+import org.apache.hedwig.client.data.PubSubData;
+import org.apache.hedwig.client.netty.HedwigClient;
+import org.apache.hedwig.client.netty.HedwigSubscriber;
+import org.apache.hedwig.exceptions.PubSubException;
+import org.apache.hedwig.exceptions.PubSubException.ClientNotSubscribedException;
+import org.apache.hedwig.util.Callback;
+
/**
 * This class is used when a Subscribe channel gets disconnected and we attempt
 * to re-establish the connection. Once the connection to the server host for
 * the topic is completed, we need to restart delivery for that topic if that
 * was the case before the original channel got disconnected. This async
 * callback will be the hook for this.
 * 
 */
public class SubscribeReconnectCallback implements Callback<Void> {

    private static Logger logger = Logger.getLogger(SubscribeReconnectCallback.class);

    // Private member variables
    // Original subscribe request data, replayed on each reconnect attempt.
    private final PubSubData origSubData;
    private final HedwigClient client;
    private final HedwigSubscriber sub;
    private final ClientConfiguration cfg;
    // Non-null only if delivery had been started before the disconnect; used
    // to restart delivery after a successful reconnect.
    private final MessageHandler messageHandler;

    // Constructor
    public SubscribeReconnectCallback(PubSubData origSubData, HedwigClient client, MessageHandler messageHandler) {
        this.origSubData = origSubData;
        this.client = client;
        this.sub = client.getSubscriber();
        this.cfg = client.getConfiguration();
        this.messageHandler = messageHandler;
    }

    /**
     * TimerTask that retries the reconnect by re-issuing the original
     * subscribe request against the default server host.
     */
    class SubscribeReconnectRetryTask extends TimerTask {
        @Override
        public void run() {
            if (logger.isDebugEnabled())
                logger.debug("Retrying subscribe reconnect request for origSubData: " + origSubData);
            // Clear out all of the servers we've contacted or attempted to from
            // this request.
            origSubData.clearServersList();
            client.doConnect(origSubData, cfg.getDefaultServerHost());
        }
    }

    /**
     * Reconnect succeeded: restart delivery if it was active before the
     * disconnect (messageHandler != null); on the (unexpected)
     * ClientNotSubscribedException, falls back to retrying the whole
     * subscribe request.
     */
    public void operationFinished(Object ctx, Void resultOfOperation) {
        if (logger.isDebugEnabled())
            logger.debug("Subscribe reconnect succeeded for origSubData: " + origSubData);
        // Now we want to restart delivery for the subscription channel only
        // if delivery was started at the time the original subscribe channel
        // was disconnected.
        if (messageHandler != null) {
            try {
                sub.startDelivery(origSubData.topic, origSubData.subscriberId, messageHandler);
            } catch (ClientNotSubscribedException e) {
                // This exception should never be thrown here but just in case,
                // log an error and just keep retrying the subscribe request.
                logger.error("Subscribe was successful but error starting delivery for topic: "
                        + origSubData.topic.toStringUtf8() + ", subscriberId: "
                        + origSubData.subscriberId.toStringUtf8(), e);
                retrySubscribeRequest();
            }
        }
    }

    /**
     * Reconnect failed: retry in the background indefinitely, since there is
     * no way to signal a failed topic subscription to the application layer.
     */
    public void operationFailed(Object ctx, PubSubException exception) {
        // If the subscribe reconnect fails, just keep retrying the subscribe
        // request. There isn't a way to flag to the application layer that
        // a topic subscription has failed. So instead, we'll just keep
        // retrying in the background until success.
        logger.error("Subscribe reconnect failed with error: " + exception.getMessage());
        retrySubscribeRequest();
    }

    // Schedules a SubscribeReconnectRetryTask on the client timer after the
    // configured wait, unless the client has already been stopped.
    private void retrySubscribeRequest() {
        // If the client has stopped, there is no need to proceed with any
        // callback logic here.
        if (client.hasStopped())
            return;

        // Retry the subscribe request but only after waiting for a
        // preconfigured amount of time.
        client.getClientTimer().schedule(new SubscribeReconnectRetryTask(),
                client.getConfiguration().getSubscribeReconnectRetryWaitTime());
    }
}

+ 329 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/handlers/SubscribeResponseHandler.java

@@ -0,0 +1,329 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.handlers;
+
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.Queue;
+import java.util.Set;
+
+import org.apache.log4j.Logger;
+import org.jboss.netty.channel.Channel;
+
+import org.apache.hedwig.client.api.MessageHandler;
+import org.apache.hedwig.client.data.MessageConsumeData;
+import org.apache.hedwig.client.data.PubSubData;
+import org.apache.hedwig.client.data.TopicSubscriber;
+import org.apache.hedwig.client.netty.HedwigClient;
+import org.apache.hedwig.client.netty.ResponseHandler;
+import org.apache.hedwig.exceptions.PubSubException.ClientAlreadySubscribedException;
+import org.apache.hedwig.exceptions.PubSubException.ServiceDownException;
+import org.apache.hedwig.protocol.PubSubProtocol.Message;
+import org.apache.hedwig.protocol.PubSubProtocol.MessageSeqId;
+import org.apache.hedwig.protocol.PubSubProtocol.PubSubResponse;
+import org.apache.hedwig.protocol.PubSubProtocol.StatusCode;
+
+public class SubscribeResponseHandler {
+
+    private static Logger logger = Logger.getLogger(SubscribeResponseHandler.class);
+
+    private final ResponseHandler responseHandler;
+
+    // Member variables used when this ResponseHandler is for a Subscribe
+    // channel. We need to be able to consume messages sent back to us from
+    // the server, and to also recreate the Channel connection if it ever goes
+    // down. For that, we need to store the original PubSubData for the
+    // subscribe request, and also the MessageHandler that was registered when
+    // delivery of messages started for the subscription.
+    private PubSubData origSubData;
+    private Channel subscribeChannel;
+    private MessageHandler messageHandler;
+    // Counter for the number of consumed messages so far to buffer up before we
+    // send the Consume message back to the server along with the last/largest
+    // message seq ID seen so far in that batch.
+    private int numConsumedMessagesInBuffer = 0;
+    private MessageSeqId lastMessageSeqId;
+    // Queue used for subscribes when the MessageHandler hasn't been registered
+    // yet but we've already received subscription messages from the server.
+    // This will be lazily created as needed.
+    private Queue<Message> subscribeMsgQueue;
+    // Set to store all of the outstanding subscribed messages that are pending
+    // to be consumed by the client app's MessageHandler. If this ever grows too
+    // big (e.g. problem at the client end for message consumption), we can
+    // throttle things by temporarily setting the Subscribe Netty Channel
+    // to not be readable. When the Set has shrunk sufficiently, we can turn the
+    // channel back on to read new messages.
+    private Set<Message> outstandingMsgSet;
+
+    public SubscribeResponseHandler(ResponseHandler responseHandler) {
+        this.responseHandler = responseHandler;
+    }
+
+    // Public getter to retrieve the original PubSubData used for the Subscribe
+    // request.
+    public PubSubData getOrigSubData() {
+        return origSubData;
+    }
+
+    // Main method to handle Subscribe responses from the server that we sent
+    // a Subscribe Request to.
+    public void handleSubscribeResponse(PubSubResponse response, PubSubData pubSubData, Channel channel)
+            throws Exception {
+        // If this was not a successful response to the Subscribe request, we
+        // won't be using the Netty Channel created so just close it.
+        if (!response.getStatusCode().equals(StatusCode.SUCCESS)) {
+            HedwigClient.getResponseHandlerFromChannel(channel).channelClosedExplicitly = true;
+            channel.close();
+        }
+
+        if (logger.isDebugEnabled())
+            logger.debug("Handling a Subscribe response: " + response + ", pubSubData: " + pubSubData + ", host: "
+                    + HedwigClient.getHostFromChannel(channel));
+        switch (response.getStatusCode()) {
+        case SUCCESS:
+            // For successful Subscribe requests, store this Channel locally
+            // and set it to not be readable initially.
+            // This way we won't be delivering messages for this topic
+            // subscription until the client explicitly says so.
+            subscribeChannel = channel;
+            subscribeChannel.setReadable(false);
+            // Store the original PubSubData used to create this successful
+            // Subscribe request.
+            origSubData = pubSubData;
+            // Store the mapping for the TopicSubscriber to the Channel.
+            // This is so we can control the starting and stopping of
+            // message deliveries from the server on that Channel. Store
+            // this only on a successful ack response from the server.
+            TopicSubscriber topicSubscriber = new TopicSubscriber(pubSubData.topic, pubSubData.subscriberId);
+            responseHandler.getSubscriber().setChannelForTopic(topicSubscriber, channel);
+            // Lazily create the Set to keep track of outstanding Messages
+            // to be consumed by the client app. At this stage, delivery for
+            // that topic hasn't started yet so creation of this Set should
+            // be thread safe. We'll create the Set with an initial capacity
+            // equal to the configured parameter for the maximum number of
+            // outstanding messages to allow. The load factor will be set to
+            // 1.0f which means we'll only rehash and allocate more space if
+            // we ever exceed the initial capacity. That should be okay
+            // because when that happens, things are slow already and piling
+            // up on the client app side to consume messages.
+            outstandingMsgSet = new HashSet<Message>(
+                    responseHandler.getConfiguration().getMaximumOutstandingMessages(), 1.0f);
+            // Response was success so invoke the callback's operationFinished
+            // method.
+            pubSubData.callback.operationFinished(pubSubData.context, null);
+            break;
+        case CLIENT_ALREADY_SUBSCRIBED:
+            // For Subscribe requests, the server says that the client is
+            // already subscribed to it.
+            pubSubData.callback.operationFailed(pubSubData.context, new ClientAlreadySubscribedException(
+                    "Client is already subscribed for topic: " + pubSubData.topic.toStringUtf8() + ", subscriberId: "
+                            + pubSubData.subscriberId.toStringUtf8()));
+            break;
+        case SERVICE_DOWN:
+            // Response was service down failure so just invoke the callback's
+            // operationFailed method.
+            pubSubData.callback.operationFailed(pubSubData.context, new ServiceDownException(
+                    "Server responded with a SERVICE_DOWN status"));
+            break;
+        case NOT_RESPONSIBLE_FOR_TOPIC:
+            // Redirect response so we'll need to repost the original Subscribe
+            // Request
+            responseHandler.handleRedirectResponse(response, pubSubData, channel);
+            break;
+        default:
+            // Consider all other status codes as errors, operation failed
+            // cases.
+            logger.error("Unexpected error response from server for PubSubResponse: " + response);
+            pubSubData.callback.operationFailed(pubSubData.context, new ServiceDownException(
+                    "Server responded with a status code of: " + response.getStatusCode()));
+            break;
+        }
+    }
+
+    // Main method to handle consuming a message for a topic that the client is
+    // subscribed to.
+    public void handleSubscribeMessage(PubSubResponse response) {
+        if (logger.isDebugEnabled())
+            logger.debug("Handling a Subscribe message in response: " + response + ", topic: "
+                    + origSubData.topic.toStringUtf8() + ", subscriberId: " + origSubData.subscriberId.toStringUtf8());
+        Message message = response.getMessage();
+        // Consume the message asynchronously that the client is subscribed
+        // to. Do this only if delivery for the subscription has started and
+        // a MessageHandler has been registered for the TopicSubscriber.
+        if (messageHandler != null) {
+            asyncMessageConsume(message);
+        } else {
+            // MessageHandler has not yet been registered so queue up these
+            // messages for the Topic Subscription. Make the initial lazy
+            // creation of the message queue thread safe just so we don't
+            // run into a race condition where two simultaneous threads process
+            // a received message and both try to create a new instance of
+            // the message queue. Performance overhead should be okay
+            // because the delivery of the topic has not even started yet
+            // so these messages are not consumed and just buffered up here.
+            synchronized (this) {
+                if (subscribeMsgQueue == null)
+                    subscribeMsgQueue = new LinkedList<Message>();
+            }
+            if (logger.isDebugEnabled())
+                logger
+                        .debug("Message has arrived but Subscribe channel does not have a registered MessageHandler yet so queueing up the message: "
+                                + message);
+            subscribeMsgQueue.add(message);
+        }
+    }
+
+    /**
+     * Method called when a message arrives for a subscribe Channel and we want
+     * to consume it asynchronously via the registered MessageHandler (should
+     * not be null when called here).
+     * 
+     * @param message
+     *            Message from Subscribe Channel we want to consume.
+     */
+    protected void asyncMessageConsume(Message message) {
+        if (logger.isDebugEnabled())
+            logger.debug("Call the client app's MessageHandler asynchronously to consume the message: " + message
+                    + ", topic: " + origSubData.topic.toStringUtf8() + ", subscriberId: "
+                    + origSubData.subscriberId.toStringUtf8());
+        // Add this "pending to be consumed" message to the outstandingMsgSet.
+        outstandingMsgSet.add(message);
+        // Check if we've exceeded the max size for the outstanding message set.
+        if (outstandingMsgSet.size() >= responseHandler.getConfiguration().getMaximumOutstandingMessages()
+                && subscribeChannel.isReadable()) {
+            // Too many outstanding messages so throttle it by setting the Netty
+            // Channel to not be readable.
+            if (logger.isDebugEnabled())
+                logger.debug("Too many outstanding messages (" + outstandingMsgSet.size()
+                        + ") so throttling the subscribe netty Channel");
+            subscribeChannel.setReadable(false);
+        }
+        MessageConsumeData messageConsumeData = new MessageConsumeData(origSubData.topic, origSubData.subscriberId,
+                message);
+        messageHandler.consume(origSubData.topic, origSubData.subscriberId, message, responseHandler.getClient()
+                .getConsumeCallback(), messageConsumeData);
+    }
+
+    /**
+     * Method called when the client app's MessageHandler has asynchronously
+     * completed consuming a subscribed message sent from the server. The
+     * contract with the client app is that messages sent to the handler to be
+     * consumed will have the callback response done in the same order. So if we
+     * asynchronously call the MessageHandler to consume messages #1-5, that
+     * should call the messageConsumed method here via the VoidCallback in the
+     * same order. To make this thread safe, since multiple outstanding messages
+     * could be consumed by the client app and then called back to here, make
+     * this method synchronized.
+     * 
+     * @param message
+     *            Message sent from server for topic subscription that has been
+     *            consumed by the client.
+     */
+    protected synchronized void messageConsumed(Message message) {
+        if (logger.isDebugEnabled())
+            logger.debug("Message has been successfully consumed by the client app for message: " + message
+                    + ", topic: " + origSubData.topic.toStringUtf8() + ", subscriberId: "
+                    + origSubData.subscriberId.toStringUtf8());
+        // Update the consumed messages buffer variables
+        if (responseHandler.getConfiguration().isAutoSendConsumeMessageEnabled()) {
+            // Update these variables only if we are auto-sending consume
+            // messages to the server. Otherwise the onus is on the client app
+            // to call the Subscriber consume API to let the server know which
+            // messages it has successfully consumed.
+            numConsumedMessagesInBuffer++;
+            lastMessageSeqId = message.getMsgId();
+        }
+        // Remove this consumed message from the outstanding Message Set.
+        outstandingMsgSet.remove(message);
+
+        // For consume response to server, there is a config param on how many
+        // messages to consume and buffer up before sending the consume request.
+        // We just need to keep a count of the number of messages consumed
+        // and the largest/latest msg ID seen so far in this batch. Messages
+        // should be delivered in order and without gaps. Do this only if
+        // auto-sending of consume messages is enabled.
+        if (responseHandler.getConfiguration().isAutoSendConsumeMessageEnabled()
+                && numConsumedMessagesInBuffer >= responseHandler.getConfiguration().getConsumedMessagesBufferSize()) {
+            // Send the consume request and reset the consumed messages buffer
+            // variables. We will use the same Channel created from the
+            // subscribe request for the TopicSubscriber.
+            if (logger.isDebugEnabled())
+                logger
+                        .debug("Consumed message buffer limit reached so send the Consume Request to the server with lastMessageSeqId: "
+                                + lastMessageSeqId);
+            responseHandler.getSubscriber().doConsume(origSubData, subscribeChannel, lastMessageSeqId);
+            numConsumedMessagesInBuffer = 0;
+            lastMessageSeqId = null;
+        }
+
+        // Check if we throttled message consumption previously when the
+        // outstanding message limit was reached. For now, only turn the
+        // delivery back on if there are no more outstanding messages to
+        // consume. We could make this a configurable parameter if needed.
+        if (!subscribeChannel.isReadable() && outstandingMsgSet.size() == 0) {
+            if (logger.isDebugEnabled())
+                logger
+                        .debug("Message consumption has caught up so okay to turn off throttling of messages on the subscribe channel for topic: "
+                                + origSubData.topic.toStringUtf8()
+                                + ", subscriberId: "
+                                + origSubData.subscriberId.toStringUtf8());
+            subscribeChannel.setReadable(true);
+        }
+    }
+
+    /**
+     * Setter used for Subscribe flows when delivery for the subscription is
+     * started. This is used to register the MessageHandler needed to consumer
+     * the subscribed messages for the topic.
+     * 
+     * @param messageHandler
+     *            MessageHandler to register for this ResponseHandler instance.
+     */
+    public void setMessageHandler(MessageHandler messageHandler) {
+        if (logger.isDebugEnabled())
+            logger.debug("Setting the messageHandler for topic: " + origSubData.topic.toStringUtf8()
+                    + ", subscriberId: " + origSubData.subscriberId.toStringUtf8());
+        this.messageHandler = messageHandler;
+        // Once the MessageHandler is registered, see if we have any queued up
+        // subscription messages sent to us already from the server. If so,
+        // consume those first. Do this only if the MessageHandler registered is
+        // not null (since that would be the HedwigSubscriber.stopDelivery
+        // call).
+        if (messageHandler != null && subscribeMsgQueue != null && subscribeMsgQueue.size() > 0) {
+            if (logger.isDebugEnabled())
+                logger.debug("Consuming " + subscribeMsgQueue.size() + " queued up messages for topic: "
+                        + origSubData.topic.toStringUtf8() + ", subscriberId: "
+                        + origSubData.subscriberId.toStringUtf8());
+            for (Message message : subscribeMsgQueue) {
+                asyncMessageConsume(message);
+            }
+            // Now we can remove the queued up messages since they are all
+            // consumed.
+            subscribeMsgQueue.clear();
+        }
+    }
+
+    /**
+     * Getter for the MessageHandler that is set for this subscribe channel.
+     * 
+     * @return The MessageHandler for consuming messages
+     */
+    public MessageHandler getMessageHandler() {
+        return messageHandler;
+    }
+}

+ 83 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/handlers/UnsubscribeResponseHandler.java

@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.handlers;
+
+import org.apache.log4j.Logger;
+import org.jboss.netty.channel.Channel;
+
+import org.apache.hedwig.client.data.PubSubData;
+import org.apache.hedwig.client.netty.HedwigClient;
+import org.apache.hedwig.client.netty.ResponseHandler;
+import org.apache.hedwig.exceptions.PubSubException.ClientNotSubscribedException;
+import org.apache.hedwig.exceptions.PubSubException.ServiceDownException;
+import org.apache.hedwig.protocol.PubSubProtocol.PubSubResponse;
+
+public class UnsubscribeResponseHandler {
+
+    private static Logger logger = Logger.getLogger(UnsubscribeResponseHandler.class);
+
+    private final ResponseHandler responseHandler;
+
+    public UnsubscribeResponseHandler(ResponseHandler responseHandler) {
+        this.responseHandler = responseHandler;
+    }
+
+    // Main method to handle Unsubscribe Response messages from the server.
+    public void handleUnsubscribeResponse(PubSubResponse response, PubSubData pubSubData, Channel channel)
+            throws Exception {
+        if (logger.isDebugEnabled())
+            logger.debug("Handling an Unsubscribe response: " + response + ", pubSubData: " + pubSubData + ", host: "
+                    + HedwigClient.getHostFromChannel(channel));
+        switch (response.getStatusCode()) {
+        case SUCCESS:
+            // For successful Unsubscribe requests, we can now safely close the
+            // Subscribe Channel and any cached data for that TopicSubscriber.
+            responseHandler.getSubscriber().closeSubscription(pubSubData.topic, pubSubData.subscriberId);
+            // Response was success so invoke the callback's operationFinished
+            // method.
+            pubSubData.callback.operationFinished(pubSubData.context, null);
+            break;
+        case CLIENT_NOT_SUBSCRIBED:
+            // For Unsubscribe requests, the server says that the client was
+            // never subscribed to the topic.
+            pubSubData.callback.operationFailed(pubSubData.context, new ClientNotSubscribedException(
+                    "Client was never subscribed to topic: " + pubSubData.topic.toStringUtf8() + ", subscriberId: "
+                            + pubSubData.subscriberId.toStringUtf8()));
+            break;
+        case SERVICE_DOWN:
+            // Response was service down failure so just invoke the callback's
+            // operationFailed method.
+            pubSubData.callback.operationFailed(pubSubData.context, new ServiceDownException(
+                    "Server responded with a SERVICE_DOWN status"));
+            break;
+        case NOT_RESPONSIBLE_FOR_TOPIC:
+            // Redirect response so we'll need to repost the original
+            // Unsubscribe Request
+            responseHandler.handleRedirectResponse(response, pubSubData, channel);
+            break;
+        default:
+            // Consider all other status codes as errors, operation failed
+            // cases.
+            logger.error("Unexpected error response from server for PubSubResponse: " + response);
+            pubSubData.callback.operationFailed(pubSubData.context, new ServiceDownException(
+                    "Server responded with a status code of: " + response.getStatusCode()));
+            break;
+        }
+    }
+
+}

+ 58 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/netty/ClientChannelPipelineFactory.java

@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.netty;
+
+import org.jboss.netty.channel.ChannelPipeline;
+import org.jboss.netty.channel.ChannelPipelineFactory;
+import org.jboss.netty.channel.Channels;
+import org.jboss.netty.handler.codec.frame.LengthFieldBasedFrameDecoder;
+import org.jboss.netty.handler.codec.frame.LengthFieldPrepender;
+import org.jboss.netty.handler.codec.protobuf.ProtobufDecoder;
+import org.jboss.netty.handler.codec.protobuf.ProtobufEncoder;
+import org.jboss.netty.handler.ssl.SslHandler;
+
+import org.apache.hedwig.protocol.PubSubProtocol;
+
+public class ClientChannelPipelineFactory implements ChannelPipelineFactory {
+
+    private HedwigClient client;
+
+    public ClientChannelPipelineFactory(HedwigClient client) {
+        this.client = client;
+    }
+
+    // Retrieve a ChannelPipeline from the factory.
+    public ChannelPipeline getPipeline() throws Exception {
+        // Create a new ChannelPipline using the factory method from the
+        // Channels helper class.
+        ChannelPipeline pipeline = Channels.pipeline();        
+        if (client.getSslFactory() != null) {
+            pipeline.addLast("ssl", new SslHandler(client.getSslFactory().getEngine()));
+        }        
+        pipeline.addLast("lengthbaseddecoder", new LengthFieldBasedFrameDecoder(client.getConfiguration()
+                .getMaximumMessageSize(), 0, 4, 0, 4));
+        pipeline.addLast("lengthprepender", new LengthFieldPrepender(4));
+
+        pipeline.addLast("protobufdecoder", new ProtobufDecoder(PubSubProtocol.PubSubResponse.getDefaultInstance()));
+        pipeline.addLast("protobufencoder", new ProtobufEncoder());
+
+        pipeline.addLast("responsehandler", new ResponseHandler(client));
+        return pipeline;
+    }
+
+}

+ 122 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/netty/ConnectCallback.java

@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.netty;
+
+import java.net.InetSocketAddress;
+import java.util.LinkedList;
+
+import org.apache.log4j.Logger;
+import org.jboss.netty.channel.ChannelFuture;
+import org.jboss.netty.channel.ChannelFutureListener;
+
+import com.google.protobuf.ByteString;
+import org.apache.hedwig.client.conf.ClientConfiguration;
+import org.apache.hedwig.client.data.PubSubData;
+import org.apache.hedwig.exceptions.PubSubException.CouldNotConnectException;
+import org.apache.hedwig.protocol.PubSubProtocol.OperationType;
+import org.apache.hedwig.util.HedwigSocketAddress;
+
+public class ConnectCallback implements ChannelFutureListener {
+
+    private static Logger logger = Logger.getLogger(ConnectCallback.class);
+
+    // Private member variables
+    private PubSubData pubSubData;
+    private InetSocketAddress host;
+    private final HedwigClient client;
+    private final HedwigPublisher pub;
+    private final HedwigSubscriber sub;
+    private final ClientConfiguration cfg;
+
+    // Constructor
+    public ConnectCallback(PubSubData pubSubData, InetSocketAddress host, HedwigClient client) {
+        super();
+        this.pubSubData = pubSubData;
+        this.host = host;
+        this.client = client;
+        this.pub = client.getPublisher();
+        this.sub = client.getSubscriber();
+        this.cfg = client.getConfiguration();
+    }
+
+    public void operationComplete(ChannelFuture future) throws Exception {
+        // If the client has stopped, there is no need to proceed with any
+        // callback logic here.
+        if (client.hasStopped())
+            return;
+
+        // Check if the connection to the server was done successfully.
+        if (!future.isSuccess()) {
+            logger.error("Error connecting to host: " + host);
+            // If we were not able to connect to the host, it could be down.
+            ByteString hostString = ByteString.copyFromUtf8(HedwigSocketAddress.sockAddrStr(host));
+            if (pubSubData.connectFailedServers != null && pubSubData.connectFailedServers.contains(hostString)) {
+                // We've already tried to connect to this host before so just
+                // invoke the operationFailed callback.
+                logger.error("Error connecting to host more than once so just invoke the operationFailed callback!");
+                pubSubData.callback.operationFailed(pubSubData.context, new CouldNotConnectException(
+                        "Could not connect to host: " + host));
+            } else {
+                if (logger.isDebugEnabled())
+                    logger.debug("Try to connect to server: " + host + " again for pubSubData: " + pubSubData);
+                // Keep track of this current server that we failed to connect
+                // to but retry the request on the default server host/VIP.
+                // The topic2Host mapping might need to be updated.
+                if (pubSubData.connectFailedServers == null)
+                    pubSubData.connectFailedServers = new LinkedList<ByteString>();
+                pubSubData.connectFailedServers.add(hostString);
+                client.doConnect(pubSubData, cfg.getDefaultServerHost());
+            }
+            // Finished with failure logic so just return.
+            return;
+        }
+
+        // Now that we have connected successfully to the server, see what type
+        // of PubSub request this was.
+        if (logger.isDebugEnabled())
+            logger.debug("Connection to host: " + host + " was successful for pubSubData: " + pubSubData);
+        if (pubSubData.operationType.equals(OperationType.PUBLISH)) {
+            // Publish Request so store this Channel connection in the
+            // HedwigPublisher Map (if it doesn't exist yet) and then
+            // do the publish on the cached channel mapped to the host.
+            // Note that due to race concurrency situations, it is
+            // possible that the cached channel is not the same one
+            // as the channel established here. If that is the case,
+            // this channel will be closed but we'll always publish on the
+            // cached channel in the HedwigPublisher.host2Channel map.
+            pub.storeHost2ChannelMapping(future.getChannel());
+            pub.doPublish(pubSubData, pub.host2Channel.get(HedwigClient.getHostFromChannel(future.getChannel())));
+        } else if (pubSubData.operationType.equals(OperationType.UNSUBSCRIBE)) {
+            // Unsubscribe Request so store this Channel connection in the
+            // HedwigPublisher Map (if it doesn't exist yet) and then do the
+            // unsubscribe. Unsubscribe requests will share and reuse
+            // the netty Channel connections that Publish requests use.
+            pub.storeHost2ChannelMapping(future.getChannel());
+            sub.doSubUnsub(pubSubData, pub.host2Channel.get(HedwigClient.getHostFromChannel(future.getChannel())));
+        } else {
+            // Subscribe Request. We do not store the Channel connection yet for
+            // Subscribes here. This will be done only when we've found the
+            // right server topic master. That is only determined when we
+            // receive a successful server ack response to the Subscribe
+            // request (handled in ResponseHandler). There is no need to store
+            // the Unsubscribe channel connection as we won't use it again.
+            sub.doSubUnsub(pubSubData, future.getChannel());
+        }
+    }
+
+}

+ 359 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/netty/HedwigClient.java

@@ -0,0 +1,359 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.netty;
+
+import java.net.InetSocketAddress;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.log4j.Logger;
+import org.jboss.netty.bootstrap.ClientBootstrap;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelFactory;
+import org.jboss.netty.channel.ChannelFuture;
+import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
+
+import com.google.protobuf.ByteString;
+import org.apache.hedwig.client.conf.ClientConfiguration;
+import org.apache.hedwig.client.data.PubSubData;
+import org.apache.hedwig.client.handlers.MessageConsumeCallback;
+import org.apache.hedwig.client.ssl.SslClientContextFactory;
+import org.apache.hedwig.exceptions.PubSubException.UncertainStateException;
+
+/**
+ * This is a top level Hedwig Client class that encapsulates the common
+ * functionality needed for both Publish and Subscribe operations.
+ * 
+ */
public class HedwigClient {

    private static final Logger logger = Logger.getLogger(HedwigClient.class);

    // Global counter used for generating unique transaction ID's for
    // publish and subscribe requests. Shared by HedwigPublisher and
    // HedwigSubscriber so txn IDs are unique across operation types.
    protected final AtomicLong globalCounter = new AtomicLong();
    // Static String constants
    protected static final String COLON = ":";

    // The Netty socket factory for making connections to the server.
    protected final ChannelFactory socketFactory;
    // Whether the socket factory is one we created or is owned by whoever
    // instantiated us. Only a factory we created is released in stop().
    protected boolean ownChannelFactory = false;

    // PipelineFactory to create netty client channels to the appropriate server
    private ClientChannelPipelineFactory pipelineFactory;

    // Concurrent Map to store the mapping from the Topic to the Host.
    // This could change over time since servers can drop mastership of topics
    // for load balancing or failover. If a server host ever goes down, we'd
    // also want to remove all topic mappings the host was responsible for.
    // The second Map is used as the inverted version of the first one.
    // NOTE(review): the List values of host2Topics are plain LinkedLists and
    // are mutated without synchronization in storeTopic2HostMapping and
    // iterated in clearAllTopicsForHost — confirm callers cannot race here.
    protected final ConcurrentMap<ByteString, InetSocketAddress> topic2Host = new ConcurrentHashMap<ByteString, InetSocketAddress>();
    private final ConcurrentMap<InetSocketAddress, List<ByteString>> host2Topics = new ConcurrentHashMap<InetSocketAddress, List<ByteString>>();

    // Each client instantiation will have a Timer for running recurring
    // threads. One such timer task thread to is to timeout long running
    // PubSubRequests that are waiting for an ack response from the server.
    // Created as a daemon timer so it does not keep the JVM alive.
    private final Timer clientTimer = new Timer(true);

    // Boolean indicating if the client is running or has stopped.
    // Once we stop the client, we should sidestep all of the connect,
    // write callback and channel disconnected logic.
    private boolean isStopped = false;

    private HedwigSubscriber sub;
    private final HedwigPublisher pub;
    private final ClientConfiguration cfg;
    private final MessageConsumeCallback consumeCb;
    // Non-null only when SSL is enabled in the configuration.
    private SslClientContextFactory sslFactory = null;

    // Base constructor that takes in a Configuration object.
    // This will create its own client socket channel factory.
    public HedwigClient(ClientConfiguration cfg) {
        this(cfg, new NioClientSocketChannelFactory(Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));
        ownChannelFactory = true;
    }

    // Constructor that takes in a Configuration object and a ChannelFactory
    // that has already been instantiated by the caller. In that case the
    // caller retains ownership of the factory and its resources.
    public HedwigClient(ClientConfiguration cfg, ChannelFactory socketFactory) {
        this.cfg = cfg;
        this.socketFactory = socketFactory;
        pub = new HedwigPublisher(this);
        sub = new HedwigSubscriber(this);
        pipelineFactory = new ClientChannelPipelineFactory(this);
        consumeCb = new MessageConsumeCallback(this);
        if (cfg.isSSLEnabled()) {
            sslFactory = new SslClientContextFactory(cfg);
        }
        // Schedule all of the client timer tasks. Currently we only have the
        // Request Timeout task.
        clientTimer.schedule(new PubSubRequestTimeoutTask(), 0, cfg.getTimeoutThreadRunInterval());
    }

    // Public getters for the various components of a client.
    public ClientConfiguration getConfiguration() {
        return cfg;
    }

    public HedwigSubscriber getSubscriber() {
        return sub;
    }

    // Protected method to set the subscriber. This is needed currently for hub
    // versions of the client subscriber.
    protected void setSubscriber(HedwigSubscriber sub) {
        this.sub = sub;
    }

    public HedwigPublisher getPublisher() {
        return pub;
    }

    public MessageConsumeCallback getConsumeCallback() {
        return consumeCb;
    }

    // Returns null when SSL is not enabled in the configuration.
    public SslClientContextFactory getSslFactory() {
        return sslFactory;
    }

    // We need to deal with the possible problem of a PubSub request being
    // written to successfully to the server host but for some reason, the
    // ack message back never comes. What could happen is that the VoidCallback
    // stored in the ResponseHandler.txn2PublishData map will never be called.
    // We should have a configured timeout so if that passes from the time a
    // write was successfully done to the server, we can fail this async PubSub
    // transaction. The caller could possibly redo the transaction if needed at
    // a later time. Creating a timeout cleaner TimerTask to do this here.
    class PubSubRequestTimeoutTask extends TimerTask {
        /**
         * Implement the TimerTask's abstract run method.
         * Scans every outstanding PubSubData request on all cached channels
         * (publish/unsubscribe channels and subscribe channels) and fails
         * those whose write time is older than the configured ack timeout.
         */
        @Override
        public void run() {
            if (logger.isDebugEnabled())
                logger.debug("Running the PubSubRequest Timeout Task");
            // Loop through all outstanding PubSubData requests and check if
            // the requestWriteTime has timed out compared to the current time.
            long curTime = System.currentTimeMillis();
            long timeoutInterval = cfg.getServerAckResponseTimeout();

            // First check the ResponseHandlers associated with cached
            // channels in HedwigPublisher.host2Channel. This stores the
            // channels used for Publish and Unsubscribe requests.
            for (Channel channel : pub.host2Channel.values()) {
                ResponseHandler responseHandler = getResponseHandlerFromChannel(channel);
                for (PubSubData pubSubData : responseHandler.txn2PubSubData.values()) {
                    checkPubSubDataToTimeOut(pubSubData, responseHandler, curTime, timeoutInterval);
                }
            }
            // Now do the same for the cached channels in
            // HedwigSubscriber.topicSubscriber2Channel. This stores the
            // channels used exclusively for Subscribe requests.
            for (Channel channel : sub.topicSubscriber2Channel.values()) {
                ResponseHandler responseHandler = getResponseHandlerFromChannel(channel);
                for (PubSubData pubSubData : responseHandler.txn2PubSubData.values()) {
                    checkPubSubDataToTimeOut(pubSubData, responseHandler, curTime, timeoutInterval);
                }
            }
        }

        // Fails a single request (removes it from the handler's txn map and
        // invokes its callback with an UncertainStateException) if its
        // requestWriteTime is older than curTime - timeoutInterval.
        private void checkPubSubDataToTimeOut(PubSubData pubSubData, ResponseHandler responseHandler, long curTime,
                long timeoutInterval) {
            if (curTime > pubSubData.requestWriteTime + timeoutInterval) {
                // Current PubSubRequest has timed out so remove it from the
                // ResponseHandler's map and invoke the VoidCallback's
                // operationFailed method.
                logger.error("Current PubSubRequest has timed out for pubSubData: " + pubSubData);
                responseHandler.txn2PubSubData.remove(pubSubData.txnId);
                pubSubData.callback.operationFailed(pubSubData.context, new UncertainStateException(
                        "Server ack response never received so PubSubRequest has timed out!"));
            }
        }
    }

    // When we are done with the client, this is a clean way to gracefully close
    // all channels/sockets created by the client and to also release all
    // resources used by netty. After this call the client cannot be reused.
    public void stop() {
        logger.info("Stopping the client!");
        // Set the client boolean flag to indicate the client has stopped.
        isStopped = true;
        // Stop the timer and all timer task threads.
        clientTimer.cancel();
        // Close all of the open Channels. Marking channelClosedExplicitly
        // first lets the ResponseHandler distinguish a deliberate close from
        // an unexpected disconnect.
        for (Channel channel : pub.host2Channel.values()) {
            getResponseHandlerFromChannel(channel).channelClosedExplicitly = true;
            channel.close().awaitUninterruptibly();
        }
        for (Channel channel : sub.topicSubscriber2Channel.values()) {
            getResponseHandlerFromChannel(channel).channelClosedExplicitly = true;
            channel.close().awaitUninterruptibly();
        }
        // Clear out all Maps.
        topic2Host.clear();
        host2Topics.clear();
        pub.host2Channel.clear();
        sub.topicSubscriber2Channel.clear();
        // Release resources used by the ChannelFactory on the client if we are
        // the owner that created it.
        if (ownChannelFactory) {
            socketFactory.releaseExternalResources();
        }
        logger.info("Completed stopping the client!");
    }

    /**
     * This is a helper method to do the connect attempt to the server given the
     * inputted host/port. This can be used to connect to the default server
     * host/port which is the VIP. That will pick a server in the cluster at
     * random to connect to for the initial PubSub attempt (with redirect logic
     * being done at the server side). Additionally, this could be called after
     * the client makes an initial PubSub attempt at a server, and is redirected
     * to the one that is responsible for the topic. Once the connect to the
     * server is done, we will perform the corresponding PubSub write on that
     * channel (handled asynchronously by the ConnectCallback listener).
     * 
     * @param pubSubData
     *            PubSub call's data wrapper object
     * @param serverHost
     *            Input server host to connect to of type InetSocketAddress
     */
    public void doConnect(PubSubData pubSubData, InetSocketAddress serverHost) {
        if (logger.isDebugEnabled())
            logger.debug("Connecting to host: " + serverHost + " with pubSubData: " + pubSubData);
        // Set up the ClientBootStrap so we can create a new Channel connection
        // to the server.
        ClientBootstrap bootstrap = new ClientBootstrap(socketFactory);
        bootstrap.setPipelineFactory(pipelineFactory);
        bootstrap.setOption("tcpNoDelay", true);
        bootstrap.setOption("keepAlive", true);

        // Start the connection attempt to the input server host.
        ChannelFuture future = bootstrap.connect(serverHost);
        future.addListener(new ConnectCallback(pubSubData, serverHost, this));
    }

    /**
     * Helper method to store the topic2Host mapping in the HedwigClient cache
     * map. This method is assumed to be called when we've done a successful
     * connection to the correct server topic master.
     * 
     * @param pubSubData
     *            PubSub wrapper data
     * @param channel
     *            Netty Channel
     */
    protected void storeTopic2HostMapping(PubSubData pubSubData, Channel channel) {
        // Retrieve the server host that we've connected to and store the
        // mapping from the topic to this host. For all other non-redirected
        // server statuses, we consider that as a successful connection to the
        // correct topic master.
        InetSocketAddress host = getHostFromChannel(channel);
        if (topic2Host.containsKey(pubSubData.topic) && topic2Host.get(pubSubData.topic).equals(host)) {
            // Entry in map exists for the topic but it is the same as the
            // current host. In this case there is nothing to do.
            return;
        }

        // Store the relevant mappings for this topic and host combination.
        if (logger.isDebugEnabled())
            logger.debug("Storing info for topic: " + pubSubData.topic.toStringUtf8() + ", old host: "
                    + topic2Host.get(pubSubData.topic) + ", new host: " + host);
        topic2Host.put(pubSubData.topic, host);
        if (host2Topics.containsKey(host)) {
            host2Topics.get(host).add(pubSubData.topic);
        } else {
            LinkedList<ByteString> topicsList = new LinkedList<ByteString>();
            topicsList.add(pubSubData.topic);
            host2Topics.put(host, topicsList);
        }
    }

    /**
     * Helper static method to get the InetSocketAddress (host and port) from a
     * netty Channel. Assumption is that the netty Channel was originally
     * created with an InetSocketAddress. This is true with the Hedwig netty
     * implementation.
     * 
     * @param channel
     *            Netty channel to extract the remote address from.
     * @return InetSocketAddress of the remote end of the Netty Channel
     */
    public static InetSocketAddress getHostFromChannel(Channel channel) {
        return (InetSocketAddress) channel.getRemoteAddress();
    }

    /**
     * Helper static method to get the ResponseHandler instance from a Channel
     * via the ChannelPipeline it is associated with. The assumption is that the
     * last ChannelHandler tied to the ChannelPipeline is the ResponseHandler.
     * 
     * @param channel
     *            Channel we are retrieving the ResponseHandler instance for
     * @return ResponseHandler Instance tied to the Channel's Pipeline
     */
    public static ResponseHandler getResponseHandlerFromChannel(Channel channel) {
        return (ResponseHandler) channel.getPipeline().getLast();
    }

    // Public getter for entries in the topic2Host Map.
    // Returns null if no mapping is cached for the topic.
    public InetSocketAddress getHostForTopic(ByteString topic) {
        return topic2Host.get(topic);
    }

    // If a server host goes down or the channel to it gets disconnected,
    // we want to clear out all relevant cached information. We'll
    // need to remove all of the topic mappings that the host was
    // responsible for.
    public void clearAllTopicsForHost(InetSocketAddress host) {
        if (logger.isDebugEnabled())
            logger.debug("Clearing all topics for host: " + host);
        // For each of the topics that the host was responsible for,
        // remove it from the topic2Host mapping.
        if (host2Topics.containsKey(host)) {
            for (ByteString topic : host2Topics.get(host)) {
                if (logger.isDebugEnabled())
                    logger.debug("Removing mapping for topic: " + topic.toStringUtf8() + " from host: " + host);
                topic2Host.remove(topic);
            }
            // Now it is safe to remove the host2Topics mapping entry.
            host2Topics.remove(host);
        }
    }

    // Public getter to see if the client has been stopped.
    public boolean hasStopped() {
        return isStopped;
    }

    // Public getter to get the client's Timer object.
    // This is so we can reuse this and not have to create multiple Timer
    // objects.
    public Timer getClientTimer() {
        return clientTimer;
    }

}

+ 224 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/netty/HedwigPublisher.java

@@ -0,0 +1,224 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.netty;
+
+import java.net.InetSocketAddress;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.log4j.Logger;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelFuture;
+
+import com.google.protobuf.ByteString;
+import org.apache.hedwig.client.api.Publisher;
+import org.apache.hedwig.client.conf.ClientConfiguration;
+import org.apache.hedwig.client.data.PubSubData;
+import org.apache.hedwig.client.handlers.PubSubCallback;
+import org.apache.hedwig.exceptions.PubSubException;
+import org.apache.hedwig.exceptions.PubSubException.CouldNotConnectException;
+import org.apache.hedwig.exceptions.PubSubException.ServiceDownException;
+import org.apache.hedwig.protocol.PubSubProtocol.Message;
+import org.apache.hedwig.protocol.PubSubProtocol.OperationType;
+import org.apache.hedwig.protocol.PubSubProtocol.ProtocolVersion;
+import org.apache.hedwig.protocol.PubSubProtocol.PubSubRequest;
+import org.apache.hedwig.protocol.PubSubProtocol.PublishRequest;
+import org.apache.hedwig.util.Callback;
+
+/**
+ * This is the Hedwig Netty specific implementation of the Publisher interface.
+ * 
+ */
+public class HedwigPublisher implements Publisher {
+
+    private static Logger logger = Logger.getLogger(HedwigPublisher.class);
+
+    // Concurrent Map to store the mappings for a given Host (Hostname:Port) to
+    // the Channel that has been established for it previously. This channel
+    // will be used whenever we publish on a topic that the server is the master
+    // of currently. The channels used here will only be used for publish and
+    // unsubscribe requests.
+    protected final ConcurrentMap<InetSocketAddress, Channel> host2Channel = new ConcurrentHashMap<InetSocketAddress, Channel>();
+
+    private final HedwigClient client;
+    private final ClientConfiguration cfg;
+
+    protected HedwigPublisher(HedwigClient client) {
+        this.client = client;
+        this.cfg = client.getConfiguration();
+    }
+
+    public void publish(ByteString topic, Message msg) throws CouldNotConnectException, ServiceDownException {
+        if (logger.isDebugEnabled())
+            logger.debug("Calling a sync publish for topic: " + topic.toStringUtf8() + ", msg: " + msg);
+        PubSubData pubSubData = new PubSubData(topic, msg, null, OperationType.PUBLISH, null, null, null);
+        synchronized (pubSubData) {
+            PubSubCallback pubSubCallback = new PubSubCallback(pubSubData);
+            asyncPublish(topic, msg, pubSubCallback, null);
+            try {
+                while (!pubSubData.isDone)
+                    pubSubData.wait();
+            } catch (InterruptedException e) {
+                throw new ServiceDownException("Interrupted Exception while waiting for async publish call");
+            }
+            // Check from the PubSubCallback if it was successful or not.
+            if (!pubSubCallback.getIsCallSuccessful()) {
+                // See what the exception was that was thrown when the operation
+                // failed.
+                PubSubException failureException = pubSubCallback.getFailureException();
+                if (failureException == null) {
+                    // This should not happen as the operation failed but a null
+                    // PubSubException was passed. Log a warning message but
+                    // throw a generic ServiceDownException.
+                    logger.error("Sync Publish operation failed but no PubSubException was passed!");
+                    throw new ServiceDownException("Server ack response to publish request is not successful");
+                }
+                // For the expected exceptions that could occur, just rethrow
+                // them.
+                else if (failureException instanceof CouldNotConnectException) {
+                    throw (CouldNotConnectException) failureException;
+                } else if (failureException instanceof ServiceDownException) {
+                    throw (ServiceDownException) failureException;
+                } else {
+                    // For other types of PubSubExceptions, just throw a generic
+                    // ServiceDownException but log a warning message.
+                    logger.error("Unexpected exception type when a sync publish operation failed: " + failureException);
+                    throw new ServiceDownException("Server ack response to publish request is not successful");
+                }
+            }
+        }
+    }
+
+    public void asyncPublish(ByteString topic, Message msg, Callback<Void> callback, Object context) {
+        if (logger.isDebugEnabled())
+            logger.debug("Calling an async publish for topic: " + topic.toStringUtf8() + ", msg: " + msg);
+        // Check if we already have a Channel connection set up to the server
+        // for the given Topic.
+        PubSubData pubSubData = new PubSubData(topic, msg, null, OperationType.PUBLISH, null, callback, context);
+        if (client.topic2Host.containsKey(topic)) {
+            InetSocketAddress host = client.topic2Host.get(topic);
+            if (host2Channel.containsKey(host)) {
+                // We already have the Channel connection for the server host so
+                // do the publish directly. We will deal with redirect logic
+                // later on if that server is no longer the current host for
+                // the topic.
+                doPublish(pubSubData, host2Channel.get(host));
+            } else {
+                // We have a mapping for the topic to host but don't have a
+                // Channel for that server. This can happen if the Channel
+                // is disconnected for some reason. Do the connect then to
+                // the specified server host to create a new Channel connection.
+                client.doConnect(pubSubData, host);
+            }
+        } else {
+            // Server host for the given topic is not known yet so use the
+            // default server host/port as defined in the configs. This should
+            // point to the server VIP which would redirect to a random server
+            // (which might not be the server hosting the topic).
+            client.doConnect(pubSubData, cfg.getDefaultServerHost());
+        }
+    }
+
+    /**
+     * This is a helper method to write the actual publish message once the
+     * client is connected to the server and a Channel is available.
+     * 
+     * @param pubSubData
+     *            Publish call's data wrapper object
+     * @param channel
+     *            Netty I/O channel for communication between the client and
+     *            server
+     */
+    protected void doPublish(PubSubData pubSubData, Channel channel) {
+        // Create a PubSubRequest
+        PubSubRequest.Builder pubsubRequestBuilder = PubSubRequest.newBuilder();
+        pubsubRequestBuilder.setProtocolVersion(ProtocolVersion.VERSION_ONE);
+        pubsubRequestBuilder.setType(OperationType.PUBLISH);
+        if (pubSubData.triedServers != null && pubSubData.triedServers.size() > 0) {
+            pubsubRequestBuilder.addAllTriedServers(pubSubData.triedServers);
+        }
+        long txnId = client.globalCounter.incrementAndGet();
+        pubsubRequestBuilder.setTxnId(txnId);
+        pubsubRequestBuilder.setShouldClaim(pubSubData.shouldClaim);
+        pubsubRequestBuilder.setTopic(pubSubData.topic);
+
+        // Now create the PublishRequest
+        PublishRequest.Builder publishRequestBuilder = PublishRequest.newBuilder();
+
+        publishRequestBuilder.setMsg(pubSubData.msg);
+
+        // Set the PublishRequest into the outer PubSubRequest
+        pubsubRequestBuilder.setPublishRequest(publishRequestBuilder);
+
+        // Update the PubSubData with the txnId and the requestWriteTime
+        pubSubData.txnId = txnId;
+        pubSubData.requestWriteTime = System.currentTimeMillis();
+
+        // Before we do the write, store this information into the
+        // ResponseHandler so when the server responds, we know what
+        // appropriate Callback Data to invoke for the given txn ID.
+        HedwigClient.getResponseHandlerFromChannel(channel).txn2PubSubData.put(txnId, pubSubData);
+
+        // Finally, write the Publish request through the Channel.
+        if (logger.isDebugEnabled())
+            logger.debug("Writing a Publish request to host: " + HedwigClient.getHostFromChannel(channel)
+                    + " for pubSubData: " + pubSubData);
+        ChannelFuture future = channel.write(pubsubRequestBuilder.build());
+        future.addListener(new WriteCallback(pubSubData, client));
+    }
+
+    // Synchronized method to store the host2Channel mapping (if it doesn't
+    // exist yet). Retrieve the hostname info from the Channel created via the
+    // RemoteAddress tied to it.
+    protected synchronized void storeHost2ChannelMapping(Channel channel) {
+        InetSocketAddress host = HedwigClient.getHostFromChannel(channel);
+        if (!host2Channel.containsKey(host)) {
+            if (logger.isDebugEnabled())
+                logger.debug("Storing a new Channel mapping for host: " + host);
+            host2Channel.put(host, channel);
+        } else {
+            // If we've reached here, that means we already have a Channel
+            // mapping for the given host. This should ideally not happen
+            // and it means we are creating another Channel to a server host
+            // to publish on when we could have used an existing one. This could
+            // happen due to a race condition if initially multiple concurrent
+            // threads are publishing on the same topic and no Channel exists
+            // currently to the server. We are not synchronizing this initial
+            // creation of Channels to a given host for performance.
+            // Another possible way to have redundant Channels created is if
+            // a new topic is being published to, we connect to the default
+            // server host which should be a VIP that redirects to a "real"
+            // server host. Since we don't know beforehand what is the full
+            // set of server hosts, we could be redirected to a server that
+            // we already have a channel connection to from a prior existing
+            // topic. Close these redundant channels as they won't be used.
+            if (logger.isDebugEnabled())
+                logger.debug("Channel mapping to host: " + host + " already exists so no need to store it.");
+            HedwigClient.getResponseHandlerFromChannel(channel).channelClosedExplicitly = true;
+            channel.close();
+        }
+    }
+
+    // Public getter for entries in the host2Channel Map.
+    // This is used for classes that need this information but are not in the
+    // same classpath.
+    public Channel getChannelForHost(InetSocketAddress host) {
+        return host2Channel.get(host);
+    }
+
+}

+ 585 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/netty/HedwigSubscriber.java

@@ -0,0 +1,585 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.netty;
+
+import java.net.InetSocketAddress;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.log4j.Logger;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelFuture;
+import org.jboss.netty.channel.ChannelFutureListener;
+
+import com.google.protobuf.ByteString;
+import org.apache.hedwig.client.api.MessageHandler;
+import org.apache.hedwig.client.api.Subscriber;
+import org.apache.hedwig.client.conf.ClientConfiguration;
+import org.apache.hedwig.client.data.PubSubData;
+import org.apache.hedwig.client.data.TopicSubscriber;
+import org.apache.hedwig.client.exceptions.InvalidSubscriberIdException;
+import org.apache.hedwig.client.handlers.PubSubCallback;
+import org.apache.hedwig.exceptions.PubSubException;
+import org.apache.hedwig.exceptions.PubSubException.ClientAlreadySubscribedException;
+import org.apache.hedwig.exceptions.PubSubException.ClientNotSubscribedException;
+import org.apache.hedwig.exceptions.PubSubException.CouldNotConnectException;
+import org.apache.hedwig.exceptions.PubSubException.ServiceDownException;
+import org.apache.hedwig.protocol.PubSubProtocol.ConsumeRequest;
+import org.apache.hedwig.protocol.PubSubProtocol.MessageSeqId;
+import org.apache.hedwig.protocol.PubSubProtocol.OperationType;
+import org.apache.hedwig.protocol.PubSubProtocol.ProtocolVersion;
+import org.apache.hedwig.protocol.PubSubProtocol.PubSubRequest;
+import org.apache.hedwig.protocol.PubSubProtocol.SubscribeRequest;
+import org.apache.hedwig.protocol.PubSubProtocol.UnsubscribeRequest;
+import org.apache.hedwig.protocol.PubSubProtocol.SubscribeRequest.CreateOrAttach;
+import org.apache.hedwig.protoextensions.SubscriptionStateUtils;
+import org.apache.hedwig.util.Callback;
+
+/**
+ * This is the Hedwig Netty specific implementation of the Subscriber interface.
+ * 
+ */
+public class HedwigSubscriber implements Subscriber {
+
+    private static Logger logger = Logger.getLogger(HedwigSubscriber.class);
+
+    // Concurrent Map to store the cached Channel connections on the client side
+    // to a server host for a given Topic + SubscriberId combination. For each
+    // TopicSubscriber, we want a unique Channel connection to the server for
+    // it. We can also get the ResponseHandler tied to the Channel via the
+    // Channel Pipeline.
+    protected final ConcurrentMap<TopicSubscriber, Channel> topicSubscriber2Channel = new ConcurrentHashMap<TopicSubscriber, Channel>();
+
+    protected final HedwigClient client;
+    protected final ClientConfiguration cfg;
+
+    public HedwigSubscriber(HedwigClient client) {
+        this.client = client;
+        this.cfg = client.getConfiguration();
+    }
+
+    // Private method that holds the common logic for doing synchronous
+    // Subscribe or Unsubscribe requests. This is for code reuse since these
+    // two flows are very similar. The assumption is that the input
+    // OperationType is either SUBSCRIBE or UNSUBSCRIBE.
+    private void subUnsub(ByteString topic, ByteString subscriberId, OperationType operationType,
+            CreateOrAttach createOrAttach) throws CouldNotConnectException, ClientAlreadySubscribedException,
+            ClientNotSubscribedException, ServiceDownException {
+        if (logger.isDebugEnabled())
+            logger.debug("Calling a sync subUnsub request for topic: " + topic.toStringUtf8() + ", subscriberId: "
+                    + subscriberId.toStringUtf8() + ", operationType: " + operationType + ", createOrAttach: "
+                    + createOrAttach);
+        PubSubData pubSubData = new PubSubData(topic, null, subscriberId, operationType, createOrAttach, null, null);
+        synchronized (pubSubData) {
+            PubSubCallback pubSubCallback = new PubSubCallback(pubSubData);
+            asyncSubUnsub(topic, subscriberId, pubSubCallback, null, operationType, createOrAttach);
+            try {
+                while (!pubSubData.isDone)
+                    pubSubData.wait();
+            } catch (InterruptedException e) {
+                throw new ServiceDownException("Interrupted Exception while waiting for async subUnsub call");
+            }
+            // Check from the PubSubCallback if it was successful or not.
+            if (!pubSubCallback.getIsCallSuccessful()) {
+                // See what the exception was that was thrown when the operation
+                // failed.
+                PubSubException failureException = pubSubCallback.getFailureException();
+                if (failureException == null) {
+                    // This should not happen as the operation failed but a null
+                    // PubSubException was passed. Log a warning message but
+                    // throw a generic ServiceDownException.
+                    logger.error("Sync SubUnsub operation failed but no PubSubException was passed!");
+                    throw new ServiceDownException("Server ack response to SubUnsub request is not successful");
+                }
+                // For the expected exceptions that could occur, just rethrow
+                // them.
+                else if (failureException instanceof CouldNotConnectException)
+                    throw (CouldNotConnectException) failureException;
+                else if (failureException instanceof ClientAlreadySubscribedException)
+                    throw (ClientAlreadySubscribedException) failureException;
+                else if (failureException instanceof ClientNotSubscribedException)
+                    throw (ClientNotSubscribedException) failureException;
+                else if (failureException instanceof ServiceDownException)
+                    throw (ServiceDownException) failureException;
+                else {
+                    logger.error("Unexpected PubSubException thrown: " + failureException.toString());
+                    // Throw a generic ServiceDownException but wrap the
+                    // original PubSubException within it.
+                    throw new ServiceDownException(failureException);
+                }
+            }
+        }
+    }
+
+    // Private method that holds the common logic for doing asynchronous
+    // Subscribe or Unsubscribe requests. This is for code reuse since these two
+    // flows are very similar. The assumption is that the input OperationType is
+    // either SUBSCRIBE or UNSUBSCRIBE.
+    private void asyncSubUnsub(ByteString topic, ByteString subscriberId, Callback<Void> callback, Object context,
+            OperationType operationType, CreateOrAttach createOrAttach) {
+        if (logger.isDebugEnabled())
+            logger.debug("Calling an async subUnsub request for topic: " + topic.toStringUtf8() + ", subscriberId: "
+                    + subscriberId.toStringUtf8() + ", operationType: " + operationType + ", createOrAttach: "
+                    + createOrAttach);
+        // Check if we know which server host is the master for the topic we are
+        // subscribing to.
+        PubSubData pubSubData = new PubSubData(topic, null, subscriberId, operationType, createOrAttach, callback,
+                context);
+        if (client.topic2Host.containsKey(topic)) {
+            InetSocketAddress host = client.topic2Host.get(topic);
+            if (operationType.equals(OperationType.UNSUBSCRIBE) && client.getPublisher().host2Channel.containsKey(host)) {
+                // For unsubscribes, we can reuse the channel connections to the
+                // server host that are cached for publishes. For publish and
+                // unsubscribe flows, we will thus use the same Channels and
+                // will cache and store them during the ConnectCallback.
+                doSubUnsub(pubSubData, client.getPublisher().host2Channel.get(host));
+            } else {
+                // We know which server host is the master for the topic so
+                // connect to that first. For subscribes, we want a new channel
+                // connection each time for the TopicSubscriber. If the
+                // TopicSubscriber is already connected and subscribed,
+                // we assume the server will respond with an appropriate status
+                // indicating this. For unsubscribes, it is possible that the
+                // client is subscribed to the topic already but does not
+                // have a Channel connection yet to the server host. e.g. Client
+                // goes down and comes back up but client side soft state memory
+                // does not have the netty Channel connection anymore.
+                client.doConnect(pubSubData, host);
+            }
+        } else {
+            // Server host for the given topic is not known yet so use the
+            // default server host/port as defined in the configs. This should
+            // point to the server VIP which would redirect to a random server
+            // (which might not be the server hosting the topic).
+            client.doConnect(pubSubData, cfg.getDefaultServerHost());
+        }
+    }
+
+    public void subscribe(ByteString topic, ByteString subscriberId, CreateOrAttach mode)
+            throws CouldNotConnectException, ClientAlreadySubscribedException, ServiceDownException,
+            InvalidSubscriberIdException {
+        subscribe(topic, subscriberId, mode, false);
+    }
+
+    protected void subscribe(ByteString topic, ByteString subscriberId, CreateOrAttach mode, boolean isHub)
+            throws CouldNotConnectException, ClientAlreadySubscribedException, ServiceDownException,
+            InvalidSubscriberIdException {
+        // Validate that the format of the subscriberId is valid either as a
+        // local or hub subscriber.
+        if (!isValidSubscriberId(subscriberId, isHub)) {
+            throw new InvalidSubscriberIdException("SubscriberId passed is not valid: " + subscriberId.toStringUtf8()
+                    + ", isHub: " + isHub);
+        }
+        try {
+            subUnsub(topic, subscriberId, OperationType.SUBSCRIBE, mode);
+        } catch (ClientNotSubscribedException e) {
+            logger.error("Unexpected Exception thrown: " + e.toString());
+            // This exception should never be thrown here. But just in case,
+            // throw a generic ServiceDownException but wrap the original
+            // Exception within it.
+            throw new ServiceDownException(e);
+        }
+    }
+
+    public void asyncSubscribe(ByteString topic, ByteString subscriberId, CreateOrAttach mode, Callback<Void> callback,
+            Object context) {
+        asyncSubscribe(topic, subscriberId, mode, callback, context, false);
+    }
+
+    protected void asyncSubscribe(ByteString topic, ByteString subscriberId, CreateOrAttach mode,
+            Callback<Void> callback, Object context, boolean isHub) {
+        // Validate that the format of the subscriberId is valid either as a
+        // local or hub subscriber.
+        if (!isValidSubscriberId(subscriberId, isHub)) {
+            callback.operationFailed(context, new ServiceDownException(new InvalidSubscriberIdException(
+                    "SubscriberId passed is not valid: " + subscriberId.toStringUtf8() + ", isHub: " + isHub)));
+            return;
+        }
+        asyncSubUnsub(topic, subscriberId, callback, context, OperationType.SUBSCRIBE, mode);
+    }
+
+    public void unsubscribe(ByteString topic, ByteString subscriberId) throws CouldNotConnectException,
+            ClientNotSubscribedException, ServiceDownException, InvalidSubscriberIdException {
+        unsubscribe(topic, subscriberId, false);
+    }
+
+    protected void unsubscribe(ByteString topic, ByteString subscriberId, boolean isHub)
+            throws CouldNotConnectException, ClientNotSubscribedException, ServiceDownException,
+            InvalidSubscriberIdException {
+        // Validate that the format of the subscriberId is valid either as a
+        // local or hub subscriber.
+        if (!isValidSubscriberId(subscriberId, isHub)) {
+            throw new InvalidSubscriberIdException("SubscriberId passed is not valid: " + subscriberId.toStringUtf8()
+                    + ", isHub: " + isHub);
+        }
+        // Synchronously close the subscription on the client side. Even
+        // if the unsubscribe request to the server errors out, we won't be
+        // delivering messages for this subscription to the client. The client
+        // can later retry the unsubscribe request to the server so they are
+        // "fully" unsubscribed from the given topic.
+        closeSubscription(topic, subscriberId);
+        try {
+            subUnsub(topic, subscriberId, OperationType.UNSUBSCRIBE, null);
+        } catch (ClientAlreadySubscribedException e) {
+            logger.error("Unexpected Exception thrown: " + e.toString());
+            // This exception should never be thrown here. But just in case,
+            // throw a generic ServiceDownException but wrap the original
+            // Exception within it.
+            throw new ServiceDownException(e);
+        }
+    }
+
+    public void asyncUnsubscribe(final ByteString topic, final ByteString subscriberId, final Callback<Void> callback,
+            final Object context) {
+        asyncUnsubscribe(topic, subscriberId, callback, context, false);
+    }
+
+    protected void asyncUnsubscribe(final ByteString topic, final ByteString subscriberId,
+            final Callback<Void> callback, final Object context, boolean isHub) {
+        // Validate that the format of the subscriberId is valid either as a
+        // local or hub subscriber.
+        if (!isValidSubscriberId(subscriberId, isHub)) {
+            callback.operationFailed(context, new ServiceDownException(new InvalidSubscriberIdException(
+                    "SubscriberId passed is not valid: " + subscriberId.toStringUtf8() + ", isHub: " + isHub)));
+            return;
+        }
+        // Asynchronously close the subscription. On the callback to that
+        // operation once it completes, post the async unsubscribe request.
+        asyncCloseSubscription(topic, subscriberId, new Callback<Void>() {
+            @Override
+            public void operationFinished(Object ctx, Void resultOfOperation) {
+                asyncSubUnsub(topic, subscriberId, callback, context, OperationType.UNSUBSCRIBE, null);
+            }
+
+            @Override
+            public void operationFailed(Object ctx, PubSubException exception) {
+                callback.operationFailed(context, exception);
+            }
+        }, null);
+    }
+
+    // This is a helper method to determine if a subscriberId is valid as either
+    // a hub or local subscriber
+    private boolean isValidSubscriberId(ByteString subscriberId, boolean isHub) {
+        if ((isHub && !SubscriptionStateUtils.isHubSubscriber(subscriberId))
+                || (!isHub && SubscriptionStateUtils.isHubSubscriber(subscriberId)))
+            return false;
+        else
+            return true;
+    }
+
+    public void consume(ByteString topic, ByteString subscriberId, MessageSeqId messageSeqId)
+            throws ClientNotSubscribedException {
+        if (logger.isDebugEnabled())
+            logger.debug("Calling consume for topic: " + topic.toStringUtf8() + ", subscriberId: "
+                    + subscriberId.toStringUtf8() + ", messageSeqId: " + messageSeqId);
+        TopicSubscriber topicSubscriber = new TopicSubscriber(topic, subscriberId);
+        // Check that this topic subscription on the client side exists.
+        if (!topicSubscriber2Channel.containsKey(topicSubscriber)) {
+            throw new ClientNotSubscribedException(
+                    "Cannot send consume message since client is not subscribed to topic: " + topic.toStringUtf8()
+                            + ", subscriberId: " + subscriberId.toStringUtf8());
+        }
+        PubSubData pubSubData = new PubSubData(topic, null, subscriberId, OperationType.CONSUME, null, null, null);
+        // Send the consume message to the server using the same subscribe
+        // channel that the topic subscription uses.
+        doConsume(pubSubData, topicSubscriber2Channel.get(topicSubscriber), messageSeqId);
+    }
+
+    /**
+     * This is a helper method to write the actual subscribe/unsubscribe message
+     * once the client is connected to the server and a Channel is available.
+     * 
+     * @param pubSubData
+     *            Subscribe/Unsubscribe call's data wrapper object. We assume
+     *            that the operationType field is either SUBSCRIBE or
+     *            UNSUBSCRIBE.
+     * @param channel
+     *            Netty I/O channel for communication between the client and
+     *            server
+     */
+    protected void doSubUnsub(PubSubData pubSubData, Channel channel) {
+        // Create a PubSubRequest
+        PubSubRequest.Builder pubsubRequestBuilder = PubSubRequest.newBuilder();
+        pubsubRequestBuilder.setProtocolVersion(ProtocolVersion.VERSION_ONE);
+        pubsubRequestBuilder.setType(pubSubData.operationType);
+        if (pubSubData.triedServers != null && pubSubData.triedServers.size() > 0) {
+            pubsubRequestBuilder.addAllTriedServers(pubSubData.triedServers);
+        }
+        long txnId = client.globalCounter.incrementAndGet();
+        pubsubRequestBuilder.setTxnId(txnId);
+        pubsubRequestBuilder.setShouldClaim(pubSubData.shouldClaim);
+        pubsubRequestBuilder.setTopic(pubSubData.topic);
+
+        // Create either the Subscribe or Unsubscribe Request
+        if (pubSubData.operationType.equals(OperationType.SUBSCRIBE)) {
+            // Create the SubscribeRequest
+            SubscribeRequest.Builder subscribeRequestBuilder = SubscribeRequest.newBuilder();
+            subscribeRequestBuilder.setSubscriberId(pubSubData.subscriberId);
+            subscribeRequestBuilder.setCreateOrAttach(pubSubData.createOrAttach);
+            // For now, all subscribes should wait for all cross-regional
+            // subscriptions to be established before returning.
+            subscribeRequestBuilder.setSynchronous(true);
+
+            // Set the SubscribeRequest into the outer PubSubRequest
+            pubsubRequestBuilder.setSubscribeRequest(subscribeRequestBuilder);
+        } else {
+            // Create the UnSubscribeRequest
+            UnsubscribeRequest.Builder unsubscribeRequestBuilder = UnsubscribeRequest.newBuilder();
+            unsubscribeRequestBuilder.setSubscriberId(pubSubData.subscriberId);
+
+            // Set the UnsubscribeRequest into the outer PubSubRequest
+            pubsubRequestBuilder.setUnsubscribeRequest(unsubscribeRequestBuilder);
+        }
+
+        // Update the PubSubData with the txnId and the requestWriteTime
+        pubSubData.txnId = txnId;
+        pubSubData.requestWriteTime = System.currentTimeMillis();
+
+        // Before we do the write, store this information into the
+        // ResponseHandler so when the server responds, we know what
+        // appropriate Callback Data to invoke for the given txn ID.
+        HedwigClient.getResponseHandlerFromChannel(channel).txn2PubSubData.put(txnId, pubSubData);
+
+        // Finally, write the Subscribe request through the Channel.
+        if (logger.isDebugEnabled())
+            logger.debug("Writing a SubUnsub request to host: " + HedwigClient.getHostFromChannel(channel)
+                    + " for pubSubData: " + pubSubData);
+        ChannelFuture future = channel.write(pubsubRequestBuilder.build());
+        future.addListener(new WriteCallback(pubSubData, client));
+    }
+
+    /**
+     * This is a helper method to write a consume message to the server after a
+     * subscribe Channel connection is made to the server and messages are being
+     * consumed by the client.
+     * 
+     * @param pubSubData
+     *            Consume call's data wrapper object. We assume that the
+     *            operationType field is CONSUME.
+     * @param channel
+     *            Netty I/O channel for communication between the client and
+     *            server
+     * @param messageSeqId
+     *            Message Seq ID for the latest/last message the client has
+     *            consumed.
+     */
+    public void doConsume(final PubSubData pubSubData, final Channel channel, final MessageSeqId messageSeqId) {
+        // Create a PubSubRequest
+        PubSubRequest.Builder pubsubRequestBuilder = PubSubRequest.newBuilder();
+        pubsubRequestBuilder.setProtocolVersion(ProtocolVersion.VERSION_ONE);
+        pubsubRequestBuilder.setType(OperationType.CONSUME);
+        long txnId = client.globalCounter.incrementAndGet();
+        pubsubRequestBuilder.setTxnId(txnId);
+        pubsubRequestBuilder.setTopic(pubSubData.topic);
+
+        // Create the ConsumeRequest
+        ConsumeRequest.Builder consumeRequestBuilder = ConsumeRequest.newBuilder();
+        consumeRequestBuilder.setSubscriberId(pubSubData.subscriberId);
+        consumeRequestBuilder.setMsgId(messageSeqId);
+
+        // Set the ConsumeRequest into the outer PubSubRequest
+        pubsubRequestBuilder.setConsumeRequest(consumeRequestBuilder);
+
+        // For Consume requests, we will send them from the client in a fire and
+        // forget manner. We are not expecting the server to send back an ack
+        // response so no need to register this in the ResponseHandler. There
+        // are no callbacks to invoke since this isn't a client initiated
+        // action. Instead, just have a future listener that will log an error
+        // message if there was a problem writing the consume request.
+        if (logger.isDebugEnabled())
+            logger.debug("Writing a Consume request to host: " + HedwigClient.getHostFromChannel(channel)
+                    + " with messageSeqId: " + messageSeqId + " for pubSubData: " + pubSubData);
+        ChannelFuture future = channel.write(pubsubRequestBuilder.build());
+        future.addListener(new ChannelFutureListener() {
+            @Override
+            public void operationComplete(ChannelFuture future) throws Exception {
+                if (!future.isSuccess()) {
+                    logger.error("Error writing a Consume request to host: " + HedwigClient.getHostFromChannel(channel)
+                            + " with messageSeqId: " + messageSeqId + " for pubSubData: " + pubSubData);                    
+                }
+            }
+        });
+
+    }
+
+    public boolean hasSubscription(ByteString topic, ByteString subscriberId) throws CouldNotConnectException,
+            ServiceDownException {
+        // The subscription type of info should be stored on the server end, not
+        // the client side. Eventually, the server will have the Subscription
+        // Manager part that ties into Zookeeper to manage this info.
+        // Commenting out these type of API's related to that here for now until
+        // this data is available on the server. Will figure out what the
+        // correct way to contact the server to get this info is then.
+        // The client side just has soft memory state for client subscription
+        // information.
+        return topicSubscriber2Channel.containsKey(new TopicSubscriber(topic, subscriberId));
+    }
+
+    public List<ByteString> getSubscriptionList(ByteString subscriberId) throws CouldNotConnectException,
+            ServiceDownException {
+        // Same as the previous hasSubscription method, this data should reside
+        // on the server end, not the client side.
+        return null;
+    }
+
+    public void startDelivery(final ByteString topic, final ByteString subscriberId, MessageHandler messageHandler)
+            throws ClientNotSubscribedException {
+        if (logger.isDebugEnabled())
+            logger.debug("Starting delivery for topic: " + topic.toStringUtf8() + ", subscriberId: "
+                    + subscriberId.toStringUtf8());
+        TopicSubscriber topicSubscriber = new TopicSubscriber(topic, subscriberId);
+        // Make sure we know about this topic subscription on the client side
+        // exists. The assumption is that the client should have in memory the
+        // Channel created for the TopicSubscriber once the server has sent
+        // an ack response to the initial subscribe request.
+        if (!topicSubscriber2Channel.containsKey(topicSubscriber)) {
+            logger.error("Client is not yet subscribed to topic: " + topic.toStringUtf8() + ", subscriberId: "
+                    + subscriberId.toStringUtf8());
+            throw new ClientNotSubscribedException("Client is not yet subscribed to topic: " + topic.toStringUtf8()
+                    + ", subscriberId: " + subscriberId.toStringUtf8());
+        }
+
+        // Register the MessageHandler with the subscribe Channel's
+        // Response Handler.
+        Channel topicSubscriberChannel = topicSubscriber2Channel.get(topicSubscriber);
+        HedwigClient.getResponseHandlerFromChannel(topicSubscriberChannel).getSubscribeResponseHandler()
+                .setMessageHandler(messageHandler);
+        // Now make the TopicSubscriber Channel readable (it is set to not be
+        // readable when the initial subscription is done). Note that this is an
+        // asynchronous call. If this fails (not likely), the futureListener
+        // will just log an error message for now.
+        ChannelFuture future = topicSubscriberChannel.setReadable(true);
+        future.addListener(new ChannelFutureListener() {
+            @Override
+            public void operationComplete(ChannelFuture future) throws Exception {
+                if (!future.isSuccess()) {
+                    logger.error("Unable to make subscriber Channel readable in startDelivery call for topic: "
+                            + topic.toStringUtf8() + ", subscriberId: " + subscriberId.toStringUtf8());
+                }
+            }
+        });
+    }
+
+    public void stopDelivery(final ByteString topic, final ByteString subscriberId) throws ClientNotSubscribedException {
+        if (logger.isDebugEnabled())
+            logger.debug("Stopping delivery for topic: " + topic.toStringUtf8() + ", subscriberId: "
+                    + subscriberId.toStringUtf8());
+        TopicSubscriber topicSubscriber = new TopicSubscriber(topic, subscriberId);
+        // Make sure we know that this topic subscription on the client side
+        // exists. The assumption is that the client should have in memory the
+        // Channel created for the TopicSubscriber once the server has sent
+        // an ack response to the initial subscribe request.
+        if (!topicSubscriber2Channel.containsKey(topicSubscriber)) {
+            logger.error("Client is not yet subscribed to topic: " + topic.toStringUtf8() + ", subscriberId: "
+                    + subscriberId.toStringUtf8());
+            throw new ClientNotSubscribedException("Client is not yet subscribed to topic: " + topic.toStringUtf8()
+                    + ", subscriberId: " + subscriberId.toStringUtf8());
+        }
+
+        // Unregister the MessageHandler for the subscribe Channel's
+        // Response Handler.
+        Channel topicSubscriberChannel = topicSubscriber2Channel.get(topicSubscriber);
+        HedwigClient.getResponseHandlerFromChannel(topicSubscriberChannel).getSubscribeResponseHandler()
+                .setMessageHandler(null);
+        // Now make the TopicSubscriber channel not-readable. This will buffer
+        // up messages if any are sent from the server. Note that this is an
+        // asynchronous call. If this fails (not likely), the futureListener
+        // will just log an error message for now.
+        ChannelFuture future = topicSubscriberChannel.setReadable(false);
+        future.addListener(new ChannelFutureListener() {
+            @Override
+            public void operationComplete(ChannelFuture future) throws Exception {
+                if (!future.isSuccess()) {
+                    logger.error("Unable to make subscriber Channel not readable in stopDelivery call for topic: "
+                            + topic.toStringUtf8() + ", subscriberId: " + subscriberId.toStringUtf8());
+                }
+            }
+        });
+    }
+
+    public void closeSubscription(ByteString topic, ByteString subscriberId) throws ServiceDownException {
+        PubSubData pubSubData = new PubSubData(topic, null, subscriberId, null, null, null, null);
+        synchronized (pubSubData) {
+            PubSubCallback pubSubCallback = new PubSubCallback(pubSubData);
+            asyncCloseSubscription(topic, subscriberId, pubSubCallback, null);
+            try {
+                while (!pubSubData.isDone)
+                    pubSubData.wait();
+            } catch (InterruptedException e) {
+                throw new ServiceDownException("Interrupted Exception while waiting for asyncCloseSubscription call");
+            }
+            // Check from the PubSubCallback if it was successful or not.
+            if (!pubSubCallback.getIsCallSuccessful()) {
+                throw new ServiceDownException("Exception while trying to close the subscription for topic: "
+                        + topic.toStringUtf8() + ", subscriberId: " + subscriberId.toStringUtf8());
+            }
+        }
+    }
+
+    public void asyncCloseSubscription(final ByteString topic, final ByteString subscriberId,
+            final Callback<Void> callback, final Object context) {
+        if (logger.isDebugEnabled())
+            logger.debug("Closing subscription asynchronously for topic: " + topic.toStringUtf8() + ", subscriberId: "
+                    + subscriberId.toStringUtf8());
+        TopicSubscriber topicSubscriber = new TopicSubscriber(topic, subscriberId);
+        if (topicSubscriber2Channel.containsKey(topicSubscriber)) {
+            // Remove all cached references for the TopicSubscriber
+            Channel channel = topicSubscriber2Channel.get(topicSubscriber);
+            topicSubscriber2Channel.remove(topicSubscriber);
+            // Close the subscribe channel asynchronously.
+            HedwigClient.getResponseHandlerFromChannel(channel).channelClosedExplicitly = true;
+            ChannelFuture future = channel.close();
+            future.addListener(new ChannelFutureListener() {
+                @Override
+                public void operationComplete(ChannelFuture future) throws Exception {
+                    if (!future.isSuccess()) {
+                        logger.error("Failed to close the subscription channel for topic: " + topic.toStringUtf8()
+                                + ", subscriberId: " + subscriberId.toStringUtf8());
+                        callback.operationFailed(context, new ServiceDownException(
+                                "Failed to close the subscription channel for topic: " + topic.toStringUtf8()
+                                        + ", subscriberId: " + subscriberId.toStringUtf8()));
+                    } else {
+                        callback.operationFinished(context, null);
+                    }
+                }
+            });
+        } else {
+            logger.warn("Trying to close a subscription when we don't have a subscribe channel cached for topic: "
+                    + topic.toStringUtf8() + ", subscriberId: " + subscriberId.toStringUtf8());
+            callback.operationFinished(context, null);
+        }
+    }
+
+    // Public getter and setters for entries in the topic2Host Map.
+    // This is used for classes that need this information but are not in the
+    // same classpath.
+    public Channel getChannelForTopic(TopicSubscriber topic) {
+        return topicSubscriber2Channel.get(topic);
+    }
+
+    public void setChannelForTopic(TopicSubscriber topic, Channel channel) {
+        topicSubscriber2Channel.put(topic, channel);
+    }
+
+    public void removeChannelForTopic(TopicSubscriber topic) {
+        topicSubscriber2Channel.remove(topic);
+    }
+
+}

+ 365 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/netty/ResponseHandler.java

@@ -0,0 +1,365 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.netty;
+
+import java.net.InetSocketAddress;
+import java.util.LinkedList;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.log4j.Logger;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.channel.ChannelPipelineCoverage;
+import org.jboss.netty.channel.ChannelStateEvent;
+import org.jboss.netty.channel.ExceptionEvent;
+import org.jboss.netty.channel.MessageEvent;
+import org.jboss.netty.channel.SimpleChannelHandler;
+import org.jboss.netty.handler.ssl.SslHandler;
+
+import com.google.protobuf.ByteString;
+import org.apache.hedwig.client.conf.ClientConfiguration;
+import org.apache.hedwig.client.data.PubSubData;
+import org.apache.hedwig.client.exceptions.ServerRedirectLoopException;
+import org.apache.hedwig.client.exceptions.TooManyServerRedirectsException;
+import org.apache.hedwig.client.handlers.PublishResponseHandler;
+import org.apache.hedwig.client.handlers.SubscribeReconnectCallback;
+import org.apache.hedwig.client.handlers.SubscribeResponseHandler;
+import org.apache.hedwig.client.handlers.UnsubscribeResponseHandler;
+import org.apache.hedwig.exceptions.PubSubException.ServiceDownException;
+import org.apache.hedwig.exceptions.PubSubException.UncertainStateException;
+import org.apache.hedwig.protocol.PubSubProtocol.OperationType;
+import org.apache.hedwig.protocol.PubSubProtocol.PubSubResponse;
+import org.apache.hedwig.protocol.PubSubProtocol.StatusCode;
+import org.apache.hedwig.util.HedwigSocketAddress;
+
+@ChannelPipelineCoverage("all")
+public class ResponseHandler extends SimpleChannelHandler {
+
+    private static Logger logger = Logger.getLogger(ResponseHandler.class);
+
+    // Concurrent Map to store for each async PubSub request, the txn ID
+    // and the corresponding PubSub call's data which stores the VoidCallback to
+    // invoke when we receive a PubSub ack response from the server.
+    // This is specific to this instance of the ResponseHandler which is
+    // tied to a specific netty Channel Pipeline.
+    protected final ConcurrentMap<Long, PubSubData> txn2PubSubData = new ConcurrentHashMap<Long, PubSubData>();
+
+    // Boolean indicating if we closed the channel this ResponseHandler is
+    // attached to explicitly or not. If so, we do not need to do the
+    // channel disconnected logic here.
+    public boolean channelClosedExplicitly = false;
+
+    private final HedwigClient client;
+    private final HedwigPublisher pub;
+    private final HedwigSubscriber sub;
+    private final ClientConfiguration cfg;
+
+    private final PublishResponseHandler pubHandler;
+    private final SubscribeResponseHandler subHandler;
+    private final UnsubscribeResponseHandler unsubHandler;
+
+    public ResponseHandler(HedwigClient client) {
+        this.client = client;
+        this.sub = client.getSubscriber();
+        this.pub = client.getPublisher();
+        this.cfg = client.getConfiguration();
+        this.pubHandler = new PublishResponseHandler(this);
+        this.subHandler = new SubscribeResponseHandler(this);
+        this.unsubHandler = new UnsubscribeResponseHandler(this);
+    }
+
+    // Public getters needed for the private members
+    public HedwigClient getClient() {
+        return client;
+    }
+
+    public HedwigSubscriber getSubscriber() {
+        return sub;
+    }
+
+    public ClientConfiguration getConfiguration() {
+        return cfg;
+    }
+
+    public SubscribeResponseHandler getSubscribeResponseHandler() {
+        return subHandler;
+    }
+
+    @Override
+    public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
+        // If the Message is not a PubSubResponse, just send it upstream and let
+        // something else handle it.
+        if (!(e.getMessage() instanceof PubSubResponse)) {
+            ctx.sendUpstream(e);
+        }
+        // Retrieve the PubSubResponse from the Message that was sent by the
+        // server.
+        PubSubResponse response = (PubSubResponse) e.getMessage();
+        if (logger.isDebugEnabled())
+            logger.debug("Response received from host: " + HedwigClient.getHostFromChannel(ctx.getChannel())
+                    + ", response: " + response);
+
+        // Determine if this PubSubResponse is an ack response for a PubSub
+        // Request or if it is a message being pushed to the client subscriber.
+        if (response.hasMessage()) {
+            // Subscribed messages being pushed to the client so handle/consume
+            // it and return.
+            subHandler.handleSubscribeMessage(response);
+            return;
+        }
+
+        // Response is an ack to a prior PubSubRequest so first retrieve the
+        // PubSub data for this txn.
+        PubSubData pubSubData = txn2PubSubData.containsKey(response.getTxnId()) ? txn2PubSubData.get(response
+                .getTxnId()) : null;
+        // Validate that the PubSub data for this txn is stored. If not, just
+        // log an error message and return since we don't know how to handle
+        // this.
+        if (pubSubData == null) {
+            logger.error("PubSub Data was not found for PubSubResponse: " + response);
+            return;
+        }
+
+        // Now that we've retrieved the PubSubData for this specific Txn ID, we
+        // can remove it from the Map.
+        txn2PubSubData.remove(response.getTxnId());
+
+        // Store the topic2Host mapping if this wasn't a server redirect. We'll
+        // assume that if the server was able to have an open Channel connection
+        // to the client, and responded with an ack message other than the
+        // NOT_RESPONSIBLE_FOR_TOPIC one, it is the correct topic master.
+        if (!response.getStatusCode().equals(StatusCode.NOT_RESPONSIBLE_FOR_TOPIC)) {
+            client.storeTopic2HostMapping(pubSubData, ctx.getChannel());
+        }
+
+        // Depending on the operation type, call the appropriate handler.
+        switch (pubSubData.operationType) {
+        case PUBLISH:
+            pubHandler.handlePublishResponse(response, pubSubData, ctx.getChannel());
+            break;
+        case SUBSCRIBE:
+            subHandler.handleSubscribeResponse(response, pubSubData, ctx.getChannel());
+            break;
+        case UNSUBSCRIBE:
+            unsubHandler.handleUnsubscribeResponse(response, pubSubData, ctx.getChannel());
+            break;
+        default:
+            // The above are the only expected PubSubResponse messages received
+            // from the server for the various client side requests made.
+            logger.error("Response received from server is for an unhandled operation type, txnId: "
+                    + response.getTxnId() + ", operationType: " + pubSubData.operationType);
+        }
+    }
+
+    /**
+     * Logic to repost a PubSubRequest when the server responds with a redirect
+     * indicating they are not the topic master.
+     * 
+     * @param response
+     *            PubSubResponse from the server for the redirect
+     * @param pubSubData
+     *            PubSubData for the original PubSubRequest made
+     * @param channel
+     *            Channel Channel we used to make the original PubSubRequest
+     * @throws Exception
+     *             Throws an exception if there was an error in doing the
+     *             redirect repost of the PubSubRequest
+     */
+    public void handleRedirectResponse(PubSubResponse response, PubSubData pubSubData, Channel channel)
+            throws Exception {
+        if (logger.isDebugEnabled())
+            logger.debug("Handling a redirect from host: " + HedwigClient.getHostFromChannel(channel) + ", response: "
+                    + response + ", pubSubData: " + pubSubData);
+        // In this case, the PubSub request was done to a server that is not
+        // responsible for the topic. First make sure that we haven't
+        // exceeded the maximum number of server redirects.
+        int curNumServerRedirects = (pubSubData.triedServers == null) ? 0 : pubSubData.triedServers.size();
+        if (curNumServerRedirects >= cfg.getMaximumServerRedirects()) {
+            // We've already exceeded the maximum number of server redirects
+            // so consider this as an error condition for the client.
+            // Invoke the operationFailed callback and just return.
+            if (logger.isDebugEnabled())
+                logger.debug("Exceeded the number of server redirects (" + curNumServerRedirects + ") so error out.");
+            pubSubData.callback.operationFailed(pubSubData.context, new ServiceDownException(
+                    new TooManyServerRedirectsException("Already reached max number of redirects: "
+                            + curNumServerRedirects)));
+            return;
+        }
+
+        // We will redirect and try to connect to the correct server
+        // stored in the StatusMsg of the response. First store the
+        // server that we sent the PubSub request to for the topic.
+        ByteString triedServer = ByteString.copyFromUtf8(HedwigSocketAddress.sockAddrStr(HedwigClient
+                .getHostFromChannel(channel)));
+        if (pubSubData.triedServers == null)
+            pubSubData.triedServers = new LinkedList<ByteString>();
+        pubSubData.shouldClaim = true;
+        pubSubData.triedServers.add(triedServer);
+
+        // Now get the redirected server host (expected format is
+        // Hostname:Port:SSLPort) from the server's response message. If one is
+        // not given for some reason, then redirect to the default server
+        // host/VIP to repost the request.
+        String statusMsg = response.getStatusMsg();
+        InetSocketAddress redirectedHost;
+        if (statusMsg != null && statusMsg.length() > 0) {
+            if (cfg.isSSLEnabled()) {
+                redirectedHost = new HedwigSocketAddress(statusMsg).getSSLSocketAddress();
+            } else {
+                redirectedHost = new HedwigSocketAddress(statusMsg).getSocketAddress();
+            }
+        } else {
+            redirectedHost = cfg.getDefaultServerHost();
+        }
+
+        // Make sure the redirected server is not one we've already attempted
+        // already before in this PubSub request.
+        if (pubSubData.triedServers.contains(ByteString.copyFromUtf8(HedwigSocketAddress.sockAddrStr(redirectedHost)))) {
+            logger.error("We've already sent this PubSubRequest before to redirectedHost: " + redirectedHost
+                    + ", pubSubData: " + pubSubData);
+            pubSubData.callback.operationFailed(pubSubData.context, new ServiceDownException(
+                    new ServerRedirectLoopException("Already made the request before to redirected host: "
+                            + redirectedHost)));
+            return;
+        }
+
+        // Check if we already have a Channel open to the redirected server
+        // host.
+        boolean redirectedHostChannelExists = pub.host2Channel.containsKey(redirectedHost) ? true : false;
+        if (pubSubData.operationType.equals(OperationType.SUBSCRIBE) || !redirectedHostChannelExists) {
+            // We don't have an existing channel to the redirected host OR this
+            // is a redirected Subscribe request. For Subscribe requests, we
+            // always want to create a new unique Channel connection to the
+            // topic master server for the TopicSubscriber.
+            client.doConnect(pubSubData, redirectedHost);
+        } else {
+            // For Publish and Unsubscribe requests, we can just post the
+            // request again directly on the existing cached redirected host
+            // channel.
+            if (pubSubData.operationType.equals(OperationType.PUBLISH)) {
+                pub.doPublish(pubSubData, pub.host2Channel.get(redirectedHost));
+            } else if (pubSubData.operationType.equals(OperationType.UNSUBSCRIBE)) {
+                sub.doSubUnsub(pubSubData, pub.host2Channel.get(redirectedHost));
+            }
+        }
+    }
+
+    // Logic to deal with what happens when a Channel to a server host is
+    // disconnected.
+    @Override
+    public void channelDisconnected(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
+        // If this channel was closed explicitly by the client code,
+        // we do not need to do any of this logic. This could happen
+        // for redundant Publish channels created or redirected subscribe
+        // channels that are not used anymore or when we shutdown the
+        // client and manually close all of the open channels.
+        // Also don't do any of the disconnect logic if the client has stopped.
+        if (channelClosedExplicitly || client.hasStopped())
+            return;
+
+        // Make sure the host retrieved is not null as there could be some weird
+        // channel disconnect events happening during a client shutdown.
+        // If it is, just return as there shouldn't be anything we need to do.
+        InetSocketAddress host = HedwigClient.getHostFromChannel(ctx.getChannel());
+        logger.warn("Channel was disconnected to host: " + host);
+        if (host == null)
+            return;
+
+        // If this Channel was used for Publish and Unsubscribe flows, just
+        // remove it from the HewdigPublisher's host2Channel map. We will
+        // re-establish a Channel connection to that server when the next
+        // publish/unsubscribe request to a topic that the server owns occurs.
+        PubSubData origSubData = subHandler.getOrigSubData();
+
+        // Now determine what type of operation this channel was used for.
+        if (origSubData == null) {
+            // Only remove the Channel from the mapping if this current
+            // disconnected channel is the same as the cached entry.
+            // Due to race concurrency situations, it is possible to
+            // create multiple channels to the same host for publish
+            // and unsubscribe requests.
+            if (pub.host2Channel.containsKey(host) && pub.host2Channel.get(host).equals(ctx.getChannel())) {
+                if (logger.isDebugEnabled())
+                    logger.debug("Disconnected channel for host: " + host
+                            + " was for Publish/Unsubscribe requests so remove all references to it.");
+                pub.host2Channel.remove(host);
+                client.clearAllTopicsForHost(host);
+            }
+        } else {
+            // Subscribe channel disconnected so first close and clear all
+            // cached Channel data set up for this topic subscription.
+            sub.closeSubscription(origSubData.topic, origSubData.subscriberId);
+            client.clearAllTopicsForHost(host);
+            // Since the connection to the server host that was responsible
+            // for the topic died, we are not sure about the state of that
+            // server. Resend the original subscribe request data to the default
+            // server host/VIP. Also clear out all of the servers we've
+            // contacted or attempted to from this request as we are starting a
+            // "fresh" subscribe request.
+            origSubData.clearServersList();
+            // Set a new type of VoidCallback for this async call. We need this
+            // hook so after the subscribe reconnect has completed, delivery for
+            // that topic subscriber should also be restarted (if it was that
+            // case before the channel disconnect).
+            origSubData.callback = new SubscribeReconnectCallback(origSubData, client, subHandler.getMessageHandler());
+            origSubData.context = null;
+            if (logger.isDebugEnabled())
+                logger.debug("Disconnected subscribe channel so reconnect with origSubData: " + origSubData);
+            client.doConnect(origSubData, cfg.getDefaultServerHost());
+        }
+
+        // Finally, all of the PubSubRequests that are still waiting for an ack
+        // response from the server need to be removed and timed out. Invoke the
+        // operationFailed callbacks on all of them. Use the
+        // UncertainStateException since the server did receive the request but
+        // we're not sure of the state of the request since the ack response was
+        // never received.
+        for (PubSubData pubSubData : txn2PubSubData.values()) {
+            if (logger.isDebugEnabled())
+                logger.debug("Channel disconnected so invoking the operationFailed callback for pubSubData: "
+                        + pubSubData);
+            pubSubData.callback.operationFailed(pubSubData.context, new UncertainStateException(
+                    "Server ack response never received before server connection disconnected!"));
+        }
+        txn2PubSubData.clear();
+    }
+
+    // Logic to deal with what happens when a Channel to a server host is
+    // connected. This is needed if the client is using an SSL port to
+    // communicate with the server. If so, we need to do the SSL handshake here
+    // when the channel is first connected.
+    @Override
+    public void channelConnected(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
+        // No need to initiate the SSL handshake if we are closing this channel
+        // explicitly or the client has been stopped.
+        if (cfg.isSSLEnabled() && !channelClosedExplicitly && !client.hasStopped()) {
+            if (logger.isDebugEnabled()) {
+                logger.debug("Initiating the SSL handshake");
+            }
+            ctx.getPipeline().get(SslHandler.class).handshake(e.getChannel());
+        }
+    }
+
+    @Override
+    public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
+        e.getCause().printStackTrace();
+        e.getChannel().close();
+    }
+
+}

+ 98 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/netty/WriteCallback.java

@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.netty;
+
+import java.net.InetSocketAddress;
+import java.util.LinkedList;
+
+import org.apache.log4j.Logger;
+import org.jboss.netty.channel.ChannelFuture;
+import org.jboss.netty.channel.ChannelFutureListener;
+
+import com.google.protobuf.ByteString;
+import org.apache.hedwig.client.conf.ClientConfiguration;
+import org.apache.hedwig.client.data.PubSubData;
+import org.apache.hedwig.exceptions.PubSubException.ServiceDownException;
+import org.apache.hedwig.util.HedwigSocketAddress;
+
+public class WriteCallback implements ChannelFutureListener {
+
+    private static Logger logger = Logger.getLogger(WriteCallback.class);
+
+    // Private member variables
+    private PubSubData pubSubData;
+    private final HedwigClient client;
+    private final ClientConfiguration cfg;
+
+    // Constructor
+    public WriteCallback(PubSubData pubSubData, HedwigClient client) {
+        super();
+        this.pubSubData = pubSubData;
+        this.client = client;
+        this.cfg = client.getConfiguration();
+    }
+
+    public void operationComplete(ChannelFuture future) throws Exception {
+        // If the client has stopped, there is no need to proceed
+        // with any callback logic here.
+        if (client.hasStopped())
+            return;
+        
+        // When the write operation to the server is done, we just need to check
+        // if it was successful or not.
+        InetSocketAddress host = HedwigClient.getHostFromChannel(future.getChannel());
+        if (!future.isSuccess()) {
+            logger.error("Error writing on channel to host: " + host);
+            // On a write failure for a PubSubRequest, we also want to remove
+            // the saved txnId to PubSubData in the ResponseHandler. These
+            // requests will not receive an ack response from the server
+            // so there is no point storing that information there anymore.
+            HedwigClient.getResponseHandlerFromChannel(future.getChannel()).txn2PubSubData.remove(pubSubData.txnId);
+
+            // If we were not able to write on the channel to the server host,
+            // the host could have died or something is wrong with the channel
+            // connection where we can connect to the host, but not write to it.
+            ByteString hostString = (host == null) ? null : ByteString.copyFromUtf8(HedwigSocketAddress.sockAddrStr(host));
+            if (pubSubData.writeFailedServers != null && pubSubData.writeFailedServers.contains(hostString)) {
+                // We've already tried to write to this server previously and
+                // failed, so invoke the operationFailed callback.
+                logger.error("Error writing to host more than once so just invoke the operationFailed callback!");
+                pubSubData.callback.operationFailed(pubSubData.context, new ServiceDownException(
+                        "Error while writing message to server: " + hostString));
+            } else {
+                if (logger.isDebugEnabled())
+                    logger.debug("Try to send the PubSubRequest again to the default server host/VIP for pubSubData: "
+                            + pubSubData);
+                // Keep track of this current server that we failed to write to
+                // but retry the request on the default server host/VIP.
+                if (pubSubData.writeFailedServers == null)
+                    pubSubData.writeFailedServers = new LinkedList<ByteString>();
+                pubSubData.writeFailedServers.add(hostString);
+                client.doConnect(pubSubData, cfg.getDefaultServerHost());
+            }
+        } else {
+            // Now that the write to the server is done, we have to wait for it
+            // to respond. The ResponseHandler will take care of the ack
+            // response from the server before we can determine if the async
+            // PubSub call has really completed successfully or not.
+            if (logger.isDebugEnabled())
+                logger.debug("Successfully wrote to host: " + host + " for pubSubData: " + pubSubData);
+        }
+    }
+
+}

+ 41 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/ssl/SslClientContextFactory.java

@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.ssl;
+
import java.security.GeneralSecurityException;

import javax.net.ssl.SSLContext;

import org.apache.hedwig.client.conf.ClientConfiguration;
+
+public class SslClientContextFactory extends SslContextFactory {
+
+    public SslClientContextFactory(ClientConfiguration cfg) {
+        try {
+            // Create the SSL context.
+            ctx = SSLContext.getInstance("TLS");
+            ctx.init(null, getTrustManagers(), null);
+        } catch (Exception ex) {
+            throw new RuntimeException(ex);
+        }
+    }
+
+    @Override
+    protected boolean isClient() {
+        return true;
+    }
+
+}

+ 65 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/client/ssl/SslContextFactory.java

@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client.ssl;
+
+import java.security.cert.CertificateException;
+import java.security.cert.X509Certificate;
+
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLEngine;
+import javax.net.ssl.TrustManager;
+import javax.net.ssl.X509TrustManager;
+
/**
 * Base factory for SSL contexts and engines. Concrete subclasses decide
 * whether engines run in client or server mode and are expected to
 * initialize the protected {@code ctx} field.
 */
public abstract class SslContextFactory {

    /** The SSL context backing the engines this factory creates. */
    protected SSLContext ctx;

    /** @return true if engines from this factory should use client mode. */
    protected abstract boolean isClient();

    /**
     * @return the underlying SSLContext held by this factory (null until a
     *         subclass initializes it).
     */
    public SSLContext getContext() {
        return ctx;
    }

    /**
     * Creates a fresh SSLEngine from the context, configured for client or
     * server mode according to {@link #isClient()}.
     */
    public SSLEngine getEngine() {
        final SSLEngine sslEngine = ctx.createSSLEngine();
        sslEngine.setUseClientMode(isClient());
        return sslEngine;
    }

    /**
     * Returns a single-element TrustManager array whose manager accepts every
     * certificate chain unconditionally.
     *
     * NOTE(review): this disables certificate validation entirely (invalid,
     * self-signed and expired certificates are all accepted), which permits
     * man-in-the-middle attacks; acceptable only for testing or fully
     * trusted networks.
     */
    protected TrustManager[] getTrustManagers() {
        final X509TrustManager trustEverything = new X509TrustManager() {

            @Override
            public X509Certificate[] getAcceptedIssuers() {
                // No specific issuers are advertised since everything is
                // trusted anyway.
                return new X509Certificate[0];
            }

            @Override
            public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException {
                // Intentionally empty: every server chain is trusted.
            }

            @Override
            public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {
                // Intentionally empty: every client chain is trusted.
            }
        };
        return new TrustManager[] { trustEverything };
    }

}

+ 45 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/conf/AbstractConfiguration.java

@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.conf;
+
+import java.net.URL;
+
+import org.apache.commons.configuration.CompositeConfiguration;
+import org.apache.commons.configuration.Configuration;
+import org.apache.commons.configuration.ConfigurationException;
+import org.apache.commons.configuration.PropertiesConfiguration;
+
+public abstract class AbstractConfiguration {
+    protected CompositeConfiguration conf;
+
+    protected AbstractConfiguration() {
+        conf = new CompositeConfiguration();
+    }
+
+    /**
+     * You can load configurations in precedence order. The first one takes
+     * precedence over any loaded later.
+     * 
+     * @param confURL
+     */
+    public void loadConf(URL confURL) throws ConfigurationException {
+        Configuration loadedConf = new PropertiesConfiguration(confURL);
+        conf.addConfiguration(loadedConf);
+
+    }
+}

+ 47 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/Callback.java

@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.util;
+
+import org.apache.hedwig.exceptions.PubSubException;
+
/**
 * Generic callback interface used for the results of asynchronous
 * operations.
 *
 * @param <T>
 *            Type of the value produced by a successful operation.
 */
public interface Callback<T> {

    /**
     * This method is called when the asynchronous operation finishes
     * successfully.
     * 
     * @param ctx
     *            The context object supplied when the operation was started
     * @param resultOfOperation
     *            The result value of the operation (may be null for
     *            operations that produce no value)
     */
    public abstract void operationFinished(Object ctx, T resultOfOperation);

    /**
     * This method is called when the operation failed due to some reason. The
     * reason for failure is passed in.
     * 
     * @param ctx
     *            The context for the callback
     * @param exception
     *            The reason for the failure of the operation
     */
    public abstract void operationFailed(Object ctx, PubSubException exception);

}

+ 185 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/CallbackUtils.java

@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.util;
+
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+import org.apache.hedwig.exceptions.PubSubException;
+import org.apache.hedwig.exceptions.PubSubException.CompositeException;
+
+public class CallbackUtils {
+
+    /**
+     * A callback that waits for all of a number of events to fire. If any fail,
+     * then fail the final callback with a composite exception.
+     * 
+     * TODO: change this to use any Exception and make CompositeException
+     * generic, not a PubSubException.
+     * 
+     * @param expected
+     *            Number of expected callbacks.
+     * @param cb
+     *            The final callback to call.
+     * @param ctx
+     * @param logger
+     *            May be null.
+     * @param level
+     *            Required iff logger != null.
+     * @param successMsg
+     *            If not null, then this is logged on success.
+     * @param failureMsg
+     *            If not null, then this is logged on failure.
+     * @param eagerErrorHandler
+     *            If not null, then this will be executed after the first
+     *            failure (but before the final failure callback). Useful for
+     *            releasing resources, etc. as soon as we know the composite
+     *            operation is doomed.
+     * @return
+     */
+    public static Callback<Void> multiCallback(final int expected, final Callback<Void> cb, final Object ctx,
+            final Logger logger, final Level level, final Object successMsg, final Object failureMsg,
+            Runnable eagerErrorHandler) {
+        if (expected == 0) {
+            cb.operationFinished(ctx, null);
+            return null;
+        } else {
+            return new Callback<Void>() {
+
+                final AtomicInteger done = new AtomicInteger();
+                final LinkedBlockingQueue<PubSubException> exceptions = new LinkedBlockingQueue<PubSubException>();
+
+                private void tick() {
+                    if (done.incrementAndGet() == expected) {
+                        if (exceptions.isEmpty()) {
+                            cb.operationFinished(ctx, null);
+                        } else {
+                            cb.operationFailed(ctx, new CompositeException(exceptions));
+                        }
+                    }
+                }
+
+                @Override
+                public void operationFailed(Object ctx, PubSubException exception) {
+                    if (logger != null && failureMsg != null)
+                        logger.log(level, failureMsg, exception);
+                    exceptions.add(exception);
+                    tick();
+                }
+
+                @Override
+                public void operationFinished(Object ctx, Void resultOfOperation) {
+                    if (logger != null && successMsg != null)
+                        logger.log(level, successMsg);
+                    tick();
+                }
+
+            };
+        }
+    }
+
+    /**
+     * A callback that waits for all of a number of events to fire. If any fail,
+     * then fail the final callback with a composite exception.
+     */
+    public static Callback<Void> multiCallback(int expected, Callback<Void> cb, Object ctx) {
+        return multiCallback(expected, cb, ctx, null, null, null, null, null);
+    }
+
+    /**
+     * A callback that waits for all of a number of events to fire. If any fail,
+     * then fail the final callback with a composite exception.
+     */
+    public static Callback<Void> multiCallback(int expected, Callback<Void> cb, Object ctx, Runnable eagerErrorHandler) {
+        return multiCallback(expected, cb, ctx, null, null, null, null, eagerErrorHandler);
+    }
+
+    private static Callback<Void> nop = new Callback<Void>() {
+
+        @Override
+        public void operationFailed(Object ctx, PubSubException exception) {
+        }
+
+        @Override
+        public void operationFinished(Object ctx, Void resultOfOperation) {
+        }
+
+    };
+
+    /**
+     * A do-nothing callback.
+     */
+    public static Callback<Void> nop() {
+        return nop;
+    }
+
+    /**
+     * Logs what happened before continuing the callback chain.
+     */
+    public static <T> Callback<T> logger(final Logger logger, final Level successLevel, final Level failureLevel, final Object successMsg,
+            final Object failureMsg, final Callback<T> cont) {
+        return new Callback<T>() {
+
+            @Override
+            public void operationFailed(Object ctx, PubSubException exception) {
+                logger.log(failureLevel, failureMsg, exception);
+                if (cont != null)
+                    cont.operationFailed(ctx, exception);
+            }
+
+            @Override
+            public void operationFinished(Object ctx, T resultOfOperation) {
+                logger.log(successLevel, successMsg);
+                if (cont != null)
+                    cont.operationFinished(ctx, resultOfOperation);
+            }
+
+        };
+    }
+
+    /**
+     * Logs what happened (no continuation).
+     */
+    public static Callback<Void> logger(Logger logger, Level successLevel, Level failureLevel, Object successMsg, Object failureMsg) {
+        return logger(logger, successLevel, failureLevel, successMsg, failureMsg, nop());
+    }
+
+    /**
+     * Return a Callback<Void> that just calls the given Callback cb with the
+     * bound result.
+     */
+    public static <T> Callback<Void> curry(final Callback<T> cb, final T result) {
+        return new Callback<Void>() {
+
+            @Override
+            public void operationFailed(Object ctx, PubSubException exception) {
+                cb.operationFailed(ctx, exception);
+            }
+
+            @Override
+            public void operationFinished(Object ctx, Void resultOfOperation) {
+                cb.operationFinished(ctx, result);
+            }
+
+        };
+    }
+
+}

+ 49 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/ConcurrencyUtils.java

@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.util;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CyclicBarrier;
+
/**
 * Small helpers that convert the checked exceptions of blocking
 * java.util.concurrent primitives into unchecked RuntimeExceptions.
 */
public class ConcurrencyUtils {

    /**
     * Puts a value on a blocking queue, wrapping any exception in a
     * RuntimeException. If the calling thread is interrupted while waiting,
     * its interrupt status is restored before rethrowing so callers up the
     * stack can still observe the interruption.
     */
    public static <T, U extends T, V extends BlockingQueue<T>> void put(V queue, U value) {
        try {
            queue.put(value);
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt(); // preserve interrupt status
            throw new RuntimeException(ex);
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    }

    /**
     * Takes a value from a blocking queue, wrapping any exception in a
     * RuntimeException and restoring the interrupt status on interruption.
     */
    public static <T> T take(BlockingQueue<T> queue) {
        try {
            return queue.take();
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt(); // preserve interrupt status
            throw new RuntimeException(ex);
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    }

    /**
     * Awaits a cyclic barrier, wrapping any exception (interruption or a
     * broken barrier) in a RuntimeException and restoring the interrupt
     * status on interruption.
     */
    public static void await(CyclicBarrier barrier) {
        try {
            barrier.await();
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt(); // preserve interrupt status
            throw new RuntimeException(ex);
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    }

}

+ 50 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/Either.java

@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.util;
+
/**
 * A simple disjoint-union holder carrying a "left" value of type T and/or a
 * "right" value of type U. Instances are obtained through the static factory
 * methods; an absent side is represented by null.
 */
public class Either<T, U> {

    /** Left value, or null when absent. */
    private final T leftValue;
    /** Right value, or null when absent. */
    private final U rightValue;

    private Either(T left, U right) {
        this.leftValue = left;
        this.rightValue = right;
    }

    /** Creates an Either holding both values (callers typically pass one null). */
    public static <T, U> Either<T, U> of(T x, U y) {
        return new Either<T, U>(x, y);
    }

    /** Creates an Either holding only a left value. */
    public static <T, U> Either<T, U> left(T x) {
        return new Either<T, U>(x, null);
    }

    /** Creates an Either holding only a right value. */
    public static <T, U> Either<T, U> right(U y) {
        return new Either<T, U>(null, y);
    }

    /** @return the left value, or null when absent */
    public T left() {
        return leftValue;
    }

    /** @return the right value, or null when absent */
    public U right() {
        return rightValue;
    }

}

+ 97 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/FileUtils.java

@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.util;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.log4j.Logger;
+
+public class FileUtils {
+
+    static DirDeleterThred dirDeleterThread;
+    static Logger log = Logger.getLogger(FileUtils.class);
+
+    static {
+        dirDeleterThread = new DirDeleterThred();
+        Runtime.getRuntime().addShutdownHook(dirDeleterThread);
+    }
+
+    public static File createTempDirectory(String prefix) throws IOException {
+        return createTempDirectory(prefix, null);
+    }
+
+    public static File createTempDirectory(String prefix, String suffix) throws IOException {
+        File tempDir = File.createTempFile(prefix, suffix);
+        if (!tempDir.delete()) {
+            throw new IOException("Could not delete temp file: " + tempDir.getAbsolutePath());
+        }
+
+        if (!tempDir.mkdir()) {
+            throw new IOException("Could not create temp directory: " + tempDir.getAbsolutePath());
+        }
+
+        dirDeleterThread.addDirToDelete(tempDir);
+        return tempDir;
+
+    }
+
+    static class DirDeleterThred extends Thread {
+        List<File> dirsToDelete = new LinkedList<File>();
+
+        public synchronized void addDirToDelete(File dir) {
+            dirsToDelete.add(dir);
+        }
+
+        @Override
+        public void run() {
+            synchronized (this) {
+                for (File dir : dirsToDelete) {
+                    deleteDirectory(dir);
+                }
+            }
+        }
+
+        protected void deleteDirectory(File dir) {
+            if (dir.isFile()) {
+                if (!dir.delete()) {
+                    log.error("Could not delete " + dir.getAbsolutePath());
+                }
+                return;
+            }
+
+            File[] files = dir.listFiles();
+            if (files == null) {
+                return;
+            }
+
+            for (File f : files) {
+                deleteDirectory(f);
+            }
+
+            if (!dir.delete()) {
+                log.error("Could not delete directory: " + dir.getAbsolutePath());
+            }
+
+        }
+
+    }
+
+}

+ 138 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/HedwigSocketAddress.java

@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.util;
+
+import java.net.InetSocketAddress;
+
+/**
+ * This is a data wrapper class that is basically an InetSocketAddress with one
+ * extra piece of information for the SSL port (optional). This is used by
+ * Hedwig so we can encapsulate both regular and SSL port information in one
+ * data structure. Hedwig hub servers can be configured to listen on the
+ * standard regular port and additionally on an optional SSL port. The String
+ * representation of a HedwigSocketAddress is: <hostname>:<port>:<SSL
+ * port(optional)>
+ */
+public class HedwigSocketAddress {
+
+    // Member fields that make up this class.
+    private final String hostname;
+    private final int port;
+    private final int sslPort;
+
+    private final InetSocketAddress socketAddress;
+    private final InetSocketAddress sslSocketAddress;
+
+    // Constants used by this class.
+    public static final String COLON = ":";
+    private static final int NO_SSL_PORT = -1;
+
+    // Constructor that takes in both a regular and SSL port.
+    public HedwigSocketAddress(String hostname, int port, int sslPort) {
+        this.hostname = hostname;
+        this.port = port;
+        this.sslPort = sslPort;
+        socketAddress = new InetSocketAddress(hostname, port);
+        if (sslPort != NO_SSL_PORT)
+            sslSocketAddress = new InetSocketAddress(hostname, sslPort);
+        else
+            sslSocketAddress = null;
+    }
+
+    // Constructor that only takes in a regular port.
+    public HedwigSocketAddress(String hostname, int port) {
+        this(hostname, port, NO_SSL_PORT);
+    }
+
+    // Constructor from a String "serialized" version of this class.
+    public HedwigSocketAddress(String addr) {
+        String[] parts = addr.split(COLON);
+        this.hostname = parts[0];
+        this.port = Integer.parseInt(parts[1]);
+        if (parts.length > 2)
+            this.sslPort = Integer.parseInt(parts[2]);
+        else
+            this.sslPort = NO_SSL_PORT;
+        socketAddress = new InetSocketAddress(hostname, port);
+        if (sslPort != NO_SSL_PORT)
+            sslSocketAddress = new InetSocketAddress(hostname, sslPort);
+        else
+            sslSocketAddress = null;
+    }
+
+    // Public getters
+    public String getHostname() {
+        return hostname;
+    }
+
+    public int getPort() {
+        return port;
+    }
+
+    public int getSSLPort() {
+        return sslPort;
+    }
+
+    // Method to return an InetSocketAddress for the regular port.
+    public InetSocketAddress getSocketAddress() {
+        return socketAddress;
+    }
+
+    // Method to return an InetSocketAddress for the SSL port.
+    // Note that if no SSL port (or an invalid value) was passed
+    // during object creation, this call will throw an IllegalArgumentException
+    // (runtime exception).
+    public InetSocketAddress getSSLSocketAddress() {
+        return sslSocketAddress;
+    }
+
+    // Method to determine if this object instance is SSL enabled or not
+    // (contains a valid SSL port).
+    public boolean isSSLEnabled() {
+        return sslPort != NO_SSL_PORT;
+    }
+
+    // Return the String "serialized" version of this object.
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        sb.append(hostname).append(COLON).append(port).append(COLON).append(sslPort);
+        return sb.toString();
+    }
+
+    // Implement an equals method comparing two HedwigSocketAddress objects.
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof HedwigSocketAddress))
+            return false;
+        HedwigSocketAddress that = (HedwigSocketAddress) obj;
+        return (this.hostname.equals(that.hostname) && (this.port == that.port) && (this.sslPort == that.sslPort));
+    }
+
+    // Static helper method to return the string representation for an
+    // InetSocketAddress. The HedwigClient can only operate in SSL or non-SSL
+    // mode. So the server hosts it connects to will just be an
+    // InetSocketAddress instead of a HedwigSocketAddress. This utility method
+    // can be used so we can store these server hosts as strings (ByteStrings)
+    // in various places (e.g. list of server hosts we've connected to
+    // or wrote to unsuccessfully).
+    public static String sockAddrStr(InetSocketAddress addr) {
+        return addr.getAddress().getHostAddress() + ":" + addr.getPort();
+    }
+
+}

+ 43 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/Option.java

@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.util;
+
/**
 * Minimal holder for an optional value of type T; get() returns null when no
 * value was supplied.
 */
public class Option<T> {

    /** The wrapped value; null when absent. */
    private final T x;

    /** Static factory: an Option holding the given value. */
    public static <T> Option<T> of(T x) {
        return new Option<T>(x);
    }

    /** Static factory: an empty Option. */
    public static <T> Option<T> of() {
        return new Option<T>();
    }

    /** Creates an empty Option. */
    public Option() {
        this.x = null;
    }

    /** Creates an Option holding the given value. */
    public Option(T x) {
        this.x = x;
    }

    /** @return the wrapped value, or null when absent */
    public T get() {
        return x;
    }

}

+ 42 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/Pair.java

@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.util;
+
/**
 * An immutable 2-tuple of a first value of type T and a second of type U.
 */
public class Pair<T, U> {

    /** First component. */
    private final T firstValue;
    /** Second component. */
    private final U secondValue;

    /** Constructs a pair from its two components. */
    public Pair(T x, U y) {
        this.firstValue = x;
        this.secondValue = y;
    }

    /** Static factory equivalent of the constructor. */
    public static <T, U> Pair<T, U> of(T x, U y) {
        return new Pair<T, U>(x, y);
    }

    /** @return the first component */
    public T first() {
        return firstValue;
    }

    /** @return the second component */
    public U second() {
        return secondValue;
    }

}

+ 56 - 0
src/contrib/hedwig/client/src/main/java/org/apache/hedwig/util/PathUtils.java

@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.util;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
/**
 * Utilities for manipulating slash-separated path strings. Runs of multiple
 * slashes are treated as a single separator throughout.
 */
public class PathUtils {

    /** Generate all prefixes for a path. "/a/b/c" -> ["/a","/a/b","/a/b/c"] */
    public static List<String> prefixes(String path) {
        List<String> result = new ArrayList<String>();
        StringBuilder prefix = new StringBuilder();
        for (String component : path.split("/+")) {
            // The split yields an empty first component for a leading '/'.
            if (component.equals("")) {
                continue;
            }
            prefix.append("/").append(component);
            result.add(prefix.toString());
        }
        return result;
    }

    /** Return true iff prefix is a prefix of path. */
    public static boolean isPrefix(String prefix, String path) {
        String[] prefixParts = prefix.split("/+");
        String[] pathParts = path.split("/+");
        if (prefixParts.length > pathParts.length) {
            return false;
        }
        for (int i = 0; i < prefixParts.length; i++) {
            if (!prefixParts[i].equals(pathParts[i])) {
                return false;
            }
        }
        return true;
    }

    /** Like File.getParent but always uses the / separator. */
    public static String parent(String path) {
        return new File(path).getParent().replace("\\", "/");
    }

}

+ 32 - 0
src/contrib/hedwig/client/src/main/resources/log4j.properties

@@ -0,0 +1,32 @@
+#
+# 
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# 
+#
+log4j.rootLogger=INFO, A1
+log4j.logger.org.apache.zookeeper = ERROR
+log4j.logger.org.apache.bookkeeper.client.QuorumOpMonitor = ERROR
+log4j.logger.org.apache.bookkeeper.proto.BookieClient = ERROR
+
+# A1 is set to be a ConsoleAppender.
+log4j.appender.A1=org.apache.log4j.ConsoleAppender
+
+# A1 uses PatternLayout.
+log4j.appender.A1.layout=org.apache.log4j.PatternLayout
+log4j.appender.A1.layout.ConversionPattern=%d %-4r [%t] %-5p %c %x - %m%n
+

+ 51 - 0
src/contrib/hedwig/client/src/test/java/org/apache/hedwig/client/AppTest.java

@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.client;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+/**
+ * Unit test for simple App.
+ */
public class AppTest extends TestCase {
    /**
     * Create the test case
     * 
     * @param testName
     *            name of the test case
     */
    public AppTest(String testName) {
        super(testName);
    }

    /**
     * @return the suite of tests being tested
     */
    public static Test suite() {
        return new TestSuite(AppTest.class);
    }

    /**
     * Rigourous Test :-)
     */
    // Maven-archetype placeholder test: only verifies that the JUnit harness
    // itself runs for this module.
    public void testApp() {
        assertTrue(true);
    }
}

+ 41 - 0
src/contrib/hedwig/client/src/test/java/org/apache/hedwig/util/TestFileUtils.java

@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.util;
+
+import java.io.File;
+
+import org.junit.Test;
+
+import junit.framework.TestCase;
+
public class TestFileUtils extends TestCase {

    @Test
    public void testCreateTmpDirectory() throws Exception {
        String prefix = "abc";
        String suffix = "def";
        // createTempDirectory should yield an existing directory whose name
        // carries the requested prefix and suffix.
        File dir = FileUtils.createTempDirectory(prefix, suffix);
        assertTrue(dir.isDirectory());
        assertTrue(dir.getName().startsWith(prefix));
        assertTrue(dir.getName().endsWith(suffix));
        // Run the registered shutdown-hook thread by hand (instead of waiting
        // for JVM exit) and verify it removes the directory it was given.
        FileUtils.dirDeleterThread.start();
        FileUtils.dirDeleterThread.join();
        assertFalse(dir.exists());
    }

}

+ 104 - 0
src/contrib/hedwig/client/src/test/java/org/apache/hedwig/util/TestHedwigSocketAddress.java

@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.util;
+
+import java.net.InetSocketAddress;
+
+import junit.framework.TestCase;
+
+import org.junit.Test;
+
+public class TestHedwigSocketAddress extends TestCase {
+
+    // Common values used by tests
+    private String hostname = "localhost";
+    private int port = 4080;
+    private int sslPort = 9876;
+    private int invalidPort = -9999;
+    private String COLON = ":";
+    
+    @Test
+    public void testCreateWithSSLPort() throws Exception {
+        HedwigSocketAddress addr = new HedwigSocketAddress(hostname, port, sslPort);
+        assertTrue(addr.getSocketAddress().equals(new InetSocketAddress(hostname, port)));
+        assertTrue(addr.getSSLSocketAddress().equals(new InetSocketAddress(hostname, sslPort)));
+    }
+
+    @Test
+    public void testCreateWithNoSSLPort() throws Exception {
+        HedwigSocketAddress addr = new HedwigSocketAddress(hostname, port);
+        assertTrue(addr.getSocketAddress().equals(new InetSocketAddress(hostname, port)));
+        assertTrue(addr.getSSLSocketAddress() == null);
+    }
+
+    @Test
+    public void testCreateFromStringWithSSLPort() throws Exception {
+        HedwigSocketAddress addr = new HedwigSocketAddress(hostname+COLON+port+COLON+sslPort);
+        assertTrue(addr.getSocketAddress().equals(new InetSocketAddress(hostname, port)));
+        assertTrue(addr.getSSLSocketAddress().equals(new InetSocketAddress(hostname, sslPort)));
+    }    
+
+    @Test
+    public void testCreateFromStringWithNoSSLPort() throws Exception {
+        HedwigSocketAddress addr = new HedwigSocketAddress(hostname+COLON+port);
+        assertTrue(addr.getSocketAddress().equals(new InetSocketAddress(hostname, port)));
+        assertTrue(addr.getSSLSocketAddress() == null);
+    }
+    
+    @Test
+    public void testCreateWithInvalidRegularPort() throws Exception {
+        boolean success = false;
+        try {
+            new HedwigSocketAddress(hostname+COLON+invalidPort);
+        }
+        catch (IllegalArgumentException e) {
+            success = true;
+        }
+        assertTrue(success);
+    }    
+
+    @Test
+    public void testCreateWithInvalidSSLPort() throws Exception {
+        boolean success = false;
+        try {
+            new HedwigSocketAddress(hostname, port, invalidPort);
+        }
+        catch (IllegalArgumentException e) {
+            success = true;
+        }
+        assertTrue(success);
+    }    
+
+    @Test
+    public void testToStringConversion() throws Exception {
+        HedwigSocketAddress addr = new HedwigSocketAddress(hostname, port, sslPort);
+        HedwigSocketAddress addr2 = new HedwigSocketAddress(addr.toString());
+        assertTrue(addr.getSocketAddress().equals(addr2.getSocketAddress()));
+        assertTrue(addr.getSSLSocketAddress().equals(addr2.getSSLSocketAddress()));
+        addr.toString().equals(addr2.toString());
+    }
+
+    @Test
+    public void testIsSSLEnabledFlag() throws Exception {
+        HedwigSocketAddress sslAddr = new HedwigSocketAddress(hostname, port, sslPort);
+        assertTrue(sslAddr.isSSLEnabled());
+        HedwigSocketAddress addr = new HedwigSocketAddress(hostname, port);
+        assertFalse(addr.isSSLEnabled());               
+    }
+    
+}

+ 54 - 0
src/contrib/hedwig/client/src/test/java/org/apache/hedwig/util/TestPathUtils.java

@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.util;
+
+import java.util.Arrays;
+
+import junit.framework.TestCase;
+
+import org.junit.Test;
+
+public class TestPathUtils extends TestCase {
+
+    @Test
+    public void testPrefixes() {
+        assertEquals(Arrays.asList(new String[] { "/a", "/a/b", "/a/b/c" }), PathUtils.prefixes("/a/b/c"));
+        assertEquals(Arrays.asList(new String[] { "/a", "/a/b", "/a/b/c" }), PathUtils.prefixes("///a///b///c"));
+
+    }
+
+    @Test
+    public void testIsPrefix() {
+        String[] paths = new String[] { "/", "/a", "/a/b" };
+        for (int i = 0; i < paths.length; i++) {
+            for (int j = 0; j <= i; j++) {
+                assertTrue(PathUtils.isPrefix(paths[j], paths[i]));
+                assertTrue(PathUtils.isPrefix(paths[j], paths[i] + "/"));
+                assertTrue(PathUtils.isPrefix(paths[j] + "/", paths[i]));
+                assertTrue(PathUtils.isPrefix(paths[j] + "/", paths[i] + "/"));
+            }
+            for (int j = i + 1; j < paths.length; j++) {
+                assertFalse(PathUtils.isPrefix(paths[j], paths[i]));
+                assertFalse(PathUtils.isPrefix(paths[j], paths[i] + "/"));
+                assertFalse(PathUtils.isPrefix(paths[j] + "/", paths[i]));
+                assertFalse(PathUtils.isPrefix(paths[j] + "/", paths[i] + "/"));
+            }
+        }
+    }
+
+}

+ 7 - 0
src/contrib/hedwig/conf/hw_client_sample.conf

@@ -0,0 +1,7 @@
+# The default Hedwig server host to contact (this ideally should be a VIP 
+# that fronts all of the Hedwig server hubs).
+default_server_host=hwServer1:4080:9876
+# This parameter is a boolean flag indicating if communication with the
+# server should be done via SSL for encryption. The Hedwig server hubs also
+# need to be SSL enabled for this to work.
+ssl_enabled=false

+ 10 - 0
src/contrib/hedwig/conf/hw_server_sample.conf

@@ -0,0 +1,10 @@
+# The ZooKeeper server host(s) for the Hedwig Server to use.
+zk_host=zkServer1:2181
+# The ZooKeeper session timeout, in milliseconds.
+zk_timeout=2000
+# The port at which the clients will connect.
+server_port=4080
+# The SSL port at which the clients will connect (only if SSL is enabled).
+ssl_server_port=9876
+# Flag indicating if the server should also operate in SSL mode.
+ssl_enabled=false

+ 146 - 0
src/contrib/hedwig/doc/build.txt

@@ -0,0 +1,146 @@
+% Building Hedwig
+% Yang Zhang
+
+Pre-requisites
+==============
+
+For the core itself:
+
+- JDK 6: <http://java.sun.com/>.  Ensure `$JAVA_HOME` is correctly set.
+- Maven 2: <http://maven.apache.org/>.
+- Protocol Buffers 2.3.0: <http://protobuf.googlecode.com/>.
+- Zookeeper 3.4.0: <http://hadoop.apache.org/zookeeper/>.  See below.
+- Bookkeeper 3.4.0: <http://hadoop.apache.org/zookeeper/>.  See below.
+
+Hedwig has been tested on Windows XP, Linux 2.6, and OS X.
+
+For the deployment and distributed support scripts in `hw.bash`:
+
+- Ant: <http://ant.apache.org/>, if you want to build Zookeeper.
+- Bash: <http://www.gnu.org/software/bash/>.
+- Coreutils: <http://www.gnu.org/software/coreutils/>.
+- Expect: <http://expect.nist.gov/>, if you want `unbuffer`.
+- Findutils: <http://www.gnu.org/software/findutils/>.
+- OpenSSH: <http://www.openssh.com/>.
+- Python 2.6: <http://python.org/>.
+
+Protocol Buffers
+----------------
+
+Hedwig requires the use of the Java runtime libraries of Protocol Buffers 2.3.0.
+These libraries need to be installed into your local maven repository. (Maven allows
+multiple versions to be installed.) To install protocol buffers into your local
+repository, you have to download the tarball and follow the README.txt 
+instructions. Note that you must first install the C++ package which contains the
+compiler (protoc) before you can build the java libraries. That will install the
+library jars in the local maven repository where Hedwig is currently configured
+to point to.
+
+Zookeeper and Bookkeeper
+------------------------
+
+Hedwig currently requires the version of Bookkeeper maintained in Apache's current
+trunk SVN repository (version 3.4.0). This is not a released version yet, but certain
+features Hedwig needs from BookKeeper are only available there.
+
+Hedwig also depends on ZK testing code for its own testing code.
+
+Since Hedwig is a Maven project, all these dependencies must be made available
+as Maven artifacts.  However, neither ZK nor BK are currently Mavenized.
+Hedwig provides some bash scripts to ease the installation of ZK, ZK tests, and
+BK, all as Maven artifacts.
+
+Currently, we have included the necessary ZooKeeper and BookKeeper jars in the Hedwig
+source itself in the $HEDWIG_DIR/server/lib directory. There is no need to retrieve
+them directly from the Apache download site as they are non-released trunk versions.
+
+#Not relevant right now since we already have the ZK jars already in the Hedwig source.
+To fetch and build ZK 3.4.0 (and its tests) in the current directory, run:
+
+  $HEDWIG_DIR/scripts/hw.bash get-zk
+
+#Not relevant right now, but when we start using the apache version of BK, to
+build the local version of BK:
+
+  $HEDWIG_DIR/scripts/hw.bash get-bk
+
+The $HEDWIG_DIR/server/lib directory contains all of the class and source jars for
+ZK, ZK tests, and BK.  To install these, go to that directory and run the following
+command to install them into your local maven repository:
+
+  $HEDWIG_DIR/scripts/hw.bash install-zk-bk
+
+Command-Line Instructions
+=========================
+
+From the main Hedwig directory, run `mvn package`.  This will produce the
+executable jars for both the client and server, as well as a server "assembly
+jar" containing all dependencies as well for easier deployment.
+
+See the User's Guide for instructions on running and usage.
+
+Eclipse Instructions
+====================
+
+To check out, build, and develop using Eclipse:
+
+1. Install the Subclipse plugin.  Update site:
+   <http://subclipse.tigris.org/update_1.4.x>.
+
+2. Install the Maven plugin.  Update site:
+   <http://m2eclipse.sonatype.org/update>.  From the list of packages available
+   from this site, select everything under the "Maven Integration" category,
+   and from the optional components select the ones with the word "SCM" in them.
+
+3. Go to Preferences > Team > SVN.  For the SVN interface, choose "Pure Java".
+
+4. Choose File > New > Project... > Maven > Checkout Maven Projects from SCM.
+
+5. For the SCM URL type, choose SVN.  For the URL, enter
+   SVN URL.  Maven will automatically
+   create a top-level Eclipse project for each of the 4 Maven modules
+   (recommended).  If you want fewer top-level projects, uncheck the option of
+   having a project for each module (under Advanced).
+
+6. Right-click on the `protocol` project and choose Run As > Maven
+   generate-sources.  This will generate the Java and C++ code for Protocol
+   Buffers.
+
+7. Refresh the workspace to pick up the generated code and add
+   `hedwig/protocol/target/generated-sources/java` as a source folder.  (6 & 7
+   should really be doable automatically, but I haven't figured out how.)
+
+You are now ready to run and debug the client and server code.  See the User's
+Guide for instructions on running and usage.
+
+Utilities
+=========
+
+Removing Conflicting Files in Jars
+----------------------------------
+
+The Maven assembly plugin that produces the fat assembly jar may end up putting
+into the jar files with the same conflicting paths from multiple dependencies.
+This makes working with the files from certain tools (like `jar`) a bit jarring.
+In our case, these files are not things like class files, but rather README and
+LICENSE files, so we can safely remove conflicts by choosing an arbitrary winner.
+To do so, run:
+
+  $HEDWIG_DIR/scripts/hw.bash strip-jar
+
+Adjusting Logging
+-----------------
+
+The logging level is something that is baked into the jar in the
+`log4j.properties` resource.  However, it would be wasteful to go through a
+Maven build cycle to update and adjust this.  If you're working from a source
+tree, it's also annoying to have to edit a source file to adjust the logging.
+
+We have a little script for tweaking the logging level.  After running
+`strip-jar`, run:
+
+  $HEDWIG_DIR/scripts/hw.bash set-logging WARN
+
+To see what the current logging level is:
+
+  $HEDWIG_DIR/scripts/hw.bash get-logging

+ 338 - 0
src/contrib/hedwig/doc/dev.txt

@@ -0,0 +1,338 @@
+% Developer's Guide
+
+Style
+=====
+
+We have provided an Eclipse Formatter file `formatter.xml` with all the
+formatting conventions currently used in the project.  Highlights include no
+tabs, 4-space indentation, and 120-char width.  Please respect this so as to
+reduce the amount of formatting-related noise produced in commits.
+
+Static Analysis
+===============
+
+We would like to use static analysis tools PMD and FindBugs to maintain code
+quality.  However, we have not yet arrived at a consensus on what rules to
+adhere to, and what to ignore.
+
+Netty Notes
+===========
+
+The asynchronous network IO infrastructure that Hedwig uses is [Netty].  Here
+are some notes on Netty's concurrency architecture and its filter pipeline
+design.
+
+[Netty]: http://www.jboss.org/netty
+
+Concurrency Architecture
+------------------------
+
+After calling `ServerBootstrap.bind()`, Netty starts a boss thread
+(`NioServerSocketPipelineSink.Boss`) that just accepts new connections and
+registers them with one of the workers from the `NioWorker` pool in round-robin
+fashion (pool size defaults to CPU count).  Each worker runs its own select
+loop over just the set of keys that have been registered with it.  Workers
+start lazily on demand and run only so long as there are interested fd's/keys.
+All selected events are handled in the same thread and sent up the pipeline
+attached to the channel (this association is established by the boss as soon as
+a new connection is accepted).
+
+All workers, and the boss, run via the executor thread pool; hence, the
+executor must support at least two simultaneous threads.
+
+Handler Pipeline
+----------------
+
+A pipeline implements the intercepting filter pattern.  A pipeline is a
+sequence of handlers.  Whenever a packet is read from the wire, it travels up
+the stream, stopping at each handler that can handle upstream events.
+Vice-versa for writes.  Between each filter, control flows back through the
+centralized pipeline, and a linked list of contexts keeps track of where we are
+in the pipeline (one context object per handler).
+
+Distributed Performance Evaluation
+==================================
+
+We've included some scripts to repeatedly run varying configurations of Hedwig
+on a distributed testbed and collect the resulting data.  The experiments use
+the `org.apache.hedwig.client.App` client application and are driven by
+`scripts/hw.bash` (via the `app` command).
+
+Currently, we have two types of experiments: subscription benchmarks and
+publishing benchmarks.
+
+Subscription Benchmarks
+-----------------------
+
+The subscription benchmark compares synchronous and asynchronous subscriptions.
+Because the synchronicity of subscriptions is a server configuration parameter,
+the servers must be restarted to change this.  The benchmarks varies the
+maximum number of outstanding subscription requests.
+
+To run the subscription benchmark with wilbur6 as the subscriber and wilbur1 as
+its default hub:
+
+  hosts=wilbur6 scripts/hw.bash sub-exp wilbur1
+
+This produces log files into the `sub` directory, which may then be analyzed
+using the analysis scripts.
+
+Publishing Benchmarks
+---------------------
+
+The publishing benchmark measures the throughput and latency of publishing
+messages within a LAN and across a WAN.  It varies the following parameters:
+
+- maximum number of outstanding publish requests
+- number of publishers
+- number of (local) receivers
+
+We vary each dimension separately (and have default settings) to avoid a
+combinatorial explosion in the number of configurations to explore.
+
+First, start a (standalone) instance:
+
+  scripts/hw.bash start-region '' $hwhost $zkhost $bk1host $bk2host $bk3host
+
+To run this over `$host1` through `$host3`, with the number of
+publishers/subscribers varying linearly over this set:
+
+  npars="20 40 60 80 100" scripts/hw.bash pub-exps "$host1 $host2 $host3" $hwhost $zkhost
+
+This will vary the number of outstanding publish requests as specified in
+`npars`.
+
+You may also optionally run this experiment with a second subscribing region:
+
+  scripts/hw.bash start-zk-bks $zkhost $bk1host $bk2host $bk3host
+  npars="..." scripts/hw.bash pub-exps "$host1 $host2 $host3" $hwhost $zkhost $rrecv $rhwhost $rzkhost
+
+where the final three extra arguments specify the client receiver, Hedwig, and
+Zookeeper hosts, in that order.
+
+This command will produce files into `./pub/`, which can then be process using
+`analyze.py`.
+
+Analysis and Visualization
+==========================
+
+`scripts/analyze.py` produces plots from the collected experimental data.  It
+has just a few immediate dependencies. In the following, the
+indentation signifies nested dependencies, like an upside-down tree:
+
+      component AAA that component AA requires
+      component AAB that component AA requires
+    component AA that component A requires
+      component ABA that component AB requires
+      component ABB that component AB requires
+    component AB that component A requires
+  component A that analysis tools depend on
+      component BAA that component BA requires
+      component BAB that component BA requires
+    component BA that component B requires
+      component BBA that component BB requires
+      component BBB that component BB requires
+    component BB that component B requires
+  component B that analysis tools depend on
+
+The reason the tree is upside-down is so that you can treat this whole thing as
+a chunk of bash script.
+
+[toast] is a utility that makes it a breeze to install all this software, but
+you do need to make sure your environment is set up correctly (e.g.
+`PKG_CONFIG_PATH` must point to `~/.toast/armed/lib/pkgconfig/`).
+
+Setup:
+
+  wget -O- http://toastball.net/toast/toast|perl -x - arm toast
+
+  toast arm "http://www.python.org/ftp/python/2.6.2/Python-2.6.2.tar.bz2"
+
+  toast arm numpy
+
+        toast arm libpng
+
+        toast arm pixman
+
+        toast arm freetype
+
+          toast arm 'ftp://xmlsoft.org/libxml2/libxml2-2.7.3.tar.gz'
+
+        toast arm fontconfig
+
+      toast arm cairo
+
+    toast arm pycairo
+
+  hg clone https://yang@bitbucket.org/yang/pycha/
+  pycha/setup.bash -d -p $path_to_install_to
+
+  svn co https://assorted.svn.sourceforge.net/svnroot/assorted/python-commons/trunk/ python-commons/
+  python-commons/setup.bash -d -p $path_to_install_to
+
+To analyze the publishing experiments, change to the `pub` data directory and
+run:
+
+  scripts/analyze.py pub
+
+To analyze the subscription experiments, change to the `sub` data directory
+and run:
+
+  scripts/analyze.py sub
+
+[toast]: http://toastball.net/toast/
+
+Debugging
+=========
+
+You can attach an Eclipse debugger (or any debugger) to a Java process running
+on a remote host, as long as it has been started with the appropriate JVM
+flags.  (See the Building Hedwig document to set up your Eclipse environment.)
+To launch something using `hw.bash` with debugger attachment enabled, prefix
+the command with `attach=true`, e.g.:
+
+  attach=true scripts/hw.bash start-regions myregions.cfg
+
+Profiling
+=========
+
+The scripts we have provided include ways for you to launch with YourKit
+profiling enabled.
+
+To deploy YourKit onto a number of machines:
+
+  hosts="..." scripts/hw.bash setup-yjp $path_to_yjp
+
+where the path points to the [YourKit Linux zip archive] (which is freely
+available and doesn't require any license to use).
+
+Now when using the scripts to run distributed experiments, to profile anything
+with YourKit, prefix the command with `use_yjp=true`.  E.g.:
+
+  use_yjp=true scripts/hw.bash start-regions regions.cfg
+
+Now you may start on your local machine the YourKit GUI and connect to the
+hosts that you're interested in.
+
+Note that you may want to disable the default set of filters in YourKit.
+
+[YourKit Linux zip archive]: http://www.yourkit.com/download/yjp-8.0.15.zip
+
+Pseudocode
+==========
+
+This summarizes the control flow through the system.
+
+  publishhandler
+    topicmgr.getowner
+      (maybe) claim the topic, calling back into persmgr.acquiredtopic
+        read /hedwig/standalone/topics/TOPIC (which should initially be empty)
+        for each line, parse as "STARTSEQ\tLEDGERID" # TODO how is this written?
+          ledger = bk.openledger(ledgerid)
+          lastid = ledger.getlast
+          if lastid > 0, lrs[startseq] = persmgr.ledger2lr[ledgerid] = new LedgerRange(ledger, ledgerid, startseq, startseq + lastid # TODO what are ledger ranges?
+        create new ledger for topic
+          # TODO read
+          lr = new LedgerRange(ledger, ledgerid, lastid, -1)
+          lrs[lastid] = lr
+          persmgr.topic2ranges[topic] = lrs
+    add region info to pub req and send that to persmgr.persistmessage
+      entryid = persmgr.topic2ranges[topic].last.ledger.addentry(the pub'd data)
+      update persmgr.topic2lastseq[topic]:
+        .local = persmgr.ledger2lr[ledger id].startseq + entryid
+        .regions = maxes of orig seq and incoming pub seq
+
+  subscribehandler
+    topicmgr.getowner...
+    delivmgr.startservingsubscription(topic, endpoint, ishubsubscriber)
+      delivmgr.endpoint2sub[endpoint] = new subscriber(lastseq = persmgr.getcurrentseqidfortopic(topic).local)
+      delivmgr.topic2ptr2subs[topic][ptr].add(sub)
+      sub.delivernextmessage
+        sub.curseq = persmgr.getseqidafterskipping(topic, sub.lastseq, skip = 1)
+        msg = persmgr.scansinglemessage(topic, seq = sub.curseq)
+          if persmgr.topic2lastseq[topic].local >= seq
+            lr = persmgr.topic2ranges[topic].floor(seq)
+            return lr.ledger.read(first = last = seq - lr.startseq)
+        if failed, then retry in 1 s
+        endpoint.send(msg)
+        movedeliveryptr
+          delivmgr.topic2ptr2subs[topic][sub.lastseq].remove(sub)
+          delivmgr.topic2ptr2subs[topic][sub.curseq].add(sub)
+        previd = sub.lastseq, sub.lastseq = sub.curseq
+        sub.delivernextmessage...
+
+ReadAhead Cache
+================
+
+The delivery manager class is responsible for pushing published messages from 
+the hubs to the subscribers. The most common case is that all subscribers are 
+connected and either caught up, or close to the tail end of the topic. In this 
+case, we don't want the delivery manager to be polling bookkeeper for any newly 
+arrived messages on the topic; new messages should just be pushed to the 
+delivery manager. However, there is also the uncommon case when a subscriber is 
+behind, and messages must be pulled from Bookkeeper.
+
+Since all publishes go through the hub, it is possible to cache the recently 
+published messages in the hub, and then the delivery manager won't have to make 
+the trip to bookkeeper to get the messages but instead get them from local 
+process memory.
+
+These ideas of push, pull, and caching are unified in the following way:
+- A hub has a cache of messages
+
+- When the delivery manager wants to deliver a message, it asks the cache for 
+  it. There are 3 cases:
+  - The message is available in the cache, in which case it is given to the 
+    delivery manager
+  - The message is not present in the cache and the seq-id of the message is 
+    beyond the last message published on that topic (this happens if the 
+    subscriber is totally caught up for that topic). In this case, a stub is put 
+    in the cache in order to notify the delivery manager when that message does 
+    happen to be published.
+  - The message is not in the cache but has been published to the topic. In this 
+    case, a stub is put in the cache, and a read is issued to bookkeeper.
+
+- Whenever a message is published, it is cached. If there is a stub already in 
+  the cache for that message, the delivery manager is notified. 
+
+- Whenever a message is read from bookkeeper, it is cached. There must be a stub 
+  for that message (since reads to bookkeeper are issued only after putting a 
+  stub), so the delivery manager is notified. 
+
+- The cache does readahead, i.e., if a message requested by the delivery manager 
+  is not in the cache, a stub is established not only for that message, but also 
+  for the next n messages where n is configurable (default 10). On a cache hit, 
+  we look ahead n/2 messages, and if that message is not present, we establish 
+  another n/2 stubs. In short, we always ensure that the next n stubs are always 
+  established.
+
+- Over time, the cache will grow in size. There are 2 pruning mechanisms:
+  
+  - Once all subscribers have consumed up to a particular seq-id, they notify 
+    the cache, and all messages up to that seq-id are pruned from the cache.
+  - If the above pruning is not working (e.g., because some subscribers are 
+    down), the cache will eventually hit its size limit which is configurable  
+    (default, half of maximum jvm heap size). At this point, messages are just 
+    pruned in FIFO order. We use the size of the blobs in the message for 
+    estimating the cache size. The assumption is that that size will dominate 
+    over fixed, object-level size overheads. 
+  - Stubs are not purged because according to the above simplification, they are 
+    of 0 size.
+
+Scalability Bottlenecks Down the Road
+=====================================
+
+- Currently each topic subscription is served on a different channel. The number 
+  of channels will become a bottleneck at higher channels. We should switch to 
+  an architecture, where multiple topic subscriptions between the same client, 
+  hub pair should be served on the same channel. We can have commands to start, 
+  stop subscriptions sent all the way to the server (right now these are local).
+- Publishes for a topic are serialized through a hub, to get ordering 
+  guarantees. Currently, all subscriptions to that topic are served from the 
+  same hub. If we start having large number of subscribers to heavy-volume 
+  topics, the outbound bandwidth at the hub, or the CPU at that hub might become 
+  the bottleneck. In that case, we can setup other regions through which the 
+  messages are routed (this hierarchical scheme) reduces bandwidth requirements 
+  at any single node. It should be possible to do this entirely through 
+  configuration.
+

+ 17 - 0
src/contrib/hedwig/doc/doc.txt

@@ -0,0 +1,17 @@
+% Meta-Documentation
+
+In the documentation directory, you'll find:
+
+- `build.txt`: Building Hedwig, or how to set up Hedwig
+- `user.txt`: User's Guide, or how to program against the Hedwig API and how to
+  run it
+- `dev.txt`: Developer's Guide, or Hedwig internals and hacking details
+
+These documents are all written in the [Pandoc] dialect of [Markdown].  This
+makes them readable as plain text files, but also capable of generating HTML or
+LaTeX documentation.
+
+[Pandoc]: http://johnmacfarlane.net/pandoc/
+[Markdown]: http://daringfireball.net/projects/markdown/
+
+Documents are wrapped at 80 chars and use 2-space indentation.

+ 252 - 0
src/contrib/hedwig/doc/user.txt

@@ -0,0 +1,252 @@
+% User's Guide
+% Yang Zhang
+
+Design
+======
+
+In Hedwig, clients publish messages associated with a topic, and they subscribe
+to a topic to receive all messages published with that topic.  Clients are
+associated with (publish to and subscribe from) a Hedwig _instance_ (also
+referred to as a _region_), which consists of a number of servers called
+_hubs_.  The hubs partition up topic ownership among themselves, and all
+publishes and subscribes to a topic must be done to its owning hub.  When a
+client doesn't know the owning hub, it tries a default hub, which may redirect
+the client.
+
+Running a Hedwig instance requires a Zookeeper server and at least three
+Bookkeeper servers.
+
+An instance is designed to run within a datacenter.  For wide-area messaging
+across datacenters, specify in the server configuration the set of default
+servers for each of the other instances.  Dissemination among instances
+currently takes place over an all-to-all topology.  Local subscriptions cause
+the hub to subscribe to all other regions on this topic, so that the local
+region receives all updates to it.  Future work includes allowing the user to
+overlay alternative topologies.
+
+Because all messages on a topic go through a single hub per region, all
+messages within a region are ordered.  This means that, for a given topic,
+messages are delivered in the same order to all subscribers within a region,
+and messages from any particular region are delivered in the same order to all
+subscribers globally, but messages from different regions may be delivered in
+different orders to different regions.  Providing global ordering is
+prohibitively expensive in the wide area.  However, in Hedwig clients such as
+PNUTS, the lack of global ordering is not a problem, as PNUTS serializes all
+updates to a table row at a single designated master for that row.
+
+Topics are independent; Hedwig provides no ordering across different topics.
+
+Version vectors are associated with each topic and serve as the identifiers for
+each message.  Vectors consist of one component per region.  A component value
+is the region's local sequence number on the topic, and is incremented each
+time a hub persists a message (published either locally or remotely) to BK.
+
+TODO: More on how version vectors are to be used, and on maintaining
+vector-maxes.
+
+Entry Points
+============
+
+The main class for running the server is
+`org.apache.hedwig.server.netty.PubSubServer`.  It takes a single argument,
+which is a [Commons Configuration] file.  Currently, for configuration, the
+source is the documentation.  See
+`org.apache.hedwig.server.conf.ServerConfiguration` for server configuration
+parameters.
+
+The client is a library intended to be consumed by user applications.  It takes
+a Commons Configuration object, for which the source/documentation is in
+`org.apache.hedwig.client.conf.ClientConfiguration`.
+
+We have provided a simple client application, `org.apache.hedwig.client.App`,
+that can drive a number of benchmarks.  This also takes a single configuration
+file argument, which is fed to the client library.
+
+We've provided a number of scripts to facilitate running servers and clients
+in a variety of configurations, including over distributed hosts.  These are
+all consolidated in `scripts/hw.bash`.  Although the snippets in this
+documentation run the script from the hedwig main directory, you can run it
+from any location.  Apologies in advance for these being bash scripts; time
+permitting, a more robust and maintainable support/tooling infrastructure would
+be ideal.
+
+[Commons Configuration]: http://commons.apache.org/configuration/
+
+Deployment
+==========
+
+When ssh-ing into a new host, you are requested to verify and accept the host
+key.  In order to automatically accept the host keys for many new hosts
+(dangerous), use:
+
+  hosts="$host1 $host2 ..." scripts/hw.bash warmup
+
+The `hosts` variable is set here to the list of hosts that you would like to
+warm up.
+
+To set up JDK6 on some hosts, use:
+
+  hosts="..." scripts/hw.bash setup-java $path_to_modified_jdk6
+
+The argument must point to a JDK6 binary self-extracting executable, but with
+the `more` command that displays the License agreement replaced with
+`cat`.  Unfortunately, this step must be performed manually.  This script will
+extract the JDK directly into the home directory and update `$PATH` in
+`~/.bashrc` (in an idempotent fashion).
+
+Because the current implementation uses a single socket per subscription, the
+Hedwig launching scripts all require a high `ulimit` on the number of open file
+descriptors.  Non-root users can only use up to the limit specified in
+`/etc/security/limits.conf`; to raise this to 1024^2, run:
+
+  hosts="..." scripts/hw.bash setup-limits
+
+This uses `ssh` so that you need to enter your password for `sudo` just
+once.
+
+For most of the commands presented in the next section, you may prefix the
+command with:
+
+  push_jar=true ...
+
+to first push the assembly jar (assumed to be available in `server/target/`) to
+all hosts.
+
+Running Servers
+===============
+
+To start three BK bookies on ports 3181-3183 on localhost (directories must all
+exist):
+
+  scripts/hw.bash bk 3181 $bk1_journal_dir $bk1_ledger_dir &
+  scripts/hw.bash bk 3182 $bk2_journal_dir $bk2_ledger_dir &
+  scripts/hw.bash bk 3183 $bk3_journal_dir $bk3_ledger_dir &
+
+To start a ZK on port 2181 (directory must exist):
+
+  scripts/hw.bash zk 2181 /path/for/zk/ &
+
+To register the BKs with the ZK (so that Hedwig knows where to find the
+bookies):
+
+  scripts/hw.bash setup-bk localhost:2181 `hostname`:3181 `hostname`:3182 `hostname`:3183
+
+Everything up to this point may be done using a single command over a set of
+hosts, with ZK on port 9877 and BK on port 9878. The following function takes
+2 arguments.  The first is the ZK host.  The second is a string list of BK hosts:
+
+  scripts/hw.bash start-zk-bks $zkhost "$bk1host $bk2host $bk3host ..."
+
+Note that the hosts may be SSH profile aliases in your `~/.ssh/config`; the
+script will parse this file and look up their hostnames where necessary.  This
+applies for the hosts specified in the other commands.
+
+Also, the scripts use the `bk-journal` and `bk-ledger` functions in `hw.bash`
+to determine where to place the BK journal and ledger, given a hostname.
+
+To start a Hedwig server locally:
+
+  scripts/hw.bash hw server.conf &
+
+To start Hedwig servers on some hosts "$hw1host $hw2host $hw3host ..." on port 9876, 
+using $zkhost as the ZK server:
+
+  scripts/hw.bash start-hw '' "$hw1host $hw2host $hw3host ..." $zkhost
+
+Above, the first empty string argument is the list of default servers to each 
+of the other regions.  You may run multiple connected instances of Hedwig this way.  
+E.g., to start three regions each with a single Hedwig hub that talk to each other,
+and using the hw.bash default server ports of 9875 (non-SSL) and 9876 (SSL):
+
+  scripts/hw.bash start-hw "$hw2host:9875:9876 $hw3host:9875:9876" "$hw1host" $zk1host
+  scripts/hw.bash start-hw "$hw1host:9875:9876 $hw3host:9875:9876" "$hw2host" $zk2host
+  scripts/hw.bash start-hw "$hw1host:9875:9876 $hw2host:9875:9876" "$hw3host" $zk3host
+
+Everything up to this point may be done using a single command over a set of
+hosts:
+
+  scripts/hw.bash start-region '' "$hw1host $hw2host $hw3host ..." $zkhost "$bk1host $bk2host $bk3host ..."
+
+The first three arguments are the same as for `start-hw`.
+
+You may start multiple regions as well:
+
+  scripts/hw.bash start-regions regions.cfg
+
+"regions.cfg" is a list of all regions, one per line, with each region having 
+the following format:
+
+  region=<Region name>, hub=<list of hub servers>, default=<single hub server>, zk=<single ZK server>, bk=<list of BK servers>
+
+This will create all of the regions with an all-to-all topology. Each region 
+is connected to the default hub server of every other region. The ", " delimiter
+is used to separate out the different parts of a region along with the hard-coded 
+parameter names. There also needs to be a newline after the last region line.
+Here is an example file specifying three regions:
+
+  region=wilbur, hub=wilbur90 wilbur91, default=wilbur90, zk=wilbur93, bk=wilbur93 wilbur94 wilbur95
+  region=re1, hub=sherpa7 sherpa8, default=sherpa7, zk=sherpa9, bk=sherpa9 sherpa10 sherpa11
+  region=peanuts, hub=peanuts1 peanuts2, default=peanuts2, zk=peanuts3, bk=peanuts3 peanuts4 peanuts5
+
+Running the Client
+==================
+
+To run the test client:
+
+  JAVAFLAGS="..." scripts/hw.bash hwc $conf_path
+
+where `$conf_path` is a client configuration file.
+
+To run the test client on some other hosts:
+
+  hosts="..." JAVAFLAGS="..." scripts/hw.bash app $hwhost
+
+This will generate a simple configuration file assuming $hwhost is listening on
+the default SSL and non-SSL ports which are specified as global variables in hw.bash.
+Currently these are 9875 for non-SSL and 9876 for SSL.
+
+Client usage is currently documented in the source.  To run a subscription
+benchmark, set `JAVAFLAGS` to:
+
+  -Dmode=sub -Dcount=10000 -Dnpar=100 -Dstart=5 -Dnwarmups=30
+
+This will first create 30 warm-up subscriptions to topics "warmup-5" through
+"warmup-34", then 10,000 benchmarked subscriptions to topics "topic-5" through
+"topic-10,004".  It will have a pipeline depth of 100 requests, meaning that
+there will be at most 100 outstanding (unresponded) messages in flight at any
+moment.
+
+To run a publishing benchmark, set `JAVAFLAGS` to:
+
+  -Dmode=pub -Dcount=10000 -Dnpar=100 -Dstart=5
+
+This will publish 10,000 messages to topic "topic-5", with a pipeline depth of
+100 requests.
+
+At the end, the programs will print throughput and latency information.
+
+Utilities
+=========
+
+To kill all the user's Java processes running on some machines, use:
+
+  hosts="..." scripts/hw.bash dkill
+
+To check if any processes are running and are using ports of interest (esp.
+9876-9878):
+
+  hosts="..." scripts/hw.bash dstatus
+
+Add an argument to `dstatus` (may be anything) to get a more detailed listing.
+
+To check if there's anything consuming the CPU on some machines:
+
+  hosts="..." scripts/hw.bash tops
+
+To run an arbitrary command on multiple hosts in parallel:
+
+  hosts="..." scripts/hw.bash parssh $command
+
+To do this in sequence:
+
+  hosts="..." xargs= scripts/hw.bash parssh $command

+ 286 - 0
src/contrib/hedwig/formatter.xml

@@ -0,0 +1,286 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<profiles version="11">
+<profile kind="CodeFormatterProfile" name="Hedwig" version="11">
+<setting id="org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_field" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_ellipsis" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_multiple_fields" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_conditional_expression" value="80"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_binary_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_array_initializer" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_after_package" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.continuation_indentation" value="2"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_binary_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_package" value="0"/>
+<setting id="org.eclipse.jdt.core.compiler.source" value="1.5"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_line_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.join_wrapped_lines" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_member_type" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.align_type_members_on_columns" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_unary_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.indent_parameter_description" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.lineSplit" value="120"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.indentation.size" value="4"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_assignment" value="0"/>
+<setting id="org.eclipse.jdt.core.compiler.problem.assertIdentifier" value="error"/>
+<setting id="org.eclipse.jdt.core.formatter.tabulation.char" value="space"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_statements_compare_to_body" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_method" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_switch" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.compiler.problem.enumIdentifier" value="error"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_ellipsis" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_block" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_method_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.compact_else_if" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_enum_constant" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.indent_root_tags" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.tabulation.size" value="4"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_empty_lines" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_block_in_case" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression" value="16"/>
+<setting id="org.eclipse.jdt.core.compiler.compliance" value="1.5"/>
+<setting id="org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer" value="2"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_unary_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_binary_expression" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode" value="enabled"/>
+<setting id="org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_javadoc_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.line_length" value="80"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_between_import_groups" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_semicolon" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.wrap_before_binary_operator" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_statements_compare_to_block" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.join_lines_in_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_compact_if" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_imports" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_html" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_source_code" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer" value="insert"/>
+<setting id="org.eclipse.jdt.core.compiler.codegen.targetPlatform" value="1.5"/>
+<setting id="org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_member" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_header" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_block_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_enum_constants" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_type_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_after_imports" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line" value="false"/>
+</profile>
+</profiles>

+ 68 - 0
src/contrib/hedwig/pom.xml

@@ -0,0 +1,68 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.hedwig</groupId>
+  <version>1.0-SNAPSHOT</version>
+  <artifactId>hedwig</artifactId>
+  <packaging>pom</packaging>
+  <name>hedwig</name>
+  <modules>
+    <module>client</module>
+    <module>server</module>
+    <module>protocol</module>
+  </modules>
+  <repositories>
+  </repositories>
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <configuration>
+          <source>1.6</source>
+          <target>1.6</target>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+  <reporting>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jxr-plugin</artifactId>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <version>2.1</version>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-pmd-plugin</artifactId>
+        <version>2.3</version>
+        <configuration>
+          <linkXref>true</linkXref>
+          <targetJdk>1.6</targetJdk>
+        </configuration>
+      </plugin>
+    </plugins>
+  </reporting>
+</project>

+ 26 - 0
src/contrib/hedwig/protocol/Makefile

@@ -0,0 +1,26 @@
+# Builds the C++ protobuf bindings for the Hedwig wire protocol.
+# Generated sources are written under TARGET_DIR and compiled in place.
+TARGET_DIR = target/generated-sources/cpp
+PROTO_DIR = src/main/protobuf
+
+INCLUDES = -I$(TARGET_DIR)
+
+CXX = g++
+CXXFLAGS = -g $(INCLUDES)
+
+#-----File Dependencies----------------------
+PROTO = PubSubProtocol.proto
+# protoc mirrors the input path under --cpp_out, so the generated file lands at
+# $(TARGET_DIR)/$(PROTO_DIR)/PubSubProtocol.pb.cc.  (The stray "$/" path
+# component, an undefined make variable that expanded to nothing, is removed.)
+SRC = $(TARGET_DIR)/$(PROTO_DIR)/$(addsuffix .pb.cc, $(basename $(PROTO)))
+OBJ = $(addsuffix .o, $(basename $(SRC)))
+
+all: $(OBJ)
+
+$(SRC):
+	mkdir -p $(TARGET_DIR); protoc --cpp_out=$(TARGET_DIR) $(PROTO_DIR)/$(PROTO)
+
+# Use the $@ automatic variable so the output name is correct even if
+# SRC/OBJ ever list more than one file.
+$(OBJ): $(SRC)
+	$(CXX) $(CXXFLAGS) -c $< -o $@
+
+clean:
+	rm -rf $(TARGET_DIR)

+ 77 - 0
src/contrib/hedwig/protocol/pom.xml

@@ -0,0 +1,77 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hedwig</groupId>
+    <artifactId>hedwig</artifactId>
+    <version>1.0-SNAPSHOT</version>
+  </parent>
+  <groupId>org.apache.hedwig</groupId>
+  <artifactId>protocol</artifactId>
+  <packaging>jar</packaging>
+  <version>1.0-SNAPSHOT</version>
+  <name>protocol</name>
+  <url>http://maven.apache.org</url>
+  <dependencies>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+      <version>2.3.0</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>4.8.1</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+  <repositories>
+  </repositories>
+  <build>
+    <defaultGoal>install</defaultGoal>
+    <plugins>
+      <plugin>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>generate-sources</id>
+            <phase>generate-sources</phase>
+            <configuration>
+              <tasks>
+                <mkdir dir="target/generated-sources/java" />
+                <mkdir dir="target/generated-sources/cpp" />
+                <exec executable="protoc" failonerror="true">
+                  <arg value="--java_out=target/generated-sources/java" />
+                  <arg value="--cpp_out=target/generated-sources/cpp" />
+                  <arg value="src/main/protobuf/PubSubProtocol.proto" />
+                </exec>
+              </tasks>
+              <sourceRoot>target/generated-sources/java</sourceRoot>
+            </configuration>
+            <goals>
+              <goal>run</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>

+ 162 - 0
src/contrib/hedwig/protocol/src/main/java/org/apache/hedwig/exceptions/PubSubException.java

@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.exceptions;
+
+import java.util.Collection;
+
+import org.apache.hedwig.protocol.PubSubProtocol.StatusCode;
+
+@SuppressWarnings("serial")
+public abstract class PubSubException extends Exception {
+    protected StatusCode code;
+
+    protected PubSubException(StatusCode code, String msg) {
+        super(msg);
+        this.code = code;
+    }
+
+    protected PubSubException(StatusCode code, Exception e) {
+        super(e);
+        this.code = code;
+    }
+
+    public static PubSubException create(StatusCode code, String msg) {
+        if (code == StatusCode.CLIENT_ALREADY_SUBSCRIBED) {
+            return new ClientAlreadySubscribedException(msg);
+        } else if (code == StatusCode.CLIENT_NOT_SUBSCRIBED) {
+            return new ClientNotSubscribedException(msg);
+        } else if (code == StatusCode.MALFORMED_REQUEST) {
+            return new MalformedRequestException(msg);
+        } else if (code == StatusCode.NO_SUCH_TOPIC) {
+            return new NoSuchTopicException(msg);
+        } else if (code == StatusCode.NOT_RESPONSIBLE_FOR_TOPIC) {
+            return new ServerNotResponsibleForTopicException(msg);
+        } else if (code == StatusCode.SERVICE_DOWN) {
+            return new ServiceDownException(msg);
+        } else if (code == StatusCode.COULD_NOT_CONNECT) {
+            return new CouldNotConnectException(msg);
+        }
+        /*
+         * Insert new ones here
+         */
+        else if (code == StatusCode.UNCERTAIN_STATE) {
+            return new UncertainStateException(msg);
+        }
+        // Finally the catch all exception (for unexpected error conditions)
+        else {
+            return new UnexpectedConditionException("Unknow status code:" + code.getNumber() + ", msg: " + msg);
+        }
+    }
+
+    public StatusCode getCode() {
+        return code;
+    }
+
+    public static class ClientAlreadySubscribedException extends PubSubException {
+        public ClientAlreadySubscribedException(String msg) {
+            super(StatusCode.CLIENT_ALREADY_SUBSCRIBED, msg);
+        }
+    }
+
+    public static class ClientNotSubscribedException extends PubSubException {
+        public ClientNotSubscribedException(String msg) {
+            super(StatusCode.CLIENT_NOT_SUBSCRIBED, msg);
+        }
+    }
+
+    public static class MalformedRequestException extends PubSubException {
+        public MalformedRequestException(String msg) {
+            super(StatusCode.MALFORMED_REQUEST, msg);
+        }
+    }
+
+    public static class NoSuchTopicException extends PubSubException {
+        public NoSuchTopicException(String msg) {
+            super(StatusCode.NO_SUCH_TOPIC, msg);
+        }
+    }
+
+    public static class ServerNotResponsibleForTopicException extends PubSubException {
+        // Note the exception message serves as the name of the responsible host
+        public ServerNotResponsibleForTopicException(String responsibleHost) {
+            super(StatusCode.NOT_RESPONSIBLE_FOR_TOPIC, responsibleHost);
+        }
+    }
+
+    public static class TopicBusyException extends PubSubException {
+        public TopicBusyException(String msg) {
+            super(StatusCode.TOPIC_BUSY, msg);
+        }
+    }
+
+    public static class ServiceDownException extends PubSubException {
+        public ServiceDownException(String msg) {
+            super(StatusCode.SERVICE_DOWN, msg);
+        }
+
+        public ServiceDownException(Exception e) {
+            super(StatusCode.SERVICE_DOWN, e);
+        }
+    }
+
+    public static class CouldNotConnectException extends PubSubException {
+        public CouldNotConnectException(String msg) {
+            super(StatusCode.COULD_NOT_CONNECT, msg);
+        }
+    }
+
+    /*
+     * Insert new ones here
+     */
+    public static class UncertainStateException extends PubSubException {
+        public UncertainStateException(String msg) {
+            super(StatusCode.UNCERTAIN_STATE, msg);
+        }
+    }
+
+    // The catch all exception (for unexpected error conditions)
+    public static class UnexpectedConditionException extends PubSubException {
+        public UnexpectedConditionException(String msg) {
+            super(StatusCode.UNEXPECTED_CONDITION, msg);
+        }
+    }
+    
+    // The composite exception (for concurrent operations).
+    public static class CompositeException extends PubSubException {
+        private final Collection<PubSubException> exceptions;
+        public CompositeException(Collection<PubSubException> exceptions) {
+            super(StatusCode.COMPOSITE, "composite exception");
+            this.exceptions = exceptions;
+        }
+        public Collection<PubSubException> getExceptions() {
+            return exceptions;
+        }
+        @Override
+        public String toString() {
+            StringBuilder builder = new StringBuilder();
+            builder.append(super.toString()).append('\n');
+            for (PubSubException exception : exceptions)
+                builder.append(exception).append('\n');
+            return builder.toString();
+        }
+    }
+
+    public static class ClientNotSubscribedRuntimeException extends RuntimeException {
+    }
+
+}

+ 153 - 0
src/contrib/hedwig/protocol/src/main/java/org/apache/hedwig/protoextensions/MessageIdUtils.java

@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hedwig.protoextensions;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.google.protobuf.ByteString;
+import org.apache.hedwig.protocol.PubSubProtocol.Message;
+import org.apache.hedwig.exceptions.PubSubException.UnexpectedConditionException;
+import org.apache.hedwig.protocol.PubSubProtocol.MessageSeqId;
+import org.apache.hedwig.protocol.PubSubProtocol.RegionSpecificSeqId;
+
+public class MessageIdUtils {
+
+    public static String msgIdToReadableString(MessageSeqId seqId) {
+        StringBuilder sb = new StringBuilder();
+        sb.append("local:");
+        sb.append(seqId.getLocalComponent());
+
+        String separator = ";";
+        for (RegionSpecificSeqId regionId : seqId.getRemoteComponentsList()) {
+            sb.append(separator);
+            sb.append(regionId.getRegion().toStringUtf8());
+            sb.append(':');
+            sb.append(regionId.getSeqId());
+        }
+        return sb.toString();
+    }
+
+    public static Map<ByteString, RegionSpecificSeqId> inMapForm(MessageSeqId msi) {
+        Map<ByteString, RegionSpecificSeqId> map = new HashMap<ByteString, RegionSpecificSeqId>();
+
+        for (RegionSpecificSeqId lmsid : msi.getRemoteComponentsList()) {
+            map.put(lmsid.getRegion(), lmsid);
+        }
+
+        return map;
+    }
+
+    public static boolean areEqual(MessageSeqId m1, MessageSeqId m2) {
+
+        if (m1.getLocalComponent() != m2.getLocalComponent()) {
+            return false;
+        }
+
+        if (m1.getRemoteComponentsCount() != m2.getRemoteComponentsCount()) {
+            return false;
+        }
+
+        Map<ByteString, RegionSpecificSeqId> m2map = inMapForm(m2);
+
+        for (RegionSpecificSeqId lmsid1 : m1.getRemoteComponentsList()) {
+            RegionSpecificSeqId lmsid2 = m2map.get(lmsid1.getRegion());
+            if (lmsid2 == null) {
+                return false;
+            }
+            if (lmsid1.getSeqId() != lmsid2.getSeqId()) {
+                return false;
+            }
+        }
+
+        return true;
+
+    }
+
+    public static Message mergeLocalSeqId(Message.Builder messageBuilder, long localSeqId) {
+        MessageSeqId.Builder msidBuilder = MessageSeqId.newBuilder(messageBuilder.getMsgId());
+        msidBuilder.setLocalComponent(localSeqId);
+        messageBuilder.setMsgId(msidBuilder);
+        return messageBuilder.build();
+    }
+
+    public static Message mergeLocalSeqId(Message orginalMessage, long localSeqId) {
+        return mergeLocalSeqId(Message.newBuilder(orginalMessage), localSeqId);
+    }
+
+    /**
+     * Compares two seq numbers represented as lists of longs.
+     * 
+     * @param l1
+     * @param l2
+     * @return 1 if the l1 is greater, 0 if they are equal, -1 if l2 is greater
+     * @throws UnexpectedConditionException
+     *             If the lists are of unequal length
+     */
+    public static int compare(List<Long> l1, List<Long> l2) throws UnexpectedConditionException {
+        if (l1.size() != l2.size()) {
+            throw new UnexpectedConditionException("Seq-ids being compared have different sizes: " + l1.size()
+                    + " and " + l2.size());
+        }
+
+        for (int i = 0; i < l1.size(); i++) {
+            long v1 = l1.get(i);
+            long v2 = l2.get(i);
+
+            if (v1 == v2) {
+                continue;
+            }
+
+            return v1 > v2 ? 1 : -1;
+        }
+
+        // All components equal
+        return 0;
+    }
+
+    /**
+     * Returns the element-wise vector maximum of the two vectors id1 and id2,
+     * if we imagine them to be sparse representations of vectors.
+     */
+    public static void takeRegionMaximum(MessageSeqId.Builder newIdBuilder, MessageSeqId id1, MessageSeqId id2) {
+        Map<ByteString, RegionSpecificSeqId> id2Map = MessageIdUtils.inMapForm(id2);
+
+        for (RegionSpecificSeqId rrsid1 : id1.getRemoteComponentsList()) {
+            ByteString region = rrsid1.getRegion();
+
+            RegionSpecificSeqId rssid2 = id2Map.get(region);
+
+            if (rssid2 == null) {
+                newIdBuilder.addRemoteComponents(rrsid1);
+                continue;
+            }
+
+            newIdBuilder.addRemoteComponents((rrsid1.getSeqId() > rssid2.getSeqId()) ? rrsid1 : rssid2);
+
+            // remove from map
+            id2Map.remove(region);
+        }
+
+        // now take the remaining components in the map and add them
+        for (RegionSpecificSeqId rssid2 : id2Map.values()) {
+            newIdBuilder.addRemoteComponents(rssid2);
+        }
+
+    }
+}

Algunos archivos no se mostraron porque demasiados archivos cambiaron en este cambio