
ZOOKEEPER-1038. Move bookkeeper and hedwig code in subversion

git-svn-id: https://svn.apache.org/repos/asf/zookeeper/trunk@1087133 13f79535-47bb-0310-9956-ffa450edef68
Benjamin Reed 14 years ago
parent
commit 7faeaa12f4
100 changed files with 0 additions and 18,244 deletions
  1. +0 -62  src/contrib/bookkeeper/README.txt
  2. +0 -137  src/contrib/bookkeeper/benchmark/org/apache/bookkeeper/benchmark/MySqlClient.java
  3. +0 -252  src/contrib/bookkeeper/benchmark/org/apache/bookkeeper/benchmark/TestClient.java
  4. +0 -72  src/contrib/bookkeeper/conf/log4j.properties
  5. +0 -545  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/Bookie.java
  6. +0 -81  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/BookieException.java
  7. +0 -168  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/BufferedChannel.java
  8. +0 -487  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/EntryLogger.java
  9. +0 -124  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/FileInfo.java
  10. +0 -536  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/LedgerCache.java
  11. +0 -133  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/LedgerDescriptor.java
  12. +0 -151  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/LedgerEntryPage.java
  13. +0 -147  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/MarkerFileChannel.java
  14. +0 -126  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/AsyncCallback.java
  15. +0 -249  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/BKException.java
  16. +0 -410  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/BookKeeper.java
  17. +0 -204  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/BookieWatcher.java
  18. +0 -50  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/CRC32DigestManager.java
  19. +0 -184  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/DigestManager.java
  20. +0 -61  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/DistributionSchedule.java
  21. +0 -167  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/LedgerCreateOp.java
  22. +0 -80  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/LedgerDeleteOp.java
  23. +0 -83  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/LedgerEntry.java
  24. +0 -547  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/LedgerHandle.java
  25. +0 -198  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/LedgerMetadata.java
  26. +0 -140  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/LedgerOpenOp.java
  27. +0 -178  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/LedgerRecoveryOp.java
  28. +0 -67  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/MacDigestManager.java
  29. +0 -138  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/PendingAddOp.java
  30. +0 -170  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/PendingReadOp.java
  31. +0 -87  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/RoundRobinDistributionSchedule.java
  32. +0 -85  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/SyncCounter.java
  33. +0 -178  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/proto/BookieClient.java
  34. +0 -75  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/proto/BookieProtocol.java
  35. +0 -209  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/proto/BookieServer.java
  36. +0 -57  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/proto/BookkeeperInternalCallbacks.java
  37. +0 -521  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/proto/NIOServerFactory.java
  38. +0 -573  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/proto/PerChannelBookieClient.java
  39. +0 -148  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/proto/ServerStats.java
  40. +0 -173  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/streaming/LedgerInputStream.java
  41. +0 -147  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/streaming/LedgerOutputStream.java
  42. +0 -763  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/tools/BookKeeperTools.java
  43. +0 -209  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/util/LocalBookKeeper.java
  44. +0 -54  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/util/Main.java
  45. +0 -38  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/util/MathUtils.java
  46. +0 -98  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/util/OrderedSafeExecutor.java
  47. +0 -38  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/util/SafeRunnable.java
  48. +0 -94  src/contrib/bookkeeper/src/java/org/apache/bookkeeper/util/StringUtils.java
  49. +0 -256  src/contrib/bookkeeper/test/org/apache/bookkeeper/test/AsyncLedgerOpsTest.java
  50. +0 -176  src/contrib/bookkeeper/test/org/apache/bookkeeper/test/BaseTestCase.java
  51. +0 -232  src/contrib/bookkeeper/test/org/apache/bookkeeper/test/BookieClientTest.java
  52. +0 -305  src/contrib/bookkeeper/test/org/apache/bookkeeper/test/BookieFailureTest.java
  53. +0 -720  src/contrib/bookkeeper/test/org/apache/bookkeeper/test/BookieReadWriteTest.java
  54. +0 -400  src/contrib/bookkeeper/test/org/apache/bookkeeper/test/BookieRecoveryTest.java
  55. +0 -74  src/contrib/bookkeeper/test/org/apache/bookkeeper/test/CloseTest.java
  56. +0 -178  src/contrib/bookkeeper/test/org/apache/bookkeeper/test/ConcurrentLedgerTest.java
  57. +0 -163  src/contrib/bookkeeper/test/org/apache/bookkeeper/test/LedgerDeleteTest.java
  58. +0 -88  src/contrib/bookkeeper/test/org/apache/bookkeeper/test/LedgerRecoveryTest.java
  59. +0 -117  src/contrib/bookkeeper/test/org/apache/bookkeeper/test/LoopbackClient.java
  60. +0 -60  src/contrib/bookkeeper/test/org/apache/bookkeeper/test/NIOServerFactoryTest.java
  61. +0 -202  src/contrib/hedwig/LICENSE.txt
  62. +0 -2  src/contrib/hedwig/NOTICE.txt
  63. +0 -3  src/contrib/hedwig/README
  64. +0 -73  src/contrib/hedwig/client/pom.xml
  65. +0 -29  src/contrib/hedwig/client/src/main/cpp/Makefile.am
  66. +0 -186  src/contrib/hedwig/client/src/main/cpp/aminclude.am
  67. +0 -1252  src/contrib/hedwig/client/src/main/cpp/c-doc.Doxyfile
  68. +0 -56  src/contrib/hedwig/client/src/main/cpp/config.h.in
  69. +0 -40  src/contrib/hedwig/client/src/main/cpp/configure.ac
  70. +0 -30  src/contrib/hedwig/client/src/main/cpp/hedwig-0.1.pc.in
  71. +0 -45  src/contrib/hedwig/client/src/main/cpp/inc/hedwig/callback.h
  72. +0 -80  src/contrib/hedwig/client/src/main/cpp/inc/hedwig/client.h
  73. +0 -51  src/contrib/hedwig/client/src/main/cpp/inc/hedwig/exceptions.h
  74. +0 -61  src/contrib/hedwig/client/src/main/cpp/inc/hedwig/publish.h
  75. +0 -52  src/contrib/hedwig/client/src/main/cpp/inc/hedwig/subscribe.h
  76. +0 -32  src/contrib/hedwig/client/src/main/cpp/lib/Makefile.am
  77. +0 -420  src/contrib/hedwig/client/src/main/cpp/lib/channel.cpp
  78. +0 -156  src/contrib/hedwig/client/src/main/cpp/lib/channel.h
  79. +0 -57  src/contrib/hedwig/client/src/main/cpp/lib/client.cpp
  80. +0 -376  src/contrib/hedwig/client/src/main/cpp/lib/clientimpl.cpp
  81. +0 -150  src/contrib/hedwig/client/src/main/cpp/lib/clientimpl.h
  82. +0 -166  src/contrib/hedwig/client/src/main/cpp/lib/data.cpp
  83. +0 -99  src/contrib/hedwig/client/src/main/cpp/lib/data.h
  84. +0 -72  src/contrib/hedwig/client/src/main/cpp/lib/eventdispatcher.cpp
  85. +0 -44  src/contrib/hedwig/client/src/main/cpp/lib/eventdispatcher.h
  86. +0 -27  src/contrib/hedwig/client/src/main/cpp/lib/exceptions.cpp
  87. +0 -83  src/contrib/hedwig/client/src/main/cpp/lib/publisherimpl.cpp
  88. +0 -54  src/contrib/hedwig/client/src/main/cpp/lib/publisherimpl.h
  89. +0 -434  src/contrib/hedwig/client/src/main/cpp/lib/subscriberimpl.cpp
  90. +0 -166  src/contrib/hedwig/client/src/main/cpp/lib/subscriberimpl.h
  91. +0 -141  src/contrib/hedwig/client/src/main/cpp/lib/util.cpp
  92. +0 -86  src/contrib/hedwig/client/src/main/cpp/lib/util.h
  93. +0 -49  src/contrib/hedwig/client/src/main/cpp/log4cpp.conf
  94. +0 -111  src/contrib/hedwig/client/src/main/cpp/m4/ax_boost_asio.m4
  95. +0 -252  src/contrib/hedwig/client/src/main/cpp/m4/ax_boost_base.m4
  96. +0 -149  src/contrib/hedwig/client/src/main/cpp/m4/ax_boost_thread.m4
  97. +0 -533  src/contrib/hedwig/client/src/main/cpp/m4/ax_doxygen.m4
  98. +0 -49  src/contrib/hedwig/client/src/main/cpp/scripts/log4cxx.conf
  99. +0 -64  src/contrib/hedwig/client/src/main/cpp/scripts/network-delays.sh
  100. +0 -49  src/contrib/hedwig/client/src/main/cpp/scripts/server-control.sh

+ 0 - 62
src/contrib/bookkeeper/README.txt

@@ -1,62 +0,0 @@
-BookKeeper README
-
-1- Overview
-BookKeeper is a highly available logging service. As many critical services rely upon write-ahead logs to provide persistence along with high performance, an alternative for making such a service highly available despite the failures of individual servers is to offload write-ahead logs to an external service.
-
-This is exactly what BookKeeper provides. With BookKeeper, a service (or application) writes to a set of servers dedicated to storing such logs. An example of such an application is the Namenode of the Hadoop Distributed File System. 
-
-The main components of BookKeeper are:
-* Client: Applications interact with BookKeeper through the interface of a BookKeeper client;
-* Ledger: A ledger is our equivalent to a log file. Clients read entries from and write entries to ledgers;  
-* Bookie: Bookies are BookKeeper servers and they store the content of ledgers. Typically there are multiple bookies implementing a ledger.
-
-2- How to compile
-Run "ant" from "trunk/contrib/bookkeeper". This will generate the bookkeeper jar in "trunk/build/contrib/bookkeeper".
-
-3- Setting up
-
-A typical BookKeeper configuration includes a set of bookies and a ZooKeeper ensemble, where ZooKeeper stores metadata for BookKeeper. For example, BookKeeper clients learn which bookies are available by consulting the ZooKeeper service.
-
-To set up BookKeeper, follow these steps:
-* Once bookies and ZooKeeper servers are running, create two znodes: "/ledgers" and "/ledgers/available" (a ZooKeeper client sketch for this step follows this list).
-* To run a bookie, run the java class "org.apache.bookkeeper.proto.BookieServer". It takes 3 parameters: a port, one directory path for transaction logs, and one directory path for indexes and data. Here is an example: java -cp .:bookkeeper.jar:../ZooKeeper/zookeeper-<version>.jar:/usr/local/apache-log4j-1.2.15/log4j-1.2.15.jar -Dlog4j.configuration=log4j.properties org.apache.bookkeeper.proto.BookieServer 3181 /disk1/bk/ /disk2/bk/
-* For each bookie b, if <host> is the host name of b and <port> is the bookie port, then create a znode "/ledgers/available/<host>:<port>".
-* The system is now ready to run!
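
The two parent znodes above can be created with any ZooKeeper client; the following is a minimal sketch of that step, assuming a local ensemble at localhost:2181, a 10-second session timeout, and a placeholder class name (none of which are part of the original instructions). The zk.create() calls mirror the plain ZooKeeper API already used by Bookie.java in this change.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;

public class LedgerZnodeSetup {
    public static void main(String[] args) throws Exception {
        // Placeholder connect string and session timeout; point this at your ensemble.
        ZooKeeper zk = new ZooKeeper("localhost:2181", 10000, new Watcher() {
            public void process(WatchedEvent event) { /* connection events ignored in this sketch */ }
        });
        // Persistent parent znodes under which BookKeeper keeps its metadata.
        zk.create("/ledgers", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        zk.create("/ledgers/available", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        zk.close();
    }
}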
-
-For test purposes, there is a class named "org.apache.bookkeeper.util.LocalBookKeeper" which runs a configurable number of BookKeeper servers, along with a ZooKeeper server, on a single node. A typical invocation would be:
-java -cp <classpath> org.apache.bookkeeper.util.LocalBookKeeper <number-of-bookies>
-
-4- Developing applications
-
-BookKeeper is written in Java. When implementing an application that uses BookKeeper, follow these steps:
-
-a. Instantiate a BookKeeper object. The single parameter to the BookKeeper constructor is a list of ZooKeeper servers;
-b. Once we have a BookKeeper object, we can create a ledger with createLedger. The default call to createLedger takes a single parameter, which is supposed to be for password authentication, but currently it has no effect. A call to createLedger returns a ledger handle (type LedgerHandle);
-c. Once we have a ledger, we can write to the ledger by calling either addEntry or asyncAddEntry. The first call is synchronous, whereas the second call is asynchronous, and both write byte arrays as entries. To use the asynchronous version, the application has to implement the AddCallback interface;
-d. Ideally, once the application finishes writing to the ledger, it should close it by calling close on the ledger handle. If it doesn't, BookKeeper will try to recover the ledger when a client next opens it. Closing the ledger properly avoids this recovery step; doing so is recommended but not mandatory;
-e. Before reading from a ledger, a client has to open it by calling openLedger on a BookKeeper object, and then call readEntries or asyncReadEntries to read entries. Both read calls take as input two entry numbers, n1 and n2, and return all entries from n1 through n2. (An asynchronous variant is sketched after the example below.)
-
-Here is a simple example of a method that creates a BookKeeper object, creates a ledger, writes an entry to the ledger, and closes it:
-
-BookKeeper bk;
-LedgerHandle lh;
-
-public void allInOne(String servers) throws KeeperException, IOException, InterruptedException{
-        bk = new BookKeeper(servers);
-        try{
-          lh = bk.createLedger(new byte[] {'a', 'b'});
-          bk.addEntry(lh, new byte[]{'a', 'b'});
-          bk.close(lh);
-        } catch (BKException e) {
-            e.printStackTrace();
-        }
-    }
-
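
For the asynchronous calls in steps (c) and (e), the application supplies callback objects. The following is an illustrative sketch modeled on the benchmark TestClient removed in this change (asyncAddEntry, asyncReadEntries, closeLedger, and the AddCallback/ReadCallback interfaces are taken from that file); the class name, password, and payload are placeholders, and a real client would wait for the callbacks before reading back or closing the ledger.

import java.util.Enumeration;

import org.apache.bookkeeper.client.AddCallback;
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.BookKeeper;
import org.apache.bookkeeper.client.LedgerEntry;
import org.apache.bookkeeper.client.LedgerHandle;
import org.apache.bookkeeper.client.ReadCallback;

public class AsyncExample implements AddCallback, ReadCallback {
    BookKeeper bk;
    LedgerHandle lh;

    public void writeAndReadAsync(String servers) throws Exception {
        bk = new BookKeeper(servers);
        lh = bk.createLedger(new byte[] {'a', 'b'});
        // Asynchronous write; addComplete() fires once the write quorum has acknowledged the entry.
        bk.asyncAddEntry(lh, "hello".getBytes(), this, null);
        // Asynchronous read of entry 0; readComplete() receives the entries.
        try {
            bk.asyncReadEntries(lh, 0, 0, this, null);
        } catch (BKException e) {
            e.printStackTrace();
        }
        // A real client would wait for both callbacks before closing.
        bk.closeLedger(lh);
    }

    public void addComplete(int rc, long ledgerId, long entryId, Object ctx) {
        // rc is a BKException return code; 0 indicates success.
    }

    public void readComplete(int rc, long ledgerId, Enumeration<LedgerEntry> seq, Object ctx) {
        while (seq.hasMoreElements()) {
            System.out.println(new String(seq.nextElement().getEntry()));
        }
    }
}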
-5- Selecting quorum mode and number of bookies (advanced)
-
-There are two methods to store ledgers with BookKeeper:
-
-a. Self-verifying: Each entry includes a digest that is used to guarantee that, upon a read, the value read is the same as the one written. This mode requires n > 2t bookies and quorums of size t + 1, where n is the number of bookies and t is the number of bookie failures tolerated. By default, a call to createLedger uses this mode with 3 servers;
-b. Generic: Entries do not include a digest, and this mode requires more replicas: n > 3t bookies and quorums of size 2t + 1.
-
-The quorum mode and number of bookies can be selected through the createLedger method.
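
As a concrete illustration (a sketch only; the overloaded createLedger signature and the QMode enum are taken from the benchmark TestClient removed in this change, and the sizes shown are one valid self-verifying configuration for n = 3, t = 1):

import org.apache.bookkeeper.client.BookKeeper;
import org.apache.bookkeeper.client.LedgerHandle;
import org.apache.bookkeeper.client.LedgerHandle.QMode;

public class QuorumExample {
    // Ensemble of 3 bookies with write quorums of size 2 (t + 1 with t = 1),
    // using the self-verifying mode described above.
    public static LedgerHandle createVerifiableLedger(BookKeeper bk) throws Exception {
        return bk.createLedger(3, 2, QMode.VERIFIABLE, new byte[] {'a', 'b'});
    }
}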

+ 0 - 137
src/contrib/bookkeeper/benchmark/org/apache/bookkeeper/benchmark/MySqlClient.java

@@ -1,137 +0,0 @@
-package org.apache.bookkeeper.benchmark;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.FileOutputStream;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.HashMap;
-
-import org.apache.bookkeeper.client.BookKeeper;
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.bookkeeper.client.QuorumEngine;
-import org.apache.log4j.Logger;
-
-
-import org.apache.zookeeper.KeeperException;
-
-public class MySqlClient {
-	static Logger LOG = Logger.getLogger(QuorumEngine.class);
-
-	BookKeeper x;
-	LedgerHandle lh;
-	Integer entryId;
-	HashMap<Integer, Integer> map;
-
-	FileOutputStream fStream;
-	FileOutputStream fStreamLocal;
-	long start, lastId;
-	Connection con;
-	Statement stmt;
-	
-	
-	public MySqlClient(String hostport, String user, String pass) 
-			throws ClassNotFoundException {
-		entryId = 0;
-		map = new HashMap<Integer, Integer>();
-		Class.forName("com.mysql.jdbc.Driver");
-		// database is named "bookkeeper"
-		String url = "jdbc:mysql://" + hostport + "/bookkeeper";
-		try {
-			con = DriverManager.getConnection(url, user, pass);
-			stmt = con.createStatement();
-			// drop table and recreate it
-			stmt.execute("DROP TABLE IF EXISTS data;");
-			stmt.execute("create table data(transaction_id bigint PRIMARY KEY AUTO_INCREMENT, content TEXT);");
-			LOG.info("Database initialization terminated");
-		} catch (SQLException e) {
-			
-			// TODO Auto-generated catch block
-			e.printStackTrace();
-		}
-	}
-
-	public void closeHandle() throws KeeperException, InterruptedException, SQLException{
-		con.close();
-	}
-	/**
-	 * First parameter is the number of writes
-	 * Second parameter is an integer defining the length of the message
-	 * Third parameter is host:port 
-	 * Fourth parameter is username
-	 * Fifth parameter is password
-	 * @param args
-	 * @throws ClassNotFoundException 
-	 * @throws SQLException 
-	 */
-	public static void main(String[] args) throws ClassNotFoundException, SQLException {		
-		int lenght = Integer.parseInt(args[1]);
-		StringBuilder sb = new StringBuilder();
-		while(lenght-- > 0){
-			sb.append('a');
-		}
-		try {
-			MySqlClient c = new MySqlClient(args[2], args[3], args[4]);
-			c.writeSameEntryBatch(sb.toString().getBytes(), Integer.parseInt(args[0]));
-			c.writeSameEntry(sb.toString().getBytes(), Integer.parseInt(args[0]));
-			c.closeHandle();
-		} catch (NumberFormatException e) {
-			e.printStackTrace();
-		} catch (InterruptedException e) {
-			e.printStackTrace();
-		} catch (KeeperException e) {
-			e.printStackTrace();
-		} 
-
-	}
-
-	/**	
-	 * 	Adds  data entry to the DB 
-	 * 	@param data 	the entry to be written, given as a byte array 
-	 * 	@param times	the number of times the entry should be written on the DB	*/
-	void writeSameEntryBatch(byte[] data, int times) throws InterruptedException, SQLException{
-		start = System.currentTimeMillis();
-		int count = times;
-		String content = new String(data);
-		System.out.println("Data: " + content + ", " + data.length);
-		while(count-- > 0){
-			stmt.addBatch("insert into data(content) values(\"" + content + "\");");
-		}
-		LOG.info("Finished writing batch SQL command in ms: " + (System.currentTimeMillis() - start));
-		start = System.currentTimeMillis();
-		stmt.executeBatch();
-		System.out.println("Finished " + times + " writes in ms: " + (System.currentTimeMillis() - start));       
-		LOG.info("Ended computation");
-	}
-
-	void writeSameEntry(byte[] data, int times) throws InterruptedException, SQLException{
-		start = System.currentTimeMillis();
-		int count = times;
-		String content = new String(data);
-		System.out.println("Data: " + content + ", " + data.length);
-		while(count-- > 0){
-			stmt.executeUpdate("insert into data(content) values(\"" + content + "\");");
-		}
-		System.out.println("Finished " + times + " writes in ms: " + (System.currentTimeMillis() - start));       
-		LOG.info("Ended computation");
-	}
-
-}

+ 0 - 252
src/contrib/bookkeeper/benchmark/org/apache/bookkeeper/benchmark/TestClient.java

@@ -1,252 +0,0 @@
-package org.apache.bookkeeper.benchmark;
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.Enumeration;
-import java.util.HashMap;
-
-import org.apache.bookkeeper.client.AddCallback;
-import org.apache.bookkeeper.client.BKException;
-import org.apache.bookkeeper.client.BookKeeper;
-import org.apache.bookkeeper.client.LedgerEntry;
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.bookkeeper.client.QuorumEngine;
-import org.apache.bookkeeper.client.ReadCallback;
-import org.apache.bookkeeper.client.LedgerHandle.QMode;
-import org.apache.log4j.Logger;
-
-import org.apache.zookeeper.KeeperException;
-
-/**
- * This is a simple test program to compare the performance of writing to
- * BookKeeper and to the local file system.
- * 
- */
-
-public class TestClient 
-    implements AddCallback, ReadCallback{
-    private static final Logger LOG = Logger.getLogger(TestClient.class);
-    
-    BookKeeper x;
-    LedgerHandle lh;
-    Integer entryId;
-    HashMap<Integer, Integer> map;
-    
-    FileOutputStream fStream;
-    FileOutputStream fStreamLocal;
-    long start, lastId;
-    
-    public TestClient() {
-        entryId = 0;
-        map = new HashMap<Integer, Integer>();
-    }
-    
-    public TestClient(String servers) throws KeeperException, IOException, InterruptedException{
-        this();
-        x = new BookKeeper(servers);
-        try{
-        lh = x.createLedger(new byte[] {'a', 'b'});
-        } catch (BKException e) {
-            LOG.error(e.toString());
-        }
-    }
-    
-    public TestClient(String servers, int ensSize, int qSize)
-    throws KeeperException, IOException, InterruptedException{
-        this();
-        x = new BookKeeper(servers);
-        try{
-        lh = x.createLedger(ensSize, qSize, QMode.VERIFIABLE, new byte[] {'a', 'b'});
-        } catch (BKException e) {
-            LOG.error(e.toString());
-        }
-    }
-    
-    public TestClient(FileOutputStream fStream)
-    throws FileNotFoundException {
-        this.fStream = fStream;
-        this.fStreamLocal = new FileOutputStream("./local.log");
-    }
-    
-    
-    public Integer getFreshEntryId(int val){
-        ++this.entryId;
-        synchronized (map) {
-            map.put(this.entryId, val);
-        }
-        return this.entryId;
-    }
-    
-    public boolean removeEntryId(Integer id){
-        boolean retVal = false;
-        synchronized (map) {
-                map.remove(id);
-                retVal = true;
-     
-            if(map.size() == 0) map.notifyAll();
-            else{
-                if(map.size() < 4)
-                    LOG.error(map.toString());
-            }
-        }
-        return retVal;
-    }
-
-    public void closeHandle() throws KeeperException, InterruptedException{
-        x.closeLedger(lh);
-    }
-    /**
-     * The first parameter says whether entries should be written to BookKeeper (0) or to the local
-     * disk (1). The second parameter is an integer defining the length of a ledger entry.
-     * The third parameter is the number of writes.
-     * 
-     * @param args
-     */
-    public static void main(String[] args) {
-        
-        int lenght = Integer.parseInt(args[1]);
-        StringBuilder sb = new StringBuilder();
-        while(lenght-- > 0){
-            sb.append('a');
-        }
-        
-        Integer selection = Integer.parseInt(args[0]);
-        switch(selection){
-        case 0:           
-            StringBuilder servers_sb = new StringBuilder();
-            for (int i = 4; i < args.length; i++){
-                servers_sb.append(args[i] + " ");
-            }
-        
-            String servers = servers_sb.toString().trim().replace(' ', ',');
-            try {
-                TestClient c = new TestClient(servers, Integer.parseInt(args[3]), Integer.parseInt(args[4]));
-                c.writeSameEntryBatch(sb.toString().getBytes(), Integer.parseInt(args[2]));
-                //c.writeConsecutiveEntriesBatch(Integer.parseInt(args[0]));
-                c.closeHandle();
-            } catch (NumberFormatException e) {
-                LOG.error(e);
-            } catch (InterruptedException e) {
-                LOG.error(e);
-            } catch (KeeperException e) {
-                LOG.error(e);
-            } catch (IOException e) {
-                LOG.error(e);
-            }
-            break;
-        case 1:
-            
-            try{
-                TestClient c = new TestClient(new FileOutputStream(args[2]));
-                c.writeSameEntryBatchFS(sb.toString().getBytes(), Integer.parseInt(args[3]));
-            } catch(FileNotFoundException e){
-                LOG.error(e);
-            }
-            break;
-        case 2:
-            break;
-        }
-    }
-
-    void writeSameEntryBatch(byte[] data, int times) throws InterruptedException{
-        start = System.currentTimeMillis();
-        int count = times;
-        LOG.debug("Data: " + new String(data) + ", " + data.length);
-        while(count-- > 0){
-            x.asyncAddEntry(lh, data, this, this.getFreshEntryId(2));
-        }
-        LOG.debug("Finished " + times + " async writes in ms: " + (System.currentTimeMillis() - start));       
-        synchronized (map) {
-            if(map.size() != 0)
-                map.wait();
-        }
-        LOG.debug("Finished processing in ms: " + (System.currentTimeMillis() - start));
-        
-        LOG.debug("Ended computation");
-    }
-    
-    void writeConsecutiveEntriesBatch(int times) throws InterruptedException{
-        start = System.currentTimeMillis();
-        int count = times;
-        while(count-- > 0){
-            byte[] write = new byte[2];
-            int j = count%100;
-            int k = (count+1)%100;
-            write[0] = (byte) j;
-            write[1] = (byte) k;
-            x.asyncAddEntry(lh, write, this, this.getFreshEntryId(2));
-        }
-        LOG.debug("Finished " + times + " async writes in ms: " + (System.currentTimeMillis() - start));       
-        synchronized (map) {
-            if(map.size() != 0)
-                map.wait();
-        }
-        LOG.debug("Finished processing writes (ms): " + (System.currentTimeMillis() - start));
-        
-        Integer mon = Integer.valueOf(0);
-        synchronized(mon){
-            try{
-                x.asyncReadEntries(lh, 1, times - 1, this, mon);
-                mon.wait();
-            } catch (BKException e){
-                LOG.error(e);
-            }
-        }
-        LOG.error("Ended computation");
-    }
-
-    void writeSameEntryBatchFS(byte[] data, int times) {
-        int count = times;
-        LOG.debug("Data: " + data.length + ", " + times);
-        try{
-            start = System.currentTimeMillis();
-            while(count-- > 0){
-                fStream.write(data);
-                fStreamLocal.write(data);
-                fStream.flush();
-            }
-            fStream.close();
-            System.out.println("Finished processing writes (ms): " + (System.currentTimeMillis() - start));
-        } catch(IOException e){
-            LOG.error(e);
-        }
-    }
-        
-   
-    public void addComplete(int rc, long ledgerId, long entryId, Object ctx) {
-        this.removeEntryId((Integer) ctx);
-    }
-   
-    public void readComplete(int rc, long ledgerId, Enumeration<LedgerEntry> seq, Object ctx){
-        System.out.println("Read callback: " + rc);
-        while(seq.hasMoreElements()){
-            LedgerEntry le = seq.nextElement();
-            LOG.debug(new String(le.getEntry()));
-        }
-        synchronized(ctx){
-            ctx.notify();
-        }
-    }
-}

+ 0 - 72
src/contrib/bookkeeper/conf/log4j.properties

@@ -1,72 +0,0 @@
-#
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# 
-#
-
-#
-# ZooKeeper Logging Configuration
-#
-
-# Format is "<default threshold> (, <appender>)+"
-
-# DEFAULT: console appender only
-log4j.rootLogger=INFO, CONSOLE
-
-# Example with rolling log file
-#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
-
-# Example with rolling log file and tracing
-#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
-
-#
-# Log INFO level and above messages to the console
-#
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.CONSOLE.Threshold=INFO
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p - [%t:%C{1}@%L] - %m%n
-
-#
-# Add ROLLINGFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
-log4j.appender.ROLLINGFILE.Threshold=DEBUG
-log4j.appender.ROLLINGFILE.File=bookkeeper.log
-log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p - [%t:%C{1}@%L] - %m%n
-
-# Max log file size of 10MB
-log4j.appender.ROLLINGFILE.MaxFileSize=10MB
-# uncomment the next line to limit number of backup files
-#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
-
-log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-
-#
-# Add TRACEFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
-log4j.appender.TRACEFILE.Threshold=TRACE
-log4j.appender.TRACEFILE.File=bookkeeper_trace.log
-
-log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
-### Notice we are including log4j's NDC here (%x)
-log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n

+ 0 - 545
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/Bookie.java

@@ -1,545 +0,0 @@
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-package org.apache.bookkeeper.bookie;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.concurrent.LinkedBlockingQueue;
-
-import org.apache.bookkeeper.bookie.BookieException;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback;
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.ZooDefs.Ids;
-
-
-
-/**
- * Implements a bookie.
- *
- */
-
-public class Bookie extends Thread {
-    HashMap<Long, LedgerDescriptor> ledgers = new HashMap<Long, LedgerDescriptor>();
-    static Logger LOG = Logger.getLogger(Bookie.class);
-    
-    final File journalDirectory;
-
-    final File ledgerDirectories[];
-    
-    // ZK registration path for this bookie
-    static final String BOOKIE_REGISTRATION_PATH = "/ledgers/available/";
-
-    // ZooKeeper client instance for the Bookie
-    ZooKeeper zk;
-    
-    // Running flag
-    private volatile boolean running = false;
-
-    public static class NoLedgerException extends IOException {
-        private static final long serialVersionUID = 1L;
-        private long ledgerId;
-        public NoLedgerException(long ledgerId) {
-            this.ledgerId = ledgerId;
-        }
-        public long getLedgerId() {
-            return ledgerId;
-        }
-    }
-    public static class NoEntryException extends IOException {
-        private static final long serialVersionUID = 1L;
-        private long ledgerId;
-        private long entryId;
-        public NoEntryException(long ledgerId, long entryId) {
-            super("Entry " + entryId + " not found in " + ledgerId);
-            this.ledgerId = ledgerId;
-            this.entryId = entryId;
-        }
-        public long getLedger() {
-            return ledgerId;
-        }
-        public long getEntry() {
-            return entryId;
-        }
-    }
-
-    EntryLogger entryLogger;
-    LedgerCache ledgerCache;
-    class SyncThread extends Thread {
-        volatile boolean running = true;
-        public SyncThread() {
-            super("SyncThread");
-        }
-        @Override
-        public void run() {
-            while(running) {
-                synchronized(this) {
-                    try {
-                        wait(100);
-                        if (!entryLogger.testAndClearSomethingWritten()) {
-                            continue;
-                        }
-                    } catch (InterruptedException e) {
-                        Thread.currentThread().interrupt();
-                        continue;
-                    }
-                }
-                lastLogMark.markLog();
-                try {
-                    ledgerCache.flushLedger(true);
-                } catch (IOException e) {
-                    e.printStackTrace();
-                }
-                try {
-                    entryLogger.flush();
-                } catch (IOException e) {
-                    e.printStackTrace();
-                }
-                lastLogMark.rollLog();
-            }
-        }
-    }
-    SyncThread syncThread = new SyncThread();
-    public Bookie(int port, String zkServers, File journalDirectory, File ledgerDirectories[]) throws IOException {
-        instantiateZookeeperClient(port, zkServers);
-        this.journalDirectory = journalDirectory;
-        this.ledgerDirectories = ledgerDirectories;
-        entryLogger = new EntryLogger(ledgerDirectories, this);
-        ledgerCache = new LedgerCache(ledgerDirectories);
-        lastLogMark.readLog();
-        final long markedLogId = lastLogMark.txnLogId;
-        if (markedLogId > 0) {
-            File logFiles[] = journalDirectory.listFiles();
-            ArrayList<Long> logs = new ArrayList<Long>();
-            for(File f: logFiles) {
-                String name = f.getName();
-                if (!name.endsWith(".txn")) {
-                    continue;
-                }
-                String idString = name.split("\\.")[0];
-                long id = Long.parseLong(idString, 16);
-                if (id < markedLogId) {
-                    continue;
-                }
-                logs.add(id);
-            }
-            Collections.sort(logs);
-            if (logs.size() == 0 || logs.get(0) != markedLogId) {
-                throw new IOException("Recovery log " + markedLogId + " is missing");
-            }
-            // TODO: When reading in the journal logs that need to be synced, we
-            // should use BufferedChannels instead to minimize the amount of
-            // system calls done.
-            ByteBuffer lenBuff = ByteBuffer.allocate(4);
-            ByteBuffer recBuff = ByteBuffer.allocate(64*1024);
-            for(Long id: logs) {
-                FileChannel recLog = openChannel(id);
-                while(true) {
-                    lenBuff.clear();
-                    fullRead(recLog, lenBuff);
-                    if (lenBuff.remaining() != 0) {
-                        break;
-                    }
-                    lenBuff.flip();
-                    int len = lenBuff.getInt();
-                    if (len == 0) {
-                        break;
-                    }
-                    recBuff.clear();
-                    if (recBuff.remaining() < len) {
-                        recBuff = ByteBuffer.allocate(len);
-                    }
-                    recBuff.limit(len);
-                    if (fullRead(recLog, recBuff) != len) {
-                        // This seems scary, but it just means that this is where we
-                        // left off writing
-                        break;
-                    }
-                    recBuff.flip();
-                    long ledgerId = recBuff.getLong();
-                    // XXX we need to make sure we set the master keys appropriately!
-                    LedgerDescriptor handle = getHandle(ledgerId, false);
-                    try {
-                        recBuff.rewind();
-                        handle.addEntry(recBuff);
-                    } finally {
-                        putHandle(handle);
-                    }
-                }
-            }
-        }
-        setDaemon(true);
-        LOG.debug("I'm starting a bookie with journal directory " + journalDirectory.getName());
-        start();
-        syncThread.start();
-    }
-
-    /**
-     * Instantiate the ZooKeeper client for the Bookie.
-     */
-    private void instantiateZookeeperClient(int port, String zkServers) throws IOException {
-        if (zkServers == null) {
-            LOG.warn("No ZK servers passed to Bookie constructor so BookKeeper clients won't know about this server!");
-            zk = null;
-            return;
-        }
-        // Create the ZooKeeper client instance
-        zk = new ZooKeeper(zkServers, 10000, new Watcher() {
-            @Override
-            public void process(WatchedEvent event) {
-                // TODO: handle session disconnects and expires
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Process: " + event.getType() + " " + event.getPath());
-                }
-            }
-        });
-        // Create the ZK ephemeral node for this Bookie.
-        try {
-            zk.create(BOOKIE_REGISTRATION_PATH + InetAddress.getLocalHost().getHostAddress() + ":" + port, new byte[0],
-                    Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
-        } catch (Exception e) {
-            LOG.fatal("ZK exception registering ephemeral Znode for Bookie!", e);
-            // Throw an IOException back up. This will cause the Bookie
-            // constructor to error out. Alternatively, we could do a System
-            // exit here as this is a fatal error.
-            throw new IOException(e);
-        }
-    }
-
-    private static int fullRead(FileChannel fc, ByteBuffer bb) throws IOException {
-        int total = 0;
-        while(bb.remaining() > 0) {
-            int rc = fc.read(bb);
-            if (rc <= 0) {
-                return total;
-            }
-            total += rc;
-        }
-        return total;
-    }
-    private void putHandle(LedgerDescriptor handle) {
-        synchronized (ledgers) {
-            handle.decRef();
-        }
-    }
-
-    private LedgerDescriptor getHandle(long ledgerId, boolean readonly, byte[] masterKey) throws IOException {
-        LedgerDescriptor handle = null;
-        synchronized (ledgers) {
-            handle = ledgers.get(ledgerId);
-            if (handle == null) {
-                if (readonly) {
-                    throw new NoLedgerException(ledgerId);
-                }
-                handle = createHandle(ledgerId, readonly);
-                ledgers.put(ledgerId, handle);
-                handle.setMasterKey(ByteBuffer.wrap(masterKey));
-            } 
-            handle.incRef();
-        }
-        return handle;
-    }
-    
-    private LedgerDescriptor getHandle(long ledgerId, boolean readonly) throws IOException {
-        LedgerDescriptor handle = null;
-        synchronized (ledgers) {
-            handle = ledgers.get(ledgerId);
-            if (handle == null) {
-                if (readonly) {
-                    throw new NoLedgerException(ledgerId);
-                }
-                handle = createHandle(ledgerId, readonly);
-                ledgers.put(ledgerId, handle);
-            } 
-            handle.incRef();
-        }
-        return handle;
-    }
-    
-
-    private LedgerDescriptor createHandle(long ledgerId, boolean readOnly) throws IOException {
-        return new LedgerDescriptor(ledgerId, entryLogger, ledgerCache);
-    }
-    
-    static class QueueEntry {
-        QueueEntry(ByteBuffer entry, long ledgerId, long entryId, 
-                WriteCallback cb, Object ctx) {
-            this.entry = entry.duplicate();
-            this.cb = cb;
-            this.ctx = ctx;
-            this.ledgerId = ledgerId;
-            this.entryId = entryId;
-        }
-
-        ByteBuffer entry;
-        
-        long ledgerId;
-        
-        long entryId;
-
-        WriteCallback cb;
-
-        Object ctx;
-    }
-
-    LinkedBlockingQueue<QueueEntry> queue = new LinkedBlockingQueue<QueueEntry>();
-
-    public final static long preAllocSize = 4*1024*1024;
-    
-    public final static ByteBuffer zeros = ByteBuffer.allocate(512);
-    
-    class LastLogMark {
-        long txnLogId;
-        long txnLogPosition;
-        LastLogMark lastMark;
-        LastLogMark(long logId, long logPosition) {
-            this.txnLogId = logId;
-            this.txnLogPosition = logPosition;
-        }
-        synchronized void setLastLogMark(long logId, long logPosition) {
-            txnLogId = logId;
-            txnLogPosition = logPosition;
-        }
-        synchronized void markLog() {
-            lastMark = new LastLogMark(txnLogId, txnLogPosition);
-        }
-        synchronized void rollLog() {
-            byte buff[] = new byte[16];
-            ByteBuffer bb = ByteBuffer.wrap(buff);
-            bb.putLong(txnLogId);
-            bb.putLong(txnLogPosition);
-            for(File dir: ledgerDirectories) {
-                File file = new File(dir, "lastMark");
-                try {
-                    FileOutputStream fos = new FileOutputStream(file);
-                    fos.write(buff);
-                    fos.getChannel().force(true);
-                    fos.close();
-                } catch (IOException e) {
-                    LOG.error("Problems writing to " + file, e);
-                }
-            }
-        }
-        synchronized void readLog() {
-            byte buff[] = new byte[16];
-            ByteBuffer bb = ByteBuffer.wrap(buff);
-            for(File dir: ledgerDirectories) {
-                File file = new File(dir, "lastMark");
-                try {
-                    FileInputStream fis = new FileInputStream(file);
-                    fis.read(buff);
-                    fis.close();
-                    bb.clear();
-                    long i = bb.getLong();
-                    long p = bb.getLong();
-                    if (i > txnLogId) {
-                        txnLogId = i;
-                    }
-                    if (p > txnLogPosition) {
-                        txnLogPosition = p;
-                    }
-                } catch (IOException e) {
-                    LOG.error("Problems reading from " + file + " (this is okay if it is the first time starting this bookie)");
-                }
-            }
-        }
-    }
-    
-    private LastLogMark lastLogMark = new LastLogMark(0, 0);
-    
-    public boolean isRunning(){
-        return running;
-    }
-    
-    @Override
-    public void run() {
-        LinkedList<QueueEntry> toFlush = new LinkedList<QueueEntry>();
-        ByteBuffer lenBuff = ByteBuffer.allocate(4);
-        try {
-            long logId = System.currentTimeMillis();
-            FileChannel logFile = openChannel(logId);
-            BufferedChannel bc = new BufferedChannel(logFile, 65536);
-            zeros.clear();
-            long nextPrealloc = preAllocSize;
-            long lastFlushPosition = 0;
-            logFile.write(zeros, nextPrealloc);
-            running = true;
-            // TODO: Currently, when we roll over the journal logs, the older
-            // ones are never garbage collected. We should remove a journal log
-            // once all of its entries have been synced with the entry logs.
-            while (true) {
-                QueueEntry qe = null;
-                if (toFlush.isEmpty()) {
-                    qe = queue.take();
-                } else {
-                    qe = queue.poll();
-                    if (qe == null || bc.position() > lastFlushPosition + 512*1024) {
-                        //logFile.force(false);
-                        bc.flush(true);
-                        lastFlushPosition = bc.position();
-                        lastLogMark.setLastLogMark(logId, lastFlushPosition);
-                        for (QueueEntry e : toFlush) {
-                            e.cb.writeComplete(0, e.ledgerId, e.entryId, null, e.ctx);
-                        }
-                        toFlush.clear();
-                    }
-                }
-                if (qe == null) {
-                    continue;
-                }
-                lenBuff.clear();
-                lenBuff.putInt(qe.entry.remaining());
-                lenBuff.flip();
-                //
-                // we should be doing the following, but then we run out of
-                // direct byte buffers
-                // logFile.write(new ByteBuffer[] { lenBuff, qe.entry });
-                bc.write(lenBuff);
-                bc.write(qe.entry);
-                if (bc.position() > nextPrealloc) {
-                    nextPrealloc = (logFile.size() / preAllocSize + 1) * preAllocSize;
-                    zeros.clear();
-                    logFile.write(zeros, nextPrealloc);
-                }
-                toFlush.add(qe);
-            }
-        } catch (Exception e) {
-            LOG.fatal("Bookie thread exiting", e);
-        }
-        running = false;
-    }
-
-    private FileChannel openChannel(long logId) throws FileNotFoundException {
-        FileChannel logFile = new RandomAccessFile(new File(journalDirectory,
-                Long.toHexString(logId) + ".txn"),
-                "rw").getChannel();
-        return logFile;
-    }
-
-    public void shutdown() throws InterruptedException {
-        // Shutdown the ZK client
-        if(zk != null) zk.close();
-        this.interrupt();
-        this.join();
-        syncThread.running = false;
-        syncThread.join();
-        for(LedgerDescriptor d: ledgers.values()) {
-            d.close();
-        }
-        // Shutdown the EntryLogger which has the GarbageCollector Thread running
-        entryLogger.shutdown();
-    }
-    
-    public void addEntry(ByteBuffer entry, WriteCallback cb, Object ctx, byte[] masterKey)
-            throws IOException, BookieException {
-        long ledgerId = entry.getLong();
-        LedgerDescriptor handle = getHandle(ledgerId, false, masterKey);
-        
-        if(!handle.cmpMasterKey(ByteBuffer.wrap(masterKey))){
-            throw BookieException.create(BookieException.Code.UnauthorizedAccessException);
-        }
-        try {
-            entry.rewind();
-            long entryId = handle.addEntry(entry);
-            entry.rewind();
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Adding " + entryId + "@" + ledgerId);
-            }
-            queue.add(new QueueEntry(entry, ledgerId, entryId, cb, ctx));
-        } finally {
-            putHandle(handle);
-        }
-    }
-
-    public ByteBuffer readEntry(long ledgerId, long entryId) throws IOException {
-        LedgerDescriptor handle = getHandle(ledgerId, true);
-        try {
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Reading " + entryId + "@" + ledgerId);
-            }
-            return handle.readEntry(entryId);
-        } finally {
-            putHandle(handle);
-        }
-    }
-
-    // The rest of the code is test stuff
-    static class CounterCallback implements WriteCallback {
-        int count;
-
-        synchronized public void writeComplete(int rc, long l, long e, InetSocketAddress addr, Object ctx) {
-            count--;
-            if (count == 0) {
-                notifyAll();
-            }
-        }
-
-        synchronized public void incCount() {
-            count++;
-        }
-
-        synchronized public void waitZero() throws InterruptedException {
-            while (count > 0) {
-                wait();
-            }
-        }
-    }
-
-    /**
-     * @param args
-     * @throws IOException
-     * @throws InterruptedException
-     */
-    public static void main(String[] args) throws IOException,
-            InterruptedException, BookieException {
-        Bookie b = new Bookie(5000, null, new File("/tmp"), new File[] { new File("/tmp") });
-        CounterCallback cb = new CounterCallback();
-        long start = System.currentTimeMillis();
-        for (int i = 0; i < 100000; i++) {
-            ByteBuffer buff = ByteBuffer.allocate(1024);
-            buff.putLong(1);
-            buff.putLong(i);
-            buff.limit(1024);
-            buff.position(0);
-            cb.incCount();
-            b.addEntry(buff, cb, null, new byte[0]);
-        }
-        cb.waitZero();
-        long end = System.currentTimeMillis();
-        System.out.println("Took " + (end-start) + "ms");
-    }
-}

+ 0 - 81
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/BookieException.java

@@ -1,81 +0,0 @@
-package org.apache.bookkeeper.bookie;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
- 
- 
- import java.lang.Exception;
- 
- @SuppressWarnings("serial")
-public abstract class BookieException extends Exception {
-
-    private int code;
-    public BookieException(int code){
-        this.code = code;
-    }
-    
-    public static BookieException create(int code){
-        switch(code){
-        case Code.UnauthorizedAccessException:
-            return new BookieUnauthorizedAccessException();
-        default:
-            return new BookieIllegalOpException();
-        }
-    }
-    
-    public interface Code {
-        int OK = 0;
-        int UnauthorizedAccessException = -1;
-        
-        int IllegalOpException = -100;
-    }
-    
-    public void setCode(int code){
-        this.code = code;
-    }
-    
-    public int getCode(){
-        return this.code;
-    }
-    
-    public String getMessage(int code){
-        switch(code){
-        case Code.OK:
-            return "No problem";
-        case Code.UnauthorizedAccessException:
-            return "Error while reading ledger";
-        default:
-            return "Invalid operation";
-        }
-    }
-    
-    public static class BookieUnauthorizedAccessException extends BookieException {
-        public BookieUnauthorizedAccessException(){
-            super(Code.UnauthorizedAccessException);
-        }   
-    }
-    
-    public static class BookieIllegalOpException extends BookieException {
-        public BookieIllegalOpException(){
-            super(Code.IllegalOpException);
-        }   
-    }
-}

+ 0 - 168
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/BufferedChannel.java

@@ -1,168 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.bookkeeper.bookie;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-
-/**
- * Provides a buffering layer in front of a FileChannel.
- */
-public class BufferedChannel 
-{
-    ByteBuffer writeBuffer;
-    ByteBuffer readBuffer;
-    private FileChannel bc;
-    long position;
-    int capacity;
-    long readBufferStartPosition;
-    long writeBufferStartPosition;
-    BufferedChannel(FileChannel bc, int capacity) throws IOException {
-        this.bc = bc;
-        this.capacity = capacity;
-        position = bc.position();
-        writeBufferStartPosition = position;
-    }
-/*    public void close() throws IOException {
-        bc.close();
-    }
-*/
-//    public boolean isOpen() {
-//        return bc.isOpen();
-//    }
-
-    synchronized public int write(ByteBuffer src) throws IOException {
-        int copied = 0;
-        if (writeBuffer == null) {
-            writeBuffer = ByteBuffer.allocateDirect(capacity);
-        }
-        while(src.remaining() > 0) {
-            int truncated = 0;
-            if (writeBuffer.remaining() < src.remaining()) {
-                truncated = src.remaining() - writeBuffer.remaining();
-                src.limit(src.limit()-truncated);
-            }
-            copied += src.remaining();
-            writeBuffer.put(src);
-            src.limit(src.limit()+truncated);
-            if (writeBuffer.remaining() == 0) {
-                writeBuffer.flip();
-                bc.write(writeBuffer);
-                writeBuffer.clear();
-                writeBufferStartPosition = bc.position();
-            }
-        }
-        position += copied;
-        return copied;
-    }
-    
-    public long position() {
-        return position;
-    }
-    
-    /**
-     * Retrieve the current size of the underlying FileChannel
-     * 
-     * @return FileChannel size measured in bytes
-     * 
-     * @throws IOException if some I/O error occurs reading the FileChannel
-     */
-    public long size() throws IOException {
-        return bc.size();
-    }
-    
-    public void flush(boolean sync) throws IOException {
-        synchronized(this) {
-            if (writeBuffer == null) {
-                return;
-            }
-            writeBuffer.flip();
-            bc.write(writeBuffer);
-            writeBuffer.clear();
-            writeBufferStartPosition = bc.position();
-        }
-        if (sync) {
-            bc.force(false);
-        }
-    }
-
-    /*public Channel getInternalChannel() {
-        return bc;
-    }*/
-    synchronized public int read(ByteBuffer buff, long pos) throws IOException {
-        if (readBuffer == null) {
-            readBuffer = ByteBuffer.allocateDirect(capacity);
-            readBufferStartPosition = Long.MIN_VALUE;
-        }
-        int rc = buff.remaining();
-        while(buff.remaining() > 0) {
-            // check if it is in the write buffer    
-            if (writeBuffer != null && writeBufferStartPosition <= pos) {
-                long positionInBuffer = pos - writeBufferStartPosition;
-                long bytesToCopy = writeBuffer.position()-positionInBuffer;
-                if (bytesToCopy > buff.remaining()) {
-                    bytesToCopy = buff.remaining();
-                }
-                if (bytesToCopy == 0) {
-                    throw new IOException("Read past EOF");
-                }
-                ByteBuffer src = writeBuffer.duplicate();
-                src.position((int) positionInBuffer);
-                src.limit((int) (positionInBuffer+bytesToCopy));
-                buff.put(src);
-                pos+= bytesToCopy;
-                // first check if there is anything we can grab from the readBuffer
-            } else if (readBufferStartPosition <= pos && pos < readBufferStartPosition+readBuffer.capacity()) {
-                long positionInBuffer = pos - readBufferStartPosition;
-                long bytesToCopy = readBuffer.capacity()-positionInBuffer;
-                if (bytesToCopy > buff.remaining()) {
-                    bytesToCopy = buff.remaining();
-                }
-                ByteBuffer src = readBuffer.duplicate();
-                src.position((int) positionInBuffer);
-                src.limit((int) (positionInBuffer+bytesToCopy));
-                buff.put(src);
-                pos += bytesToCopy;
-            // let's read it
-            } else {
-                readBufferStartPosition = pos;
-                readBuffer.clear();
-                // make sure that we don't overlap with the write buffer
-                if (readBufferStartPosition + readBuffer.capacity() >= writeBufferStartPosition) {
-                    readBufferStartPosition = writeBufferStartPosition - readBuffer.capacity();
-                    if (readBufferStartPosition < 0) {
-                        readBuffer.put(LedgerEntryPage.zeroPage, 0, (int)-readBufferStartPosition);
-                    }
-                }
-                while(readBuffer.remaining() > 0) {
-                    if (bc.read(readBuffer, readBufferStartPosition+readBuffer.position()) <= 0) {
-                        throw new IOException("Short read");
-                    }
-                }
-                readBuffer.put(LedgerEntryPage.zeroPage, 0, readBuffer.remaining());
-                readBuffer.clear();
-            }
-        }
-        return rc;
-    }
-}
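
For readers skimming the removed class above, a minimal standalone sketch of the same write-buffering idea follows, using plain java.nio only; the temp file, buffer sizes, and class name are illustrative assumptions and not part of the removed code. It stages writes in a direct buffer and spills to the FileChannel only when the buffer fills or on the final flush.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class WriteBufferSketch {
    public static void main(String[] args) throws IOException {
        Path log = Files.createTempFile("buffered-channel-sketch", ".log"); // illustrative temp file
        try (FileChannel fc = FileChannel.open(log, StandardOpenOption.WRITE)) {
            ByteBuffer writeBuffer = ByteBuffer.allocateDirect(64 * 1024);  // staging buffer
            ByteBuffer src = ByteBuffer.wrap(new byte[100 * 1024]);         // more data than the buffer holds
            while (src.hasRemaining()) {
                // copy as much of src as fits into the staging buffer
                int n = Math.min(writeBuffer.remaining(), src.remaining());
                ByteBuffer slice = src.duplicate();
                slice.limit(slice.position() + n);
                writeBuffer.put(slice);
                src.position(src.position() + n);
                if (!writeBuffer.hasRemaining()) {                          // buffer full: spill to disk
                    writeBuffer.flip();
                    while (writeBuffer.hasRemaining()) {
                        fc.write(writeBuffer);
                    }
                    writeBuffer.clear();
                }
            }
            writeBuffer.flip();                                             // final flush and fsync
            while (writeBuffer.hasRemaining()) {
                fc.write(writeBuffer);
            }
            fc.force(false);
        }
    }
}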

+ 0 - 487
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/EntryLogger.java

@@ -1,487 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.bookkeeper.bookie;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.AsyncCallback;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.Code;
-
-/**
- * This class manages the writing of the bookkeeper entries. All the new
- * entries are written to a common log. The LedgerCache will have pointers
- * into files created by this class with offsets into the files to find
- * the actual ledger entry. The entry log files created by this class are
- * identified by a long.
- */
-public class EntryLogger {
-    private static final Logger LOG = Logger.getLogger(EntryLogger.class);
-    private File dirs[];
-    // This is a handle to the Bookie parent instance. We need this to get
-    // access to the LedgerCache as well as the ZooKeeper client handle.
-    private final Bookie bookie;
-
-    private long logId;
-    /**
-     * The maximum size of an entry logger file.
-     */
-    final static long LOG_SIZE_LIMIT = Long.getLong("logSizeLimit", 2 * 1024 * 1024 * 1024L);
-    private volatile BufferedChannel logChannel;
-    /**
-     * The 1K block at the head of the entry logger file
-     * that contains the fingerprint and (future) meta-data
-     */
-    final static int LOGFILE_HEADER_SIZE = 1024;
-    final ByteBuffer LOGFILE_HEADER = ByteBuffer.allocate(LOGFILE_HEADER_SIZE);
-
-    // this indicates that a write has happened since the last flush
-    private volatile boolean somethingWritten = false;
-
-    // ZK ledgers related String constants
-    static final String LEDGERS_PATH = "/ledgers";
-    static final String LEDGER_NODE_PREFIX = "L";
-    static final String AVAILABLE_NODE = "available";
-
-    // Maps entry log files to the set of ledgers that comprise the file.
-    private ConcurrentMap<Long, ConcurrentHashMap<Long, Boolean>> entryLogs2LedgersMap = new ConcurrentHashMap<Long, ConcurrentHashMap<Long, Boolean>>();
-    // This is the thread that garbage collects the entry logs that do not
-    // contain any active ledgers in them.
-    GarbageCollectorThread gcThread = new GarbageCollectorThread();
-    // This is how often we want to run the Garbage Collector Thread (in milliseconds). 
-    // This should be passed as a System property. Default it to 1000 ms (1sec).
-    final static int gcWaitTime = Integer.getInteger("gcWaitTime", 1000);
-
-    /**
-     * Create an EntryLogger that stores its log files in the given
-     * directories
-     */
-    public EntryLogger(File dirs[], Bookie bookie) throws IOException {
-        this.dirs = dirs;
-        this.bookie = bookie;
-        // Initialize the entry log header buffer. This cannot be a static object
-        // since in our unit tests, we run multiple Bookies and thus EntryLoggers
-        // within the same JVM. All of these Bookie instances access this header
-        // so there can be race conditions when entry logs are rolled over and
-        // this header buffer is cleared before writing it into the new logChannel.
-        LOGFILE_HEADER.put("BKLO".getBytes());
-        // Find the largest logId
-        for(File f: dirs) {
-            long lastLogId = getLastLogId(f);
-            if (lastLogId >= logId) {
-                logId = lastLogId+1;
-            }
-        }
-        createLogId(logId);
-        // Start the Garbage Collector thread to prune unneeded entry logs.
-        gcThread.start();
-    }
-    
-    /**
-     * Maps entry log files to open channels.
-     */
-    private ConcurrentHashMap<Long, BufferedChannel> channels = new ConcurrentHashMap<Long, BufferedChannel>();
-
-    /**
-     * This is the garbage collector thread that runs in the background to
-     * remove any entry log files that no longer contain any active ledgers.
-     */
-    class GarbageCollectorThread extends Thread {
-        volatile boolean running = true;
-
-        public GarbageCollectorThread() {
-            super("GarbageCollectorThread");
-        }
-
-        @Override
-        public void run() {
-            while (running) {
-                synchronized (this) {
-                    try {
-                        wait(gcWaitTime);
-                    } catch (InterruptedException e) {
-                        Thread.currentThread().interrupt();
-                        continue;
-                    }
-                }
-                // Initialization check. No need to run any logic if we are still starting up.
-                if (entryLogs2LedgersMap.isEmpty() || bookie.ledgerCache == null
-                        || bookie.ledgerCache.activeLedgers == null) {
-                    continue;
-                }
-                // First sync ZK to make sure we're reading the latest active/available ledger nodes.
-                bookie.zk.sync(LEDGERS_PATH, new AsyncCallback.VoidCallback() {
-                    @Override
-                    public void processResult(int rc, String path, Object ctx) {
-                        if (rc != Code.OK.intValue()) {
-                            LOG.error("ZK error syncing the ledgers node when getting children: ", KeeperException
-                                    .create(KeeperException.Code.get(rc), path));
-                            return;
-                        }
-                        // Sync has completed successfully so now we can poll ZK 
-                        // and read in the latest set of active ledger nodes.
-                        List<String> ledgerNodes;
-                        try {
-                            ledgerNodes = bookie.zk.getChildren(LEDGERS_PATH, null);
-                        } catch (Exception e) {
-                            LOG.error("Error polling ZK for the available ledger nodes: ", e);
-                            // We should probably wait a certain amount of time before retrying in case of temporary issues.
-                            return;
-                        }
-                        if (LOG.isDebugEnabled()) {
-                            LOG.debug("Retrieved current set of ledger nodes: " + ledgerNodes);
-                        }
-                        // Convert the ZK retrieved ledger nodes to a HashSet for easier comparisons.
-                        HashSet<Long> allActiveLedgers = new HashSet<Long>(ledgerNodes.size(), 1.0f);
-                        for (String ledgerNode : ledgerNodes) {
-                            try {
-                                // The available node is also stored in this path so ignore that.
-                                // That node is the path for the set of available Bookie Servers.
-                                if (ledgerNode.equals(AVAILABLE_NODE))
-                                    continue;
-                                String parts[] = ledgerNode.split(LEDGER_NODE_PREFIX);
-                                allActiveLedgers.add(Long.parseLong(parts[parts.length - 1]));
-                            } catch (NumberFormatException e) {
-                                LOG.fatal("Error extracting ledgerId from ZK ledger node: " + ledgerNode);
-                                // This is a pretty bad error as it indicates a ledger node in ZK
-                                // has an incorrect format. For now just continue and consider
-                                // this as a non-existent ledger.
-                                continue;
-                            }
-                        }
-                        ConcurrentMap<Long, Boolean> curActiveLedgers = bookie.ledgerCache.activeLedgers;
-                        if (LOG.isDebugEnabled()) {
-                            LOG.debug("All active ledgers from ZK: " + allActiveLedgers);
-                            LOG.debug("Current active ledgers from Bookie: " + curActiveLedgers.keySet());
-                        }
-                        // Remove any active ledgers that don't exist in ZK.
-                        for (Long ledger : curActiveLedgers.keySet()) {
-                            if (!allActiveLedgers.contains(ledger)) {
-                                // Remove it from the current active ledgers set and also from all 
-                                // LedgerCache data references to the ledger, i.e. the physical ledger index file.
-                                LOG.info("Removing a non-active/deleted ledger: " + ledger);
-                                curActiveLedgers.remove(ledger);
-                                try {
-                                    bookie.ledgerCache.deleteLedger(ledger);
-                                } catch (IOException e) {
-                                    LOG.error("Exception when deleting the ledger index file on the Bookie: ", e);
-                                }
-                            }
-                        }
-                        // Loop through all of the entry logs and remove the non-active ledgers.
-                        for (Long entryLogId : entryLogs2LedgersMap.keySet()) {
-                            ConcurrentHashMap<Long, Boolean> entryLogLedgers = entryLogs2LedgersMap.get(entryLogId);
-                            for (Long entryLogLedger : entryLogLedgers.keySet()) {
-                                // Remove the entry log ledger from the set if it isn't active.
-                                if (!bookie.ledgerCache.activeLedgers.containsKey(entryLogLedger)) {
-                                    entryLogLedgers.remove(entryLogLedger);
-                                }
-                            }
-                            if (entryLogLedgers.isEmpty()) {
-                                // This means the entry log is not associated with any active ledgers anymore.
-                                // We can remove this entry log file now.
-                                LOG.info("Deleting entryLogId " + entryLogId + " as it has no active ledgers!");
-                                File entryLogFile;
-                                try {
-                                    entryLogFile = findFile(entryLogId);
-                                } catch (FileNotFoundException e) {
-                                    LOG.error("Trying to delete an entryLog file that could not be found: "
-                                            + entryLogId + ".log");
-                                    continue;
-                                }
-                                entryLogFile.delete();
-                                channels.remove(entryLogId);
-                                entryLogs2LedgersMap.remove(entryLogId);
-                            }
-                        }
-                    };
-                }, null);
-            }
-        }
-    }
-    
-    /**
-     * Creates a new log file with the given id.
-     */
-    private void createLogId(long logId) throws IOException {
-        List<File> list = Arrays.asList(dirs);
-        Collections.shuffle(list);
-        File firstDir = list.get(0);
-        if (logChannel != null) {
-            logChannel.flush(true);
-        }
-        logChannel = new BufferedChannel(new RandomAccessFile(new File(firstDir, Long.toHexString(logId)+".log"), "rw").getChannel(), 64*1024);
-        logChannel.write((ByteBuffer) LOGFILE_HEADER.clear());
-        channels.put(logId, logChannel);
-        for(File f: dirs) {
-            setLastLogId(f, logId);
-        }
-        // Extract all of the ledger ID's that comprise all of the entry logs
-        // (except for the current new one which is still being written to).
-        extractLedgersFromEntryLogs();
-    }
-
-    /**
-     * writes the given id to the "lastId" file in the given directory.
-     */
-    private void setLastLogId(File dir, long logId) throws IOException {
-        FileOutputStream fos;
-        fos = new FileOutputStream(new File(dir, "lastId"));
-        BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(fos));
-        try {
-            bw.write(Long.toHexString(logId) + "\n");
-            bw.flush();
-        } finally {
-            try {
-                fos.close();
-            } catch (IOException e) {
-            }
-        }
-    }
-    
-    /**
-     * reads id from the "lastId" file in the given directory.
-     */
-    private long getLastLogId(File f) {
-        FileInputStream fis;
-        try {
-            fis = new FileInputStream(new File(f, "lastId"));
-        } catch (FileNotFoundException e) {
-            return -1;
-        }
-        BufferedReader br = new BufferedReader(new InputStreamReader(fis));
-        try {
-            String lastIdString = br.readLine();
-            return Long.parseLong(lastIdString);
-        } catch (IOException e) {
-            return -1;
-        } catch(NumberFormatException e) {
-            return -1;
-        } finally {
-            try {
-                fis.close();
-            } catch (IOException e) {
-            }
-        }
-    }
-    
-    private void openNewChannel() throws IOException {
-        createLogId(++logId);
-    }
-    
-    synchronized void flush() throws IOException {
-        if (logChannel != null) {
-            logChannel.flush(true);
-        }
-    }
-    synchronized long addEntry(long ledger, ByteBuffer entry) throws IOException {
-        if (logChannel.position() + entry.remaining() + 4 > LOG_SIZE_LIMIT) {
-            openNewChannel();
-        }
-        ByteBuffer buff = ByteBuffer.allocate(4);
-        buff.putInt(entry.remaining());
-        buff.flip();
-        logChannel.write(buff);
-        long pos = logChannel.position();
-        logChannel.write(entry);
-        //logChannel.flush(false);
-        somethingWritten = true;
-        return (logId << 32L) | pos;
-    }
-    
-    byte[] readEntry(long ledgerId, long entryId, long location) throws IOException {
-        long entryLogId = location >> 32L;
-        long pos = location & 0xffffffffL;
-        ByteBuffer sizeBuff = ByteBuffer.allocate(4);
-        pos -= 4; // we want to get the ledgerId and length to check
-        BufferedChannel fc;
-        try {
-            fc = getChannelForLogId(entryLogId);
-        } catch (FileNotFoundException e) {
-            FileNotFoundException newe = new FileNotFoundException(e.getMessage() + " for " + ledgerId + " with location " + location);
-            newe.setStackTrace(e.getStackTrace());
-            throw newe;
-        }
-        if (fc.read(sizeBuff, pos) != sizeBuff.capacity()) {
-            throw new IOException("Short read from entrylog " + entryLogId);
-        }
-        pos += 4;
-        sizeBuff.flip();
-        int entrySize = sizeBuff.getInt();
-        // entrySize does not include the ledgerId
-        if (entrySize > 1024*1024) {
-            LOG.error("Sanity check failed for entry size of " + entrySize + " at location " + pos + " in " + entryLogId);
-            
-        }
-        byte data[] = new byte[entrySize];
-        ByteBuffer buff = ByteBuffer.wrap(data);
-        int rc = fc.read(buff, pos);
-        if ( rc != data.length) {
-            throw new IOException("Short read for " + ledgerId + "@" + entryId + " in " + entryLogId + "@" + pos + "("+rc+"!="+data.length+")");
-        }
-        buff.flip();
-        long thisLedgerId = buff.getLong();
-        if (thisLedgerId != ledgerId) {
-            throw new IOException("problem found in " + entryLogId + "@" + entryId + " at position + " + pos + " entry belongs to " + thisLedgerId + " not " + ledgerId);
-        }
-        long thisEntryId = buff.getLong();
-        if (thisEntryId != entryId) {
-            throw new IOException("problem found in " + entryLogId + "@" + entryId + " at position + " + pos + " entry is " + thisEntryId + " not " + entryId);
-        }
-        
-        return data;
-    }
-    
-    private BufferedChannel getChannelForLogId(long entryLogId) throws IOException {
-        BufferedChannel fc = channels.get(entryLogId);
-        if (fc != null) {
-            return fc;
-        }
-        File file = findFile(entryLogId);
-        FileChannel newFc = new RandomAccessFile(file, "rw").getChannel();
-        // If the file already exists before creating a BufferedChannel layer above it,
-        // set the FileChannel's position to the end so the write buffer knows where to start.
-        newFc.position(newFc.size());
-        synchronized (channels) {
-            fc = channels.get(entryLogId);
-            if (fc != null){
-                newFc.close();
-                return fc;
-            }
-            fc = new BufferedChannel(newFc, 8192);
-            channels.put(entryLogId, fc);
-            return fc;
-        }
-    }
-
-    private File findFile(long logId) throws FileNotFoundException {
-        for(File d: dirs) {
-            File f = new File(d, Long.toHexString(logId)+".log");
-            if (f.exists()) {
-                return f;
-            }
-        }
-        throw new FileNotFoundException("No file for log " + Long.toHexString(logId));
-    }
-    
-    synchronized public boolean testAndClearSomethingWritten() {
-        try {
-            return somethingWritten;
-        } finally {
-            somethingWritten = false;
-        }
-    }
-
-    /**
-     * Method to read in all of the entry logs (those we have not yet read),
-     * and find the set of ledger ID's that make up each entry log file.
-     */
-    private void extractLedgersFromEntryLogs() throws IOException {
-        // Extract it for every entry log except for the current one.
-        // An entry log ID is just a long value that starts at 0 and increments
-        // by 1 when the log fills up and we roll to a new one.
-        ByteBuffer sizeBuff = ByteBuffer.allocate(4);
-        BufferedChannel bc;
-        for (long entryLogId = 0; entryLogId < logId; entryLogId++) {
-            // Comb the current entry log file if it has not already been extracted.
-            if (entryLogs2LedgersMap.containsKey(entryLogId)) {
-                continue;
-            }
-            LOG.info("Extracting the ledgers from entryLogId: " + entryLogId);
-            // Get the BufferedChannel for the current entry log file
-            try {
-                bc = getChannelForLogId(entryLogId);
-            } catch (FileNotFoundException e) {
-                // If we can't find the entry log file, just log a warning message and continue.
-                // This could be a deleted/garbage collected entry log.
-                LOG.warn("Entry Log file not found in log directories: " + entryLogId + ".log");
-                continue;
-            }
-            // Start the read position in the current entry log file to be after
-            // the header where all of the ledger entries are.
-            long pos = LOGFILE_HEADER_SIZE;
-            ConcurrentHashMap<Long, Boolean> entryLogLedgers = new ConcurrentHashMap<Long, Boolean>();
-            // Read through the entry log file and extract the ledger ID's.
-            while (true) {
-                // Check if we've finished reading the entry log file.
-                if (pos >= bc.size()) {
-                    break;
-                }
-                if (bc.read(sizeBuff, pos) != sizeBuff.capacity()) {
-                    throw new IOException("Short read from entrylog " + entryLogId);
-                }
-                pos += 4;
-                sizeBuff.flip();
-                int entrySize = sizeBuff.getInt();
-                if (entrySize > 1024 * 1024) {
-                    LOG.error("Sanity check failed for entry size of " + entrySize + " at location " + pos + " in "
-                            + entryLogId);
-                }
-                byte data[] = new byte[entrySize];
-                ByteBuffer buff = ByteBuffer.wrap(data);
-                int rc = bc.read(buff, pos);
-                if (rc != data.length) {
-                    throw new IOException("Short read for entryLog " + entryLogId + "@" + pos + "(" + rc + "!="
-                            + data.length + ")");
-                }
-                buff.flip();
-                long ledgerId = buff.getLong();
-                entryLogLedgers.put(ledgerId, true);
-                // Advance position to the next entry and clear sizeBuff.
-                pos += entrySize;
-                sizeBuff.clear();
-            }
-            LOG.info("Retrieved all ledgers that comprise entryLogId: " + entryLogId + ", values: " + entryLogLedgers);
-            entryLogs2LedgersMap.put(entryLogId, entryLogLedgers);
-        }
-    }
-
-    /**
-     * Shutdown method to gracefully stop all threads spawned in this class and exit.
-     * 
-     * @throws InterruptedException if there is an exception stopping threads.
-     */
-    public void shutdown() throws InterruptedException {
-        gcThread.running = false;
-        gcThread.interrupt();
-        gcThread.join();
-    }
-
-}
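
The removed addEntry()/readEntry() pair above packs an entry's location into a single long: the high 32 bits hold the entry log id and the low 32 bits hold the byte offset within that log. Below is a small standalone sketch of that encoding; the class and method names are illustrative, not from the removed code.

public class EntryLocationSketch {
    // high 32 bits = entry log id, low 32 bits = byte offset inside that log
    static long encode(long logId, long offsetInLog) {
        return (logId << 32L) | offsetInLog;
    }

    static long logIdOf(long location) {
        return location >> 32L;
    }

    static long offsetOf(long location) {
        return location & 0xffffffffL;
    }

    public static void main(String[] args) {
        long location = encode(7L, 123456L);
        System.out.println(logIdOf(location) + " @ " + offsetOf(location)); // prints "7 @ 123456"
    }
}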

+ 0 - 124
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/FileInfo.java

@@ -1,124 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.bookkeeper.bookie;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-
-/**
- * This is the file handle for a ledger's index file that maps entry ids to locations.
- * It is used by LedgerCache.
- */
-class FileInfo {
-    private FileChannel fc;
-    private final File lf;
-    /**
-     * The fingerprint of a ledger index file
-     */
-    private byte header[] = "BKLE\0\0\0\0".getBytes();
-    static final long START_OF_DATA = 1024;
-    private long size;
-    private int useCount;
-    private boolean isClosed;
-    public FileInfo(File lf) throws IOException {
-        this.lf = lf;
-        fc = new RandomAccessFile(lf, "rws").getChannel();
-        size = fc.size();
-        if (size == 0) {
-            fc.write(ByteBuffer.wrap(header));
-        }
-    }
-
-    synchronized public long size() {
-        long rc = size-START_OF_DATA;
-        if (rc < 0) {
-            rc = 0;
-        }
-        return rc;
-    }
-
-    synchronized public int read(ByteBuffer bb, long position) throws IOException {
-        int total = 0;
-        while(bb.remaining() > 0) {
-            int rc = fc.read(bb, position+START_OF_DATA);
-            if (rc <= 0) {
-                throw new IOException("Short read");
-            }
-            total += rc;
-        }
-        return total;
-    }
-
-    synchronized public void close() throws IOException {
-        isClosed = true;
-        if (useCount == 0) {
-            fc.close();
-        }
-    }
-
-    synchronized public long write(ByteBuffer[] buffs, long position) throws IOException {
-        long total = 0;
-        try {
-            fc.position(position+START_OF_DATA);
-            while(buffs[buffs.length-1].remaining() > 0) {
-                long rc = fc.write(buffs);
-                if (rc <= 0) {
-                    throw new IOException("Short write");
-                }
-                total += rc;
-            }
-        } finally {
-            long newsize = position+START_OF_DATA+total;
-            if (newsize > size) {
-                size = newsize;
-            }
-        }
-        return total;
-    }
-
-    synchronized public void use() {
-        useCount++;
-    }
-    
-    synchronized public void release() {
-        useCount--;
-        if (isClosed && useCount == 0) {
-            try {
-                fc.close();
-            } catch (IOException e) {
-                e.printStackTrace();
-            }
-        }
-    }
-    
-    /**
-     * Getter to a handle on the actual ledger index file.
-     * This is used when we are deleting a ledger and want to physically remove the index file.
-     */
-    File getFile() {
-        return lf;
-    }
-
-}
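
The removed FileInfo above reserves a 1024-byte header holding the "BKLE" fingerprint at the start of each ledger index file and addresses all index data relative to START_OF_DATA. A rough sketch of that layout using plain java.nio follows; the temp file and the slot written are illustrative assumptions.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class IndexFileSketch {
    static final long START_OF_DATA = 1024;

    public static void main(String[] args) throws IOException {
        File idx = File.createTempFile("ledger-sketch", ".idx");            // illustrative temp index file
        try (FileChannel fc = new RandomAccessFile(idx, "rw").getChannel()) {
            fc.write(ByteBuffer.wrap("BKLE\0\0\0\0".getBytes()));           // fingerprint at offset 0
            ByteBuffer slot = ByteBuffer.allocate(8);
            slot.putLong(42L);                                              // pretend entry 3 lives at location 42
            slot.flip();
            fc.write(slot, START_OF_DATA + 3 * 8);                          // logical slot 3 -> physical 1024 + 24
        }
        idx.delete();
    }
}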

+ 0 - 536
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/LedgerCache.java

@@ -1,536 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.bookkeeper.bookie;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-
-import org.apache.log4j.Logger;
-
-/**
- * This class maps a ledger entry number into a location (entrylogid, offset) in
- * an entry log file. It does user level caching to more efficiently manage disk
- * head scheduling.
- */
-public class LedgerCache {
-    private final static Logger LOG = Logger.getLogger(LedgerCache.class);
-    
-    final File ledgerDirectories[];
-
-    public LedgerCache(File ledgerDirectories[]) {
-        this.ledgerDirectories = ledgerDirectories;
-        // Retrieve all of the active ledgers.
-        getActiveLedgers();
-    }
-    /**
-     * the list of potentially clean ledgers
-     */
-    LinkedList<Long> cleanLedgers = new LinkedList<Long>();
-    
-    /**
-     * the list of potentially dirty ledgers
-     */
-    LinkedList<Long> dirtyLedgers = new LinkedList<Long>();
-    
-    HashMap<Long, FileInfo> fileInfoCache = new HashMap<Long, FileInfo>();
-    
-    LinkedList<Long> openLedgers = new LinkedList<Long>();
-    
-    // Stores the set of active (non-deleted) ledgers.
-    ConcurrentMap<Long, Boolean> activeLedgers = new ConcurrentHashMap<Long, Boolean>();
-
-    static int OPEN_FILE_LIMIT = 900;
-    static {
-        if (System.getProperty("openFileLimit") != null) {
-            OPEN_FILE_LIMIT = Integer.parseInt(System.getProperty("openFileLimit"));
-        }
-        LOG.info("openFileLimit is " + OPEN_FILE_LIMIT);
-    }
-    
-    // allocate a third of the memory to the page cache
-    private static int pageLimit = (int)(Runtime.getRuntime().maxMemory() / 3) / LedgerEntryPage.PAGE_SIZE;
-    static {
-        LOG.info("maxMemory = " + Runtime.getRuntime().maxMemory());
-        if (System.getProperty("pageLimit") != null) {
-            pageLimit = Integer.parseInt(System.getProperty("pageLimit"));
-        }
-        LOG.info("pageLimit is " + pageLimit);
-    }
-    // The number of pages that have actually been used
-    private int pageCount;
-    HashMap<Long, HashMap<Long,LedgerEntryPage>> pages = new HashMap<Long, HashMap<Long,LedgerEntryPage>>();
-    
-    private void putIntoTable(HashMap<Long, HashMap<Long,LedgerEntryPage>> table, LedgerEntryPage lep) {
-        HashMap<Long, LedgerEntryPage> map = table.get(lep.getLedger());
-        if (map == null) {
-            map = new HashMap<Long, LedgerEntryPage>();
-            table.put(lep.getLedger(), map);
-        }
-        map.put(lep.getFirstEntry(), lep);
-    }
-    
-    private static LedgerEntryPage getFromTable(HashMap<Long, HashMap<Long,LedgerEntryPage>> table, Long ledger, Long firstEntry) {
-        HashMap<Long, LedgerEntryPage> map = table.get(ledger);
-        if (map != null) {
-            return map.get(firstEntry);
-        }
-        return null;
-    }
-    
-   synchronized private LedgerEntryPage getLedgerEntryPage(Long ledger, Long firstEntry, boolean onlyDirty) {
-        LedgerEntryPage lep = getFromTable(pages, ledger, firstEntry);
-        try {
-            if (onlyDirty && lep.isClean()) {
-                return null;
-            }
-            return lep;
-        } finally {
-            if (lep != null) {
-                lep.usePage();
-            }
-        }
-    }
-
-   public void putEntryOffset(long ledger, long entry, long offset) throws IOException {
-        int offsetInPage = (int) (entry%LedgerEntryPage.ENTRIES_PER_PAGES);
-        // find the id of the first entry of the page that has the entry
-        // we are looking for
-        long pageEntry = entry-offsetInPage;
-        LedgerEntryPage lep = getLedgerEntryPage(ledger, pageEntry, false);
-        if (lep == null) {
-            // find a free page
-            lep = grabCleanPage(ledger, pageEntry);
-            updatePage(lep);
-            synchronized(this) {
-                putIntoTable(pages, lep);
-            }
-        }
-        if (lep != null) {
-            lep.setOffset(offset, offsetInPage*8);
-            lep.releasePage();
-            return;
-        }
-    }
-    
-    public long getEntryOffset(long ledger, long entry) throws IOException {
-        int offsetInPage = (int) (entry%LedgerEntryPage.ENTRIES_PER_PAGES);
-        // find the id of the first entry of the page that has the entry
-        // we are looking for
-        long pageEntry = entry-offsetInPage;
-        LedgerEntryPage lep = getLedgerEntryPage(ledger, pageEntry, false);
-        try {
-            if (lep == null) {
-                lep = grabCleanPage(ledger, pageEntry);
-                synchronized(this) {
-                    putIntoTable(pages, lep);
-                }
-                updatePage(lep);
-                
-            }
-            return lep.getOffset(offsetInPage*8);
-        } finally {
-            if (lep != null) {
-                lep.releasePage();
-            }
-        }
-    }
-    
-    static final private String getLedgerName(long ledgerId) {
-        int parent = (int) (ledgerId & 0xff);
-        int grandParent = (int) ((ledgerId & 0xff00) >> 8);
-        StringBuilder sb = new StringBuilder();
-        sb.append(Integer.toHexString(grandParent));
-        sb.append('/');
-        sb.append(Integer.toHexString(parent));
-        sb.append('/');
-        sb.append(Long.toHexString(ledgerId));
-        sb.append(".idx");
-        return sb.toString();
-    }
-    
-    static final private void checkParents(File f) throws IOException {
-        File parent = f.getParentFile();
-        if (parent.exists()) {
-            return;
-        }
-        if (parent.mkdirs() == false) {
-            throw new IOException("Counldn't mkdirs for " + parent);
-        }
-    }
-    
-    static final private Random rand = new Random();
-
-    static final private File pickDirs(File dirs[]) {
-        return dirs[rand.nextInt(dirs.length)];
-    }
-
-    FileInfo getFileInfo(Long ledger, boolean create) throws IOException {
-        synchronized(fileInfoCache) {
-            FileInfo fi = fileInfoCache.get(ledger);
-            if (fi == null) {
-                String ledgerName = getLedgerName(ledger);
-                File lf = null;
-                for(File d: ledgerDirectories) {
-                    lf = new File(d, ledgerName);
-                    if (lf.exists()) {
-                        break;
-                    }
-                    lf = null;
-                }
-                if (lf == null) {
-                    if (!create) {
-                        throw new Bookie.NoLedgerException(ledger);
-                    }
-                    File dir = pickDirs(ledgerDirectories);
-                    lf = new File(dir, ledgerName);
-                    checkParents(lf);
-                    // A new ledger index file has been created for this Bookie.
-                    // Add this new ledger to the set of active ledgers.
-                    if (LOG.isDebugEnabled()) {
-                        LOG.debug("New ledger index file created for ledgerId: " + ledger);
-                    }
-                    activeLedgers.put(ledger, true);
-                }
-                if (openLedgers.size() > OPEN_FILE_LIMIT) {
-                    fileInfoCache.remove(openLedgers.removeFirst()).close();
-                }
-                fi = new FileInfo(lf);
-                fileInfoCache.put(ledger, fi);
-                openLedgers.add(ledger);
-            }
-            if (fi != null) {
-                fi.use();
-            }
-            return fi;
-        }
-    }
-    private void updatePage(LedgerEntryPage lep) throws IOException {
-        if (!lep.isClean()) {
-            throw new IOException("Trying to update a dirty page");
-        }
-        FileInfo fi = null;
-        try {
-            fi = getFileInfo(lep.getLedger(), true);
-            long pos = lep.getFirstEntry()*8;
-            if (pos >= fi.size()) {
-                lep.zeroPage();
-            } else {
-                lep.readPage(fi);
-            }
-        } finally {
-            if (fi != null) {
-                fi.release();
-            }
-        }
-    }
-
-    void flushLedger(boolean doAll) throws IOException {
-        synchronized(dirtyLedgers) {
-            if (dirtyLedgers.isEmpty()) {
-                synchronized(this) {
-                    for(Long l: pages.keySet()) {
-                        if (LOG.isTraceEnabled()) {
-                            LOG.trace("Adding " + Long.toHexString(l) + " to dirty pages");
-                        }
-                        dirtyLedgers.add(l);
-                    }
-                }
-            }
-            if (dirtyLedgers.isEmpty()) {
-                return;
-            }
-            while(!dirtyLedgers.isEmpty()) {
-                Long l = dirtyLedgers.removeFirst();
-                LinkedList<Long> firstEntryList;
-                synchronized(this) {
-                    HashMap<Long, LedgerEntryPage> pageMap = pages.get(l);
-                    if (pageMap == null || pageMap.isEmpty()) {
-                        continue;
-                    }
-                    firstEntryList = new LinkedList<Long>();
-                    for(Map.Entry<Long, LedgerEntryPage> entry: pageMap.entrySet()) {
-                        LedgerEntryPage lep = entry.getValue();
-                        if (lep.isClean()) {
-                            if (LOG.isTraceEnabled()) {
-                                LOG.trace("Page is clean " + lep);
-                            }
-                            continue;
-                        }
-                        firstEntryList.add(lep.getFirstEntry());
-                    }
-                }
-                // Now flush all the pages of a ledger
-                List<LedgerEntryPage> entries = new ArrayList<LedgerEntryPage>(firstEntryList.size());
-                FileInfo fi = null;
-                try {
-                    for(Long firstEntry: firstEntryList) {
-                        LedgerEntryPage lep = getLedgerEntryPage(l, firstEntry, true);
-                        if (lep != null) {
-                            entries.add(lep);
-                        }
-                    }
-                    Collections.sort(entries, new Comparator<LedgerEntryPage>() {
-                        @Override
-                        public int compare(LedgerEntryPage o1, LedgerEntryPage o2) {
-                            return (int)(o1.getFirstEntry()-o2.getFirstEntry());
-                        }});
-                    ArrayList<Integer> versions = new ArrayList<Integer>(entries.size());
-                    fi = getFileInfo(l, true);
-                    int start = 0;
-                    long lastOffset = -1;
-                    for(int i = 0; i < entries.size(); i++) {
-                        versions.add(i, entries.get(i).getVersion());
-                        if (lastOffset != -1 && (entries.get(i).getFirstEntry() - lastOffset) != LedgerEntryPage.ENTRIES_PER_PAGES) {
-                            // send up a sequential list
-                            int count = i - start;
-                            if (count == 0) {
-                                System.out.println("Count cannot possibly be zero!");
-                            }
-                            writeBuffers(l, entries, fi, start, count);
-                            start = i;
-                        }
-                        lastOffset = entries.get(i).getFirstEntry();
-                    }
-                    if (entries.size()-start == 0 && entries.size() != 0) {
-                        System.out.println("Nothing to write, but there were entries!");
-                    }
-                    writeBuffers(l, entries, fi, start, entries.size()-start);
-                    synchronized(this) {
-                        for(int i = 0; i < entries.size(); i++) {
-                            LedgerEntryPage lep = entries.get(i);
-                            lep.setClean(versions.get(i));
-                        }
-                    }
-                } finally {
-                    for(LedgerEntryPage lep: entries) {
-                        lep.releasePage();
-                    }
-                    if (fi != null) {
-                        fi.release();
-                    }
-                }
-                if (!doAll) {
-                    break;
-                }
-                // Yield: if we are doing all the ledgers we don't want to block other flushes that
-                // need to happen
-                try {
-                    dirtyLedgers.wait(1);
-                } catch (InterruptedException e) {
-                    // just pass it on
-                    Thread.currentThread().interrupt();
-                }
-            }
-        }
-    }
-    
-    private void writeBuffers(Long ledger,
-            List<LedgerEntryPage> entries, FileInfo fi,
-            int start, int count) throws IOException {
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Writing " + count + " buffers of " + Long.toHexString(ledger));
-        }
-        if (count == 0) {
-            //System.out.println("Count is zero!");
-            return;
-        }
-        ByteBuffer buffs[] = new ByteBuffer[count];
-        for(int j = 0; j < count; j++) {
-            buffs[j] = entries.get(start+j).getPageToWrite();
-            if (entries.get(start+j).getLedger() != ledger) {
-                throw new IOException("Writing to " + ledger + " but page belongs to " + entries.get(start+j).getLedger());
-            }
-        }
-        long totalWritten = 0;
-        while(buffs[buffs.length-1].remaining() > 0) {
-            long rc = fi.write(buffs, entries.get(start+0).getFirstEntry()*8);
-            if (rc <= 0) {
-                throw new IOException("Short write to ledger " + ledger + " rc = " + rc);
-            }
-            //System.out.println("Wrote " + rc + " to " + ledger);
-            totalWritten += rc;
-        }
-        if (totalWritten != count*LedgerEntryPage.PAGE_SIZE) {
-            throw new IOException("Short write to ledger " + ledger + " wrote " + totalWritten + " expected " + count*LedgerEntryPage.PAGE_SIZE);
-        }
-    }
-    private LedgerEntryPage grabCleanPage(long ledger, long entry) throws IOException {
-        if (entry % LedgerEntryPage.ENTRIES_PER_PAGES != 0) {
-            throw new IllegalArgumentException(entry + " is not a multiple of " + LedgerEntryPage.ENTRIES_PER_PAGES);
-        }
-        synchronized(this) {
-            if (pageCount  < pageLimit) {
-                // let's see if we can allocate something
-                LedgerEntryPage lep = new LedgerEntryPage();
-                lep.setLedger(ledger);
-                lep.setFirstEntry(entry);
-                // note, this will not block since it is a new page
-                lep.usePage();
-                pageCount++;
-                return lep;
-            }
-        }
-        
-        outerLoop:
-        while(true) {
-            synchronized(cleanLedgers) {
-                if (cleanLedgers.isEmpty()) {
-                    flushLedger(false);
-                    synchronized(this) {
-                        for(Long l: pages.keySet()) {
-                            cleanLedgers.add(l);
-                        }
-                    }
-                }
-                synchronized(this) {
-                    Long cleanLedger = cleanLedgers.getFirst();
-                    Map<Long, LedgerEntryPage> map = pages.get(cleanLedger);
-                    if (map == null || map.isEmpty()) {
-                        cleanLedgers.removeFirst();
-                        continue;
-                    }
-                    Iterator<Map.Entry<Long, LedgerEntryPage>> it = map.entrySet().iterator();
-                    LedgerEntryPage lep = it.next().getValue();
-                    while((lep.inUse() || !lep.isClean())) {
-                        if (it.hasNext()) {
-                            continue outerLoop;
-                        }
-                        lep = it.next().getValue();
-                    }
-                    it.remove();
-                    if (map.isEmpty()) {
-                        pages.remove(lep.getLedger());
-                    }
-                    lep.usePage();
-                    lep.zeroPage();
-                    lep.setLedger(ledger);
-                    lep.setFirstEntry(entry);
-                    return lep;
-                }
-            }
-        }
-    }
-
-    public long getLastEntry(long ledgerId) {
-        long lastEntry = 0;
-        // Find the last entry in the cache
-        synchronized(this) {
-            Map<Long, LedgerEntryPage> map = pages.get(ledgerId);
-            if (map != null) {
-                for(LedgerEntryPage lep: map.values()) {
-                    if (lep.getFirstEntry() + LedgerEntryPage.ENTRIES_PER_PAGES < lastEntry) {
-                        continue;
-                    }
-                    lep.usePage();
-                    long highest = lep.getLastEntry();
-                    if (highest > lastEntry) {
-                        lastEntry = highest;
-                    }
-                    lep.releasePage();
-                }
-            }
-        }
-        
-        return lastEntry;
-    }
-
-    /**
-     * This method will look within the ledger directories for the ledger index
-     * files. Those files comprise the set of active ledgers that this particular
-     * BookieServer knows about and that have not yet been deleted by the BookKeeper
-     * Client. This is called only once during initialization.
-     */
-    private void getActiveLedgers() {
-        // Ledger index files are stored in a file hierarchy with a parent and
-        // grandParent directory. We'll have to go two levels deep into these
-        // directories to find the index files.
-        for (File ledgerDirectory : ledgerDirectories) {
-            for (File grandParent : ledgerDirectory.listFiles()) {
-                if (grandParent.isDirectory()) {
-                    for (File parent : grandParent.listFiles()) {
-                        if (parent.isDirectory()) {
-                            for (File index : parent.listFiles()) {
-                                if (!index.isFile() || !index.getName().endsWith(".idx")) {
-                                    continue;
-                                }
-                                // We've found a ledger index file. The file name is the 
-                                // HexString representation of the ledgerId.
-                                String ledgerIdInHex = index.getName().substring(0, index.getName().length() - 4);
-                                activeLedgers.put(Long.parseLong(ledgerIdInHex, 16), true);
-                            }
-                        }
-                    }
-                }
-            }
-        }
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Active ledgers found: " + activeLedgers);
-        }
-    }
-    
-    /**
-     * This method is called whenever a ledger is deleted by the BookKeeper Client
-     * and we want to remove all relevant data for it stored in the LedgerCache.
-     */
-    void deleteLedger(long ledgerId) throws IOException {
-        if (LOG.isDebugEnabled())
-            LOG.debug("Deleting ledgerId: " + ledgerId);
-        // Delete the ledger's index file and close the FileInfo
-        FileInfo fi = getFileInfo(ledgerId, false);
-        fi.getFile().delete();
-        fi.close();
-
-        // Remove it from the activeLedgers set
-        activeLedgers.remove(ledgerId);
-
-        // Now remove it from all the other lists and maps. 
-        // These data structures need to be synchronized first before removing entries. 
-        synchronized(this) {
-            pages.remove(ledgerId);
-        }
-        synchronized(fileInfoCache) {
-            fileInfoCache.remove(ledgerId);
-        }
-        synchronized(cleanLedgers) {
-            cleanLedgers.remove(ledgerId);
-        }
-        synchronized(dirtyLedgers) {
-            dirtyLedgers.remove(ledgerId);
-        }
-        synchronized(openLedgers) {
-            openLedgers.remove(ledgerId);
-        }
-    }
-
-}
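
putEntryOffset()/getEntryOffset() in the removed cache above locate an entry's 8-byte location by rounding the entry id down to the first entry of its page. A tiny sketch of that arithmetic follows; the constants mirror LedgerEntryPage and the sample entry id is arbitrary.

public class PageIndexSketch {
    static final int PAGE_SIZE = 8192;
    static final int ENTRIES_PER_PAGE = PAGE_SIZE / 8;        // 1024 eight-byte locations per page

    public static void main(String[] args) {
        long entry = 5000L;                                   // arbitrary sample entry id
        int offsetInPage = (int) (entry % ENTRIES_PER_PAGE);  // 904
        long pageFirstEntry = entry - offsetInPage;           // 4096: the key of the page in the cache
        int byteOffsetInPage = offsetInPage * 8;              // where the 8-byte location sits in that page
        System.out.println("page " + pageFirstEntry + ", byte offset " + byteOffsetInPage);
    }
}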

+ 0 - 133
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/LedgerDescriptor.java

@@ -1,133 +0,0 @@
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-package org.apache.bookkeeper.bookie;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-
-import org.apache.log4j.Logger;
-
-
-
-/**
- * Implements a ledger inside a bookie. In particular, it implements operations
- * to write entries to a ledger and read entries from a ledger.
- *
- */
-public class LedgerDescriptor {
-    final static Logger LOG = Logger.getLogger(LedgerDescriptor.class);
-    LedgerCache ledgerCache;
-    LedgerDescriptor(long ledgerId, EntryLogger entryLogger, LedgerCache ledgerCache) {
-        this.ledgerId = ledgerId;
-        this.entryLogger = entryLogger;
-        this.ledgerCache = ledgerCache;
-    }
-    
-    private ByteBuffer masterKey = null;
-    
-    void setMasterKey(ByteBuffer masterKey){
-        this.masterKey = masterKey;
-    }
-    
-    boolean cmpMasterKey(ByteBuffer masterKey){
-        return this.masterKey.equals(masterKey);
-    }
-    
-    private long ledgerId;
-    EntryLogger entryLogger;
-    private int refCnt;
-    synchronized public void incRef() {
-        refCnt++;
-    }
-    synchronized public void decRef() {
-        refCnt--;
-    }
-    synchronized public int getRefCnt() {
-        return refCnt;
-    }
-    long addEntry(ByteBuffer entry) throws IOException {
-        long ledgerId = entry.getLong();
-        if (ledgerId != this.ledgerId) {
-            throw new IOException("Entry for ledger " + ledgerId + " was sent to " + this.ledgerId);
-        }
-        long entryId = entry.getLong();
-        entry.rewind();
-        
-        /*
-         * Log the entry
-         */
-        long pos = entryLogger.addEntry(ledgerId, entry);
-        
-        
-        /*
-         * Set offset of entry id to be the current ledger position
-         */
-        ledgerCache.putEntryOffset(ledgerId, entryId, pos);
-        return entryId;
-    }
-    ByteBuffer readEntry(long entryId) throws IOException {
-        long offset;
-        /*
-         * If entryId is -1, then return the last written.
-         */
-        if (entryId == -1) {
-            long lastEntry = ledgerCache.getLastEntry(ledgerId);
-            FileInfo fi = null;
-            try {
-                fi = ledgerCache.getFileInfo(ledgerId, false);
-                long size = fi.size();
-                // we may not have the last entry in the cache
-                if (size > lastEntry*8) {
-                    ByteBuffer bb = ByteBuffer.allocate(LedgerEntryPage.PAGE_SIZE);
-                    long position = size-LedgerEntryPage.PAGE_SIZE;
-                    if (position < 0) {
-                        position = 0;
-                    }
-                    fi.read(bb, position);
-                    bb.flip();
-                    long startingEntryId = position/8;
-                    for(int i = LedgerEntryPage.ENTRIES_PER_PAGES-1; i >= 0; i--) {
-                        if (bb.getLong(i*8) != 0) {
-                            if (lastEntry < startingEntryId+i) {
-                                lastEntry = startingEntryId+i;
-                            }
-                            break;
-                        }
-                    }
-                }
-            } finally {
-                if (fi != null) {
-                    fi.release();
-                }
-            }
-            entryId = lastEntry;
-        }
-        
-        offset = ledgerCache.getEntryOffset(ledgerId, entryId);
-        if (offset == 0) {
-            throw new Bookie.NoEntryException(ledgerId, entryId);
-        }
-        return ByteBuffer.wrap(entryLogger.readEntry(ledgerId, entryId, offset));
-    }
-    void close() {
-    }
-}
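
addEntry() in the removed descriptor above expects each entry buffer to start with two longs, the ledger id and the entry id, followed by the payload, and rewinds the buffer before logging the whole frame. Below is a minimal sketch of building and parsing such a buffer; the payload and ids are arbitrary examples.

import java.nio.ByteBuffer;

public class EntryFrameSketch {
    public static void main(String[] args) {
        byte[] payload = "hello".getBytes();
        ByteBuffer entry = ByteBuffer.allocate(16 + payload.length);
        entry.putLong(1L);                       // ledger id (arbitrary example)
        entry.putLong(0L);                       // entry id (arbitrary example)
        entry.put(payload);
        entry.flip();

        long ledgerId = entry.getLong();         // read the ids back, as the descriptor does...
        long entryId = entry.getLong();
        entry.rewind();                          // ...then rewind before handing the whole frame to the log
        System.out.println(ledgerId + "@" + entryId + ", " + entry.remaining() + " bytes to log");
    }
}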

+ 0 - 151
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/LedgerEntryPage.java

@@ -1,151 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.bookkeeper.bookie;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-
-/**
- * This is a page in the LedgerCache. It holds the locations
- * (entrylogfile, offset) for entry ids.
- */
-public class LedgerEntryPage {
-    public static final int PAGE_SIZE = 8192;
-    public static final int ENTRIES_PER_PAGES = PAGE_SIZE/8;
-    private long ledger = -1;
-    private long firstEntry = -1;
-    private ByteBuffer page = ByteBuffer.allocateDirect(PAGE_SIZE);
-    private boolean clean = true;
-    private boolean pinned = false;
-    private int useCount;
-    private int version;
-    
-    @Override
-    public String toString() {
-        StringBuilder sb = new StringBuilder();
-        sb.append(getLedger());
-        sb.append('@');
-        sb.append(getFirstEntry());
-        sb.append(clean ? " clean " : " dirty ");
-        sb.append(useCount);
-        return sb.toString();
-    }
-    synchronized public void usePage() {
-        useCount++;
-    }
-    synchronized public void pin() {
-        pinned = true;
-    }
-    synchronized public void unpin() {
-        pinned = false;
-    }
-    synchronized public boolean isPinned() {
-        return pinned;
-    }
-    synchronized public void releasePage() {
-        useCount--;
-        if (useCount < 0) {
-            throw new IllegalStateException("Use count has gone below 0");
-        }
-    }
-    synchronized private void checkPage() {
-        if (useCount <= 0) {
-            throw new IllegalStateException("Page not marked in use");
-        }
-    }
-    @Override
-    public boolean equals(Object other) {
-        LedgerEntryPage otherLEP = (LedgerEntryPage) other;
-        return otherLEP.getLedger() == getLedger() && otherLEP.getFirstEntry() == getFirstEntry();
-    }
-    @Override
-    public int hashCode() {
-        return (int)getLedger() ^ (int)(getFirstEntry());
-    }
-    void setClean(int versionOfCleaning) {
-        this.clean = (versionOfCleaning == version);
-    }
-    boolean isClean() {
-        return clean;
-    }
-    public void setOffset(long offset, int position) {
-        checkPage();
-        version++;
-        this.clean = false;
-        page.putLong(position, offset);
-    }
-    public long getOffset(int position) {
-        checkPage();
-        return page.getLong(position);
-    }
-    static final byte zeroPage[] = new byte[64*1024];
-    public void zeroPage() {
-        checkPage();
-        page.clear();
-        page.put(zeroPage, 0, page.remaining());
-        clean = true;
-    }
-    public void readPage(FileInfo fi) throws IOException {
-        checkPage();
-        page.clear();
-        while(page.remaining() != 0) {
-            if (fi.read(page, getFirstEntry()*8) <= 0) {
-                throw new IOException("Short page read of ledger " + getLedger() + " tried to get " + page.capacity() + " from position " + getFirstEntry()*8 + " still need " + page.remaining());
-            }
-        }
-        clean = true;
-    }
-    public ByteBuffer getPageToWrite() {
-        checkPage();
-        page.clear();
-        return page;
-    }
-    void setLedger(long ledger) {
-        this.ledger = ledger;
-    }
-    long getLedger() {
-        return ledger;
-    }
-    int getVersion() {
-        return version;
-    }
-    void setFirstEntry(long firstEntry) {
-        if (firstEntry % ENTRIES_PER_PAGES != 0) {
-            throw new IllegalArgumentException(firstEntry + " is not a multiple of " + ENTRIES_PER_PAGES);
-        }
-        this.firstEntry = firstEntry;
-    }
-    long getFirstEntry() {
-        return firstEntry;
-    }
-    public boolean inUse() {
-        return useCount > 0;
-    }
-    public long getLastEntry() {
-        for(int i = ENTRIES_PER_PAGES - 1; i >= 0; i--) {
-            if (getOffset(i*8) > 0) {
-                return i + firstEntry;
-            }
-        }
-        return 0;
-    }
-}
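Each LedgerEntryPage covers ENTRIES_PER_PAGES consecutive entry ids, one 8-byte offset slot per entry. Below is a hedged sketch of the arithmetic a cache would use to map an entry id to its page and slot; the helper names are illustrative and not taken from the removed LedgerCache.

    public class EntryPageMathExample {
        static final int PAGE_SIZE = 8192;
        static final int ENTRIES_PER_PAGE = PAGE_SIZE / 8;

        // First entry id covered by the page that holds entryId.
        static long pageFirstEntry(long entryId) {
            return (entryId / ENTRIES_PER_PAGE) * ENTRIES_PER_PAGE;
        }

        // Byte position of entryId's offset slot within that page.
        static int positionInPage(long entryId) {
            return (int) (entryId % ENTRIES_PER_PAGE) * 8;
        }

        public static void main(String[] args) {
            long entryId = 2500;
            System.out.println(pageFirstEntry(entryId));  // 2048
            System.out.println(positionInPage(entryId));  // (2500 - 2048) * 8 = 3616
        }
    }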

+ 0 - 147
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/bookie/MarkerFileChannel.java

@@ -1,147 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.bookkeeper.bookie;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.MappedByteBuffer;
-import java.nio.channels.FileChannel;
-import java.nio.channels.FileLock;
-import java.nio.channels.ReadableByteChannel;
-import java.nio.channels.WritableByteChannel;
-
-/**
- * This class is just a stub that can be used as a placeholder in
- * collections of FileChannels.
- */
-public class MarkerFileChannel extends FileChannel {
-
-    @Override
-    public void force(boolean metaData) throws IOException {
-        // TODO Auto-generated method stub
-
-    }
-
-    @Override
-    public FileLock lock(long position, long size, boolean shared)
-            throws IOException {
-        // TODO Auto-generated method stub
-        return null;
-    }
-
-    @Override
-    public MappedByteBuffer map(MapMode mode, long position, long size)
-            throws IOException {
-        // TODO Auto-generated method stub
-        return null;
-    }
-
-    @Override
-    public long position() throws IOException {
-        // TODO Auto-generated method stub
-        return 0;
-    }
-
-    @Override
-    public FileChannel position(long newPosition) throws IOException {
-        // TODO Auto-generated method stub
-        return null;
-    }
-
-    @Override
-    public int read(ByteBuffer dst) throws IOException {
-        // TODO Auto-generated method stub
-        return 0;
-    }
-
-    @Override
-    public int read(ByteBuffer dst, long position) throws IOException {
-        // TODO Auto-generated method stub
-        return 0;
-    }
-
-    @Override
-    public long read(ByteBuffer[] dsts, int offset, int length)
-            throws IOException {
-        // TODO Auto-generated method stub
-        return 0;
-    }
-
-    @Override
-    public long size() throws IOException {
-        // TODO Auto-generated method stub
-        return 0;
-    }
-
-    @Override
-    public long transferFrom(ReadableByteChannel src, long position, long count)
-            throws IOException {
-        // TODO Auto-generated method stub
-        return 0;
-    }
-
-    @Override
-    public long transferTo(long position, long count, WritableByteChannel target)
-            throws IOException {
-        // TODO Auto-generated method stub
-        return 0;
-    }
-
-    @Override
-    public FileChannel truncate(long size) throws IOException {
-        // TODO Auto-generated method stub
-        return null;
-    }
-
-    @Override
-    public FileLock tryLock(long position, long size, boolean shared)
-            throws IOException {
-        // TODO Auto-generated method stub
-        return null;
-    }
-
-    @Override
-    public int write(ByteBuffer src) throws IOException {
-        // TODO Auto-generated method stub
-        return 0;
-    }
-
-    @Override
-    public int write(ByteBuffer src, long position) throws IOException {
-        // TODO Auto-generated method stub
-        return 0;
-    }
-
-    @Override
-    public long write(ByteBuffer[] srcs, int offset, int length)
-            throws IOException {
-        // TODO Auto-generated method stub
-        return 0;
-    }
-
-    @Override
-    protected void implCloseChannel() throws IOException {
-        // TODO Auto-generated method stub
-
-    }
-
-}
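One plausible use of a no-op channel like the one above is as a sentinel value in a map of open file channels. This usage is an assumption for illustration only; it is not shown anywhere in the removed code.

    import java.nio.channels.FileChannel;
    import java.util.concurrent.ConcurrentHashMap;

    import org.apache.bookkeeper.bookie.MarkerFileChannel;

    public class MarkerChannelUsageExample {
        // Hypothetical cache of per-ledger file channels.
        static final ConcurrentHashMap<Long, FileChannel> channels =
                new ConcurrentHashMap<Long, FileChannel>();
        // A shared stub instance marks "slot reserved, real channel not yet opened".
        static final FileChannel MARKER = new MarkerFileChannel();

        // putIfAbsent lets exactly one caller win the right to open the real file.
        static boolean reserve(long ledgerId) {
            return channels.putIfAbsent(ledgerId, MARKER) == null;
        }
    }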

+ 0 - 126
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/AsyncCallback.java

@@ -1,126 +0,0 @@
-package org.apache.bookkeeper.client;
-
-import java.util.Enumeration;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-public interface AsyncCallback {
-  public interface AddCallback {
-    /**
-     * Callback declaration
-     * 
-     * @param rc
-     *          return code
-     * @param lh
-     *          ledger handle
-     * @param entryId
-     *          entry identifier
-     * @param ctx
-     *          control object
-     */
-    void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx);
-  }
-
-  public interface CloseCallback {
-    /**
-     * Callback definition
-     * 
-     * @param rc
-     *          return code
-     * @param lh
-     *          ledger handle
-     * @param ctx
-     *          control object
-     */
-    void closeComplete(int rc, LedgerHandle lh, Object ctx);
-  }
-
-  public interface CreateCallback {
-    /**
-     * Declaration of callback method
-     * 
-     * @param rc
-     *          return status
-     * @param lh
-     *          ledger handle
-     * @param ctx
-     *          control object
-     */
-
-    void createComplete(int rc, LedgerHandle lh, Object ctx);
-  }
-
-  public interface OpenCallback {
-    /**
-     * Callback for asynchronous call to open ledger
-     * 
-     * @param rc
-     *          Return code
-     * @param lh
-     *          ledger handle
-     * @param ctx
-     *          control object
-     */
-
-    public void openComplete(int rc, LedgerHandle lh, Object ctx);
-
-  }
-
-  public interface ReadCallback {
-    /**
-     * Callback declaration
-     * 
-     * @param rc
-     *          return code
-     * @param lh
-     *          ledger handle
-     * @param seq
-     *          sequence of entries
-     * @param ctx
-     *          control object
-     */
-
-    void readComplete(int rc, LedgerHandle lh, Enumeration<LedgerEntry> seq,
-        Object ctx);
-  }
-  
-  public interface DeleteCallback {
-      /**
-       * Callback definition for delete operations
-       * 
-       * @param rc
-       *          return code
-       * @param ctx
-       *          control object
-       */
-      void deleteComplete(int rc, Object ctx);
-    }
-
-  public interface RecoverCallback {
-      /**
-       * Callback definition for bookie recover operations
-       * 
-       * @param rc
-       *          return code
-       * @param ctx
-       *          control object
-       */
-      void recoverComplete(int rc, Object ctx);
-    }
-
-}
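A hedged sketch of implementing two of these callbacks. The callback signatures come from the interfaces above; LedgerEntry#getEntryId() is assumed here, since that class is not shown in full in this diff.

    import java.util.Enumeration;

    import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
    import org.apache.bookkeeper.client.AsyncCallback.ReadCallback;
    import org.apache.bookkeeper.client.BKException;
    import org.apache.bookkeeper.client.LedgerEntry;
    import org.apache.bookkeeper.client.LedgerHandle;

    // Minimal callback implementations; ctx is whatever object was handed to the
    // asynchronous call (treated here as an opaque tag).
    public class CallbackExample implements AddCallback, ReadCallback {

        @Override
        public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) {
            if (rc == BKException.Code.OK) {
                System.out.println("added entry " + entryId + " (" + ctx + ")");
            } else {
                System.err.println("add failed: " + BKException.getMessage(rc));
            }
        }

        @Override
        public void readComplete(int rc, LedgerHandle lh, Enumeration<LedgerEntry> seq, Object ctx) {
            if (rc != BKException.Code.OK) {
                System.err.println("read failed: " + BKException.getMessage(rc));
                return;
            }
            while (seq.hasMoreElements()) {
                // getEntryId() is assumed; LedgerEntry is not shown in full in this diff.
                System.out.println("read entry " + seq.nextElement().getEntryId());
            }
        }
    }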

+ 0 - 249
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/BKException.java

@@ -1,249 +0,0 @@
-package org.apache.bookkeeper.client;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.lang.Exception;
-
-/**
- * Class that enumerates all the possible error conditions
- * 
- */
-
-@SuppressWarnings("serial")
-public abstract class BKException extends Exception {
-
-    private int code;
-
-    BKException(int code) {
-        this.code = code;
-    }
-
-    /**
-     * Create an exception from an error code
-     * @param code return error code
-     * @return corresponding exception
-     */
-    public static BKException create(int code) {
-        switch (code) {
-        case Code.ReadException:
-            return new BKReadException();
-        case Code.QuorumException:
-            return new BKQuorumException();
-        case Code.NoBookieAvailableException:
-            return new BKBookieException();
-        case Code.DigestNotInitializedException:
-            return new BKDigestNotInitializedException();
-        case Code.DigestMatchException:
-            return new BKDigestMatchException();
-        case Code.NotEnoughBookiesException:
-            return new BKNotEnoughBookiesException();
-        case Code.NoSuchLedgerExistsException:
-            return new BKNoSuchLedgerExistsException();
-        case Code.BookieHandleNotAvailableException:
-            return new BKBookieHandleNotAvailableException();
-        case Code.ZKException:
-            return new ZKException();
-        case Code.LedgerRecoveryException:
-            return new BKLedgerRecoveryException();
-        case Code.LedgerClosedException:
-            return new BKLedgerClosedException();
-        case Code.WriteException:
-            return new BKWriteException();
-        case Code.NoSuchEntryException:
-            return new BKNoSuchEntryException();
-        case Code.IncorrectParameterException:
-            return new BKIncorrectParameterException();
-        case Code.InterruptedException:
-            return new BKInterruptedException();
-        default:
-            return new BKIllegalOpException();
-        }
-    }
-
-    /**
-     * List of return codes
-     *
-     */
-    public interface Code {
-        int OK = 0;
-        int ReadException = -1;
-        int QuorumException = -2;
-        int NoBookieAvailableException = -3;
-        int DigestNotInitializedException = -4;
-        int DigestMatchException = -5;
-        int NotEnoughBookiesException = -6;
-        int NoSuchLedgerExistsException = -7;
-        int BookieHandleNotAvailableException = -8;
-        int ZKException = -9;
-        int LedgerRecoveryException = -10;
-        int LedgerClosedException = -11;
-        int WriteException = -12;
-        int NoSuchEntryException = -13;
-        int IncorrectParameterException = -14;
-        int InterruptedException = -15;
-        
-        int IllegalOpException = -100;
-    }
-
-    public void setCode(int code) {
-        this.code = code;
-    }
-
-    public int getCode() {
-        return this.code;
-    }
-
-    public static String getMessage(int code) {
-        switch (code) {
-        case Code.OK:
-            return "No problem";
-        case Code.ReadException:
-            return "Error while reading ledger";
-        case Code.QuorumException:
-            return "Invalid quorum size on ensemble size";
-        case Code.NoBookieAvailableException:
-            return "No bookie available";
-        case Code.DigestNotInitializedException:
-            return "Digest engine not initialized";
-        case Code.DigestMatchException:
-            return "Entry digest does not match";
-        case Code.NotEnoughBookiesException:
-            return "Not enough non-faulty bookies available";
-        case Code.NoSuchLedgerExistsException:
-            return "No such ledger exists";
-        case Code.BookieHandleNotAvailableException:
-            return "Bookie handle is not available";
-        case Code.ZKException:
-            return "Error while using ZooKeeper";
-        case Code.LedgerRecoveryException:
-            return "Error while recovering ledger";
-        case Code.LedgerClosedException:
-            return "Attempt to write to a closed ledger";
-        case Code.WriteException:
-            return "Write failed on bookie";
-        case Code.NoSuchEntryException:
-            return "No such entry";
-        case Code.IncorrectParameterException:
-            return "Incorrect parameter input";
-        case Code.InterruptedException:
-            return "Interrupted while waiting for permit";
-        default:
-            return "Invalid operation";
-        }
-    }
-
-    public static class BKReadException extends BKException {
-        public BKReadException() {
-            super(Code.ReadException);
-        }
-    }
-
-    public static class BKNoSuchEntryException extends BKException {
-        public BKNoSuchEntryException() {
-            super(Code.NoSuchEntryException);
-        }
-    }
-
-    public static class BKQuorumException extends BKException {
-        public BKQuorumException() {
-            super(Code.QuorumException);
-        }
-    }
-
-    public static class BKBookieException extends BKException {
-        public BKBookieException() {
-            super(Code.NoBookieAvailableException);
-        }
-    }
-
-    public static class BKDigestNotInitializedException extends BKException {
-        public BKDigestNotInitializedException() {
-            super(Code.DigestNotInitializedException);
-        }
-    }
-
-    public static class BKDigestMatchException extends BKException {
-        public BKDigestMatchException() {
-            super(Code.DigestMatchException);
-        }
-    }
-
-    public static class BKIllegalOpException extends BKException {
-        public BKIllegalOpException() {
-            super(Code.IllegalOpException);
-        }
-    }
-
-    public static class BKNotEnoughBookiesException extends BKException {
-        public BKNotEnoughBookiesException() {
-            super(Code.NotEnoughBookiesException);
-        }
-    }
-
-    public static class BKWriteException extends BKException {
-        public BKWriteException() {
-            super(Code.WriteException);
-        }
-    }
-
-    public static class BKNoSuchLedgerExistsException extends BKException {
-        public BKNoSuchLedgerExistsException() {
-            super(Code.NoSuchLedgerExistsException);
-        }
-    }
-
-    public static class BKBookieHandleNotAvailableException extends BKException {
-        public BKBookieHandleNotAvailableException() {
-            super(Code.BookieHandleNotAvailableException);
-        }
-    }
-
-    public static class ZKException extends BKException {
-        public ZKException() {
-            super(Code.ZKException);
-        }
-    }
-
-    public static class BKLedgerRecoveryException extends BKException {
-        public BKLedgerRecoveryException() {
-            super(Code.LedgerRecoveryException);
-        }
-    }
-
-    public static class BKLedgerClosedException extends BKException {
-        public BKLedgerClosedException() {
-            super(Code.LedgerClosedException);
-        }
-    }
-    
-    public static class BKIncorrectParameterException extends BKException {
-        public BKIncorrectParameterException() {
-            super(Code.IncorrectParameterException);
-        }
-    }
-    
-    public static class BKInterruptedException extends BKException {
-        public BKInterruptedException() {
-            super(Code.InterruptedException);
-        }
-    }
-}
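A small sketch of how an asynchronous return code might be turned into a thrown exception using the create() and getMessage() helpers above; the wrapper method is illustrative.

    import org.apache.bookkeeper.client.BKException;

    public class ErrorCodeExample {
        // Convert an asynchronous return code into either a success path or a thrown BKException.
        static void checkReturnCode(int rc) throws BKException {
            if (rc == BKException.Code.OK) {
                return;
            }
            System.err.println("operation failed: " + BKException.getMessage(rc));
            throw BKException.create(rc);
        }

        public static void main(String[] args) {
            try {
                checkReturnCode(BKException.Code.NotEnoughBookiesException);
            } catch (BKException e) {
                System.out.println("caught code " + e.getCode());
            }
        }
    }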

+ 0 - 410
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/BookKeeper.java

@@ -1,410 +0,0 @@
-package org.apache.bookkeeper.client;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.io.IOException;
-import java.util.concurrent.Executors;
-
-import org.apache.bookkeeper.client.AsyncCallback.CreateCallback;
-import org.apache.bookkeeper.client.AsyncCallback.DeleteCallback;
-import org.apache.bookkeeper.client.AsyncCallback.OpenCallback;
-import org.apache.bookkeeper.client.BKException.Code;
-import org.apache.bookkeeper.proto.BookieClient;
-import org.apache.bookkeeper.util.OrderedSafeExecutor;
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooKeeper;
-import org.jboss.netty.channel.socket.ClientSocketChannelFactory;
-import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
-
-/**
- * BookKeeper client. We assume there is a single writer to a ledger at any
- * time.
- * 
- * There are four possible operations: start a new ledger, write to a ledger,
- * read from a ledger and delete a ledger.
- * 
- * The exceptions resulting from synchronous calls and error codes resulting from
- * asynchronous calls can be found in the class {@link BKException}.
- * 
- * 
- */
-
-public class BookKeeper implements OpenCallback, CreateCallback, DeleteCallback {
-
-  static final Logger LOG = Logger.getLogger(BookKeeper.class);
-
-  ZooKeeper zk = null;
-  // whether the zk handle is one we created, or is owned by whoever
-  // instantiated us
-  boolean ownZKHandle = false;
-
-  ClientSocketChannelFactory channelFactory;
-  // whether the socket factory is one we created, or is owned by whoever
-  // instantiated us
-  boolean ownChannelFactory = false;
-
-  BookieClient bookieClient;
-  BookieWatcher bookieWatcher;
-
-  OrderedSafeExecutor callbackWorker = new OrderedSafeExecutor(Runtime
-      .getRuntime().availableProcessors());
-  OrderedSafeExecutor mainWorkerPool = new OrderedSafeExecutor(Runtime
-      .getRuntime().availableProcessors());
-
-  /**
-   * Create a bookkeeper client. A zookeeper client and a client socket factory
-   * will be instantiated as part of this constructor.
-   * 
-   * @param servers
-   *          A list of one or more servers on which zookeeper is running. The
-   *          client assumes that the running bookies have been registered with
-   *          zookeeper under the path
-   *          {@link BookieWatcher#BOOKIE_REGISTRATION_PATH}
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws KeeperException
-   */
-  public BookKeeper(String servers) throws IOException, InterruptedException,
-      KeeperException {
-    this(new ZooKeeper(servers, 10000, new Watcher() {
-      @Override
-      public void process(WatchedEvent event) {
-        // TODO: handle session disconnects and expires
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Process: " + event.getType() + " " + event.getPath());
-        }
-      }
-    }), new NioClientSocketChannelFactory(Executors.newCachedThreadPool(),
-        Executors.newCachedThreadPool()));
-
-    ownZKHandle = true;
-    ownChannelFactory = true;
-  }
-
-  /**
-   * Create a bookkeeper client but use the passed in zookeeper client instead
-   * of instantiating one.
-   * 
-   * @param zk
-   *          Zookeeper client instance connected to the zookeeper with which
-   *          the bookies have registered
-   * @throws InterruptedException
-   * @throws KeeperException
-   */
-  public BookKeeper(ZooKeeper zk) throws InterruptedException, KeeperException {
-    this(zk, new NioClientSocketChannelFactory(Executors.newCachedThreadPool(),
-        Executors.newCachedThreadPool()));
-    ownChannelFactory = true;
-  }
-
-  /**
-   * Create a bookkeeper client but use the passed in zookeeper client and
-   * client socket channel factory instead of instantiating those.
-   * 
-   * @param zk
-   *          Zookeeper client instance connected to the zookeeper with which
-   *          the bookies have registered
-   * @param channelFactory
-   *          A factory that will be used to create connections to the bookies
-   * @throws InterruptedException
-   * @throws KeeperException
-   */
-  public BookKeeper(ZooKeeper zk, ClientSocketChannelFactory channelFactory)
-      throws InterruptedException, KeeperException {
-    if (zk == null || channelFactory == null) {
-      throw new NullPointerException();
-    }
-    this.zk = zk;
-    this.channelFactory = channelFactory;
-    bookieWatcher = new BookieWatcher(this);
-    bookieWatcher.readBookiesBlocking();
-    bookieClient = new BookieClient(channelFactory, mainWorkerPool);
-  }
-
-  /**
-   * There are 2 digest types that can be used for verification. The CRC32 is
-   * cheap to compute but does not protect against byzantine bookies (i.e., a
-   * bookie might report fake bytes and a matching CRC32). The MAC code is more
-   * expensive to compute, but is protected by a password, i.e., a bookie can't
-   * report fake bytes with a matching MAC unless it knows the password.
-   */
-  public enum DigestType {
-    MAC, CRC32
-  };
-
-  public ZooKeeper getZkHandle() {
-    return zk;
-  }
-
-  /**
-   * Get the BookieClient, currently used for doing bookie recovery.
-   * 
-   * @return BookieClient for the BookKeeper instance.
-   */
-  public BookieClient getBookieClient() {
-      return bookieClient;
-  }
-  
-  /**
-   * Creates a new ledger asynchronously. To create a ledger, we need to specify
-   * the ensemble size, the quorum size, the digest type, a password, a callback
-   * implementation, and an optional control object. The ensemble size is how
-   * many bookies the entries should be striped among and the quorum size is the
-   * degree of replication of each entry. The digest type is either a MAC or a
-   * CRC. Note that the CRC option is not able to protect a client against a
-   * bookie that replaces an entry. The password is used not only to
-   * authenticate access to a ledger, but also to verify entries in ledgers.
-   * 
-   * @param ensSize
-   *          ensemble size
-   * @param qSize
-   *          quorum size
-   * @param digestType
-   *          digest type, either MAC or CRC32
-   * @param passwd
-   *          password
-   * @param cb
-   *          createCallback implementation
-   * @param ctx
-   *          optional control object
-   */
-  public void asyncCreateLedger(int ensSize, int qSize, DigestType digestType,
-      byte[] passwd, CreateCallback cb, Object ctx) {
-
-    new LedgerCreateOp(this, ensSize, qSize, digestType, passwd, cb, ctx)
-        .initiate();
-
-  }
-
-  /**
-   * Create callback implementation for synchronous create call.
-   * 
-   * @param rc
-   *          return code
-   * @param lh
-   *          ledger handle object
-   * @param ctx
-   *          optional control object
-   */
-  public void createComplete(int rc, LedgerHandle lh, Object ctx) {
-    SyncCounter counter = (SyncCounter) ctx;
-    counter.setLh(lh);
-    counter.setrc(rc);
-    counter.dec();
-  }
-
-  /**
-   * Creates a new ledger, with a default ensemble size of 3 and a quorum size of 2.
-   * 
-   * @param digestType
-   *          digest type, either MAC or CRC32
-   * @param passwd
-   *          password
-   * @return
-   * @throws KeeperException
-   * @throws InterruptedException
-   * @throws BKException
-   */
-  public LedgerHandle createLedger(DigestType digestType, byte passwd[])
-      throws KeeperException, BKException, InterruptedException, IOException {
-    return createLedger(3, 2, digestType, passwd);
-  }
-
-  /**
-   * Synchronous call to create ledger. Parameters match those of
-   * {@link #asyncCreateLedger(int, int, DigestType, byte[], CreateCallback, Object)}
-   * 
-   * @param ensSize
-   * @param qSize
-   * @param digestType
-   * @param passwd
-   * @return
-   * @throws KeeperException
-   * @throws InterruptedException
-   * @throws IOException
-   * @throws BKException
-   */
-  public LedgerHandle createLedger(int ensSize, int qSize,
-      DigestType digestType, byte passwd[]) throws KeeperException,
-      InterruptedException, IOException, BKException {
-    SyncCounter counter = new SyncCounter();
-    counter.inc();
-    /*
-     * Calls asynchronous version
-     */
-    asyncCreateLedger(ensSize, qSize, digestType, passwd, this, counter);
-
-    /*
-     * Wait
-     */
-    counter.block(0);
-    if (counter.getLh() == null) {
-      LOG.error("ZooKeeper error: " + counter.getrc());
-      throw BKException.create(Code.ZKException);
-    }
-
-    return counter.getLh();
-  }
-
-  /**
-   * Open existing ledger asynchronously for reading.
-   * 
-   * @param lId
-   *          ledger identifier
-   * @param digestType
-   *          digest type, either MAC or CRC32
-   * @param passwd
-   *          password
-   * @param ctx
-   *          optional control object
-   */
-  public void asyncOpenLedger(long lId, DigestType digestType, byte passwd[],
-      OpenCallback cb, Object ctx) {
-
-    new LedgerOpenOp(this, lId, digestType, passwd, cb, ctx).initiate();
-
-  }
-
-  /**
-   * Callback method for synchronous open operation
-   * 
-   * @param rc
-   *          return code
-   * @param lh
-   *          ledger handle
-   * @param ctx
-   *          optional control object
-   */
-  public void openComplete(int rc, LedgerHandle lh, Object ctx) {
-    SyncCounter counter = (SyncCounter) ctx;
-    counter.setLh(lh);
-
-    LOG.debug("Open complete: " + rc);
-
-    counter.setrc(rc);
-    counter.dec();
-  }
-
-  /**
-   * Synchronous open ledger call
-   * 
-   * @param lId
-   *          ledger identifier
-   * @param digestType
-   *          digest type, either MAC or CRC32
-   * @param passwd
-   *          password
-   * @return
-   * @throws InterruptedException
-   * @throws BKException
-   */
-
-  public LedgerHandle openLedger(long lId, DigestType digestType, byte passwd[])
-      throws BKException, InterruptedException {
-    SyncCounter counter = new SyncCounter();
-    counter.inc();
-
-    /*
-     * Calls async open ledger
-     */
-    asyncOpenLedger(lId, digestType, passwd, this, counter);
-
-    /*
-     * Wait
-     */
-    counter.block(0);
-    if (counter.getrc() != BKException.Code.OK)
-      throw BKException.create(counter.getrc());
-
-    return counter.getLh();
-  }
-
-  /**
-   * Deletes a ledger asynchronously.
-   * 
-   * @param lId
-   *            ledger Id
-   * @param cb
-   *            deleteCallback implementation
-   * @param ctx
-   *            optional control object
-   */
-  public void asyncDeleteLedger(long lId, DeleteCallback cb, Object ctx) {
-      new LedgerDeleteOp(this, lId, cb, ctx).initiate();
-  }
-  
-  /**
-   * Delete callback implementation for synchronous delete call.
-   * 
-   * @param rc
-   *            return code
-   * @param ctx
-   *            optional control object
-   */
-  public void deleteComplete(int rc, Object ctx) {
-      SyncCounter counter = (SyncCounter) ctx;
-      counter.setrc(rc);
-      counter.dec();
-  }
-
-  /**
-   * Synchronous call to delete a ledger. Parameters match those of
-   * {@link #asyncDeleteLedger(long, DeleteCallback, Object)}
-   * 
-   * @param lId
-   *            ledgerId
-   * @throws InterruptedException
-   * @throws BKException
-   */
-  public void deleteLedger(long lId) throws InterruptedException, BKException {
-      SyncCounter counter = new SyncCounter();
-      counter.inc();
-      // Call asynchronous version
-      asyncDeleteLedger(lId, this, counter);
-      // Wait
-      counter.block(0);
-      if (counter.getrc() != KeeperException.Code.OK.intValue()) { 
-          LOG.error("ZooKeeper error deleting ledger node: " + counter.getrc());
-          throw BKException.create(Code.ZKException);
-      }
-  }
-  
-  /**
-   * Shuts down client.
-   * 
-   */
-  public void halt() throws InterruptedException {
-    bookieClient.close();
-    bookieWatcher.halt();
-    if (ownChannelFactory) {
-      channelFactory.releaseExternalResources();
-    }
-    if (ownZKHandle) {
-      zk.close();
-    }
-    callbackWorker.shutdown();
-    mainWorkerPool.shutdown();
-  }
-}
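A minimal end-to-end usage sketch of this client, assuming a ZooKeeper server on localhost:2181 and registered bookies. The BookKeeper methods used are the ones defined above; the LedgerHandle calls (getId, addEntry, readEntries, getLastAddConfirmed, close) and LedgerEntry#getEntry() are assumptions, since those classes are not shown in full here.

    import java.util.Enumeration;

    import org.apache.bookkeeper.client.BookKeeper;
    import org.apache.bookkeeper.client.BookKeeper.DigestType;
    import org.apache.bookkeeper.client.LedgerEntry;
    import org.apache.bookkeeper.client.LedgerHandle;

    public class BookKeeperUsageExample {
        public static void main(String[] args) throws Exception {
            // Assumes ZooKeeper on localhost:2181 with bookies registered under /ledgers/available.
            BookKeeper bk = new BookKeeper("localhost:2181");
            byte[] passwd = "secret".getBytes();
            try {
                // Ensemble of 3 bookies, quorum of 2, CRC32 digests.
                LedgerHandle lh = bk.createLedger(3, 2, DigestType.CRC32, passwd);
                long ledgerId = lh.getId();                          // assumed accessor
                lh.addEntry("hello, bookkeeper".getBytes());         // assumed LedgerHandle method
                lh.close();                                          // assumed LedgerHandle method

                // Re-open the ledger for reading and print every entry.
                LedgerHandle reader = bk.openLedger(ledgerId, DigestType.CRC32, passwd);
                Enumeration<LedgerEntry> entries =
                        reader.readEntries(0, reader.getLastAddConfirmed()); // assumed methods
                while (entries.hasMoreElements()) {
                    System.out.println(new String(entries.nextElement().getEntry())); // assumed
                }
                reader.close();
            } finally {
                bk.halt();
            }
        }
    }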

+ 0 - 204
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/BookieWatcher.java

@@ -1,204 +0,0 @@
-package org.apache.bookkeeper.client;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.Executors;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException;
-import org.apache.bookkeeper.util.SafeRunnable;
-import org.apache.bookkeeper.util.StringUtils;
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.AsyncCallback.ChildrenCallback;
-import org.apache.zookeeper.KeeperException.Code;
-
-/**
- * This class is responsible for maintaining a consistent view of what bookies
- * are available by reading Zookeeper (and setting watches on the bookie nodes).
- * When a bookie fails, the other parts of the code turn to this class to find a
- * replacement
- * 
- */
-class BookieWatcher implements Watcher, ChildrenCallback {
-    static final Logger logger = Logger.getLogger(BookieWatcher.class);
-    
-    public static final String BOOKIE_REGISTRATION_PATH = "/ledgers/available";
-    static final Set<InetSocketAddress> EMPTY_SET = new HashSet<InetSocketAddress>();
-    public static int ZK_CONNECT_BACKOFF_SEC = 1;
-
-    BookKeeper bk;
-    ScheduledExecutorService scheduler;
-
-    Set<InetSocketAddress> knownBookies = new HashSet<InetSocketAddress>();
-
-    SafeRunnable reReadTask = new SafeRunnable() {
-        @Override
-        public void safeRun() {
-            readBookies();
-        }
-    };
-
-    public BookieWatcher(BookKeeper bk) {
-        this.bk = bk;
-        this.scheduler = Executors.newSingleThreadScheduledExecutor();
-    }
-    
-    public void halt(){
-        scheduler.shutdown();
-    }
-
-    public void readBookies() {
-        readBookies(this);
-    }
-
-    public void readBookies(ChildrenCallback callback) {
-        bk.getZkHandle().getChildren( BOOKIE_REGISTRATION_PATH, this, callback, null);
-    }
-
-    @Override
-    public void process(WatchedEvent event) {
-        readBookies();
-    }
-
-    @Override
-    public void processResult(int rc, String path, Object ctx, List<String> children) {
-
-        if (rc != KeeperException.Code.OK.intValue()) {
-            //logger.error("Error while reading bookies", KeeperException.create(Code.get(rc), path));
-            // try the read after a second again
-            scheduler.schedule(reReadTask, ZK_CONNECT_BACKOFF_SEC, TimeUnit.SECONDS);
-            return;
-        }
-
-        // Read the bookie addresses into a set for efficient lookup
-        Set<InetSocketAddress> newBookieAddrs = new HashSet<InetSocketAddress>();
-        for (String bookieAddrString : children) {
-            InetSocketAddress bookieAddr;
-            try {
-                bookieAddr = StringUtils.parseAddr(bookieAddrString);
-            } catch (IOException e) {
-                logger.error("Could not parse bookie address: " + bookieAddrString + ", ignoring this bookie");
-                continue;
-            }
-            newBookieAddrs.add(bookieAddr);
-        }
-
-        synchronized (this) {
-            knownBookies = newBookieAddrs;
-        }
-    }
-
-    /**
-     * Blocks until bookies are read from zookeeper, used in the {@link BookKeeper} constructor.
-     * @throws InterruptedException
-     * @throws KeeperException
-     */
-    public void readBookiesBlocking() throws InterruptedException, KeeperException {
-        final LinkedBlockingQueue<Integer> queue = new LinkedBlockingQueue<Integer>();
-        readBookies(new ChildrenCallback() {
-            public void processResult(int rc, String path, Object ctx, List<String> children) {
-                try {
-                    BookieWatcher.this.processResult(rc, path, ctx, children);
-                    queue.put(rc);
-                } catch (InterruptedException e) {
-                    logger.error("Interrupted when trying to read bookies in a blocking fashion");
-                    throw new RuntimeException(e);
-                }
-            }
-        });
-        int rc = queue.take();
-
-        if (rc != KeeperException.Code.OK.intValue()) {
-            throw KeeperException.create(Code.get(rc));
-        }
-    }
-
-    /**
-     * Wrapper over the {@link #getAdditionalBookies(Set, int)} method when there is no exclusion list (or existing bookies)
-     * @param numBookiesNeeded
-     * @return
-     * @throws BKNotEnoughBookiesException
-     */
-    public ArrayList<InetSocketAddress> getNewBookies(int numBookiesNeeded) throws BKNotEnoughBookiesException {
-        return getAdditionalBookies(EMPTY_SET, numBookiesNeeded);
-    }
-
-    /**
-     * Wrapper over the {@link #getAdditionalBookies(Set, int)} method when you just need 1 extra bookie
-     * @param existingBookies
-     * @return
-     * @throws BKNotEnoughBookiesException
-     */
-    public InetSocketAddress getAdditionalBookie(List<InetSocketAddress> existingBookies)
-            throws BKNotEnoughBookiesException {
-        return getAdditionalBookies(new HashSet<InetSocketAddress>(existingBookies), 1).get(0);
-    }
-
-    /**
-     * Returns additional bookies given an exclusion list and how many are needed
-     * @param existingBookies
-     * @param numAdditionalBookiesNeeded
-     * @return
-     * @throws BKNotEnoughBookiesException
-     */
-    public ArrayList<InetSocketAddress> getAdditionalBookies(Set<InetSocketAddress> existingBookies,
-            int numAdditionalBookiesNeeded) throws BKNotEnoughBookiesException {
-
-        ArrayList<InetSocketAddress> newBookies = new ArrayList<InetSocketAddress>();
-
-        if (numAdditionalBookiesNeeded <= 0) {
-            return newBookies;
-        }
-
-        List<InetSocketAddress> allBookies;
-
-        synchronized (this) {
-            allBookies = new ArrayList<InetSocketAddress>(knownBookies);
-        }
-
-        Collections.shuffle(allBookies);
-
-        for (InetSocketAddress bookie : allBookies) {
-            if (existingBookies.contains(bookie)) {
-                continue;
-            }
-
-            newBookies.add(bookie);
-            numAdditionalBookiesNeeded--;
-
-            if (numAdditionalBookiesNeeded == 0) {
-                return newBookies;
-            }
-        }
-
-        throw new BKNotEnoughBookiesException();
-    }
-
-}
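The selection logic in getAdditionalBookies boils down to shuffling the known bookies and skipping ones already in the ensemble. A standalone sketch of that idea follows; BookieWatcher itself is package-private, so this does not call into it and the names are illustrative.

    import java.net.InetSocketAddress;
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Set;

    public class BookieSelectionExample {
        // Shuffle the known bookies and pick ones not already part of the ensemble.
        static List<InetSocketAddress> pickAdditional(Set<InetSocketAddress> known,
                                                      Set<InetSocketAddress> existing,
                                                      int needed) {
            List<InetSocketAddress> candidates = new ArrayList<InetSocketAddress>(known);
            Collections.shuffle(candidates);
            List<InetSocketAddress> picked = new ArrayList<InetSocketAddress>();
            for (InetSocketAddress bookie : candidates) {
                if (picked.size() == needed) {
                    break;
                }
                if (!existing.contains(bookie)) {
                    picked.add(bookie);
                }
            }
            if (picked.size() < needed) {
                throw new IllegalStateException("not enough non-faulty bookies available");
            }
            return picked;
        }
    }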

+ 0 - 50
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/CRC32DigestManager.java

@@ -1,50 +0,0 @@
-package org.apache.bookkeeper.client;
-
-/*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-
-import java.nio.ByteBuffer;
-import java.util.zip.CRC32;
-
-class CRC32DigestManager extends DigestManager {
-    CRC32 crc = new CRC32();
-    
-    public CRC32DigestManager(long ledgerId) {
-        super(ledgerId);
-    }
-
-    @Override
-    int getMacCodeLength() {
-        return 8;
-    }
-    
-    @Override
-    byte[] getValueAndReset() {
-        byte[] value = new byte[8];
-        ByteBuffer buf = ByteBuffer.wrap(value);
-        buf.putLong(crc.getValue());
-        crc.reset();
-        return value;
-    }
-    
-    @Override
-    void update(byte[] data, int offset, int length) {
-        crc.update(data, offset, length);
-    }
-}

+ 0 - 184
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/DigestManager.java

@@ -1,184 +0,0 @@
-package org.apache.bookkeeper.client;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.nio.ByteBuffer;
-import java.security.GeneralSecurityException;
-
-import org.apache.bookkeeper.client.BKException.BKDigestMatchException;
-import org.apache.bookkeeper.client.BookKeeper.DigestType;
-import org.apache.log4j.Logger;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBufferInputStream;
-import org.jboss.netty.buffer.ChannelBuffers;
-
-/**
- * This class takes an entry, attaches a digest to it and packages it with relevant
- * data so that it can be shipped to the bookie. On the return side, it also
- * gets a packet, checks that the digest matches, and extracts the original entry
- * for the packet. Currently 2 types of digests are supported: MAC (based on SHA-1) and CRC32
- */
-
-public abstract class DigestManager {
-    static final Logger logger = Logger.getLogger(DigestManager.class);
-    
-    static final int METADATA_LENGTH = 32;
-    
-    long ledgerId;
-    
-    abstract int getMacCodeLength();
-    
-    void update(byte[] data){
-        update(data, 0, data.length);
-    }
-    
-    abstract void update(byte[] data, int offset, int length);
-    abstract byte[] getValueAndReset();
-    
-    final int macCodeLength;
-
-    public DigestManager(long ledgerId) {
-        this.ledgerId = ledgerId;
-        macCodeLength = getMacCodeLength();
-    }
-    
-    static DigestManager instantiate(long ledgerId, byte[] passwd, DigestType digestType) throws GeneralSecurityException{
-        switch(digestType){
-        case MAC:
-            return new MacDigestManager(ledgerId, passwd);
-        case CRC32:
-            return new CRC32DigestManager(ledgerId);
-        default:
-            throw new GeneralSecurityException("Unknown checksum type: " + digestType);
-        }
-    }
-
-    /**
-     * Computes the digest for an entry and put bytes together for sending.
-     *  
-     * @param entryId
-     * @param lastAddConfirmed
-     * @param length
-     * @param data
-     * @return
-     */
-    
-    public ChannelBuffer computeDigestAndPackageForSending(long entryId, long lastAddConfirmed, long length, byte[] data) {
-
-        byte[] bufferArray = new byte[METADATA_LENGTH + macCodeLength];
-        ByteBuffer buffer = ByteBuffer.wrap(bufferArray);
-        buffer.putLong(ledgerId);
-        buffer.putLong(entryId);
-        buffer.putLong(lastAddConfirmed);
-        buffer.putLong(length);
-        buffer.flip();
-
-        update(buffer.array(), 0, METADATA_LENGTH);
-        update(data);
-        byte[] digest = getValueAndReset();
-
-        buffer.limit(buffer.capacity());
-        buffer.position(METADATA_LENGTH);
-        buffer.put(digest);
-        buffer.flip();
-
-        return ChannelBuffers.wrappedBuffer(ChannelBuffers.wrappedBuffer(buffer), ChannelBuffers.wrappedBuffer(data));
-    }
-
-    private void verifyDigest(ChannelBuffer dataReceived) throws BKDigestMatchException {
-        verifyDigest(-1, dataReceived, true);
-    }
-
-    private void verifyDigest(long entryId, ChannelBuffer dataReceived) throws BKDigestMatchException {
-        verifyDigest(entryId, dataReceived, false);
-    }
-
-    private void verifyDigest(long entryId, ChannelBuffer dataReceived, boolean skipEntryIdCheck)
-            throws BKDigestMatchException {
-
-        ByteBuffer dataReceivedBuffer = dataReceived.toByteBuffer();
-        byte[] digest;
-
-        update(dataReceivedBuffer.array(), dataReceivedBuffer.position(), METADATA_LENGTH);
-
-        int offset = METADATA_LENGTH + macCodeLength;
-        update(dataReceivedBuffer.array(), dataReceivedBuffer.position() + offset, dataReceived.readableBytes() - offset);
-        digest = getValueAndReset();
-
-        for (int i = 0; i < digest.length; i++) {
-            if (digest[i] != dataReceived.getByte(METADATA_LENGTH + i)) {
-                logger.error("Mac mismatch for ledger-id: " + ledgerId + ", entry-id: " + entryId);
-                throw new BKDigestMatchException();
-            }
-        }
-
-        long actualLedgerId = dataReceived.readLong();
-        long actualEntryId = dataReceived.readLong();
-
-        if (actualLedgerId != ledgerId) {
-            logger.error("Ledger-id mismatch in authenticated message, expected: " + ledgerId + " , actual: "
-                    + actualLedgerId);
-            throw new BKDigestMatchException();
-        }
-
-        if (!skipEntryIdCheck && actualEntryId != entryId) {
-            logger.error("Entry-id mismatch in authenticated message, expected: " + entryId + " , actual: "
-                    + actualEntryId);
-            throw new BKDigestMatchException();
-        }
-
-    }
-    
-    /**
-     * Verify that the digest matches and returns the data in the entry.
-     * 
-     * @param entryId
-     * @param dataReceived
-     * @return
-     * @throws BKDigestMatchException
-     */
-    ChannelBufferInputStream verifyDigestAndReturnData(long entryId, ChannelBuffer dataReceived)
-            throws BKDigestMatchException {
-        verifyDigest(entryId, dataReceived);
-        dataReceived.readerIndex(METADATA_LENGTH + macCodeLength);
-        return new ChannelBufferInputStream(dataReceived);
-    }
-
-    static class RecoveryData {
-        long lastAddConfirmed;
-        long entryId;
-
-        public RecoveryData(long lastAddConfirmed, long entryId) {
-            this.lastAddConfirmed = lastAddConfirmed;
-            this.entryId = entryId;
-        }
-
-    }
-
-    RecoveryData verifyDigestAndReturnLastConfirmed(ChannelBuffer dataReceived) throws BKDigestMatchException {
-        verifyDigest(dataReceived);
-        dataReceived.readerIndex(8);
-
-        long entryId = dataReceived.readLong();
-        long lastAddConfirmed = dataReceived.readLong();
-        long length = dataReceived.readLong();
-        return new RecoveryData(lastAddConfirmed, entryId);
-
-    }
-}
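The send-side packet built above is 32 bytes of metadata (ledger id, entry id, last add confirmed, length), followed by the digest, followed by the payload. A hedged standalone sketch of that layout using a CRC32 digest; the names are illustrative and this is not the class's own buffer handling.

    import java.nio.ByteBuffer;
    import java.util.zip.CRC32;

    public class EntryPacketExample {
        static final int METADATA_LENGTH = 32; // four longs, as in DigestManager

        // Build a CRC32-protected packet: metadata, then an 8-byte digest, then the payload.
        static byte[] buildPacket(long ledgerId, long entryId, long lastAddConfirmed, byte[] data) {
            ByteBuffer meta = ByteBuffer.allocate(METADATA_LENGTH);
            meta.putLong(ledgerId).putLong(entryId).putLong(lastAddConfirmed).putLong(data.length);

            CRC32 crc = new CRC32();
            crc.update(meta.array(), 0, METADATA_LENGTH);
            crc.update(data, 0, data.length);

            ByteBuffer packet = ByteBuffer.allocate(METADATA_LENGTH + 8 + data.length);
            packet.put(meta.array());
            packet.putLong(crc.getValue());
            packet.put(data);
            return packet.array();
        }
    }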

+ 0 - 61
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/DistributionSchedule.java

@@ -1,61 +0,0 @@
-package org.apache.bookkeeper.client;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This interface determines how entries are distributed among bookies.
- * 
- * Every entry gets replicated to some number of replicas. The first replica for
- * an entry is given a replicaIndex of 0, and so on. To distribute write load,
- * not all entries go to all bookies. Given an entry-id and replica index, a
- * {@link DistributionSchedule} determines which bookie that replica should go
- * to.
- */
-
-public interface DistributionSchedule {
-
-    /**
-     * 
-     * @param entryId
-     * @param replicaIndex
-     * @return index of bookie that should get this replica
-     */
-    public int getBookieIndex(long entryId, int replicaIndex);
-
-    /**
-     * 
-     * @param entryId
-     * @param bookieIndex
-     * @return -1 if the given bookie index is not a replica for the given
-     *         entryId
-     */
-    public int getReplicaIndex(long entryId, int bookieIndex);
-
-    /**
-     * Specifies whether it is ok to proceed with recovery given that we have
-     * heard back from the given bookie index. These calls will be made in
-     * sequence, and an implementation of this interface should accumulate
-     * history about which bookie indexes we have heard from. Once this method
-     * has returned true, it won't be called again on the same instance.
-     * 
-     * @param bookieIndexHeardFrom
-     * @return true if it is ok to proceed with recovery
-     */
-    public boolean canProceedWithRecovery(int bookieIndexHeardFrom);
-}
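A hedged sketch of one possible placement rule satisfying this interface's contract, a simple round-robin over the ensemble. It is only an illustration and is not claimed to match the project's actual schedule implementation.

    public class RoundRobinPlacementExample {
        final int ensembleSize;
        final int quorumSize;

        RoundRobinPlacementExample(int ensembleSize, int quorumSize) {
            this.ensembleSize = ensembleSize;
            this.quorumSize = quorumSize;
        }

        // Replica r of entry e goes to bookie (e + r) mod ensembleSize.
        int bookieIndex(long entryId, int replicaIndex) {
            return (int) ((entryId + replicaIndex) % ensembleSize);
        }

        // Inverse mapping: which replica (if any) of entry e lands on this bookie.
        int replicaIndex(long entryId, int bookieIndex) {
            int r = (int) ((bookieIndex - entryId % ensembleSize + ensembleSize) % ensembleSize);
            return r < quorumSize ? r : -1;
        }

        public static void main(String[] args) {
            RoundRobinPlacementExample s = new RoundRobinPlacementExample(3, 2);
            // Entry 4: replicas 0 and 1 go to bookies (4+0)%3 = 1 and (4+1)%3 = 2.
            System.out.println(s.bookieIndex(4, 0) + " " + s.bookieIndex(4, 1)); // 1 2
            System.out.println(s.replicaIndex(4, 0)); // -1, bookie 0 holds no replica of entry 4
        }
    }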

+ 0 - 167
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/LedgerCreateOp.java

@@ -1,167 +0,0 @@
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-package org.apache.bookkeeper.client;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.security.GeneralSecurityException;
-import java.util.ArrayList;
-import org.apache.bookkeeper.client.AsyncCallback.CreateCallback;
-import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException;
-import org.apache.bookkeeper.client.BookKeeper.DigestType;
-import org.apache.bookkeeper.util.StringUtils;
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.AsyncCallback.StatCallback;
-import org.apache.zookeeper.AsyncCallback.StringCallback;
-import org.apache.zookeeper.ZooDefs.Ids;
-import org.apache.zookeeper.data.Stat;
-
-/**
- * Encapsulates asynchronous ledger create operation
- * 
- */
-class LedgerCreateOp implements StringCallback, StatCallback {
-
-    static final Logger LOG = Logger.getLogger(LedgerCreateOp.class);
-
-    CreateCallback cb;
-    LedgerMetadata metadata;
-    LedgerHandle lh;
-    Object ctx;
-    byte[] passwd;
-    BookKeeper bk;
-    DigestType digestType;
-
-   /**
-    * Constructor
-    * 
-    * @param bk
-    *       BookKeeper object
-    * @param ensembleSize
-    *       ensemble size
-    * @param quorumSize
-    *       quorum size
-    * @param digestType
-    *       digest type, either MAC or CRC32
-    * @param passwd
-    *       password
-    * @param cb
-    *       callback implementation
-    * @param ctx
-    *       optional control object
-    */
-
-    LedgerCreateOp(BookKeeper bk, int ensembleSize, int quorumSize, DigestType digestType, byte[] passwd, CreateCallback cb, Object ctx) {
-        this.bk = bk;
-        this.metadata = new LedgerMetadata(ensembleSize, quorumSize);
-        this.digestType = digestType;
-        this.passwd = passwd;
-        this.cb = cb;
-        this.ctx = ctx;
-    }
-
-    /**
-     * Initiates the operation
-     */
-    public void initiate() {
-        /*
-         * Create ledger node on ZK. We get the id from the sequence number on
-         * the node.
-         */
-
-        bk.getZkHandle().create(StringUtils.prefix, new byte[0], Ids.OPEN_ACL_UNSAFE,
-                CreateMode.PERSISTENT_SEQUENTIAL, this, null);
-
-        // calls the string callback method below
-    }
-
-
-    /**
-     * Implements ZooKeeper string callback.
-     * 
-     * @see org.apache.zookeeper.AsyncCallback.StringCallback#processResult(int, java.lang.String, java.lang.Object, java.lang.String)
-     */
-    public void processResult(int rc, String path, Object ctx, String name) {
-
-        if (rc != KeeperException.Code.OK.intValue()) {
-            LOG.error("Could not create node for ledger", KeeperException.create(KeeperException.Code.get(rc), path));
-            cb.createComplete(BKException.Code.ZKException, null, this.ctx);
-            return;
-        }
-
-        /*
-         * Extract ledger id.
-         */
-        long ledgerId;
-        try {
-            ledgerId = StringUtils.getLedgerId(name);
-        } catch (IOException e) {
-            LOG.error("Could not extract ledger-id from path:" + path, e);
-            cb.createComplete(BKException.Code.ZKException, null, this.ctx);
-            return;
-        }
-
-        /*
-         * Adding bookies to ledger handle
-         */
-
-        ArrayList<InetSocketAddress> ensemble;
-        try {
-            ensemble = bk.bookieWatcher.getNewBookies(metadata.ensembleSize);
-        } catch (BKNotEnoughBookiesException e) {
-            LOG.error("Not enough bookies to create ledger " + ledgerId);
-            cb.createComplete(e.getCode(), null, this.ctx);
-            return;
-        }
-
-        /*
-         * Add ensemble to the configuration
-         */
-        metadata.addEnsemble(new Long(0), ensemble);
-        try {
-            lh = new LedgerHandle(bk, ledgerId, metadata, digestType, passwd);
-        } catch (GeneralSecurityException e) {
-            LOG.error("Security exception while creating ledger: " + ledgerId, e);
-            cb.createComplete(BKException.Code.DigestNotInitializedException, null, this.ctx);
-            return;
-        } catch (NumberFormatException e) {
-            LOG.error("Incorrectly entered parameter throttle: " + System.getProperty("throttle"), e);
-            cb.createComplete(BKException.Code.IncorrectParameterException, null, this.ctx);
-            return;
-        }
-
-        lh.writeLedgerConfig(this, null);
-
-    }
-
-    /**
-     * Implements ZooKeeper stat callback.
-     * 
-     * @see org.apache.zookeeper.AsyncCallback.StatCallback#processResult(int, String, Object, Stat)
-     */
-    public void processResult(int rc, String path, Object ctx, Stat stat) {
-        cb.createComplete(rc, lh, this.ctx);
-    }
-
-}
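The ledger id above is recovered from the sequence suffix that ZooKeeper appends to the PERSISTENT_SEQUENTIAL node. A small sketch of that parsing follows; the node-name format shown is an assumption, since StringUtils is not part of this diff.

    public class LedgerIdParseExample {
        // ZooKeeper appends a zero-padded 10-digit sequence number to sequential nodes.
        static long ledgerIdFromNodeName(String nodeName) {
            // e.g. "/ledgers/L0000000042" -> 42 (the path prefix here is illustrative)
            String digits = nodeName.substring(nodeName.length() - 10);
            return Long.parseLong(digits);
        }

        public static void main(String[] args) {
            System.out.println(ledgerIdFromNodeName("/ledgers/L0000000042")); // 42
        }
    }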

+ 0 - 80
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/LedgerDeleteOp.java

@@ -1,80 +0,0 @@
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-package org.apache.bookkeeper.client;
-
-import org.apache.bookkeeper.client.AsyncCallback.DeleteCallback;
-import org.apache.bookkeeper.util.StringUtils;
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.AsyncCallback.VoidCallback;
-
-/**
- * Encapsulates asynchronous ledger delete operation
- * 
- */
-class LedgerDeleteOp implements VoidCallback {
-
-    static final Logger LOG = Logger.getLogger(LedgerDeleteOp.class);
-
-    BookKeeper bk;
-    long ledgerId;
-    DeleteCallback cb;
-    Object ctx;
-
-    /**
-     * Constructor
-     * 
-     * @param bk
-     *            BookKeeper object
-     * @param ledgerId
-     *            ledger Id
-     * @param cb
-     *            callback implementation
-     * @param ctx
-     *            optional control object
-     */
-    LedgerDeleteOp(BookKeeper bk, long ledgerId, DeleteCallback cb, Object ctx) {
-        this.bk = bk;
-        this.ledgerId = ledgerId;
-        this.cb = cb;
-        this.ctx = ctx;
-    }
-
-    /**
-     * Initiates the operation
-     */
-    public void initiate() {
-        // Asynchronously delete the ledger node in ZK.
-        // When this completes, it will invoke the callback method below.
-        bk.getZkHandle().delete(StringUtils.getLedgerNodePath(ledgerId), -1, this, null);
-    }
-
-    /**
-     * Implements ZooKeeper Void Callback.
-     * 
-     * @see org.apache.zookeeper.AsyncCallback.VoidCallback#processResult(int,
-     *      java.lang.String, java.lang.Object)
-     */
-    public void processResult(int rc, String path, Object ctx) {
-        cb.deleteComplete(rc, this.ctx);
-    }
-
-}

+ 0 - 83
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/LedgerEntry.java

@@ -1,83 +0,0 @@
-package org.apache.bookkeeper.client;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.log4j.Logger;
-import org.jboss.netty.buffer.ChannelBufferInputStream;
-
-/**
- * Ledger entry. It is a simple tuple containing the ledger id, the entry id, and
- * the entry content.
- * 
- */
-
-public class LedgerEntry {
-  Logger LOG = Logger.getLogger(LedgerEntry.class);
-
-  long ledgerId;
-  long entryId;
-  long length;
-  ChannelBufferInputStream entryDataStream;
-
-  int nextReplicaIndexToReadFrom = 0;
-
-  LedgerEntry(long lId, long eId) {
-    this.ledgerId = lId;
-    this.entryId = eId;
-  }
-
-  public long getLedgerId() {
-    return ledgerId;
-  }
-
-  public long getEntryId() {
-    return entryId;
-  }
-  
-  public long getLength() {
-      return length;
-  }
-
-  public byte[] getEntry() {
-    try {
-      // In general, you can't rely on the available() method of an input
-      // stream, but ChannelBufferInputStream is backed by a byte[] so it
-      // accurately knows the # bytes available
-      byte[] ret = new byte[entryDataStream.available()];
-      entryDataStream.readFully(ret);
-      return ret;
-    } catch (IOException e) {
-      // The ChannelBufferInputStream doesn't really throw IOExceptions;
-      // the signature only declares them because InputStream requires it.
-      // Hence this code should never be reached.
-      LOG.fatal("Unexpected IOException while reading from channel buffer", e);
-      return new byte[0];
-    }
-  }
-
-  public InputStream getEntryInputStream() {
-    return entryDataStream;
-  }
-}

+ 0 - 547
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/LedgerHandle.java

@@ -1,547 +0,0 @@
-package org.apache.bookkeeper.client;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.net.InetSocketAddress;
-import java.security.GeneralSecurityException;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Enumeration;
-import java.util.Queue;
-import java.util.concurrent.Semaphore;
-
-import org.apache.bookkeeper.client.BKException;
-import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
-import org.apache.bookkeeper.client.AsyncCallback.CloseCallback;
-import org.apache.bookkeeper.client.AsyncCallback.ReadCallback;
-import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException;
-import org.apache.bookkeeper.client.BookKeeper.DigestType;
-import org.apache.bookkeeper.client.LedgerMetadata;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GenericCallback;
-import org.apache.bookkeeper.util.SafeRunnable;
-import org.apache.bookkeeper.util.StringUtils;
-
-import org.apache.log4j.Logger;
-
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.AsyncCallback.StatCallback;
-import org.apache.zookeeper.data.Stat;
-import org.jboss.netty.buffer.ChannelBuffer;
-
-/**
- * Ledger handle contains ledger metadata and is used to access the read and
- * write operations to a ledger.
- */
-public class LedgerHandle implements ReadCallback, AddCallback, CloseCallback {
-  final static Logger LOG = Logger.getLogger(LedgerHandle.class);
-
-  final byte[] ledgerKey;
-  final LedgerMetadata metadata;
-  final BookKeeper bk;
-  final long ledgerId;
-  long lastAddPushed;
-  long lastAddConfirmed;
-  long length;
-  final DigestManager macManager;
-  final DistributionSchedule distributionSchedule;
-
-  final Semaphore opCounterSem;
-  private Integer throttling = 5000;
-  
-  final Queue<PendingAddOp> pendingAddOps = new ArrayDeque<PendingAddOp>();
-  
-  LedgerHandle(BookKeeper bk, long ledgerId, LedgerMetadata metadata,
-      DigestType digestType, byte[] password)
-      throws GeneralSecurityException, NumberFormatException {
-    this.bk = bk;
-    this.metadata = metadata;
-    if (metadata.isClosed()) {
-      lastAddConfirmed = lastAddPushed = metadata.close;
-      length = metadata.length;
-    } else {
-      lastAddConfirmed = lastAddPushed = -1;
-      length = 0;
-    }
-    
-    this.ledgerId = ledgerId;
-    
-    String throttleValue = System.getProperty("throttle");
-    if(throttleValue != null){
-        this.throttling = new Integer(throttleValue); 
-    }
-    this.opCounterSem = new Semaphore(throttling);
-    
-    macManager = DigestManager.instantiate(ledgerId, password, digestType);
-    this.ledgerKey = MacDigestManager.genDigest("ledger", password);
-    distributionSchedule = new RoundRobinDistributionSchedule(
-        metadata.quorumSize, metadata.ensembleSize);
-  }
-  
-  /**
-   * Get the id of the current ledger
-   * 
-   * @return
-   */
-  public long getId() {
-    return ledgerId;
-  }
-
-  /**
-   * Get the last confirmed entry id on this ledger
-   * 
-   * @return
-   */
-  public long getLastAddConfirmed() {
-    return lastAddConfirmed;
-  }
-
-  /**
-   * Get the entry id of the last entry that has been enqueued for addition
-   * (but may not yet have been persisted to the ledger)
-   * 
-   * @return
-   */
-  public long getLastAddPushed() {
-    return lastAddPushed;
-  }
-
-  /**
-   * Get the Ledger's key/password.
-   * 
-   * @return byte array for the ledger's key/password.
-   */
-  public byte[] getLedgerKey() {
-      return ledgerKey;
-  }
-  
-  /**
-   * Get the LedgerMetadata
-   * 
-   * @return LedgerMetadata for the LedgerHandle
-   */
-  public LedgerMetadata getLedgerMetadata() {
-      return metadata;
-  }
-  
-  /**
-   * Get the DigestManager
-   * 
-   * @return DigestManager for the LedgerHandle
-   */
-  public DigestManager getDigestManager() {
-      return macManager;
-  }
-  
-  /**
-   * Return the semaphore used to throttle outstanding operations.
-   * 
-   * @return Semaphore tracking the available operation slots
-   */
-  Semaphore getAvailablePermits(){
-      return this.opCounterSem;
-  }
-  
-  /**
-   *  Add to the length of the ledger in bytes.
-   *  
-   * @param delta
-   * @return
-   */
-  long addToLength(long delta){
-      this.length += delta;
-      return this.length;
-  }
-  
-  /**
-   * Returns the length of the ledger in bytes. 
-   * 
-   * @return
-   */
-  public long getLength(){
-      return this.length;
-  }
-  
-  /**
-   * Get the Distribution Schedule
-   * 
-   * @return DistributionSchedule for the LedgerHandle
-   */
-  public DistributionSchedule getDistributionSchedule() {
-      return distributionSchedule;
-  }
-  
-  public void writeLedgerConfig(StatCallback callback, Object ctx) {
-    bk.getZkHandle().setData(StringUtils.getLedgerNodePath(ledgerId),
-        metadata.serialize(), -1, callback, ctx);
-  }
-
-  /**
-   * Close this ledger synchronously.
-   * 
-   */
-  public void close() throws InterruptedException {
-    SyncCounter counter = new SyncCounter();
-    counter.inc();
-
-    asyncClose(this, counter);
-
-    counter.block(0);
-  }
-
-  /**
-   * Asynchronous close, any adds in flight will return errors
-   * 
-   * @param cb
-   *          callback implementation
-   * @param ctx
-   *          control object
-   * @throws InterruptedException
-   */
-  public void asyncClose(CloseCallback cb, Object ctx) {
-    asyncClose(cb, ctx, BKException.Code.LedgerClosedException);
-  }
-
-  /**
-   * Same as the public version of asyncClose except that this one takes an
-   * additional parameter, the return code to hand to all the pending
-   * add ops
-   * 
-   * @param cb
-   * @param ctx
-   * @param rc
-   */
-  private void asyncClose(final CloseCallback cb, final Object ctx, final int rc) {
-
-    bk.mainWorkerPool.submitOrdered(ledgerId, new SafeRunnable() {
-
-      @Override
-      public void safeRun() {
-        metadata.length = length;
-        // Close operation is idempotent, so no need to check if we are
-        // already closed
-        metadata.close(lastAddConfirmed);
-        errorOutPendingAdds(rc);
-        lastAddPushed = lastAddConfirmed;
-
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Closing ledger: " + ledgerId + " at entryId: "
-              + metadata.close + " with this many bytes: " + metadata.length);
-        }
-
-        writeLedgerConfig(new StatCallback() {
-          @Override
-          public void processResult(int rc, String path, Object subctx,
-              Stat stat) {
-            if (rc != KeeperException.Code.OK.intValue()) {
-              cb.closeComplete(BKException.Code.ZKException, LedgerHandle.this,
-                  ctx);
-            } else {
-              cb.closeComplete(BKException.Code.OK, LedgerHandle.this, ctx);
-            }
-          }
-        }, null);
-
-      }
-    });
-  }
-
-  /**
-   * Read a sequence of entries synchronously.
-   * 
-   * @param firstEntry
-   *          id of first entry of sequence (included)
-   * @param lastEntry
-   *          id of last entry of sequence (included)
-   * 
-   */
-  public Enumeration<LedgerEntry> readEntries(long firstEntry, long lastEntry)
-      throws InterruptedException, BKException {
-    SyncCounter counter = new SyncCounter();
-    counter.inc();
-
-    asyncReadEntries(firstEntry, lastEntry, this, counter);
-
-    counter.block(0);
-    if (counter.getrc() != BKException.Code.OK) {
-      throw BKException.create(counter.getrc());
-    }
-
-    return counter.getSequence();
-  }
-
-  /**
-   * Read a sequence of entries asynchronously.
-   * 
-   * @param firstEntry
-   *          id of first entry of sequence
-   * @param lastEntry
-   *          id of last entry of sequence
-   * @param cb
-   *          object implementing read callback interface
-   * @param ctx
-   *          control object
-   */
-  public void asyncReadEntries(long firstEntry, long lastEntry,
-      ReadCallback cb, Object ctx) {
-    // Little sanity check
-    if (firstEntry < 0 || lastEntry > lastAddConfirmed
-        || firstEntry > lastEntry) {
-      cb.readComplete(BKException.Code.ReadException, this, null, ctx);
-      return;
-    }
-
-    try{
-        new PendingReadOp(this, firstEntry, lastEntry, cb, ctx).initiate();
-  
-    } catch (InterruptedException e) {
-        cb.readComplete(BKException.Code.InterruptedException, this, null, ctx);
-    }
-  }
-
-  /**
-   * Add entry synchronously to an open ledger.
-   * 
-   * @param data
-   *         array of bytes to be written to the ledger
-   */
-
-  public long addEntry(byte[] data) throws InterruptedException, BKException {
-    LOG.debug("Adding entry " + data);
-    SyncCounter counter = new SyncCounter();
-    counter.inc();
-
-    asyncAddEntry(data, this, counter);
-    counter.block(0);
-
-    return counter.getrc();
-  }
-
-  /**
-   * Add entry asynchronously to an open ledger.
-   * 
-   * @param data
-   *          array of bytes to be written
-   * @param cb
-   *          object implementing the callback interface
-   * @param ctx
-   *          some control object
-   */
-  public void asyncAddEntry(final byte[] data, final AddCallback cb,
-      final Object ctx) {
-      try{
-          opCounterSem.acquire();
-      } catch (InterruptedException e) {
-          cb.addComplete(BKException.Code.InterruptedException,
-                  LedgerHandle.this, -1, ctx);
-      }
-      
-      try{
-          bk.mainWorkerPool.submitOrdered(ledgerId, new SafeRunnable() {
-              @Override
-              public void safeRun() {
-                  if (metadata.isClosed()) {
-                      LOG.warn("Attempt to add to closed ledger: " + ledgerId);
-                      LedgerHandle.this.opCounterSem.release();
-                      cb.addComplete(BKException.Code.LedgerClosedException,
-                              LedgerHandle.this, -1, ctx);
-                      return;
-                  }
-
-                  long entryId = ++lastAddPushed;
-                  long currentLength = addToLength(data.length);
-                  PendingAddOp op = new PendingAddOp(LedgerHandle.this, cb, ctx, entryId);
-                  pendingAddOps.add(op);
-                  ChannelBuffer toSend = macManager.computeDigestAndPackageForSending(
-                          entryId, lastAddConfirmed, currentLength, data);
-                  op.initiate(toSend);
-              }
-          });
-      } catch (RuntimeException e) {
-          opCounterSem.release();
-          throw e;
-      }
-  }
-
-  // close the ledger and send fails to all the adds in the pipeline
-  void handleUnrecoverableErrorDuringAdd(int rc) {
-    asyncClose(NoopCloseCallback.instance, null, rc);
-  }
-
-  void errorOutPendingAdds(int rc) {
-    PendingAddOp pendingAddOp;
-    while ((pendingAddOp = pendingAddOps.poll()) != null) {
-      pendingAddOp.submitCallback(rc);
-    }
-  }
-
-  void sendAddSuccessCallbacks() {
-    // Start from the head of the queue and proceed while there are
-    // entries that have had all their responses come back
-    PendingAddOp pendingAddOp;
-    while ((pendingAddOp = pendingAddOps.peek()) != null) {
-      if (pendingAddOp.numResponsesPending != 0) {
-        return;
-      }
-      pendingAddOps.remove();
-      lastAddConfirmed = pendingAddOp.entryId;
-      pendingAddOp.submitCallback(BKException.Code.OK);
-    }
-
-  }
-
-  void handleBookieFailure(InetSocketAddress addr, final int bookieIndex) {
-    InetSocketAddress newBookie;
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Handling failure of bookie: " + addr + " index: "
-          + bookieIndex);
-    }
-
-    try {
-      newBookie = bk.bookieWatcher
-          .getAdditionalBookie(metadata.currentEnsemble);
-    } catch (BKNotEnoughBookiesException e) {
-      LOG
-          .error("Could not get additional bookie to remake ensemble, closing ledger: "
-              + ledgerId);
-      handleUnrecoverableErrorDuringAdd(e.getCode());
-      return;
-    }
-
-    final ArrayList<InetSocketAddress> newEnsemble = new ArrayList<InetSocketAddress>(
-        metadata.currentEnsemble);
-    newEnsemble.set(bookieIndex, newBookie);
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Changing ensemble from: " + metadata.currentEnsemble + " to: "
-          + newEnsemble + " for ledger: " + ledgerId + " starting at entry: "
-          + (lastAddConfirmed + 1));
-    }
-
-    metadata.addEnsemble(lastAddConfirmed + 1, newEnsemble);
-
-    writeLedgerConfig(new StatCallback() {
-      @Override
-      public void processResult(final int rc, String path, Object ctx, Stat stat) {
-
-        bk.mainWorkerPool.submitOrdered(ledgerId, new SafeRunnable() {
-          @Override
-          public void safeRun() {
-            if (rc != KeeperException.Code.OK.intValue()) {
-              LOG
-                  .error("Could not persist ledger metadata while changing ensemble to: "
-                      + newEnsemble + " , closing ledger");
-              handleUnrecoverableErrorDuringAdd(BKException.Code.ZKException);
-              return;
-            }
-
-            for (PendingAddOp pendingAddOp : pendingAddOps) {
-              pendingAddOp.unsetSuccessAndSendWriteRequest(bookieIndex);
-            }
-          }
-        });
-
-      }
-    }, null);
-
-  }
-
-  void recover(GenericCallback<Void> cb) {
-    if (metadata.isClosed()) {
-      // We are already closed, nothing to do
-      cb.operationComplete(BKException.Code.OK, null);
-      return;
-    }
-
-    new LedgerRecoveryOp(this, cb).initiate();
-  }
-
-  static class NoopCloseCallback implements CloseCallback {
-    static NoopCloseCallback instance = new NoopCloseCallback();
-
-    @Override
-    public void closeComplete(int rc, LedgerHandle lh, Object ctx) {
-      // noop
-    }
-  }
-
-  /**
-   * Implementation of callback interface for synchronous read method.
-   * 
-   * @param rc
-   *          return code
-   * @param lh
-   *          ledger handle
-   * @param seq
-   *          sequence of entries
-   * @param ctx
-   *          control object
-   */
-  public void readComplete(int rc, LedgerHandle lh,
-      Enumeration<LedgerEntry> seq, Object ctx) {
-
-    SyncCounter counter = (SyncCounter) ctx;
-    synchronized (counter) {
-      counter.setSequence(seq);
-      counter.setrc(rc);
-      counter.dec();
-      counter.notify();
-    }
-  }
-
-  /**
-   * Implementation of callback interface for synchronous add method.
-   * 
-   * @param rc
-   *          return code
-   * @param lh
-   *          ledger handle
-   * @param entry
-   *          entry identifier
-   * @param ctx
-   *          control object
-   */
-  public void addComplete(int rc, LedgerHandle lh, long entry, Object ctx) {
-    SyncCounter counter = (SyncCounter) ctx;
-
-    counter.setrc(rc);
-    counter.dec();
-  }
-
-  /**
-   * Close callback method
-   * 
-   * @param rc
-   * @param lh
-   * @param ctx
-   */
-  public void closeComplete(int rc, LedgerHandle lh, Object ctx) {
-
-    SyncCounter counter = (SyncCounter) ctx;
-    counter.setrc(rc);
-    synchronized (counter) {
-      counter.dec();
-      counter.notify();
-    }
-
-  }
-}
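For context, this is roughly how applications drove the handle above. It is a hedged sketch only: the BookKeeper(String) constructor, the createLedger(DigestType, byte[]) factory and the halt() shutdown call are assumed to come from BookKeeper.java (listed in this change but not shown in this section); addEntry, readEntries, getLastAddConfirmed and close are the handle methods defined above.

import java.util.Enumeration;

import org.apache.bookkeeper.client.BookKeeper;
import org.apache.bookkeeper.client.LedgerEntry;
import org.apache.bookkeeper.client.LedgerHandle;

public class LedgerHandleUsage {
    public static void main(String[] args) throws Exception {
        // Assumed client entry points from BookKeeper.java (not shown in this section).
        BookKeeper bk = new BookKeeper("localhost:2181");
        LedgerHandle lh = bk.createLedger(BookKeeper.DigestType.MAC, "secret".getBytes());

        for (int i = 0; i < 10; i++) {
            lh.addEntry(("entry-" + i).getBytes());            // synchronous add
        }

        Enumeration<LedgerEntry> entries = lh.readEntries(0, lh.getLastAddConfirmed());
        while (entries.hasMoreElements()) {
            System.out.println(new String(entries.nextElement().getEntry()));
        }

        lh.close();                                            // writes the close marker to ZK
        bk.halt();                                             // assumed shutdown method
    }
}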

+ 0 - 198
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/LedgerMetadata.java

@@ -1,198 +0,0 @@
-package org.apache.bookkeeper.client;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import org.apache.bookkeeper.util.StringUtils;
-import org.apache.log4j.Logger;
-
-/**
- * This class encapsulates all the ledger metadata that is persistently stored
- * in zookeeper. It provides parsing and serialization methods of such metadata.
- * 
- */
-public class LedgerMetadata {
-    static final Logger LOG = Logger.getLogger(LedgerMetadata.class);
-
-    private static final String closed = "CLOSED";
-    private static final String lSplitter = "\n";
-    private static final String tSplitter = "\t";
-
-    // can't use -1 for NOTCLOSED because that is reserved for a closed, empty
-    // ledger
-    public static final int NOTCLOSED = -101;
-    int ensembleSize;
-    int quorumSize;
-    long length;
-    long close;
-    private SortedMap<Long, ArrayList<InetSocketAddress>> ensembles = new TreeMap<Long, ArrayList<InetSocketAddress>>();
-    ArrayList<InetSocketAddress> currentEnsemble;
-
-    public LedgerMetadata(int ensembleSize, int quorumSize) {
-        this.ensembleSize = ensembleSize;
-        this.quorumSize = quorumSize;
-        
-        /*
-         * It is set in PendingReadOp.readEntryComplete, and 
-         * we read it in LedgerRecoveryOp.readComplete.
-         */
-        this.length = 0;
-        this.close = NOTCLOSED;
-    };
-
-    private LedgerMetadata() {
-        this(0, 0);
-    }
-
-    /**
-     * Get the Map of bookie ensembles for the various ledger fragments 
-     * that make up the ledger.
-     * 
-     * @return SortedMap of Ledger Fragments and the corresponding 
-     * bookie ensembles that store the entries.
-     */
-    public SortedMap<Long, ArrayList<InetSocketAddress>> getEnsembles() {
-        return ensembles;
-    }
-    
-    boolean isClosed() {
-        return close != NOTCLOSED;
-    }
-
-    void close(long entryId) {
-        close = entryId;
-    }
-    
-    void addEnsemble(long startEntryId, ArrayList<InetSocketAddress> ensemble) {
-        assert ensembles.isEmpty() || startEntryId >= ensembles.lastKey();
-
-        ensembles.put(startEntryId, ensemble);
-        currentEnsemble = ensemble;
-    }
-
-    ArrayList<InetSocketAddress> getEnsemble(long entryId) {
-        // the head map cannot be empty, since we insert an ensemble for
-        // entry-id 0, right when we start
-        return ensembles.get(ensembles.headMap(entryId + 1).lastKey());
-    }
-
-    /**
-     * Returns the first entry id, strictly greater than the given entry id, at which
-     * the next ensemble change takes place (-1 if there are no further ensemble changes)
-     * 
-     * @param entryId
-     * @return
-     */
-    long getNextEnsembleChange(long entryId) {
-        SortedMap<Long, ArrayList<InetSocketAddress>> tailMap = ensembles.tailMap(entryId + 1);
-
-        if (tailMap.isEmpty()) {
-            return -1;
-        } else {
-            return tailMap.firstKey();
-        }
-    }
-
-    /**
-     * Serializes the ledger metadata into a byte array suitable for storing
-     * in the ledger znode.
-     * 
-     * @return byte[] serialized metadata
-     */
-    public byte[] serialize() {
-        StringBuilder s = new StringBuilder();
-        s.append(quorumSize).append(lSplitter).append(ensembleSize).append(lSplitter).append(length);
-
-        for (Map.Entry<Long, ArrayList<InetSocketAddress>> entry : ensembles.entrySet()) {
-            s.append(lSplitter).append(entry.getKey());
-            for (InetSocketAddress addr : entry.getValue()) {
-                s.append(tSplitter);
-                StringUtils.addrToString(s, addr);
-            }
-        }
-        
-        if (close != NOTCLOSED) {
-            s.append(lSplitter).append(close).append(tSplitter).append(closed);
-        }
-
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Serialized config: " + s.toString());
-        }
-
-        return s.toString().getBytes();
-    }
-
-    /**
-     * Parses a given byte array and transforms it into a LedgerMetadata object
-     * 
-     * @param bytes
-     *            byte array to parse
-     * @return LedgerMetadata
-     * @throws IOException
-     *             if the given byte[] cannot be parsed
-     */
-
-    static LedgerMetadata parseConfig(byte[] bytes) throws IOException {
-
-        LedgerMetadata lc = new LedgerMetadata();
-        String config = new String(bytes);
-
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Parsing Config: " + config);
-        }
-
-        String lines[] = config.split(lSplitter);
-
-        if (lines.length < 2) {
-            throw new IOException("Quorum size or ensemble size absent from config: " + config);
-        }
-
-        try {
-            lc.quorumSize = new Integer(lines[0]);
-            lc.ensembleSize = new Integer(lines[1]);
-            lc.length = new Long(lines[2]); 
-            
-            for (int i = 3; i < lines.length; i++) {
-                String parts[] = lines[i].split(tSplitter);
-
-                if (parts[1].equals(closed)) {
-                    lc.close = new Long(parts[0]);
-                    break;
-                }
-
-                ArrayList<InetSocketAddress> addrs = new ArrayList<InetSocketAddress>();
-                for (int j = 1; j < parts.length; j++) {
-                    addrs.add(StringUtils.parseAddr(parts[j]));
-                }
-                lc.addEnsemble(new Long(parts[0]), addrs);
-            }
-        } catch (NumberFormatException e) {
-            throw new IOException(e);
-        }
-        return lc;
-    }
-
-}
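Reading serialize() and parseConfig() together, the znode payload is a small newline-separated text record: quorum size, ensemble size, length in bytes, then one tab-separated line per ledger fragment (start entry id followed by the bookie addresses), and, if the ledger is closed, a final line with the last entry id and the CLOSED marker. A hypothetical payload for a closed ledger might look like the following; the host:port rendering of addresses is an assumption, since StringUtils.addrToString() is not shown here:

2
3
1024
0	10.0.0.1:3181	10.0.0.2:3181	10.0.0.3:3181
47	CLOSED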

+ 0 - 140
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/LedgerOpenOp.java

@@ -1,140 +0,0 @@
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-package org.apache.bookkeeper.client;
-
-import java.io.IOException;
-import java.security.GeneralSecurityException;
-import org.apache.bookkeeper.client.AsyncCallback.OpenCallback;
-import org.apache.bookkeeper.client.BookKeeper.DigestType;
-import org.apache.bookkeeper.util.StringUtils;
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.AsyncCallback.DataCallback;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GenericCallback;
-import org.apache.zookeeper.data.Stat;
-
-/**
- * Encapsulates the ledger open operation
- * 
- */
-class LedgerOpenOp implements DataCallback {
-    static final Logger LOG = Logger.getLogger(LedgerOpenOp.class);
-    
-    BookKeeper bk;
-    long ledgerId;
-    OpenCallback cb;
-    Object ctx;
-    LedgerHandle lh;
-    byte[] passwd;
-    DigestType digestType;
-    
-    /**
-     * Constructor.
-     * 
-     * @param bk
-     * @param ledgerId
-     * @param digestType
-     * @param passwd
-     * @param cb
-     * @param ctx
-     */
-    
-    public LedgerOpenOp(BookKeeper bk, long ledgerId, DigestType digestType, byte[] passwd, OpenCallback cb, Object ctx) {
-        this.bk = bk;
-        this.ledgerId = ledgerId;
-        this.passwd = passwd;
-        this.cb = cb;
-        this.ctx = ctx;
-        this.digestType = digestType;
-    }
-
-    /**
-     * Initiates the ledger open operation
-     */
-    public void initiate() {
-        /**
-         * Asynchronously read the ledger metadata node.
-         */
-
-        bk.getZkHandle().getData(StringUtils.getLedgerNodePath(ledgerId), false, this, ctx);
-
-    }
-
-    /**
-     * Implements ZooKeeper data callback.
-     * @see org.apache.zookeeper.AsyncCallback.DataCallback#processResult(int, String, Object, byte[], Stat)
-     */
-    public void processResult(int rc, String path, Object ctx, byte[] data, Stat stat) {
-
-        if (rc == KeeperException.Code.NONODE.intValue()) {
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("No such ledger: " + ledgerId, KeeperException.create(KeeperException.Code.get(rc), path));
-            }
-            cb.openComplete(BKException.Code.NoSuchLedgerExistsException, null, this.ctx);
-            return;
-        }
-        if (rc != KeeperException.Code.OK.intValue()) {
-            LOG.error("Could not read metadata for ledger: " + ledgerId, KeeperException.create(KeeperException.Code
-                    .get(rc), path));
-            cb.openComplete(BKException.Code.ZKException, null, this.ctx);
-            return;
-        }
-
-        LedgerMetadata metadata;
-        try {
-            metadata = LedgerMetadata.parseConfig(data);
-        } catch (IOException e) {
-            LOG.error("Could not parse ledger metadata for ledger: " + ledgerId, e);
-            cb.openComplete(BKException.Code.ZKException, null, this.ctx);
-            return;
-        }
-
-        try {
-            lh = new LedgerHandle(bk, ledgerId, metadata, digestType, passwd);
-        } catch (GeneralSecurityException e) {
-            LOG.error("Security exception while opening ledger: " + ledgerId, e);
-            cb.openComplete(BKException.Code.DigestNotInitializedException, null, this.ctx);
-            return;
-        } catch (NumberFormatException e) {
-            LOG.error("Incorrectly entered parameter throttle: " + System.getProperty("throttle"), e);
-            cb.openComplete(BKException.Code.IncorrectParameterException, null, this.ctx);
-            return;
-        }
-
-        if (metadata.close != LedgerMetadata.NOTCLOSED) {
-            // Ledger was closed properly
-            cb.openComplete(BKException.Code.OK, lh, this.ctx);
-            return;
-        }
-
-        lh.recover(new GenericCallback<Void>() {
-            @Override
-            public void operationComplete(int rc, Void result) {
-                if (rc != BKException.Code.OK) {
-                    cb.openComplete(BKException.Code.LedgerRecoveryException, null, LedgerOpenOp.this.ctx);
-                } else {
-                    cb.openComplete(BKException.Code.OK, lh, LedgerOpenOp.this.ctx);
-                }
-            }
-        });
-    }
-}

+ 0 - 178
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/LedgerRecoveryOp.java

@@ -1,178 +0,0 @@
-package org.apache.bookkeeper.client;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.Enumeration;
-
-import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
-import org.apache.bookkeeper.client.AsyncCallback.ReadCallback;
-import org.apache.bookkeeper.client.BKException.BKDigestMatchException;
-import org.apache.bookkeeper.client.LedgerHandle.NoopCloseCallback;
-import org.apache.bookkeeper.client.DigestManager.RecoveryData;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadEntryCallback;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GenericCallback;
-import org.apache.log4j.Logger;
-import org.jboss.netty.buffer.ChannelBuffer;
-
-/**
- * This class encapsulates the ledger recovery operation. It first sends a read
- * request with an entry id of -1 to all bookies. Then, starting from the last
- * confirmed entry (from hints in the ledger entries), it reads forward until it
- * can no longer find an entry, and closes the ledger at that point.
- * 
- */
-class LedgerRecoveryOp implements ReadEntryCallback, ReadCallback, AddCallback {
-    static final Logger LOG = Logger.getLogger(LedgerRecoveryOp.class);
-    LedgerHandle lh;
-    int numResponsesPending;
-    boolean proceedingWithRecovery = false;
-    long maxAddPushed = -1;
-    long maxAddConfirmed = -1;
-    long maxLength = 0;
-
-    GenericCallback<Void> cb;
-
-    public LedgerRecoveryOp(LedgerHandle lh, GenericCallback<Void> cb) {
-        this.cb = cb;
-        this.lh = lh;
-        numResponsesPending = lh.metadata.ensembleSize;
-    }
-
-    public void initiate() {
-        for (int i = 0; i < lh.metadata.currentEnsemble.size(); i++) {
-            lh.bk.bookieClient.readEntry(lh.metadata.currentEnsemble.get(i), lh.ledgerId, -1, this, i);
-        }
-    }
-
-    public synchronized void readEntryComplete(final int rc, final long ledgerId, final long entryId,
-            final ChannelBuffer buffer, final Object ctx) {
-
-        // Already proceeding with recovery, nothing to do
-        if (proceedingWithRecovery) {
-            return;
-        }
-
-        int bookieIndex = (Integer) ctx;
-
-        numResponsesPending--;
-
-        boolean heardValidResponse = false;
-
-        if (rc == BKException.Code.OK) {
-            try {
-                RecoveryData recoveryData = lh.macManager.verifyDigestAndReturnLastConfirmed(buffer);
-                maxAddConfirmed = Math.max(maxAddConfirmed, recoveryData.lastAddConfirmed);
-                maxAddPushed = Math.max(maxAddPushed, recoveryData.entryId);
-                heardValidResponse = true;
-            } catch (BKDigestMatchException e) {
-                // Too bad, this bookie didn't give us a valid answer; we
-                // might still be able to recover, so continue
-                LOG.error("Mac mismatch while reading last entry from bookie: "
-                        + lh.metadata.currentEnsemble.get(bookieIndex));
-            }
-        }
-
-        if (rc == BKException.Code.NoSuchLedgerExistsException || rc == BKException.Code.NoSuchEntryException) {
-            // this still counts as a valid response, e.g., if the client
-            // crashed without writing any entry
-            heardValidResponse = true;
-        }
-
-        // other return codes dont count as valid responses
-        if (heardValidResponse && lh.distributionSchedule.canProceedWithRecovery(bookieIndex)) {
-            proceedingWithRecovery = true;
-            lh.lastAddPushed = lh.lastAddConfirmed = maxAddConfirmed;
-            lh.length = maxLength;
-            doRecoveryRead();
-            return;
-        }
-
-        if (numResponsesPending == 0) {
-            // We have got all responses back, but they were still not enough
-            // to start recovery, so just fail the operation
-            LOG.error("While recovering ledger: " + ledgerId + " did not hear success responses from all quorums");
-            cb.operationComplete(BKException.Code.LedgerRecoveryException, null);
-        }
-
-    }
-
-    /**
-     * Try to read past the last confirmed.
-     */
-    private void doRecoveryRead() {
-        lh.lastAddConfirmed++;
-        lh.asyncReadEntries(lh.lastAddConfirmed, lh.lastAddConfirmed, this, null);
-    }
-
-    @Override
-    public void readComplete(int rc, LedgerHandle lh, Enumeration<LedgerEntry> seq, Object ctx) {
-        // get back to prev value
-        lh.lastAddConfirmed--;
-        if (rc == BKException.Code.OK) {
-            LedgerEntry entry = seq.nextElement(); 
-            byte[] data = entry.getEntry();
-            
-            /*
-             * We will add this entry again to make sure it is written to enough
-             * replicas. We subtract the length of the data itself, since it will
-             * be added again when processing the call to add it.
-             */
-            lh.length = entry.getLength() - (long) data.length;
-            lh.asyncAddEntry(data, this, null);
-            
-            return;
-        }
-
-        if (rc == BKException.Code.NoSuchEntryException || rc == BKException.Code.NoSuchLedgerExistsException) {
-            lh.asyncClose(NoopCloseCallback.instance, null);
-            // we don't need to wait for the close to complete. Since we mark
-            // the ledger closed in memory, the application won't be able to
-            // add to it
-
-            cb.operationComplete(BKException.Code.OK, null);
-            LOG.debug("After closing length is: " + lh.getLength());
-            return;
-        }
-
-        // otherwise, some other error, we can't handle
-        LOG.error("Failure " + BKException.getMessage(rc) + " while reading entry: " + lh.lastAddConfirmed + 1
-                + " ledger: " + lh.ledgerId + " while recovering ledger");
-        cb.operationComplete(rc, null);
-        return;
-    }
-
-    @Override
-    public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) {
-        if (rc != BKException.Code.OK) {
-            // Give up, we can't recover from this error
-
-            LOG.error("Failure " + BKException.getMessage(rc) + " while writing entry: " + lh.lastAddConfirmed + 1
-                    + " ledger: " + lh.ledgerId + " while recovering ledger");
-            cb.operationComplete(rc, null);
-            return;
-        }
-
-        doRecoveryRead();
-
-    }
-
-}

+ 0 - 67
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/MacDigestManager.java

@@ -1,67 +0,0 @@
-package org.apache.bookkeeper.client;
-
-/*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-import java.security.GeneralSecurityException;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-
-import javax.crypto.Mac;
-import javax.crypto.spec.SecretKeySpec;
-
-class MacDigestManager extends DigestManager {
-    public static String DIGEST_ALGORITHM = "SHA-1";
-    public static String KEY_ALGORITHM = "HmacSHA1";
-    Mac mac;
-
-    public MacDigestManager(long ledgerId, byte[] passwd) throws GeneralSecurityException {
-        super(ledgerId);
-        byte[] macKey = genDigest("mac", passwd);
-        SecretKeySpec keySpec = new SecretKeySpec(macKey, KEY_ALGORITHM);
-        mac = Mac.getInstance(KEY_ALGORITHM);
-        mac.init(keySpec);
-        
-        
-    }
-
-    static byte[] genDigest(String pad, byte[] passwd) throws NoSuchAlgorithmException {
-        MessageDigest digest = MessageDigest.getInstance(DIGEST_ALGORITHM);
-        digest.update(pad.getBytes());
-        digest.update(passwd);
-        return digest.digest();
-    }
-
-    @Override
-    int getMacCodeLength() {
-        return 20;
-    }
-
-    
-    @Override
-    byte[] getValueAndReset() {
-        return mac.doFinal();
-    }
-    
-    @Override
-    void update(byte[] data, int offset, int length) {
-        mac.update(data, offset, length);
-    }
-    
-    
-}
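A standalone sketch of the digest scheme above, using only JDK classes: the HMAC key is derived as SHA-1("mac" + password), as in genDigest(), and each entry carries a 20-byte HmacSHA1 authentication code, matching getMacCodeLength().

import java.security.MessageDigest;

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

public class MacDigestSketch {
    public static void main(String[] args) throws Exception {
        byte[] passwd = "secret".getBytes();

        // Key derivation, mirroring MacDigestManager.genDigest("mac", passwd).
        MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
        sha1.update("mac".getBytes());
        sha1.update(passwd);
        byte[] macKey = sha1.digest();

        // Per-entry authentication code, mirroring update()/getValueAndReset().
        Mac mac = Mac.getInstance("HmacSHA1");
        mac.init(new SecretKeySpec(macKey, "HmacSHA1"));
        mac.update("some entry payload".getBytes());
        byte[] code = mac.doFinal();

        System.out.println("digest length = " + code.length);  // prints 20
    }
}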

+ 0 - 138
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/PendingAddOp.java

@@ -1,138 +0,0 @@
-package org.apache.bookkeeper.client;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.net.InetSocketAddress;
-import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback;
-import org.apache.log4j.Logger;
-import org.jboss.netty.buffer.ChannelBuffer;
-
-/**
- * This represents a pending add operation. When it has received success from all
- * bookies, it checks whether it is at the head of the pending adds queue, and if so,
- * sends the ack back to the application. If a bookie fails, a replacement is made
- * and placed at the same position in the ensemble. The pending adds are then
- * re-replicated.
- * 
- * 
- */
-class PendingAddOp implements WriteCallback {
-    final static Logger LOG = Logger.getLogger(PendingAddOp.class);
-
-    ChannelBuffer toSend;
-    AddCallback cb;
-    Object ctx;
-    long entryId;
-    boolean[] successesSoFar;
-    int numResponsesPending;
-    LedgerHandle lh;
-
-    PendingAddOp(LedgerHandle lh, AddCallback cb, Object ctx, long entryId) {
-        this.lh = lh;
-        this.cb = cb;
-        this.ctx = ctx;
-        this.entryId = entryId;
-        successesSoFar = new boolean[lh.metadata.quorumSize];
-        numResponsesPending = successesSoFar.length;
-    }
-
-    void sendWriteRequest(int bookieIndex, int arrayIndex) {
-        lh.bk.bookieClient.addEntry(lh.metadata.currentEnsemble.get(bookieIndex), lh.ledgerId, lh.ledgerKey, entryId, toSend,
-                this, arrayIndex);
-    }
-
-    void unsetSuccessAndSendWriteRequest(int bookieIndex) {
-        if (toSend == null) {
-            // this addOp hasn't yet had its mac computed. When the mac is
-            // computed, its write requests will be sent, so no need to send it
-            // now
-            return;
-        }
-
-        int replicaIndex = lh.distributionSchedule.getReplicaIndex(entryId, bookieIndex);
-        if (replicaIndex < 0) {
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Leaving unchanged, ledger: " + lh.ledgerId + " entry: " + entryId + " bookie index: "
-                        + bookieIndex);
-            }
-            return;
-        }
-
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Unsetting success for ledger: " + lh.ledgerId + " entry: " + entryId + " bookie index: "
-                    + bookieIndex);
-        }
-
-        // if we had already heard a success from this array index, need to
-        // increment our number of responses that are pending, since we are
-        // going to unset this success
-        if (successesSoFar[replicaIndex]) {
-            successesSoFar[replicaIndex] = false;
-            numResponsesPending++;
-        }
-        
-         sendWriteRequest(bookieIndex, replicaIndex);
-    }
-
-    void initiate(ChannelBuffer toSend) {
-        this.toSend = toSend;
-        for (int i = 0; i < successesSoFar.length; i++) {
-            int bookieIndex = lh.distributionSchedule.getBookieIndex(entryId, i);
-            sendWriteRequest(bookieIndex, i);
-        }
-    }
-
-    @Override
-    public void writeComplete(int rc, long ledgerId, long entryId, InetSocketAddress addr, Object ctx) {
-
-        Integer replicaIndex = (Integer) ctx;
-        int bookieIndex = lh.distributionSchedule.getBookieIndex(entryId, replicaIndex);
-
-        if (!lh.metadata.currentEnsemble.get(bookieIndex).equals(addr)) {
-            // ensemble has already changed, failure of this addr is immaterial
-            LOG.warn("Write did not succeed: " + ledgerId + ", " + entryId + ". But we have already fixed it.");
-            return;
-        }
-        
-        if (rc != BKException.Code.OK) {
-            LOG.warn("Write did not succeed: " + ledgerId + ", " + entryId);
-            lh.handleBookieFailure(addr, bookieIndex);
-            return;
-        }
-
-
-        if (!successesSoFar[replicaIndex]) {
-            successesSoFar[replicaIndex] = true;
-            numResponsesPending--;
-            
-            // do some quick checks to see if some adds may have finished. All
-            // this will be checked under locks again
-            if (numResponsesPending == 0 && lh.pendingAddOps.peek() == this) {
-                lh.sendAddSuccessCallbacks();
-            }
-        } 
-    }
-
-    void submitCallback(final int rc) {
-        cb.addComplete(rc, lh, entryId, ctx);
-        lh.opCounterSem.release();
-    }
-
-}

+ 0 - 170
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/PendingReadOp.java

@@ -1,170 +0,0 @@
-package org.apache.bookkeeper.client;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Enumeration;
-import java.util.NoSuchElementException;
-import java.util.Queue;
-import org.apache.bookkeeper.client.AsyncCallback.ReadCallback;
-import org.apache.bookkeeper.client.BKException.BKDigestMatchException;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadEntryCallback;
-import org.apache.log4j.Logger;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBufferInputStream;
-
-import java.io.IOException;
-
-/**
- * Sequence of entries of a ledger that represents a pending read operation.
- * When all the data read has come back, the application callback is called.
- * This class could be improved because we could start pushing data to the
- * application as soon as it arrives rather than waiting for the whole thing.
- * 
- */
-
-class PendingReadOp implements Enumeration<LedgerEntry>, ReadEntryCallback {
-    Logger LOG = Logger.getLogger(PendingReadOp.class);
-
-    Queue<LedgerEntry> seq;
-    ReadCallback cb;
-    Object ctx;
-    LedgerHandle lh;
-    long numPendingReads;
-    long startEntryId;
-    long endEntryId;
-
-    PendingReadOp(LedgerHandle lh, long startEntryId, long endEntryId, ReadCallback cb, Object ctx) {
-
-        seq = new ArrayDeque<LedgerEntry>((int) (endEntryId - startEntryId));
-        this.cb = cb;
-        this.ctx = ctx;
-        this.lh = lh;
-        this.startEntryId = startEntryId;
-        this.endEntryId = endEntryId;
-        numPendingReads = endEntryId - startEntryId + 1;
-    }
-
-    public void initiate() throws InterruptedException {
-        long nextEnsembleChange = startEntryId, i = startEntryId;
-
-        ArrayList<InetSocketAddress> ensemble = null;
-        do {
-
-            if(LOG.isDebugEnabled()){
-                LOG.debug("Acquiring lock: " + i);
-            }
-           
-            lh.opCounterSem.acquire();
-            
-            if (i == nextEnsembleChange) {
-                ensemble = lh.metadata.getEnsemble(i);
-                nextEnsembleChange = lh.metadata.getNextEnsembleChange(i);
-            }
-            LedgerEntry entry = new LedgerEntry(lh.ledgerId, i);
-            seq.add(entry);
-            i++;
-            sendRead(ensemble, entry, BKException.Code.ReadException);
-
-        } while (i <= endEntryId);
-    }
-
-    void sendRead(ArrayList<InetSocketAddress> ensemble, LedgerEntry entry, int lastErrorCode) {
-        if (entry.nextReplicaIndexToReadFrom >= lh.metadata.quorumSize) {
-            // we are done, the read has failed from all replicas, just fail the
-            // read
-            submitCallback(lastErrorCode);
-            return;
-        }
-
-        int bookieIndex = lh.distributionSchedule.getBookieIndex(entry.entryId, entry.nextReplicaIndexToReadFrom);
-        entry.nextReplicaIndexToReadFrom++;
-        lh.bk.bookieClient.readEntry(ensemble.get(bookieIndex), lh.ledgerId, entry.entryId, this, entry);
-    }
-
-    void logErrorAndReattemptRead(LedgerEntry entry, String errMsg, int rc) {
-        ArrayList<InetSocketAddress> ensemble = lh.metadata.getEnsemble(entry.entryId);
-        int bookieIndex = lh.distributionSchedule.getBookieIndex(entry.entryId, entry.nextReplicaIndexToReadFrom - 1);
-        LOG.error(errMsg + " while reading entry: " + entry.entryId + " ledgerId: " + lh.ledgerId + " from bookie: "
-                + ensemble.get(bookieIndex));
-        sendRead(ensemble, entry, rc);
-        return;
-    }
-
-    @Override
-    public void readEntryComplete(int rc, long ledgerId, final long entryId, final ChannelBuffer buffer, Object ctx) {
-        final LedgerEntry entry = (LedgerEntry) ctx;
-
-        if (rc != BKException.Code.OK) {
-            logErrorAndReattemptRead(entry, "Error: " + BKException.getMessage(rc), rc);
-            return;
-        }
-        
-        ChannelBufferInputStream is;
-        try {
-            is = lh.macManager.verifyDigestAndReturnData(entryId, buffer);
-        } catch (BKDigestMatchException e) {
-            logErrorAndReattemptRead(entry, "Mac mismatch", BKException.Code.DigestMatchException);
-            return;
-        }
-        
-        entry.entryDataStream = is;
-        
-        /*
-         * The length is a long and it is the last field of the metadata of an entry.
-         * Consequently, we have to subtract 8 from METADATA_LENGTH to get the length.
-         */
-        entry.length = buffer.getLong(DigestManager.METADATA_LENGTH - 8);
-
-        numPendingReads--;
-        if (numPendingReads == 0) {
-            submitCallback(BKException.Code.OK);
-        }
-        
-        if(LOG.isDebugEnabled()){
-            LOG.debug("Releasing lock: " + entryId);
-        }
-        
-        lh.opCounterSem.release();
-        
-        if(numPendingReads < 0)
-            LOG.error("Read too many values");
-    }
-
-    private void submitCallback(int code){
-        cb.readComplete(code, lh, PendingReadOp.this, PendingReadOp.this.ctx);
-    }
-    public boolean hasMoreElements() {
-        return !seq.isEmpty();
-    }
-
-    public LedgerEntry nextElement() throws NoSuchElementException {
-        return seq.remove();
-    }
-
-    public int size() {
-        return seq.size();
-    }
-}

+ 0 - 87
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/RoundRobinDistributionSchedule.java

@@ -1,87 +0,0 @@
-package org.apache.bookkeeper.client;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.bookkeeper.util.MathUtils;
-
-/**
- * A specific {@link DistributionSchedule} that places entries in round-robin
- * fashion. For ensemble size 3 and quorum size 2, entry 0 goes to bookies 0 and
- * 1, entry 1 goes to bookies 1 and 2, entry 2 goes to bookies 2 and 0, and so
- * on.
- * 
- */
-class RoundRobinDistributionSchedule implements DistributionSchedule {
-    int quorumSize;
-    int ensembleSize;
-
-    // covered[i] is true if the quorum starting at bookie index i has been
-    // covered by a recovery reply
-    boolean[] covered = null;
-    int numQuorumsUncovered;
-
-    public RoundRobinDistributionSchedule(int quorumSize, int ensembleSize) {
-        this.quorumSize = quorumSize;
-        this.ensembleSize = ensembleSize;
-    }
-
-    @Override
-    public int getBookieIndex(long entryId, int replicaIndex) {
-        return (int) ((entryId + replicaIndex) % ensembleSize);
-    }
-
-    @Override
-    public int getReplicaIndex(long entryId, int bookieIndex) {
-        // NOTE: Java's % operator returns the sign of the dividend and is hence
-        // not always positive
-
-        int replicaIndex = MathUtils.signSafeMod(bookieIndex - entryId, ensembleSize);
-
-        return replicaIndex < quorumSize ? replicaIndex : -1;
-
-    }
-
-    public synchronized boolean canProceedWithRecovery(int bookieIndexHeardFrom) {
-        if (covered == null) {
-            covered = new boolean[ensembleSize];
-            numQuorumsUncovered = ensembleSize;
-        }
-
-        if (numQuorumsUncovered == 0) {
-            return true;
-        }
-
-        for (int i = 0; i < quorumSize; i++) {
-            int quorumStartIndex = MathUtils.signSafeMod(bookieIndexHeardFrom - i, ensembleSize);
-            if (!covered[quorumStartIndex]) {
-                covered[quorumStartIndex] = true;
-                numQuorumsUncovered--;
-
-                if (numQuorumsUncovered == 0) {
-                    return true;
-                }
-            }
-
-        }
-
-        return false;
-
-    }
-
-}
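
As a quick illustration of the placement rules documented in the class above, the following standalone sketch (written for this note, not part of the removed tree) reproduces the round-robin mapping and the sign-safe modulus; the inlined signSafeMod mirrors what org.apache.bookkeeper.util.MathUtils is assumed to do, since that class is not shown in this commit.

    // Standalone sketch of the round-robin placement described above.
    // Assumes ensembleSize = 3 and quorumSize = 2, matching the Javadoc example.
    public class RoundRobinPlacementDemo {
        static final int ENSEMBLE_SIZE = 3;
        static final int QUORUM_SIZE = 2;

        // Assumed behaviour of MathUtils.signSafeMod: result is always in [0, modulus).
        static int signSafeMod(long dividend, int modulus) {
            int r = (int) (dividend % modulus);
            return r < 0 ? r + modulus : r;
        }

        // Replica r of entry e lands on bookie (e + r) mod ensembleSize.
        static int bookieIndex(long entryId, int replicaIndex) {
            return (int) ((entryId + replicaIndex) % ENSEMBLE_SIZE);
        }

        // Inverse mapping: which replica of entry e does bookie b hold, or -1 if none.
        static int replicaIndex(long entryId, int bookieIndex) {
            int r = signSafeMod(bookieIndex - entryId, ENSEMBLE_SIZE);
            return r < QUORUM_SIZE ? r : -1;
        }

        public static void main(String[] args) {
            for (long entry = 0; entry < 3; entry++) {
                System.out.printf("entry %d -> bookies %d and %d%n",
                        entry, bookieIndex(entry, 0), bookieIndex(entry, 1));
            }
            // Prints: entry 0 -> bookies 0 and 1, entry 1 -> 1 and 2, entry 2 -> 2 and 0
            System.out.println("bookie 0 holds replica " + replicaIndex(2, 0) + " of entry 2"); // 1
        }
    }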

+ 0 - 85
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/client/SyncCounter.java

@@ -1,85 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.bookkeeper.client;
-
-import java.util.Enumeration;
-
-/**
- * Implements objects to help with the synchronization of asynchronous calls
- * 
- */
-
-class SyncCounter {
-    int i;
-    int rc;
-    int total;
-    Enumeration<LedgerEntry> seq = null;
-    LedgerHandle lh = null;
-
-    synchronized void inc() {
-        i++;
-        total++;
-    }
-
-    synchronized void dec() {
-        i--;
-        notifyAll();
-    }
-
-    synchronized void block(int limit) throws InterruptedException {
-        while (i > limit) {
-            int prev = i;
-            wait();
-            if (i == prev) {
-                break;
-            }
-        }
-    }
-
-    synchronized int total() {
-        return total;
-    }
-
-    void setrc(int rc) {
-        this.rc = rc;
-    }
-
-    int getrc() {
-        return rc;
-    }
-
-    void setSequence(Enumeration<LedgerEntry> seq) {
-        this.seq = seq;
-    }
-
-    Enumeration<LedgerEntry> getSequence() {
-        return seq;
-    }
-
-    void setLh(LedgerHandle lh) {
-        this.lh = lh;
-    }
-
-    LedgerHandle getLh() {
-        return lh;
-    }
-}
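
The inc()/dec()/block() trio above is used by the client code to turn asynchronous calls into synchronous ones: increment before issuing each async operation, decrement in the callback, and block until the outstanding count drops to the limit. A rough, self-contained sketch of that pattern follows; the SimpleCounter below is a simplified stand-in written for this note, not the class being removed.

    // Sketch of the inc()/dec()/block() synchronization pattern.
    public class SyncCounterUsageDemo {
        // Stand-in for SyncCounter; simplified to a plain condition wait.
        static class SimpleCounter {
            private int outstanding;
            synchronized void inc() { outstanding++; }
            synchronized void dec() { outstanding--; notifyAll(); }
            synchronized void block(int limit) throws InterruptedException {
                while (outstanding > limit) {
                    wait();
                }
            }
        }

        public static void main(String[] args) throws InterruptedException {
            final SimpleCounter counter = new SimpleCounter();
            for (int i = 0; i < 10; i++) {
                counter.inc();                  // one inc() per asynchronous call issued
                final int id = i;
                new Thread(() -> {
                    // ... asynchronous work would happen here ...
                    System.out.println("completed op " + id);
                    counter.dec();              // the callback signals completion
                }).start();
            }
            counter.block(0);                   // park until nothing is outstanding
            System.out.println("all ops done");
        }
    }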

+ 0 - 178
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/proto/BookieClient.java

@@ -1,178 +0,0 @@
-package org.apache.bookkeeper.proto;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicLong;
-import org.apache.bookkeeper.client.BKException;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadEntryCallback;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GenericCallback;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback;
-import org.apache.bookkeeper.util.OrderedSafeExecutor;
-import org.apache.log4j.Logger;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBuffers;
-import org.jboss.netty.channel.socket.ClientSocketChannelFactory;
-import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
-
-/**
- * Implements the client-side part of the BookKeeper protocol.
- * 
- */
-public class BookieClient {
-    static final Logger LOG = Logger.getLogger(BookieClient.class);
-
-    // This is global state that should be across all BookieClients
-    AtomicLong totalBytesOutstanding = new AtomicLong();
-
-    OrderedSafeExecutor executor;
-    ClientSocketChannelFactory channelFactory;
-    ConcurrentHashMap<InetSocketAddress, PerChannelBookieClient> channels = new ConcurrentHashMap<InetSocketAddress, PerChannelBookieClient>();
-
-    public BookieClient(ClientSocketChannelFactory channelFactory, OrderedSafeExecutor executor) {
-        this.channelFactory = channelFactory;
-        this.executor = executor;
-    }
-
-    public PerChannelBookieClient lookupClient(InetSocketAddress addr) {
-        PerChannelBookieClient channel = channels.get(addr);
-
-        if (channel == null) {
-            channel = new PerChannelBookieClient(executor, channelFactory, addr, totalBytesOutstanding);
-            PerChannelBookieClient prevChannel = channels.putIfAbsent(addr, channel);
-            if (prevChannel != null) {
-                channel = prevChannel;
-            }
-        }
-
-        return channel;
-    }
-
-    public void addEntry(final InetSocketAddress addr, final long ledgerId, final byte[] masterKey, final long entryId,
-            final ChannelBuffer toSend, final WriteCallback cb, final Object ctx) {
-
-        final PerChannelBookieClient client = lookupClient(addr);
-
-        client.connectIfNeededAndDoOp(new GenericCallback<Void>() {
-            @Override
-            public void operationComplete(int rc, Void result) {
-                if (rc != BKException.Code.OK) {
-                    cb.writeComplete(rc, ledgerId, entryId, addr, ctx);
-                    return;
-                }
-                client.addEntry(ledgerId, masterKey, entryId, toSend, cb, ctx);
-            }
-        });
-    }
-
-    public void readEntry(final InetSocketAddress addr, final long ledgerId, final long entryId,
-            final ReadEntryCallback cb, final Object ctx) {
-
-        final PerChannelBookieClient client = lookupClient(addr);
-
-        client.connectIfNeededAndDoOp(new GenericCallback<Void>() {
-            @Override
-            public void operationComplete(int rc, Void result) {
-
-                if (rc != BKException.Code.OK) {
-                    cb.readEntryComplete(rc, ledgerId, entryId, null, ctx);
-                    return;
-                }
-                client.readEntry(ledgerId, entryId, cb, ctx);
-            }
-        });
-    }
-
-    public void close(){
-        for (PerChannelBookieClient channel: channels.values()){
-            channel.close();
-        }
-    }
-
-    private static class Counter {
-        int i;
-        int total;
-
-        synchronized void inc() {
-            i++;
-            total++;
-        }
-
-        synchronized void dec() {
-            i--;
-            notifyAll();
-        }
-
-        synchronized void wait(int limit) throws InterruptedException {
-            while (i > limit) {
-                wait();
-            }
-        }
-
-        synchronized int total() {
-            return total;
-        }
-    }
-
-    /**
-     * @param args
-     * @throws IOException
-     * @throws NumberFormatException
-     * @throws InterruptedException
-     */
-    public static void main(String[] args) throws NumberFormatException, IOException, InterruptedException {
-        if (args.length != 3) {
-            System.err.println("USAGE: BookieClient bookieHost port ledger#");
-            return;
-        }
-        WriteCallback cb = new WriteCallback() {
-
-            public void writeComplete(int rc, long ledger, long entry, InetSocketAddress addr, Object ctx) {
-                Counter counter = (Counter) ctx;
-                counter.dec();
-                if (rc != 0) {
-                    System.out.println("rc = " + rc + " for " + entry + "@" + ledger);
-                }
-            }
-        };
-        Counter counter = new Counter();
-        byte hello[] = "hello".getBytes();
-        long ledger = Long.parseLong(args[2]);
-        ClientSocketChannelFactory channelFactory = new NioClientSocketChannelFactory(Executors.newCachedThreadPool(), Executors
-                .newCachedThreadPool());
-        OrderedSafeExecutor executor = new OrderedSafeExecutor(1);
-        BookieClient bc = new BookieClient(channelFactory, executor);
-        InetSocketAddress addr = new InetSocketAddress(args[0], Integer.parseInt(args[1]));
-
-        for (int i = 0; i < 100000; i++) {
-            counter.inc();
-            bc.addEntry(addr, ledger, new byte[0], i, ChannelBuffers.wrappedBuffer(hello), cb, counter);
-        }
-        counter.wait(0);
-        System.out.println("Total = " + counter.total());
-        channelFactory.releaseExternalResources();
-        executor.shutdown();
-    }
-}
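
One detail worth calling out from lookupClient above is the create-then-putIfAbsent idiom, which keeps exactly one per-bookie client alive without taking a global lock. A generic sketch of the idiom follows; the class and method names here are placeholders invented for illustration, not part of the removed code.

    import java.net.InetSocketAddress;
    import java.util.concurrent.ConcurrentHashMap;

    // Sketch of the create-then-putIfAbsent caching idiom used by lookupClient.
    public class PerAddressCacheDemo {
        // Placeholder for PerChannelBookieClient.
        static class Connection {
            final InetSocketAddress addr;
            Connection(InetSocketAddress addr) { this.addr = addr; }
        }

        private final ConcurrentHashMap<InetSocketAddress, Connection> connections =
                new ConcurrentHashMap<InetSocketAddress, Connection>();

        Connection lookup(InetSocketAddress addr) {
            Connection conn = connections.get(addr);
            if (conn == null) {
                conn = new Connection(addr);
                // Another thread may have raced us; keep whichever instance won.
                Connection previous = connections.putIfAbsent(addr, conn);
                if (previous != null) {
                    conn = previous;
                }
            }
            return conn;
        }

        public static void main(String[] args) {
            PerAddressCacheDemo demo = new PerAddressCacheDemo();
            InetSocketAddress addr = new InetSocketAddress("localhost", 3181);
            // Both lookups return the same cached instance.
            System.out.println(demo.lookup(addr) == demo.lookup(addr)); // true
        }
    }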

+ 0 - 75
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/proto/BookieProtocol.java

@@ -1,75 +0,0 @@
-package org.apache.bookkeeper.proto;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-/**
- * The packets of the Bookie protocol all have a 4-byte integer indicating the
- * type of request or response at the very beginning of the packet followed by a
- * payload.
- * 
- */
-public interface BookieProtocol {
-    /**
-     * The Add entry request payload will be a ledger entry exactly as it should
-     * be logged. The response payload will be a 4-byte integer that has the
-     * error code followed by the 8-byte ledger number and 8-byte entry number
-     * of the entry written.
-     */
-    public static final int ADDENTRY = 1;
-    /**
-     * The Read entry request payload will be the ledger number and entry number
-     * to read. (The ledger number is an 8-byte integer and the entry number is
-     * a 8-byte integer.) The response payload will be a 4-byte integer
-     * representing an error code and a ledger entry if the error code is EOK,
-     * otherwise it will be the 8-byte ledger number and the 4-byte entry number
-     * requested. (Note that the first sixteen bytes of the entry happen to be
-     * the ledger number and entry number as well.)
-     */
-    public static final int READENTRY = 2;
-
-    /**
-     * The error code that indicates success
-     */
-    public static final int EOK = 0;
-    /**
-     * The error code that indicates that the ledger does not exist
-     */
-    public static final int ENOLEDGER = 1;
-    /**
-     * The error code that indicates that the requested entry does not exist
-     */
-    public static final int ENOENTRY = 2;
-    /**
-     * The error code that indicates an invalid request type
-     */
-    public static final int EBADREQ = 100;
-    /**
-     * General error occurred at the server
-     */
-    public static final int EIO = 101;
-
-    /**
-     * Unauthorized access to ledger
-     */
-    public static final int EUA = 102;
-
-}
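
To make the framing above concrete, here is a small sketch (written for this note, not taken from the tree) that lays out a READENTRY request the way the comments and the client code describe it: a 4-byte length prefix that excludes itself, a 4-byte type, then the 8-byte ledger id and 8-byte entry id.

    import java.nio.ByteBuffer;

    // Sketch of the on-wire layout of a READENTRY request.
    public class ReadEntryFrameDemo {
        static final int READENTRY = 2; // mirrors BookieProtocol.READENTRY

        static ByteBuffer buildReadRequest(long ledgerId, long entryId) {
            int payloadLen = 4 + 8 + 8;          // type + ledgerId + entryId
            ByteBuffer frame = ByteBuffer.allocate(4 + payloadLen);
            frame.putInt(payloadLen);            // length prefix (excludes itself)
            frame.putInt(READENTRY);             // request type
            frame.putLong(ledgerId);
            frame.putLong(entryId);
            frame.flip();
            return frame;
        }

        public static void main(String[] args) {
            ByteBuffer frame = buildReadRequest(7L, 42L);
            System.out.println("frame size = " + frame.remaining()); // 24
            System.out.println("length     = " + frame.getInt());    // 20
            System.out.println("type       = " + frame.getInt());    // 2
            System.out.println("ledgerId   = " + frame.getLong());   // 7
            System.out.println("entryId    = " + frame.getLong());   // 42
        }
    }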

+ 0 - 209
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/proto/BookieServer.java

@@ -1,209 +0,0 @@
-package org.apache.bookkeeper.proto;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
-
-import org.apache.bookkeeper.bookie.Bookie;
-import org.apache.bookkeeper.bookie.BookieException;
-import org.apache.bookkeeper.proto.NIOServerFactory.Cnxn;
-import org.apache.log4j.Logger;
-
-/**
- * Implements the server-side part of the BookKeeper protocol.
- * 
- */
-public class BookieServer implements NIOServerFactory.PacketProcessor, BookkeeperInternalCallbacks.WriteCallback {
-    int port;
-    NIOServerFactory nioServerFactory;
-    private volatile boolean running = false;
-    Bookie bookie;
-    static Logger LOG = Logger.getLogger(BookieServer.class);
-
-    public BookieServer(int port, String zkServers, File journalDirectory, File ledgerDirectories[]) throws IOException {
-        this.port = port;
-        this.bookie = new Bookie(port, zkServers, journalDirectory, ledgerDirectories);
-    }
-
-    public void start() throws IOException {
-        nioServerFactory = new NIOServerFactory(port, this);
-        running = true;
-    }
-
-    public void shutdown() throws InterruptedException {
-        running = false;
-        nioServerFactory.shutdown();
-        bookie.shutdown();
-    }
-
-    public boolean isRunning(){
-        return bookie.isRunning() && nioServerFactory.isRunning() && running;
-    }
-
-    public void join() throws InterruptedException {
-        nioServerFactory.join();
-    }
-
-    /**
-     * @param args
-     * @throws IOException
-     * @throws InterruptedException
-     */
-    public static void main(String[] args) throws IOException, InterruptedException {
-        if (args.length < 4) {
-            System.err.println("USAGE: BookieServer port zkServers journalDirectory ledgerDirectory [ledgerDirectory]*");
-            return;
-        }
-        int port = Integer.parseInt(args[0]);
-        String zkServers = args[1];
-        File journalDirectory = new File(args[2]);
-        File ledgerDirectory[] = new File[args.length - 3];
-        StringBuilder sb = new StringBuilder();
-        for (int i = 0; i < ledgerDirectory.length; i++) {
-            ledgerDirectory[i] = new File(args[i + 3]);
-            if (i != 0) {
-                sb.append(',');
-            }
-            sb.append(ledgerDirectory[i]);
-        }
-        String hello = String.format(
-                "Hello, I'm your bookie, listening on port %1$s. ZKServers are on %2$s. Journals are in %3$s. Ledgers are stored in %4$s.",
-                port, zkServers, journalDirectory, sb);
-        LOG.info(hello);
-        BookieServer bs = new BookieServer(port, zkServers, journalDirectory, ledgerDirectory);
-        bs.start();
-        bs.join();
-    }
-
-    public void processPacket(ByteBuffer packet, Cnxn src) {
-        int type = packet.getInt();
-        switch (type) {
-        case BookieProtocol.ADDENTRY:
-            try {
-                byte[] masterKey = new byte[20];
-                packet.get(masterKey, 0, 20);
-                // LOG.debug("Master key: " + new String(masterKey));
-                bookie.addEntry(packet.slice(), this, src, masterKey);
-            } catch (IOException e) {
-                ByteBuffer bb = packet.duplicate();
-
-                long ledgerId = bb.getLong();
-                long entryId = bb.getLong();
-                LOG.error("Error writing " + entryId + "@" + ledgerId, e);
-                ByteBuffer eio = ByteBuffer.allocate(8 + 16);
-                eio.putInt(type);
-                eio.putInt(BookieProtocol.EIO);
-                eio.putLong(ledgerId);
-                eio.putLong(entryId);
-                eio.flip();
-                src.sendResponse(new ByteBuffer[] { eio });
-            } catch (BookieException e) {
-                ByteBuffer bb = packet.duplicate();
-                long ledgerId = bb.getLong();
-                long entryId = bb.getLong();
-
-                LOG.error("Unauthorized access to ledger " + ledgerId);
-
-                ByteBuffer eio = ByteBuffer.allocate(8 + 16);
-                eio.putInt(type);
-                eio.putInt(BookieProtocol.EUA);
-                eio.putLong(ledgerId);
-                eio.putLong(entryId);
-                eio.flip();
-                src.sendResponse(new ByteBuffer[] { eio });
-            }
-            break;
-        case BookieProtocol.READENTRY:
-            ByteBuffer[] rsp = new ByteBuffer[2];
-            ByteBuffer rc = ByteBuffer.allocate(8 + 8 + 8);
-            rsp[0] = rc;
-            rc.putInt(type);
-
-            long ledgerId = packet.getLong();
-            long entryId = packet.getLong();
-            LOG.debug("Received new read request: " + ledgerId + ", " + entryId);
-            try {
-                rsp[1] = bookie.readEntry(ledgerId, entryId);
-                LOG.debug("##### Read entry ##### " + rsp[1].remaining());
-                rc.putInt(BookieProtocol.EOK);
-            } catch (Bookie.NoLedgerException e) {
-                if (LOG.isTraceEnabled()) {
-                    LOG.error("Error reading " + entryId + "@" + ledgerId, e);
-                }
-                rc.putInt(BookieProtocol.ENOLEDGER);
-            } catch (Bookie.NoEntryException e) {
-                if (LOG.isTraceEnabled()) {
-                    LOG.error("Error reading " + entryId + "@" + ledgerId, e);
-                }
-                rc.putInt(BookieProtocol.ENOENTRY);
-            } catch (IOException e) {
-                if (LOG.isTraceEnabled()) {
-                    LOG.error("Error reading " + entryId + "@" + ledgerId, e);
-                }
-                rc.putInt(BookieProtocol.EIO);
-            }
-            rc.putLong(ledgerId);
-            rc.putLong(entryId);
-            rc.flip();
-            if (LOG.isTraceEnabled()) {
-                int rcCode = rc.getInt();
-                rc.rewind();
-                LOG.trace("Read entry rc = " + rcCode + " for " + entryId + "@" + ledgerId);
-            }
-            if (rsp[1] == null) {
-                // We haven't filled in entry data, so we have to send back
-                // the ledger and entry ids here
-                rsp[1] = ByteBuffer.allocate(16);
-                rsp[1].putLong(ledgerId);
-                rsp[1].putLong(entryId);
-                rsp[1].flip();
-            }
-            LOG.debug("Sending response for: " + entryId + ", " + new String(rsp[1].array()));
-            src.sendResponse(rsp);
-            break;
-        default:
-            ByteBuffer badType = ByteBuffer.allocate(8);
-            badType.putInt(type);
-            badType.putInt(BookieProtocol.EBADREQ);
-            badType.flip();
-            src.sendResponse(new ByteBuffer[] { badType });
-        }
-    }
-
-    public void writeComplete(int rc, long ledgerId, long entryId, InetSocketAddress addr, Object ctx) {
-        Cnxn src = (Cnxn) ctx;
-        ByteBuffer bb = ByteBuffer.allocate(24);
-        bb.putInt(BookieProtocol.ADDENTRY);
-        bb.putInt(rc);
-        bb.putLong(ledgerId);
-        bb.putLong(entryId);
-        bb.flip();
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Add entry rc = " + rc + " for " + entryId + "@" + ledgerId);
-        }
-        src.sendResponse(new ByteBuffer[] { bb });
-    }
-    
-}

+ 0 - 57
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/proto/BookkeeperInternalCallbacks.java

@@ -1,57 +0,0 @@
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-package org.apache.bookkeeper.proto;
-
-import java.net.InetSocketAddress;
-import org.jboss.netty.buffer.ChannelBuffer;
-
-/**
- * Declaration of callback interfaces used in the BookKeeper client library but
- * not exposed to the client application.
- */
-
-public class BookkeeperInternalCallbacks {
-    /**
-     * Callback for calls from BookieClient objects. Such calls are for replies
-     * to write operations (operations to add an entry to a ledger).
-     * 
-     */
-
-    public interface WriteCallback {
-        void writeComplete(int rc, long ledgerId, long entryId, InetSocketAddress addr, Object ctx);
-    }
-
-    public interface GenericCallback<T> {
-        void operationComplete(int rc, T result);
-    }
-    
-    /**
-     * Declaration of a callback interface for calls from BookieClient objects.
-     * Such calls are for replies to read operations (operations to read an entry
-     * from a ledger).
-     * 
-     */
-
-    public interface ReadEntryCallback {
-        void readEntryComplete(int rc, long ledgerId, long entryId, ChannelBuffer buffer, Object ctx);
-    }
-}

+ 0 - 521
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/proto/NIOServerFactory.java

@@ -1,521 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.bookkeeper.proto;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
-import java.nio.channels.CancelledKeyException;
-import java.nio.channels.Channel;
-import java.nio.channels.SelectionKey;
-import java.nio.channels.Selector;
-import java.nio.channels.ServerSocketChannel;
-import java.nio.channels.SocketChannel;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Set;
-import java.util.concurrent.LinkedBlockingQueue;
-
-import org.apache.log4j.Logger;
-
-/**
- * This class handles communication with clients using NIO. There is one Cnxn
- * per client, but only one thread doing the communication.
- */
-public class NIOServerFactory extends Thread {
-
-    public interface PacketProcessor {
-        public void processPacket(ByteBuffer packet, Cnxn src);
-    }
-    
-    ServerStats stats = new ServerStats();
-
-    Logger LOG = Logger.getLogger(NIOServerFactory.class);
-
-    ServerSocketChannel ss;
-
-    Selector selector = Selector.open();
-
-    /**
-     * We use this buffer to do efficient socket I/O. Since there is a single
-     * sender thread per NIOServerCnxn instance, we can use a member variable to
-     * only allocate it once.
-     */
-    ByteBuffer directBuffer = ByteBuffer.allocateDirect(64 * 1024);
-
-    HashSet<Cnxn> cnxns = new HashSet<Cnxn>();
-
-    int outstandingLimit = 2000;
-
-    PacketProcessor processor;
-
-    long minLatency = 99999999;
-
-    public NIOServerFactory(int port, PacketProcessor processor) throws IOException {
-        super("NIOServerFactory");
-        setDaemon(true);
-        this.processor = processor;
-        this.ss = ServerSocketChannel.open();
-        ss.socket().bind(new InetSocketAddress(port));
-        ss.configureBlocking(false);
-        ss.register(selector, SelectionKey.OP_ACCEPT);
-        start();
-    }
-
-    public InetSocketAddress getLocalAddress() {
-        return (InetSocketAddress) ss.socket().getLocalSocketAddress();
-    }
-
-    private void addCnxn(Cnxn cnxn) {
-        synchronized (cnxns) {
-            cnxns.add(cnxn);
-        }
-    }
-
-    public boolean isRunning() {
-        return !ss.socket().isClosed();
-    }
-    
-    @Override
-    public void run() {
-        while (!ss.socket().isClosed()) {
-            try {
-                selector.select(1000);
-                Set<SelectionKey> selected;
-                synchronized (this) {
-                    selected = selector.selectedKeys();
-                }
-                ArrayList<SelectionKey> selectedList = new ArrayList<SelectionKey>(selected);
-                Collections.shuffle(selectedList);
-                for (SelectionKey k : selectedList) {
-                    if ((k.readyOps() & SelectionKey.OP_ACCEPT) != 0) {
-                        SocketChannel sc = ((ServerSocketChannel) k.channel()).accept();
-                        sc.configureBlocking(false);
-                        SelectionKey sk = sc.register(selector, SelectionKey.OP_READ);
-                        Cnxn cnxn = new Cnxn(sc, sk);
-                        sk.attach(cnxn);
-                        addCnxn(cnxn);
-                    } else if ((k.readyOps() & (SelectionKey.OP_READ | SelectionKey.OP_WRITE)) != 0) {
-                        Cnxn c = (Cnxn) k.attachment();
-                        c.doIO(k);
-                    }
-                }
-                selected.clear();
-            } catch (Exception e) {
-                LOG.warn(e);
-            }
-        }
-        LOG.debug("NIOServerCnxn factory exited loop.");
-        clear();
-        // System.exit(0);
-    }
-
-    /**
-     * clear all the connections in the selector
-     * 
-     */
-    synchronized public void clear() {
-        selector.wakeup();
-        synchronized (cnxns) {
-            // got to clear all the connections that we have in the selector
-            for (Iterator<Cnxn> it = cnxns.iterator(); it.hasNext();) {
-                Cnxn cnxn = it.next();
-                it.remove();
-                try {
-                    cnxn.close();
-                } catch (Exception e) {
-                    // Do nothing.
-                }
-            }
-        }
-
-    }
-
-    public void shutdown() {
-        try {
-            ss.close();
-            clear();
-            this.interrupt();
-            this.join();
-        } catch (InterruptedException e) {
-            LOG.warn("Interrupted", e);
-        } catch (Exception e) {
-            LOG.error("Unexpected exception", e);
-        }
-    }
-
-    /**
- * The buffer will cause the connection to be closed when we do a send.
-     */
-    static final ByteBuffer closeConn = ByteBuffer.allocate(0);
-
-    public class Cnxn {
-
-        private SocketChannel sock;
-
-        private SelectionKey sk;
-
-        boolean initialized;
-
-        ByteBuffer lenBuffer = ByteBuffer.allocate(4);
-
-        ByteBuffer incomingBuffer = lenBuffer;
-
-        LinkedBlockingQueue<ByteBuffer> outgoingBuffers = new LinkedBlockingQueue<ByteBuffer>();
-
-        int sessionTimeout;
-
-        int packetsSent;
-
-        int packetsReceived;
-
-        void doIO(SelectionKey k) throws InterruptedException {
-            try {
-                if (sock == null) {
-                    return;
-                }
-                if (k.isReadable()) {
-                    int rc = sock.read(incomingBuffer);
-                    if (rc < 0) {
-                        throw new IOException("Read error");
-                    }
-                    if (incomingBuffer.remaining() == 0) {
-                        incomingBuffer.flip();
-                        if (incomingBuffer == lenBuffer) {
-                            readLength(k);
-                        } else {
-                            cnxnStats.packetsReceived++;
-                            stats.incrementPacketsReceived();
-                            try {
-                                readRequest();
-                            } finally {
-                                lenBuffer.clear();
-                                incomingBuffer = lenBuffer;
-                            }
-                        }
-                    }
-                }
-                if (k.isWritable()) {
-                    if (outgoingBuffers.size() > 0) {
-                        // ZooLog.logTraceMessage(LOG,
-                        // ZooLog.CLIENT_DATA_PACKET_TRACE_MASK,
-                        // "sk " + k + " is valid: " +
-                        // k.isValid());
-
-                        /*
-                         * This is going to reset the buffer position to 0 and
-                         * the limit to the size of the buffer, so that we can
-                         * fill it with data from the non-direct buffers that we
-                         * need to send.
-                         */
-                        directBuffer.clear();
-
-                        for (ByteBuffer b : outgoingBuffers) {
-                            if (directBuffer.remaining() < b.remaining()) {
-                                /*
-                                 * When we call put later, if the directBuffer
-                                 * is too small to hold everything, nothing will
-                                 * be copied, so we've got to slice the buffer
-                                 * if it's too big.
-                                 */
-                                b = (ByteBuffer) b.slice().limit(directBuffer.remaining());
-                            }
-                            /*
-                             * put() is going to modify the positions of both
-                             * buffers, but we don't want to change the position
-                             * of the source buffers (we'll do that after the
-                             * send, if needed), so we save and reset the
-                             * position after the copy
-                             */
-                            int p = b.position();
-                            directBuffer.put(b);
-                            b.position(p);
-                            if (directBuffer.remaining() == 0) {
-                                break;
-                            }
-                        }
-                        /*
-                         * Do the flip: limit becomes position, position gets
-                         * set to 0. This sets us up for the write.
-                         */
-                        directBuffer.flip();
-
-                        int sent = sock.write(directBuffer);
-                        ByteBuffer bb;
-
-                        // Remove the buffers that we have sent
-                        while (outgoingBuffers.size() > 0) {
-                            bb = outgoingBuffers.peek();
-                            if (bb == closeConn) {
-                                throw new IOException("closing");
-                            }
-                            int left = bb.remaining() - sent;
-                            if (left > 0) {
-                                /*
-                                 * We only partially sent this buffer, so we
-                                 * update the position and exit the loop.
-                                 */
-                                bb.position(bb.position() + sent);
-                                break;
-                            }
-                            cnxnStats.packetsSent++;
-                            /* We've sent the whole buffer, so drop the buffer */
-                            sent -= bb.remaining();
-                            ServerStats.getInstance().incrementPacketsSent();
-                            outgoingBuffers.remove();
-                        }
-                        // ZooLog.logTraceMessage(LOG,
-                        // ZooLog.CLIENT_DATA_PACKET_TRACE_MASK, "after send,
-                        // outgoingBuffers.size() = " + outgoingBuffers.size());
-                    }
-                    synchronized (this) {
-                        if (outgoingBuffers.size() == 0) {
-                            if (!initialized && (sk.interestOps() & SelectionKey.OP_READ) == 0) {
-                                throw new IOException("Responded to info probe");
-                            }
-                            sk.interestOps(sk.interestOps() & (~SelectionKey.OP_WRITE));
-                        } else {
-                            sk.interestOps(sk.interestOps() | SelectionKey.OP_WRITE);
-                        }
-                    }
-                }
-            } catch (CancelledKeyException e) {
-                close();
-            } catch (IOException e) {
-                // LOG.error("FIXMSG",e);
-                close();
-            }
-        }
-
-        private void readRequest() throws IOException {
-            incomingBuffer = incomingBuffer.slice();
-            processor.processPacket(incomingBuffer, this);
-        }
-
-        public void disableRecv() {
-            sk.interestOps(sk.interestOps() & (~SelectionKey.OP_READ));
-        }
-
-        public void enableRecv() {
-            if (sk.isValid()) {
-                int interest = sk.interestOps();
-                if ((interest & SelectionKey.OP_READ) == 0) {
-                    sk.interestOps(interest | SelectionKey.OP_READ);
-                }
-            }
-        }
-
-        private void readLength(SelectionKey k) throws IOException {
-            // Read the length, now get the buffer
-            int len = lenBuffer.getInt();
-            if (len < 0 || len > 0xfffff) {
-                throw new IOException("Len error " + len);
-            }
-            incomingBuffer = ByteBuffer.allocate(len);
-        }
-
-        /**
-         * The number of requests that have been submitted but not yet responded
-         * to.
-         */
-        int outstandingRequests;
-
-        /*
-         * (non-Javadoc)
-         * 
-         * @see org.apache.zookeeper.server.ServerCnxnIface#getSessionTimeout()
-         */
-        public int getSessionTimeout() {
-            return sessionTimeout;
-        }
-
-        String peerName;
-
-        public Cnxn(SocketChannel sock, SelectionKey sk) throws IOException {
-            this.sock = sock;
-            this.sk = sk;
-            sock.socket().setTcpNoDelay(true);
-            sock.socket().setSoLinger(true, 2);
-            sk.interestOps(SelectionKey.OP_READ);
-            if (LOG.isTraceEnabled()) {
-                peerName = sock.socket().toString();
-            }
-
-            lenBuffer.clear();
-            incomingBuffer = lenBuffer;
-        }
-
-        @Override
-        public String toString() {
-            return "NIOServerCnxn object with sock = " + sock + " and sk = " + sk;
-        }
-
-        boolean closed;
-
-        /*
-         * (non-Javadoc)
-         * 
-         * @see org.apache.zookeeper.server.ServerCnxnIface#close()
-         */
-        public void close() {
-            if (closed) {
-                return;
-            }
-            closed = true;
-            synchronized (cnxns) {
-                cnxns.remove(this);
-            }
-            LOG.debug("close  NIOServerCnxn: " + sock);
-            try {
-                /*
-                 * The following sequence of code is stupid! You would think
-                 * that only sock.close() is needed, but alas, it doesn't work
-                 * that way. If you just do sock.close() there are cases where
-                 * the socket doesn't actually close...
-                 */
-                sock.socket().shutdownOutput();
-            } catch (IOException e) {
-                // This is a relatively common exception that we can't avoid
-            }
-            try {
-                sock.socket().shutdownInput();
-            } catch (IOException e) {
-            }
-            try {
-                sock.socket().close();
-            } catch (IOException e) {
-                LOG.error("FIXMSG", e);
-            }
-            try {
-                sock.close();
-                // XXX The next line doesn't seem to be needed, but some posts
-                // to forums suggest that it is needed. Keep in mind if errors
-                // in this section arise.
-                // factory.selector.wakeup();
-            } catch (IOException e) {
-                LOG.error("FIXMSG", e);
-            }
-            sock = null;
-            if (sk != null) {
-                try {
-                    // need to cancel this selection key from the selector
-                    sk.cancel();
-                } catch (Exception e) {
-                }
-            }
-        }
-
-        private void makeWritable(SelectionKey sk) {
-            try {
-                selector.wakeup();
-                if (sk.isValid()) {
-                    sk.interestOps(sk.interestOps() | SelectionKey.OP_WRITE);
-                }
-            } catch (RuntimeException e) {
-                LOG.error("Problem setting writable", e);
-                throw e;
-            }
-        }
-
-        private void sendBuffers(ByteBuffer bb[]) {
-            ByteBuffer len = ByteBuffer.allocate(4);
-            int total = 0;
-            for (int i = 0; i < bb.length; i++) {
-                if (bb[i] != null) {
-                    total += bb[i].remaining();
-                }
-            }
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Sending response of size " + total + " to " + peerName);
-            }
-            len.putInt(total);
-            len.flip();
-            outgoingBuffers.add(len);
-            for (int i = 0; i < bb.length; i++) {
-                if (bb[i] != null) {
-                    outgoingBuffers.add(bb[i]);
-                }
-            }
-            makeWritable(sk);
-        }
-
-        synchronized public void sendResponse(ByteBuffer bb[]) {
-            if (closed) {
-                return;
-            }
-            sendBuffers(bb);
-            synchronized (NIOServerFactory.this) {
-                outstandingRequests--;
-                // check throttling
-                if (outstandingRequests < outstandingLimit) {
-                    sk.selector().wakeup();
-                    enableRecv();
-                }
-            }
-        }
-
-        public InetSocketAddress getRemoteAddress() {
-            return (InetSocketAddress) sock.socket().getRemoteSocketAddress();
-        }
-
-        private class CnxnStats {
-            long packetsReceived;
-
-            long packetsSent;
-
-            /**
-             * The number of requests that have been submitted but not yet
-             * responded to.
-             */
-            public long getOutstandingRequests() {
-                return outstandingRequests;
-            }
-
-            public long getPacketsReceived() {
-                return packetsReceived;
-            }
-
-            public long getPacketsSent() {
-                return packetsSent;
-            }
-
-            @Override
-            public String toString() {
-                StringBuilder sb = new StringBuilder();
-                Channel channel = sk.channel();
-                if (channel instanceof SocketChannel) {
-                    sb.append(" ").append(((SocketChannel) channel).socket().getRemoteSocketAddress()).append("[")
-                            .append(Integer.toHexString(sk.interestOps())).append("](queued=").append(
-                                    getOutstandingRequests()).append(",recved=").append(getPacketsReceived()).append(
-                                    ",sent=").append(getPacketsSent()).append(")\n");
-                }
-                return sb.toString();
-            }
-        }
-
-        private CnxnStats cnxnStats = new CnxnStats();
-
-        public CnxnStats getStats() {
-            return cnxnStats;
-        }
-    }
-}
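
The read path in Cnxn.doIO above is a two-phase, length-prefixed read: fill the 4-byte lenBuffer first, then allocate a payload buffer of exactly that size and fill it before handing it to the PacketProcessor. The self-contained sketch below replays that buffer swap with a pre-built ByteBuffer standing in for the socket; it is an illustration, not code from this tree.

    import java.nio.ByteBuffer;

    // Sketch of the two-phase, length-prefixed read performed by Cnxn.doIO.
    public class LengthPrefixedReadDemo {
        public static void main(String[] args) {
            // A "wire" holding one frame: 4-byte length followed by the payload.
            byte[] payload = "hello bookie".getBytes();
            ByteBuffer wire = ByteBuffer.allocate(4 + payload.length);
            wire.putInt(payload.length).put(payload);
            wire.flip();

            // Phase 1: read the 4-byte length prefix.
            ByteBuffer lenBuffer = ByteBuffer.allocate(4);
            ByteBuffer incomingBuffer = lenBuffer;
            while (incomingBuffer.hasRemaining()) {
                incomingBuffer.put(wire.get());   // stands in for sock.read(incomingBuffer)
            }
            incomingBuffer.flip();
            int len = incomingBuffer.getInt();

            // Phase 2: swap to a payload-sized buffer and fill it.
            incomingBuffer = ByteBuffer.allocate(len);
            while (incomingBuffer.hasRemaining()) {
                incomingBuffer.put(wire.get());
            }
            incomingBuffer.flip();

            byte[] body = new byte[incomingBuffer.remaining()];
            incomingBuffer.get(body);
            System.out.println("read " + len + " bytes: " + new String(body));
        }
    }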

+ 0 - 573
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/proto/PerChannelBookieClient.java

@@ -1,573 +0,0 @@
-package org.apache.bookkeeper.proto;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayDeque;
-import java.util.Queue;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.bookkeeper.client.BKException;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GenericCallback;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadEntryCallback;
-import org.apache.bookkeeper.util.OrderedSafeExecutor;
-import org.apache.bookkeeper.util.SafeRunnable;
-import org.apache.log4j.Logger;
-import org.jboss.netty.bootstrap.ClientBootstrap;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBuffers;
-import org.jboss.netty.channel.Channel;
-import org.jboss.netty.channel.ChannelFactory;
-import org.jboss.netty.channel.ChannelFuture;
-import org.jboss.netty.channel.ChannelFutureListener;
-import org.jboss.netty.channel.ChannelHandlerContext;
-import org.jboss.netty.channel.ChannelPipeline;
-import org.jboss.netty.channel.ChannelPipelineCoverage;
-import org.jboss.netty.channel.ChannelPipelineFactory;
-import org.jboss.netty.channel.ChannelStateEvent;
-import org.jboss.netty.channel.Channels;
-import org.jboss.netty.channel.ExceptionEvent;
-import org.jboss.netty.channel.MessageEvent;
-import org.jboss.netty.channel.SimpleChannelHandler;
-import org.jboss.netty.channel.socket.ClientSocketChannelFactory;
-import org.jboss.netty.handler.codec.frame.CorruptedFrameException;
-import org.jboss.netty.handler.codec.frame.LengthFieldBasedFrameDecoder;
-import org.jboss.netty.handler.codec.frame.TooLongFrameException;
-
-/**
- * This class manages all details of connection to a particular bookie. It also
- * has reconnect logic if a connection to a bookie fails.
- * 
- */
-
-@ChannelPipelineCoverage("one")
-public class PerChannelBookieClient extends SimpleChannelHandler implements ChannelPipelineFactory {
-
-    static final Logger LOG = Logger.getLogger(PerChannelBookieClient.class);
-
-    static final long maxMemory = Runtime.getRuntime().maxMemory() / 5;
-    public static int MAX_FRAME_LENGTH = 2 * 1024 * 1024; // 2M
-
-    InetSocketAddress addr;
-    boolean connected = false;
-    Semaphore opCounterSem = new Semaphore(2000);
-    AtomicLong totalBytesOutstanding;
-    ClientSocketChannelFactory channelFactory;
-    OrderedSafeExecutor executor;
-
-    ConcurrentHashMap<CompletionKey, AddCompletion> addCompletions = new ConcurrentHashMap<CompletionKey, AddCompletion>();
-    ConcurrentHashMap<CompletionKey, ReadCompletion> readCompletions = new ConcurrentHashMap<CompletionKey, ReadCompletion>();
-
-    /**
-     * The following member variables do not need to be concurrent or volatile
-     * because they are always updated under a lock.
-     */
-    Queue<GenericCallback<Void>> pendingOps = new ArrayDeque<GenericCallback<Void>>();
-    boolean connectionAttemptInProgress;
-    Channel channel = null;
-
-    public PerChannelBookieClient(OrderedSafeExecutor executor, ClientSocketChannelFactory channelFactory,
-            InetSocketAddress addr, AtomicLong totalBytesOutstanding) {
-        this.addr = addr;
-        this.executor = executor;
-        this.totalBytesOutstanding = totalBytesOutstanding;
-        this.channelFactory = channelFactory;
-        connect(channelFactory);
-    }
-
-    void connect(ChannelFactory channelFactory) {
-
-        if (LOG.isDebugEnabled())
-            LOG.debug("Connecting to bookie: " + addr);
-
-        // Set up the ClientBootStrap so we can create a new Channel connection
-        // to the bookie.
-        ClientBootstrap bootstrap = new ClientBootstrap(channelFactory);
-        bootstrap.setPipelineFactory(this);
-        bootstrap.setOption("tcpNoDelay", true);
-        bootstrap.setOption("keepAlive", true);
-
-        // Start the connection attempt to the input server host.
-        connectionAttemptInProgress = true;
-
-        ChannelFuture future = bootstrap.connect(addr);
-
-        future.addListener(new ChannelFutureListener() {
-            @Override
-            public void operationComplete(ChannelFuture future) throws Exception {
-                int rc;
-                Queue<GenericCallback<Void>> oldPendingOps;
-
-                synchronized (PerChannelBookieClient.this) {
-
-                    if (future.isSuccess()) {
-                        LOG.info("Successfully connected to bookie: " + addr);
-                        rc = BKException.Code.OK;
-                        channel = future.getChannel();
-                        connected = true;
-                    } else {
-                        LOG.error("Could not connect to bookie: " + addr);
-                        rc = BKException.Code.BookieHandleNotAvailableException;
-                        channel = null;
-                        connected = false;
-                    }
-
-                    connectionAttemptInProgress = false;
-                    PerChannelBookieClient.this.channel = channel;
-
-                    // trick to not do operations under the lock, take the list
-                    // of pending ops and assign it to a new variable, while
-                    // emptying the pending ops by just assigning it to a new
-                    // list
-                    oldPendingOps = pendingOps;
-                    pendingOps = new ArrayDeque<GenericCallback<Void>>();
-                }
-
-                for (GenericCallback<Void> pendingOp : oldPendingOps) {
-                    pendingOp.operationComplete(rc, null);
-                }
-
-            }
-        });
-    }
-
-    void connectIfNeededAndDoOp(GenericCallback<Void> op) {
-        boolean doOpNow;
-
-        // common case without lock first
-        if (channel != null && connected) {
-            doOpNow = true;
-        } else {
-
-            synchronized (this) {
-                // check again under lock
-                if (channel != null && connected) {
-                    doOpNow = true;
-                } else {
-
-                    // if reached here, channel is either null (first connection
-                    // attempt), or the channel is disconnected
-                    doOpNow = false;
-
-                    // connection attempt is still in progress, queue up this
-                    // op. Op will be executed when connection attempt either
-                    // fails or succeeds
-                    pendingOps.add(op);
-
-                    if (!connectionAttemptInProgress) {
-                        connect(channelFactory);
-                    }
-
-                }
-            }
-        }
-
-        if (doOpNow) {
-            op.operationComplete(BKException.Code.OK, null);
-        }
-
-    }
-
-    /**
-     * This method should be called only after the connection has been checked
-     * via {@link #connectIfNeededAndDoOp(GenericCallback)}.
-     * 
-     * @param ledgerId
-     * @param masterKey
-     * @param entryId
-     * @param lastConfirmed
-     * @param macCode
-     * @param data
-     * @param cb
-     * @param ctx
-     */
-    void addEntry(final long ledgerId, byte[] masterKey, final long entryId, ChannelBuffer toSend, WriteCallback cb,
-            Object ctx) {
-
-        final int entrySize = toSend.readableBytes();
-        
-        // if (totalBytesOutstanding.get() > maxMemory) {
-        // // TODO: how to throttle, throw an exception, or call the callback?
-        // // Maybe this should be done at the layer above?
-        // }
-
-        final CompletionKey completionKey = new CompletionKey(ledgerId, entryId);
-
-        addCompletions.put(completionKey, new AddCompletion(cb, entrySize, ctx));
-
-        int totalHeaderSize = 4 // for the length of the packet
-        + 4 // for the type of request
-        + masterKey.length; // for the master key
-
-        ChannelBuffer header = channel.getConfig().getBufferFactory().getBuffer(totalHeaderSize);
-        header.writeInt(totalHeaderSize - 4 + entrySize);
-        header.writeInt(BookieProtocol.ADDENTRY);
-        header.writeBytes(masterKey);
-
-        ChannelBuffer wrappedBuffer = ChannelBuffers.wrappedBuffer(header, toSend);
-
-        ChannelFuture future = channel.write(wrappedBuffer);
-        future.addListener(new ChannelFutureListener() {
-            @Override
-            public void operationComplete(ChannelFuture future) throws Exception {
-                if (future.isSuccess()) {
-                    if (LOG.isDebugEnabled()) {
-                        LOG.debug("Successfully wrote request for adding entry: " + entryId + " ledger-id: " + ledgerId
-                                + " bookie: " + channel.getRemoteAddress() + " entry length: " + entrySize);
-                    }
-                    // totalBytesOutstanding.addAndGet(entrySize);
-                } else {
-                    errorOutAddKey(completionKey);
-                }
-            }
-        });
-
-    }
-
-    public void readEntry(final long ledgerId, final long entryId, ReadEntryCallback cb, Object ctx) {
-
-        final CompletionKey key = new CompletionKey(ledgerId, entryId);
-        readCompletions.put(key, new ReadCompletion(cb, ctx));
-
-        int totalHeaderSize = 4 // for the length of the packet
-        + 4 // for request type
-        + 8 // for ledgerId
-        + 8; // for entryId
-
-        ChannelBuffer tmpEntry = channel.getConfig().getBufferFactory().getBuffer(totalHeaderSize);
-        tmpEntry.writeInt(totalHeaderSize - 4);
-        tmpEntry.writeInt(BookieProtocol.READENTRY);
-        tmpEntry.writeLong(ledgerId);
-        tmpEntry.writeLong(entryId);
-
-        ChannelFuture future = channel.write(tmpEntry);
-        future.addListener(new ChannelFutureListener() {
-            @Override
-            public void operationComplete(ChannelFuture future) throws Exception {
-                if (future.isSuccess()) {
-                    if (LOG.isDebugEnabled()) {
-                        LOG.debug("Successfully wrote request for reading entry: " + entryId + " ledger-id: "
-                                + ledgerId + " bookie: " + channel.getRemoteAddress());
-                    }
-                } else {
-                    errorOutReadKey(key);
-                }
-            }
-        });
-
-    }
-
-    public void close() {
-        if (channel != null) {
-            channel.close();
-        }
-    }
-
-    void errorOutReadKey(final CompletionKey key) {
-        executor.submitOrdered(key.ledgerId, new SafeRunnable() {
-            @Override
-            public void safeRun() {
-
-                ReadCompletion readCompletion = readCompletions.remove(key);
-
-                if (readCompletion != null) {
-                    LOG.error("Could not write request for reading entry: " + key.entryId + " ledger-id: "
-                            + key.ledgerId + " bookie: " + channel.getRemoteAddress());
-
-                    readCompletion.cb.readEntryComplete(BKException.Code.BookieHandleNotAvailableException,
-                            key.ledgerId, key.entryId, null, readCompletion.ctx);
-                }
-            }
-
-        });
-    }
-
-    void errorOutAddKey(final CompletionKey key) {
-        executor.submitOrdered(key.ledgerId, new SafeRunnable() {
-            @Override
-            public void safeRun() {
-
-                AddCompletion addCompletion = addCompletions.remove(key);
-
-                if (addCompletion != null) {
-                    String bAddress = "null";
-                    if(channel != null)
-                        bAddress = channel.getRemoteAddress().toString();
-                    LOG.error("Could not write request for adding entry: " + key.entryId + " ledger-id: "
-                            + key.ledgerId + " bookie: " + bAddress);
-
-                    addCompletion.cb.writeComplete(BKException.Code.BookieHandleNotAvailableException, key.ledgerId,
-                            key.entryId, addr, addCompletion.ctx);
-                    LOG.error("Invoked callback method: " + key.entryId);
-                }
-            }
-
-        });
-
-    }
-
-    /**
-     * Errors out pending entries. We call this method from one thread to avoid
-     * concurrent executions of QuorumOpMonitor (which implements callbacks). It seems
-     * simpler to call it from BookieHandle instead of calling directly from
-     * here.
-     */
-
-    void errorOutOutstandingEntries() {
-
-        // DO NOT rewrite these using Map.Entry iterations. We want to iterate
-        // on keys and see if we are successfully able to remove the key from
-        // the map, because the add and the read methods also do the same thing
-        // when they get a write failure on the socket. The one who
-        // successfully removes the key from the map is the one responsible for
-        // calling the application callback.
-
-        for (CompletionKey key : addCompletions.keySet()) {
-            errorOutAddKey(key);
-        }
-
-        for (CompletionKey key : readCompletions.keySet()) {
-            errorOutReadKey(key);
-        }
-    }
-
-    /**
-     * In the netty pipeline, we need to split packets based on length, so we
-     * use the {@link LengthFieldBasedFrameDecoder}. Other than that, all actions
-     * are carried out in this class, e.g., making sense of received messages,
-     * prepending the length to outgoing packets, etc.
-     */
-    @Override
-    public ChannelPipeline getPipeline() throws Exception {
-        ChannelPipeline pipeline = Channels.pipeline();
-        pipeline.addLast("lengthbasedframedecoder", new LengthFieldBasedFrameDecoder(MAX_FRAME_LENGTH, 0, 4, 0, 4));
-        pipeline.addLast("mainhandler", this);
-        return pipeline;
-    }
-
-    /**
-     * If our channel has disconnected, we just error out the pending entries
-     */
-    @Override
-    public void channelDisconnected(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
-        LOG.info("Disconnected from bookie: " + addr);
-    	errorOutOutstandingEntries();
-        channel.close();
-
-        connected = false;
-
-        // we don't want to reconnect right away. If someone sends a request to
-        // this address, we will reconnect.
-    }
-
-    /**
-     * Called by netty when an exception happens in one of the netty threads
-     * (mostly due to what we do in the netty threads)
-     */
-    @Override
-    public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
-        Throwable t = e.getCause();
-        if (t instanceof CorruptedFrameException || t instanceof TooLongFrameException) {
-            LOG.error("Corrupted frame received from bookie: " + e.getChannel().getRemoteAddress());
-            return;
-        }
-        if (t instanceof IOException) {
-            // these are thrown when a bookie fails, logging them just pollutes
-            // the logs (the failure is logged from the listeners on the write
-            // operation), so I'll just ignore it here.
-            return;
-        }
-
-        LOG.fatal("Unexpected exception caught by bookie client channel handler", t);
-        // Since we are a library, we can't terminate the app here, can we?
-    }
-
-    /**
-     * Called by netty when a message is received on a channel
-     */
-    @Override
-    public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
-        if (!(e.getMessage() instanceof ChannelBuffer)) {
-            ctx.sendUpstream(e);
-            return;
-        }
-
-        final ChannelBuffer buffer = (ChannelBuffer) e.getMessage();
-        final int type, rc;
-        final long ledgerId, entryId;
-
-        try {
-            type = buffer.readInt();
-            rc = buffer.readInt();
-            ledgerId = buffer.readLong();
-            entryId = buffer.readLong();
-        } catch (IndexOutOfBoundsException ex) {
-            LOG.error("Unparseable response from bookie: " + addr, ex);
-            return;
-        }
-
-        executor.submitOrdered(ledgerId, new SafeRunnable() {
-            @Override
-            public void safeRun() {
-                switch (type) {
-                case BookieProtocol.ADDENTRY:
-                    handleAddResponse(ledgerId, entryId, rc);
-                    break;
-                case BookieProtocol.READENTRY:
-                    handleReadResponse(ledgerId, entryId, rc, buffer);
-                    break;
-                default:
-                    LOG.error("Unexpected response, type: " + type + " received from bookie: " + addr + ", ignoring");
-                }
-            }
-
-        });
-    }
-
-    void handleAddResponse(long ledgerId, long entryId, int rc) {
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Got response for add request from bookie: " + addr + " for ledger: " + ledgerId + " entry: "
-                    + entryId + " rc: " + rc);
-        }
-
-        // convert to BKException code because that's what the upper
-        // layers expect. This is UGLY; there should just be one set of
-        // error codes.
-        if (rc != BookieProtocol.EOK) {
-            LOG.error("Add for ledger: " + ledgerId + ", entry: " + entryId + " failed on bookie: " + addr
-                    + " with code: " + rc);
-            rc = BKException.Code.WriteException;
-        } else {
-            rc = BKException.Code.OK;
-        }
-
-        AddCompletion ac;
-        ac = addCompletions.remove(new CompletionKey(ledgerId, entryId));
-        if (ac == null) {
-            LOG.error("Unexpected add response received from bookie: " + addr + " for ledger: " + ledgerId
-                    + ", entry: " + entryId + ", ignoring");
-            return;
-        }
-
-        // totalBytesOutstanding.addAndGet(-ac.size);
-
-        ac.cb.writeComplete(rc, ledgerId, entryId, addr, ac.ctx);
-
-    }
-
-    void handleReadResponse(long ledgerId, long entryId, int rc, ChannelBuffer buffer) {
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Got response for read request from bookie: " + addr + " for ledger: " + ledgerId + " entry: "
-                    + entryId + " rc: " + rc + " entry length: " + buffer.readableBytes());
-        }
-
-        // convert to BKException code because that's what the upper
-        // layers expect. This is UGLY; there should just be one set of
-        // error codes.
-        if (rc == BookieProtocol.EOK) {
-            rc = BKException.Code.OK;
-        } else if (rc == BookieProtocol.ENOENTRY || rc == BookieProtocol.ENOLEDGER) {
-            rc = BKException.Code.NoSuchEntryException;
-        } else {
-            LOG.error("Read for ledger: " + ledgerId + ", entry: " + entryId + " failed on bookie: " + addr
-                    + " with code: " + rc);
-            rc = BKException.Code.ReadException;
-        }
-
-        CompletionKey key = new CompletionKey(ledgerId, entryId);
-        ReadCompletion readCompletion = readCompletions.remove(key);
-
-        if (readCompletion == null) {
-            /*
-             * This is a special case. When recovering a ledger, a client
-             * submits a read request with id -1, and receives a response with a
-             * different entry id.
-             */
-            readCompletion = readCompletions.remove(new CompletionKey(ledgerId, -1));
-        }
-
-        if (readCompletion == null) {
-            LOG.error("Unexpected read response received from bookie: " + addr + " for ledger: " + ledgerId
-                    + ", entry: " + entryId + ", ignoring");
-            return;
-        }
-
-        readCompletion.cb.readEntryComplete(rc, ledgerId, entryId, buffer.slice(), readCompletion.ctx);
-    }
-
-    /**
-     * Boiler-plate wrapper classes follow
-     * 
-     */
-
-    private static class ReadCompletion {
-        final ReadEntryCallback cb;
-        final Object ctx;
-
-        public ReadCompletion(ReadEntryCallback cb, Object ctx) {
-            this.cb = cb;
-            this.ctx = ctx;
-        }
-    }
-
-    private static class AddCompletion {
-        final WriteCallback cb;
-        //final long size;
-        final Object ctx;
-
-        public AddCompletion(WriteCallback cb, long size, Object ctx) {
-            this.cb = cb;
-            //this.size = size;
-            this.ctx = ctx;
-        }
-    }
-
-    private static class CompletionKey {
-        long ledgerId;
-        long entryId;
-
-        CompletionKey(long ledgerId, long entryId) {
-            this.ledgerId = ledgerId;
-            this.entryId = entryId;
-        }
-
-        @Override
-        public boolean equals(Object obj) {
-            if (!(obj instanceof CompletionKey)) {
-                return false;
-            }
-            CompletionKey that = (CompletionKey) obj;
-            return this.ledgerId == that.ledgerId && this.entryId == that.entryId;
-        }
-
-        @Override
-        public int hashCode() {
-            return ((int) ledgerId << 16) ^ ((int) entryId);
-        }
-
-    }
-
-}
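
The client above correlates asynchronous add/read responses with their callbacks through maps keyed by (ledgerId, entryId): whichever path removes the key first, be it the response handler, the write-failure listener, or the disconnect error path, owns the application callback. The following standalone sketch illustrates that pattern with plain JDK classes only; the names are hypothetical and it is not part of the BookKeeper API.

import java.util.concurrent.ConcurrentHashMap;

// Minimal sketch of the completion-key pattern: requests are tracked in a concurrent map
// keyed by (ledgerId, entryId), and whichever path removes the key first owns the callback.
public class CompletionKeyDemo {
    static final class Key {
        final long ledgerId, entryId;
        Key(long ledgerId, long entryId) { this.ledgerId = ledgerId; this.entryId = entryId; }
        @Override public boolean equals(Object o) {
            if (!(o instanceof Key)) return false;   // instanceof already rejects null
            Key that = (Key) o;
            return ledgerId == that.ledgerId && entryId == that.entryId;
        }
        @Override public int hashCode() { return (int) (ledgerId * 31 + entryId); }
    }

    static final ConcurrentHashMap<Key, Runnable> pending = new ConcurrentHashMap<Key, Runnable>();

    public static void main(String[] args) {
        pending.put(new Key(7L, 42L), new Runnable() {
            public void run() { System.out.println("writeComplete(OK)"); }
        });

        // Response path and error path both try to remove; only the winner runs the callback.
        Runnable cb = pending.remove(new Key(7L, 42L));
        if (cb != null) cb.run();
        if (pending.remove(new Key(7L, 42L)) == null) {
            System.out.println("already completed, nothing to error out");
        }
    }
}

This is also why errorOutOutstandingEntries() above iterates over keySet() and re-attempts the remove, rather than iterating over Map.Entry objects.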

+ 0 - 148
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/proto/ServerStats.java

@@ -1,148 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.bookkeeper.proto;
-
-public class ServerStats {
-    private static ServerStats instance = new ServerStats();
-    private long packetsSent;
-    private long packetsReceived;
-    private long maxLatency;
-    private long minLatency = Long.MAX_VALUE;
-    private long totalLatency = 0;
-    private long count = 0;
-
-    public interface Provider {
-        public long getOutstandingRequests();
-
-        public long getLastProcessedZxid();
-    }
-
-    private Provider provider = null;
-    private Object mutex = new Object();
-
-    static public ServerStats getInstance() {
-        return instance;
-    }
-
-    static public void registerAsConcrete() {
-        setInstance(new ServerStats());
-    }
-
-    static synchronized public void unregister() {
-        instance = null;
-    }
-
-    static synchronized protected void setInstance(ServerStats newInstance) {
-        assert instance == null;
-        instance = newInstance;
-    }
-
-    protected ServerStats() {
-    }
-
-    // getters
-    synchronized public long getMinLatency() {
-        return (minLatency == Long.MAX_VALUE) ? 0 : minLatency;
-    }
-
-    synchronized public long getAvgLatency() {
-        if (count != 0)
-            return totalLatency / count;
-        return 0;
-    }
-
-    synchronized public long getMaxLatency() {
-        return maxLatency;
-    }
-
-    public long getOutstandingRequests() {
-        synchronized (mutex) {
-            return (provider != null) ? provider.getOutstandingRequests() : -1;
-        }
-    }
-
-    public long getLastProcessedZxid() {
-        synchronized (mutex) {
-            return (provider != null) ? provider.getLastProcessedZxid() : -1;
-        }
-    }
-
-    synchronized public long getPacketsReceived() {
-        return packetsReceived;
-    }
-
-    synchronized public long getPacketsSent() {
-        return packetsSent;
-    }
-
-    public String getServerState() {
-        return "standalone";
-    }
-
-    @Override
-    public String toString() {
-        StringBuilder sb = new StringBuilder();
-        sb.append("Latency min/avg/max: " + getMinLatency() + "/" + getAvgLatency() + "/" + getMaxLatency() + "\n");
-        sb.append("Received: " + getPacketsReceived() + "\n");
-        sb.append("Sent: " + getPacketsSent() + "\n");
-        if (provider != null) {
-            sb.append("Outstanding: " + getOutstandingRequests() + "\n");
-            sb.append("Zxid: 0x" + Long.toHexString(getLastProcessedZxid()) + "\n");
-        }
-        sb.append("Mode: " + getServerState() + "\n");
-        return sb.toString();
-    }
-
-    // mutators
-    public void setStatsProvider(Provider zk) {
-        synchronized (mutex) {
-            provider = zk;
-        }
-    }
-
-    synchronized void updateLatency(long requestCreateTime) {
-        long latency = System.currentTimeMillis() - requestCreateTime;
-        totalLatency += latency;
-        count++;
-        if (latency < minLatency) {
-            minLatency = latency;
-        }
-        if (latency > maxLatency) {
-            maxLatency = latency;
-        }
-    }
-
-    synchronized public void resetLatency() {
-        totalLatency = count = maxLatency = 0;
-        minLatency = Long.MAX_VALUE;
-    }
-
-    synchronized public void resetMaxLatency() {
-        maxLatency = getMinLatency();
-    }
-
-    synchronized public void incrementPacketsReceived() {
-        packetsReceived++;
-    }
-
-    synchronized public void incrementPacketsSent() {
-        packetsSent++;
-    }
-
-    synchronized public void resetRequestCounters() {
-        packetsReceived = packetsSent = 0;
-    }
-
-}
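
ServerStats above maintains running min/avg/max latency figures by updating them under the instance lock on every request. A minimal standalone sketch of the same bookkeeping follows; the class name is hypothetical and it is not the ZooKeeper/BookKeeper ServerStats itself.

// Standalone sketch of the min/avg/max latency accounting done by ServerStats.
public class LatencyStatsDemo {
    private long count, totalLatency, maxLatency;
    private long minLatency = Long.MAX_VALUE;

    synchronized void update(long latencyMs) {
        totalLatency += latencyMs;
        count++;
        if (latencyMs < minLatency) minLatency = latencyMs;
        if (latencyMs > maxLatency) maxLatency = latencyMs;
    }

    synchronized String summary() {
        long min = (minLatency == Long.MAX_VALUE) ? 0 : minLatency;
        long avg = (count == 0) ? 0 : totalLatency / count;
        return "Latency min/avg/max: " + min + "/" + avg + "/" + maxLatency;
    }

    public static void main(String[] args) {
        LatencyStatsDemo stats = new LatencyStatsDemo();
        for (long l : new long[] { 3, 7, 5 }) stats.update(l);
        System.out.println(stats.summary()); // prints: Latency min/avg/max: 3/5/7
    }
}

Note that the average is derived as totalLatency / count, which is why resetLatency() has to clear the total, the count and the max together.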

+ 0 - 173
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/streaming/LedgerInputStream.java

@@ -1,173 +0,0 @@
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-package org.apache.bookkeeper.streaming;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteBuffer;
-import java.util.Enumeration;
-
-import org.apache.bookkeeper.client.BKException;
-import org.apache.bookkeeper.client.LedgerEntry;
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.log4j.Logger;
-
-public class LedgerInputStream extends InputStream {
-    Logger LOG = Logger.getLogger(LedgerInputStream.class);
-    private LedgerHandle lh;
-    private ByteBuffer bytebuff;
-    byte[] bbytes;
-    long lastEntry = 0;
-    int increment = 50;
-    int defaultSize = 1024 * 1024; // 1MB default size
-    Enumeration<LedgerEntry> ledgerSeq = null;
-
-    /**
-     * Constructs an input stream from a ledger handle.
-     * 
-     * @param lh
-     *            ledger handle
-     * @throws {@link BKException}, {@link InterruptedException}
-     */
-    public LedgerInputStream(LedgerHandle lh) throws BKException, InterruptedException {
-        this.lh = lh;
-        bbytes = new byte[defaultSize];
-        this.bytebuff = ByteBuffer.wrap(bbytes);
-        this.bytebuff.position(this.bytebuff.limit());
-        lastEntry = Math.min(lh.getLastAddConfirmed(), increment);
-        ledgerSeq = lh.readEntries(0, lastEntry);
-    }
-
-    /**
-     * Constructs an input stream from a ledger handle.
-     * 
-     * @param lh
-     *            the ledger handle
-     * @param size
-     *            the size of the buffer
-     * @throws {@link BKException}, {@link InterruptedException}
-     */
-    public LedgerInputStream(LedgerHandle lh, int size) throws BKException, InterruptedException {
-        this.lh = lh;
-        bbytes = new byte[size];
-        this.bytebuff = ByteBuffer.wrap(bbytes);
-        this.bytebuff.position(this.bytebuff.limit());
-        lastEntry = Math.min(lh.getLastAddConfirmed(), increment);
-        ledgerSeq = lh.readEntries(0, lastEntry);
-    }
-
-    /**
-     * The close method currently does nothing. The application is
-     * expected to open and close the ledger handle ({@link LedgerHandle})
-     * backing this stream itself.
-     */
-    @Override
-    public void close() {
-        // do nothing
-        // let the application
-        // close the ledger
-    }
-
-    /**
-     * Refills the buffer by reading more entries from the ledger.
-     * 
-     * @return true if the buffer could be refilled, false if there is nothing left to read
-     */
-    private synchronized boolean refill() throws IOException {
-        bytebuff.clear();
-        if (!ledgerSeq.hasMoreElements() && lastEntry >= lh.getLastAddConfirmed()) {
-            return false;
-        }
-        if (!ledgerSeq.hasMoreElements()) {
-            // do refill
-            long last = Math.min(lastEntry + increment, lh.getLastAddConfirmed());
-            try {
-                ledgerSeq = lh.readEntries(lastEntry + 1, last);
-            } catch (BKException bk) {
-                IOException ie = new IOException(bk.getMessage());
-                ie.initCause(bk);
-                throw ie;
-            } catch (InterruptedException ie) {
-                Thread.currentThread().interrupt();
-                IOException ioe = new IOException("Interrupted while reading ledger entries");
-                ioe.initCause(ie);
-                throw ioe;
-            }
-            lastEntry = last;
-        }
-        LedgerEntry le = ledgerSeq.nextElement();
-        bbytes = le.getEntry();
-        bytebuff = ByteBuffer.wrap(bbytes);
-        return true;
-    }
-
-    @Override
-    public synchronized int read() throws IOException {
-        boolean toread = true;
-        if (bytebuff.remaining() == 0) {
-            // there are no remaining bytes
-            toread = refill();
-        }
-        if (toread) {
-            int ret = 0xFF & bytebuff.get();
-            return ret;
-        }
-        return -1;
-    }
-
-    @Override
-    public synchronized int read(byte[] b) throws IOException {
-        // be smart ... just copy the available bytes
-        // once and return the count; the caller
-        // will call read() again for more
-        boolean toread = true;
-        if (bytebuff.remaining() == 0) {
-            toread = refill();
-        }
-        if (toread) {
-            int bcopied = bytebuff.remaining();
-            int tocopy = Math.min(bcopied, b.length);
-            // cannot use ByteBuffer.get() here because of
-            // the underflow/overflow exceptions
-            System.arraycopy(bbytes, bytebuff.position(), b, 0, tocopy);
-            bytebuff.position(bytebuff.position() + tocopy);
-            return tocopy;
-        }
-        return -1;
-    }
-
-    @Override
-    public synchronized int read(byte[] b, int off, int len) throws IOException {
-        // again, we don't need to fully
-        // fill b; just return what we have
-        // and let the application call read()
-        // again
-        boolean toread = true;
-        if (bytebuff.remaining() == 0) {
-            toread = refill();
-        }
-        if (toread) {
-            int bcopied = bytebuff.remaining();
-            int tocopy = Math.min(bcopied, len);
-            System.arraycopy(bbytes, bytebuff.position(), b, off, tocopy);
-            bytebuff.position(bytebuff.position() + tocopy);
-            return tocopy;
-        }
-        return -1;
-    }
-}
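
For reference, a hedged usage sketch of the stream above: it assumes the caller has already opened a LedgerHandle (choosing the digest type and password is the caller's concern), and, as the close() javadoc notes, closing the ledger handle itself also remains the caller's job.

import java.io.IOException;

import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.LedgerHandle;
import org.apache.bookkeeper.streaming.LedgerInputStream;

// Usage sketch only: 'lh' is assumed to be an already-open LedgerHandle.
public class LedgerReadDemo {
    static void dumpLedger(LedgerHandle lh) throws BKException, InterruptedException, IOException {
        LedgerInputStream in = new LedgerInputStream(lh); // default 1MB internal buffer
        byte[] chunk = new byte[4096];
        int n;
        while ((n = in.read(chunk)) != -1) {              // may return fewer bytes than requested
            System.out.write(chunk, 0, n);
        }
        in.close();                                       // no-op; the caller still closes lh
    }
}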

+ 0 - 147
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/streaming/LedgerOutputStream.java

@@ -1,147 +0,0 @@
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-package org.apache.bookkeeper.streaming;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-
-import org.apache.bookkeeper.client.BKException;
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.log4j.Logger;
-
-/**
- * This class provides a streaming API to obtain an output stream from a ledger
- * handle and write to it as a stream of bytes. It is built on top of the
- * LedgerHandle API and buffers the data written to it, writing each full buffer
- * out to the ledger as an entry.
- */
-public class LedgerOutputStream extends OutputStream {
-    Logger LOG = Logger.getLogger(LedgerOutputStream.class);
-    private LedgerHandle lh;
-    private ByteBuffer bytebuff;
-    byte[] bbytes;
-    int defaultSize = 1024 * 1024; // 1MB default size
-
-    /**
-     * Constructs an output stream from a ledger handle.
-     * 
-     * @param lh
-     *            ledger handle
-     */
-    public LedgerOutputStream(LedgerHandle lh) {
-        this.lh = lh;
-        bbytes = new byte[defaultSize];
-        this.bytebuff = ByteBuffer.wrap(bbytes);
-    }
-
-    /**
-     * Constructs an output stream from a ledger handle.
-     * 
-     * @param lh
-     *            the ledger handle
-     * @param size
-     *            the size of the buffer
-     */
-    public LedgerOutputStream(LedgerHandle lh, int size) {
-        this.lh = lh;
-        bbytes = new byte[size];
-        this.bytebuff = ByteBuffer.wrap(bbytes);
-    }
-
-    @Override
-    public void close() {
-        // flush everything
-        // we have
-        flush();
-    }
-
-    @Override
-    public synchronized void flush() {
-        // let's flush all the data
-        // into the ledger entry
-        if (bytebuff.position() > 0) {
-            // copy the bytes into
-            // a new byte buffer and send it out
-            byte[] b = new byte[bytebuff.position()];
-            LOG.info("Flushing " + bytebuff.position() + " bytes to the ledger");
-            System.arraycopy(bbytes, 0, b, 0, bytebuff.position());
-            try {
-                lh.addEntry(b);
-            } catch (InterruptedException ie) {
-                LOG.warn("Interrupted while flushing", ie);
-                Thread.currentThread().interrupt();
-            } catch (BKException bke) {
-                LOG.warn("BookKeeper exception ", bke);
-            }
-        }
-    }
-
-    /**
-     * Makes space for len bytes to be written to the buffer.
-     * 
-     * @param len number of bytes we need room for
-     * @return true if space for len bytes could be made, false otherwise
-     */
-    private boolean makeSpace(int len) {
-        if (bytebuff.remaining() < len) {
-            flush();
-            bytebuff.clear();
-            if (bytebuff.capacity() < len) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-    @Override
-    public synchronized void write(byte[] b) {
-        if (makeSpace(b.length)) {
-            bytebuff.put(b);
-        } else {
-            try {
-                lh.addEntry(b);
-            } catch (InterruptedException ie) {
-                LOG.warn("Interrupted while writing", ie);
-                Thread.currentThread().interrupt();
-            } catch (BKException bke) {
-                LOG.warn("BookKeeper exception", bke);
-            }
-        }
-    }
-
-    @Override
-    public synchronized void write(byte[] b, int off, int len) {
-        if (!makeSpace(len)) {
-            // let's try making the buffer bigger
-            bbytes = new byte[len];
-            bytebuff = ByteBuffer.wrap(bbytes);
-        }
-        bytebuff.put(b, off, len);
-    }
-
-    @Override
-    public synchronized void write(int b) throws IOException {
-        makeSpace(1);
-        byte oneB = (byte) (b & 0xFF);
-        bytebuff.put(oneB);
-    }
-}
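
A matching usage sketch for the output stream: each write() is buffered and an addEntry() is issued whenever the buffer fills, so entry boundaries depend on the buffer size (the 64KB below is an arbitrary choice for illustration), and close() flushes whatever remains as one final entry. As with the input stream, opening and closing the LedgerHandle stays with the caller.

import org.apache.bookkeeper.client.LedgerHandle;
import org.apache.bookkeeper.streaming.LedgerOutputStream;

// Usage sketch only: 'lh' is assumed to be an already-open LedgerHandle.
public class LedgerWriteDemo {
    static void writeRecords(LedgerHandle lh, byte[][] records) {
        LedgerOutputStream out = new LedgerOutputStream(lh, 64 * 1024); // 64KB buffer
        for (byte[] record : records) {
            out.write(record); // buffered; written as a ledger entry once the buffer is full
        }
        out.close();           // flush(): one final addEntry() with whatever is left
    }
}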

+ 0 - 763
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/tools/BookKeeperTools.java

@@ -1,763 +0,0 @@
-package org.apache.bookkeeper.tools;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.bookkeeper.client.BKException;
-import org.apache.bookkeeper.client.BookKeeper;
-import org.apache.bookkeeper.client.LedgerEntry;
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.bookkeeper.client.AsyncCallback.OpenCallback;
-import org.apache.bookkeeper.client.AsyncCallback.ReadCallback;
-import org.apache.bookkeeper.client.AsyncCallback.RecoverCallback;
-import org.apache.bookkeeper.client.BookKeeper.DigestType;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback;
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.AsyncCallback;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.KeeperException.Code;
-import org.apache.zookeeper.data.Stat;
-import org.jboss.netty.buffer.ChannelBuffer;
-
-/**
- * Provides Admin Tools to manage the BookKeeper cluster.
- * 
- */
-public class BookKeeperTools {
-
-    private static Logger LOG = Logger.getLogger(BookKeeperTools.class);
-
-    // ZK client instance
-    private ZooKeeper zk;
-    // ZK ledgers related String constants
-    static final String LEDGERS_PATH = "/ledgers";
-    static final String LEDGER_NODE_PREFIX = "L";
-    static final String AVAILABLE_NODE = "available";
-    static final String BOOKIES_PATH = LEDGERS_PATH + "/" + AVAILABLE_NODE;
-    static final String COLON = ":";
-
-    // BookKeeper client instance
-    private BookKeeper bkc;
-
-    /*
-     * Random number generator used to choose an available bookie server to
-     * replicate data from a dead bookie.
-     */
-    private Random rand = new Random();
-
-    /*
-     * For now, assume that all ledgers were created with the same DigestType
-     * and password. In the future, this admin tool will need to know for each
-     * ledger, what was the DigestType and password used to create it before it
-     * can open it. These values will come from System properties, though hard
-     * coded defaults are defined here.
-     */
-    private DigestType DIGEST_TYPE = DigestType.valueOf(System.getProperty("digestType", DigestType.CRC32.toString()));
-    private byte[] PASSWD = System.getProperty("passwd", "").getBytes();
-
-    /**
-     * Constructor that takes in a ZooKeeper servers connect string so we know
-     * how to connect to ZooKeeper to retrieve information about the BookKeeper
-     * cluster. We need this before we can do any type of admin operations on
-     * the BookKeeper cluster.
-     * 
-     * @param zkServers
-     *            Comma separated list of hostname:port pairs for the ZooKeeper
-     *            servers cluster.
-     * @throws IOException
-     *             Throws this exception if there is an error instantiating the
-     *             ZooKeeper client.
-     * @throws InterruptedException
-     *             Throws this exception if there is an error instantiating the
-     *             BookKeeper client.
-     * @throws KeeperException
-     *             Throws this exception if there is an error instantiating the
-     *             BookKeeper client.
-     */
-    public BookKeeperTools(String zkServers) throws IOException, InterruptedException, KeeperException {
-        // Create the ZooKeeper client instance
-        zk = new ZooKeeper(zkServers, 10000, new Watcher() {
-            @Override
-            public void process(WatchedEvent event) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Process: " + event.getType() + " " + event.getPath());
-                }
-            }
-        });
-        // Create the BookKeeper client instance
-        bkc = new BookKeeper(zk);
-    }
-
-    /**
-     * Shutdown method to gracefully release resources that this class uses.
-     * 
-     * @throws InterruptedException
-     *             if there is an error shutting down the clients that this
-     *             class uses.
-     */
-    public void shutdown() throws InterruptedException {
-        bkc.halt();
-        zk.close();
-    }
-
-    /**
-     * This is a multi-callback object for bookie recovery that waits for all of
-     * the underlying async operations to complete. If any fail, we invoke the
-     * final callback with a BK LedgerRecoveryException.
-     */
-    class MultiCallback implements AsyncCallback.VoidCallback {
-        // Number of expected callbacks
-        final int expected;
-        // Final callback and the corresponding context to invoke
-        final AsyncCallback.VoidCallback cb;
-        final Object context;
-        // This keeps track of how many operations have completed
-        final AtomicInteger done = new AtomicInteger();
-        // List of the exceptions from operations that completed unsuccessfully
-        final LinkedBlockingQueue<Integer> exceptions = new LinkedBlockingQueue<Integer>();
-
-        MultiCallback(int expected, AsyncCallback.VoidCallback cb, Object context) {
-            this.expected = expected;
-            this.cb = cb;
-            this.context = context;
-            if (expected == 0) {
-                cb.processResult(Code.OK.intValue(), null, context);
-            }
-        }
-
-        private void tick() {
-            if (done.incrementAndGet() == expected) {
-                if (exceptions.isEmpty()) {
-                    cb.processResult(Code.OK.intValue(), null, context);
-                } else {
-                    cb.processResult(BKException.Code.LedgerRecoveryException, null, context);
-                }
-            }
-        }
-
-        @Override
-        public void processResult(int rc, String path, Object ctx) {
-            if (rc != Code.OK.intValue()) {
-                LOG.error("BK error recovering ledger data", BKException.create(rc));
-                exceptions.add(rc);
-            }
-            tick();
-        }
-
-    }
-
-    /**
-     * Method to get the input ledger's digest type. For now, this is just a
-     * placeholder function since there is no way we can get this information
-     * easily. In the future, BookKeeper should store this ledger metadata
-     * somewhere such that an admin tool can access it.
-     * 
-     * @param ledgerId
-     *            LedgerId we are retrieving the digestType for.
-     * @return DigestType for the input ledger
-     */
-    private DigestType getLedgerDigestType(long ledgerId) {
-        return DIGEST_TYPE;
-    }
-
-    /**
-     * Method to get the input ledger's password. For now, this is just a
-     * placeholder function since there is no way we can get this information
-     * easily. In the future, BookKeeper should store this ledger metadata
-     * somewhere such that an admin tool can access it.
-     * 
-     * @param ledgerId
-     *            LedgerId we are retrieving the password for.
-     * @return Password for the input ledger
-     */
-    private byte[] getLedgerPasswd(long ledgerId) {
-        return PASSWD;
-    }
-
-    // Object used for calling async methods and waiting for them to complete.
-    class SyncObject {
-        boolean value;
-
-        public SyncObject() {
-            value = false;
-        }
-    }
-
-    /**
-     * Synchronous method to rebuild and recover the ledger fragments data that
-     * was stored on the source bookie. That bookie could have failed completely
-     * and now the ledger data that was stored on it is under replicated. An
-     * optional destination bookie server could be given if we want to copy all
-     * of the ledger fragments data on the failed source bookie to it.
-     * Otherwise, we will just randomly distribute the ledger fragments to the
-     * active set of bookies, perhaps based on load. All ZooKeeper ledger
-     * metadata will be updated to point to the new bookie(s) that contain the
-     * replicated ledger fragments.
-     * 
-     * @param bookieSrc
-     *            Source bookie that had a failure. We want to replicate the
-     *            ledger fragments that were stored there.
-     * @param bookieDest
-     *            Optional destination bookie that if passed, we will copy all
-     *            of the ledger fragments from the source bookie over to it.
-     */
-    public void recoverBookieData(final InetSocketAddress bookieSrc, final InetSocketAddress bookieDest)
-            throws InterruptedException {
-        SyncObject sync = new SyncObject();
-        // Call the async method to recover bookie data.
-        asyncRecoverBookieData(bookieSrc, bookieDest, new RecoverCallback() {
-            @Override
-            public void recoverComplete(int rc, Object ctx) {
-                LOG.info("Recover bookie operation completed with rc: " + rc);
-                SyncObject syncObj = (SyncObject) ctx;
-                synchronized (syncObj) {
-                    syncObj.value = true;
-                    syncObj.notify();
-                }
-            }
-        }, sync);
-
-        // Wait for the async method to complete.
-        synchronized (sync) {
-            while (sync.value == false) {
-                sync.wait();
-            }
-        }
-    }
-
-    /**
-     * Async method to rebuild and recover the ledger fragments data that was
-     * stored on the source bookie. That bookie could have failed completely and
-     * now the ledger data that was stored on it is under replicated. An
-     * optional destination bookie server could be given if we want to copy all
-     * of the ledger fragments data on the failed source bookie to it.
-     * Otherwise, we will just randomly distribute the ledger fragments to the
-     * active set of bookies, perhaps based on load. All ZooKeeper ledger
-     * metadata will be updated to point to the new bookie(s) that contain the
-     * replicated ledger fragments.
-     * 
-     * @param bookieSrc
-     *            Source bookie that had a failure. We want to replicate the
-     *            ledger fragments that were stored there.
-     * @param bookieDest
-     *            Optional destination bookie that if passed, we will copy all
-     *            of the ledger fragments from the source bookie over to it.
-     * @param cb
-     *            RecoverCallback to invoke once all of the data on the dead
-     *            bookie has been recovered and replicated.
-     * @param context
-     *            Context for the RecoverCallback to call.
-     */
-    public void asyncRecoverBookieData(final InetSocketAddress bookieSrc, final InetSocketAddress bookieDest,
-            final RecoverCallback cb, final Object context) {
-        // Sync ZK to make sure we're reading the latest bookie/ledger data.
-        zk.sync(LEDGERS_PATH, new AsyncCallback.VoidCallback() {
-            @Override
-            public void processResult(int rc, String path, Object ctx) {
-                if (rc != Code.OK.intValue()) {
-                    LOG.error("ZK error syncing: ", KeeperException.create(KeeperException.Code.get(rc), path));
-                    cb.recoverComplete(BKException.Code.ZKException, context);
-                    return;
-                }
-                getAvailableBookies(bookieSrc, bookieDest, cb, context);
-            };
-        }, null);
-    }
-
-    /**
-     * This method asynchronously gets the set of available Bookies that the
-     * dead input bookie's data will be copied over into. If the user passed in
-     * a specific destination bookie, then just use that one. Otherwise, we'll
-     * randomly pick one of the other available bookies to use for each ledger
-     * fragment we are replicating.
-     * 
-     * @param bookieSrc
-     *            Source bookie that had a failure. We want to replicate the
-     *            ledger fragments that were stored there.
-     * @param bookieDest
-     *            Optional destination bookie that if passed, we will copy all
-     *            of the ledger fragments from the source bookie over to it.
-     * @param cb
-     *            RecoverCallback to invoke once all of the data on the dead
-     *            bookie has been recovered and replicated.
-     * @param context
-     *            Context for the RecoverCallback to call.
-     */
-    private void getAvailableBookies(final InetSocketAddress bookieSrc, final InetSocketAddress bookieDest,
-            final RecoverCallback cb, final Object context) {
-        final List<InetSocketAddress> availableBookies = new LinkedList<InetSocketAddress>();
-        if (bookieDest != null) {
-            availableBookies.add(bookieDest);
-            // Now poll ZK to get the active ledgers
-            getActiveLedgers(bookieSrc, bookieDest, cb, context, availableBookies);
-        } else {
-            zk.getChildren(BOOKIES_PATH, null, new AsyncCallback.ChildrenCallback() {
-                @Override
-                public void processResult(int rc, String path, Object ctx, List<String> children) {
-                    if (rc != Code.OK.intValue()) {
-                        LOG.error("ZK error getting bookie nodes: ", KeeperException.create(KeeperException.Code
-                                .get(rc), path));
-                        cb.recoverComplete(BKException.Code.ZKException, context);
-                        return;
-                    }
-                    for (String bookieNode : children) {
-                        String parts[] = bookieNode.split(COLON);
-                        if (parts.length < 2) {
-                            LOG.error("Bookie Node retrieved from ZK has invalid name format: " + bookieNode);
-                            cb.recoverComplete(BKException.Code.ZKException, context);
-                            return;
-                        }
-                        availableBookies.add(new InetSocketAddress(parts[0], Integer.parseInt(parts[1])));
-                    }
-                    // Now poll ZK to get the active ledgers
-                    getActiveLedgers(bookieSrc, bookieDest, cb, context, availableBookies);
-                }
-            }, null);
-        }
-    }
-
-    /**
-     * This method asynchronously polls ZK to get the current set of active
-     * ledgers. From this, we can open each ledger and look at the metadata to
-     * determine if any of the ledger fragments for it were stored at the dead
-     * input bookie.
-     * 
-     * @param bookieSrc
-     *            Source bookie that had a failure. We want to replicate the
-     *            ledger fragments that were stored there.
-     * @param bookieDest
-     *            Optional destination bookie that if passed, we will copy all
-     *            of the ledger fragments from the source bookie over to it.
-     * @param cb
-     *            RecoverCallback to invoke once all of the data on the dead
-     *            bookie has been recovered and replicated.
-     * @param context
-     *            Context for the RecoverCallback to call.
-     * @param availableBookies
-     *            List of Bookie Servers that are available to use for
-     *            replicating data on the failed bookie. This could contain a
-     *            single bookie server if the user explicitly chose a bookie
-     *            server to replicate data to.
-     */
-    private void getActiveLedgers(final InetSocketAddress bookieSrc, final InetSocketAddress bookieDest,
-            final RecoverCallback cb, final Object context, final List<InetSocketAddress> availableBookies) {
-        zk.getChildren(LEDGERS_PATH, null, new AsyncCallback.ChildrenCallback() {
-            @Override
-            public void processResult(int rc, String path, Object ctx, List<String> children) {
-                if (rc != Code.OK.intValue()) {
-                    LOG.error("ZK error getting ledger nodes: ", KeeperException.create(KeeperException.Code.get(rc),
-                            path));
-                    cb.recoverComplete(BKException.Code.ZKException, context);
-                    return;
-                }
-                // Wrapper class around the RecoverCallback so it can be used
-                // as the final VoidCallback to invoke within the MultiCallback.
-                class RecoverCallbackWrapper implements AsyncCallback.VoidCallback {
-                    final RecoverCallback cb;
-
-                    RecoverCallbackWrapper(RecoverCallback cb) {
-                        this.cb = cb;
-                    }
-
-                    @Override
-                    public void processResult(int rc, String path, Object ctx) {
-                        cb.recoverComplete(rc, ctx);
-                    }
-                }
-                // Recover each of the ledgers asynchronously
-                MultiCallback ledgerMcb = new MultiCallback(children.size(), new RecoverCallbackWrapper(cb), context);
-                for (final String ledgerNode : children) {
-                    recoverLedger(bookieSrc, ledgerNode, ledgerMcb, availableBookies);
-                }
-            }
-        }, null);
-    }
-
-    /**
-     * This method asynchronously recovers a given ledger if any of the ledger
-     * entries were stored on the failed bookie.
-     * 
-     * @param bookieSrc
-     *            Source bookie that had a failure. We want to replicate the
-     *            ledger fragments that were stored there.
-     * @param ledgerNode
-     *            Ledger Node name as retrieved from ZooKeeper we want to
-     *            recover.
-     * @param ledgerMcb
-     *            MultiCallback to invoke once we've recovered the current
-     *            ledger.
-     * @param availableBookies
-     *            List of Bookie Servers that are available to use for
-     *            replicating data on the failed bookie. This could contain a
-     *            single bookie server if the user explicitly chose a bookie
-     *            server to replicate data to.
-     */
-    private void recoverLedger(final InetSocketAddress bookieSrc, final String ledgerNode,
-            final MultiCallback ledgerMcb, final List<InetSocketAddress> availableBookies) {
-        /*
-         * The available node is also stored in this path so ignore that. That
-         * node is the path for the set of available Bookie Servers.
-         */
-        if (ledgerNode.equals(AVAILABLE_NODE)) {
-            ledgerMcb.processResult(BKException.Code.OK, null, null);
-            return;
-        }
-        // Parse out the ledgerId from the ZK ledger node.
-        String parts[] = ledgerNode.split(LEDGER_NODE_PREFIX);
-        if (parts.length < 2) {
-            LOG.error("Ledger Node retrieved from ZK has invalid name format: " + ledgerNode);
-            ledgerMcb.processResult(BKException.Code.ZKException, null, null);
-            return;
-        }
-        final long lId;
-        try {
-            lId = Long.parseLong(parts[parts.length - 1]);
-        } catch (NumberFormatException e) {
-            LOG.error("Error retrieving ledgerId from ledgerNode: " + ledgerNode, e);
-            ledgerMcb.processResult(BKException.Code.ZKException, null, null);
-            return;
-        }
-        /*
-         * For the current ledger, open it to retrieve the LedgerHandle. This
-         * will contain the LedgerMetadata indicating which bookie servers the
-         * ledger fragments are stored on. Check if any of the ledger fragments
-         * for the current ledger are stored on the input dead bookie.
-         */
-        DigestType digestType = getLedgerDigestType(lId);
-        byte[] passwd = getLedgerPasswd(lId);
-        bkc.asyncOpenLedger(lId, digestType, passwd, new OpenCallback() {
-            @Override
-            public void openComplete(int rc, final LedgerHandle lh, Object ctx) {
-                if (rc != Code.OK.intValue()) {
-                    LOG.error("BK error opening ledger: " + lId, BKException.create(rc));
-                    ledgerMcb.processResult(rc, null, null);
-                    return;
-                }
-                /*
-                 * This List stores the ledger fragments to recover indexed by
-                 * the start entry ID for the range. The ensembles TreeMap is
-                 * keyed off this.
-                 */
-                final List<Long> ledgerFragmentsToRecover = new LinkedList<Long>();
-                /*
-                 * This Map will store the start and end entry ID values for
-                 * each of the ledger fragment ranges. The only exception is the
-                 * current active fragment since it has no end yet. In the event
-                 * of a bookie failure, a new ensemble is created so the current
-                 * ensemble should not contain the dead bookie we are trying to
-                 * recover.
-                 */
-                Map<Long, Long> ledgerFragmentsRange = new HashMap<Long, Long>();
-                Long curEntryId = null;
-                for (Map.Entry<Long, ArrayList<InetSocketAddress>> entry : lh.getLedgerMetadata().getEnsembles()
-                        .entrySet()) {
-                    if (curEntryId != null)
-                        ledgerFragmentsRange.put(curEntryId, entry.getKey() - 1);
-                    curEntryId = entry.getKey();
-                    if (entry.getValue().contains(bookieSrc)) {
-                        /*
-                         * Current ledger fragment has entries stored on the
-                         * dead bookie so we'll need to recover them.
-                         */
-                        ledgerFragmentsToRecover.add(entry.getKey());
-                    }
-                }
-                /*
-                 * See if this current ledger contains any ledger fragment that
-                 * needs to be re-replicated. If not, then just invoke the
-                 * multiCallback and return.
-                 */
-                if (ledgerFragmentsToRecover.size() == 0) {
-                    ledgerMcb.processResult(BKException.Code.OK, null, null);
-                    return;
-                }
-                /*
-                 * We have ledger fragments that need to be re-replicated to a
-                 * new bookie. Choose one randomly from the available set of
-                 * bookies.
-                 */
-                final InetSocketAddress newBookie = availableBookies.get(rand.nextInt(availableBookies.size()));
-
-                /*
-                 * Wrapper class around the ledger MultiCallback. Once all
-                 * ledger fragments for the ledger have been replicated to a new
-                 * bookie, we need to update ZK with this new metadata to point
-                 * to the new bookie instead of the old dead one. That should be
-                 * done at the end prior to invoking the ledger MultiCallback.
-                 */
-                class LedgerMultiCallbackWrapper implements AsyncCallback.VoidCallback {
-                    final MultiCallback ledgerMcb;
-
-                    LedgerMultiCallbackWrapper(MultiCallback ledgerMcb) {
-                        this.ledgerMcb = ledgerMcb;
-                    }
-
-                    @Override
-                    public void processResult(int rc, String path, Object ctx) {
-                        if (rc != Code.OK.intValue()) {
-                            LOG.error("BK error replicating ledger fragments for ledger: " + lId, BKException
-                                    .create(rc));
-                            ledgerMcb.processResult(rc, null, null);
-                            return;
-                        }
-                        /*
-                         * Update the ledger metadata's ensemble info to point
-                         * to the new bookie.
-                         */
-                        for (final Long startEntryId : ledgerFragmentsToRecover) {
-                            ArrayList<InetSocketAddress> ensemble = lh.getLedgerMetadata().getEnsembles().get(
-                                    startEntryId);
-                            int deadBookieIndex = ensemble.indexOf(bookieSrc);
-                            ensemble.remove(deadBookieIndex);
-                            ensemble.add(deadBookieIndex, newBookie);
-                        }
-                        lh.writeLedgerConfig(new AsyncCallback.StatCallback() {
-                            @Override
-                            public void processResult(int rc, String path, Object ctx, Stat stat) {
-                                if (rc != Code.OK.intValue()) {
-                                    LOG.error("ZK error updating ledger config metadata for ledgerId: " + lh.getId(),
-                                            KeeperException.create(KeeperException.Code.get(rc), path));
-                                } else {
-                                    LOG.info("Updated ZK for ledgerId: (" + lh.getId()
-                                            + ") to point ledger fragments from old dead bookie: (" + bookieSrc
-                                            + ") to new bookie: (" + newBookie + ")");
-                                }
-                                /*
-                                 * Pass the return code result up the chain with
-                                 * the parent callback.
-                                 */
-                                ledgerMcb.processResult(rc, null, null);
-                            }
-                        }, null);
-                    }
-                }
-
-                /*
-                 * Now recover all of the necessary ledger fragments
-                 * asynchronously using a MultiCallback for every fragment.
-                 */
-                MultiCallback ledgerFragmentMcb = new MultiCallback(ledgerFragmentsToRecover.size(),
-                        new LedgerMultiCallbackWrapper(ledgerMcb), null);
-                for (final Long startEntryId : ledgerFragmentsToRecover) {
-                    Long endEntryId = ledgerFragmentsRange.get(startEntryId);
-                    try {
-                        recoverLedgerFragment(bookieSrc, lh, startEntryId, endEntryId, ledgerFragmentMcb, newBookie);
-                    } catch(InterruptedException e) {
-                        Thread.currentThread().interrupt();
-                        return;
-                    }
-                }
-            }
-        }, null);
-    }
-
-    /**
-     * This method asynchronously recovers a ledger fragment which is a
-     * contiguous portion of a ledger that was stored in an ensemble that
-     * included the failed bookie.
-     * 
-     * @param bookieSrc
-     *            Source bookie that had a failure. We want to replicate the
-     *            ledger fragments that were stored there.
-     * @param lh
-     *            LedgerHandle for the ledger
-     * @param startEntryId
-     *            Start entry Id for the ledger fragment
-     * @param endEntryId
-     *            End entry Id for the ledger fragment
-     * @param ledgerFragmentMcb
-     *            MultiCallback to invoke once we've recovered the current
-     *            ledger fragment.
-     * @param newBookie
-     *            New bookie we want to use to recover and replicate the ledger
-     *            entries that were stored on the failed bookie.
-     */
-    private void recoverLedgerFragment(final InetSocketAddress bookieSrc, final LedgerHandle lh,
-            final Long startEntryId, final Long endEntryId, final MultiCallback ledgerFragmentMcb,
-            final InetSocketAddress newBookie) throws InterruptedException {
-        if (endEntryId == null) {
-            /*
-             * Ideally this should never happen if bookie failure is taken care
-             * of properly. Nothing we can do though in this case.
-             */
-            LOG.warn("Dead bookie (" + bookieSrc + ") is still part of the current active ensemble for ledgerId: "
-                    + lh.getId());
-            ledgerFragmentMcb.processResult(BKException.Code.OK, null, null);
-            return;
-        }
-
-        ArrayList<InetSocketAddress> curEnsemble = lh.getLedgerMetadata().getEnsembles().get(startEntryId);
-        int bookieIndex = 0;
-        for (int i = 0; i < curEnsemble.size(); i++) {
-            if (curEnsemble.get(i).equals(bookieSrc)) {
-                bookieIndex = i;
-                break;
-            }
-        }
-        /*
-         * Loop through all entries in the current ledger fragment range and
-         * find the ones that were stored on the dead bookie.
-         */
-        List<Long> entriesToReplicate = new LinkedList<Long>();
-        for (long i = startEntryId; i <= endEntryId; i++) {
-            if (lh.getDistributionSchedule().getReplicaIndex(i, bookieIndex) >= 0) {
-                /*
-                 * Current entry is stored on the dead bookie so we'll need to
-                 * read it and replicate it to a new bookie.
-                 */
-                entriesToReplicate.add(i);
-            }
-        }
-        /*
-         * Now asynchronously replicate all of the entries for the ledger
-         * fragment that were on the dead bookie.
-         */
-        MultiCallback ledgerFragmentEntryMcb = new MultiCallback(entriesToReplicate.size(), ledgerFragmentMcb, null);
-        for (final Long entryId : entriesToReplicate) {
-            recoverLedgerFragmentEntry(entryId, lh, ledgerFragmentEntryMcb, newBookie);
-        }
-    }
-
-    /**
-     * This method asynchronously recovers a specific ledger entry by reading
-     * the values via the BookKeeper Client (which would read it from the other
-     * replicas) and then writing it to the chosen new bookie.
-     * 
-     * @param entryId
-     *            Ledger Entry ID to recover.
-     * @param lh
-     *            LedgerHandle for the ledger
-     * @param ledgerFragmentEntryMcb
-     *            MultiCallback to invoke once we've recovered the current
-     *            ledger entry.
-     * @param newBookie
-     *            New bookie we want to use to recover and replicate the ledger
-     *            entries that were stored on the failed bookie.
-     */
-    private void recoverLedgerFragmentEntry(final Long entryId, final LedgerHandle lh,
-            final MultiCallback ledgerFragmentEntryMcb, final InetSocketAddress newBookie) throws InterruptedException {
-        /*
-         * Read the ledger entry using the LedgerHandle. This will allow us to
-         * read the entry from one of the other replicated bookies other than
-         * the dead one.
-         */
-        lh.asyncReadEntries(entryId, entryId, new ReadCallback() {
-            @Override
-            public void readComplete(int rc, LedgerHandle lh, Enumeration<LedgerEntry> seq, Object ctx) {
-                if (rc != Code.OK.intValue()) {
-                    LOG.error("BK error reading ledger entry: " + entryId, BKException.create(rc));
-                    ledgerFragmentEntryMcb.processResult(rc, null, null);
-                    return;
-                }
-                /*
-                 * Now that we've read the ledger entry, write it to the new
-                 * bookie we've selected.
-                 */
-                LedgerEntry entry = seq.nextElement();
-                ChannelBuffer toSend = lh.getDigestManager().computeDigestAndPackageForSending(entryId,
-                        lh.getLastAddConfirmed(), entry.getLength(), entry.getEntry());
-                bkc.getBookieClient().addEntry(newBookie, lh.getId(), lh.getLedgerKey(), entryId, toSend,
-                        new WriteCallback() {
-                            @Override
-                            public void writeComplete(int rc, long ledgerId, long entryId, InetSocketAddress addr,
-                                    Object ctx) {
-                                if (rc != Code.OK.intValue()) {
-                                    LOG.error("BK error writing entry for ledgerId: " + ledgerId + ", entryId: "
-                                            + entryId + ", bookie: " + addr, BKException.create(rc));
-                                } else {
-                                    LOG.debug("Success writing ledger entry to a new bookie!");
-                                }
-                                /*
-                                 * Pass the return code result up the chain with
-                                 * the parent callback.
-                                 */
-                                ledgerFragmentEntryMcb.processResult(rc, null, null);
-                            }
-                        }, null);
-            }
-        }, null);
-    }
-
-    /**
-     * Main method so we can invoke the bookie recovery via command line.
-     * 
-     * @param args
-     *            Arguments to BookKeeperTools. 2 are required and the third is
-     *            optional. The first is a comma separated list of ZK server
-     *            host:port pairs. The second is the host:port socket address
-     *            for the bookie we are trying to recover. The third is the
-     *            host:port socket address of the optional destination bookie
-     *            server we want to replicate the data over to.
-     * @throws InterruptedException
-     * @throws IOException
-     * @throws KeeperException
-     */
-    public static void main(String[] args) throws InterruptedException, IOException, KeeperException {
-        // Validate the inputs
-        if (args.length < 2) {
-            System.err.println("USAGE: BookKeeperTools zkServers bookieSrc [bookieDest]");
-            return;
-        }
-        // Parse out the input arguments
-        String zkServers = args[0];
-        String bookieSrcString[] = args[1].split(COLON);
-        if (bookieSrcString.length < 2) {
-            System.err.println("BookieSrc inputted has invalid name format (host:port expected): " + bookieSrcString);
-            return;
-        }
-        final InetSocketAddress bookieSrc = new InetSocketAddress(bookieSrcString[0], Integer
-                .parseInt(bookieSrcString[1]));
-        InetSocketAddress bookieDest = null;
-        if (args.length >= 3) {
-            String bookieDestString[] = args[2].split(COLON);
-            if (bookieDestString.length < 2) {
-                System.err.println("BookieDest inputted has invalid name format (host:port expected): "
-                        + bookieDestString);
-                return;
-            }
-            bookieDest = new InetSocketAddress(bookieDestString[0], Integer.parseInt(bookieDestString[1]));
-        }
-
-        // Create the BookKeeperTools instance and perform the bookie recovery
-        // synchronously.
-        BookKeeperTools bkTools = new BookKeeperTools(zkServers);
-        bkTools.recoverBookieData(bookieSrc, bookieDest);
-
-        // Shutdown the resources used in the BookKeeperTools instance.
-        bkTools.shutdown();
-    }
-
-}
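
A minimal sketch of driving the same recovery programmatically rather than through main(); the ZooKeeper and bookie addresses below are invented for illustration, and the checked exceptions thrown by these calls are left to the caller:

    // Sketch only: hypothetical addresses.
    InetSocketAddress failedBookie = new InetSocketAddress("bk1.example.com", 3181);
    BookKeeperTools tools = new BookKeeperTools("zk1.example.com:2181,zk2.example.com:2181");
    // Leaving the destination null corresponds to omitting the optional
    // bookieDest command-line argument above.
    tools.recoverBookieData(failedBookie, null);
    tools.shutdown();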

+ 0 - 209
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/util/LocalBookKeeper.java

@@ -1,209 +0,0 @@
-package org.apache.bookkeeper.util;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-
-import org.apache.bookkeeper.proto.BookieServer;
-import org.apache.log4j.ConsoleAppender;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.ZooDefs.Ids;
-import org.apache.zookeeper.server.NIOServerCnxnFactory;
-import org.apache.zookeeper.server.ZooKeeperServer;
-
-public class LocalBookKeeper {
-    protected static final Logger LOG = Logger.getLogger(LocalBookKeeper.class);
-    public static final int CONNECTION_TIMEOUT = 30000;
-    
-	ConsoleAppender ca;
-	int numberOfBookies;
-	
-	public LocalBookKeeper() {
-		ca = new ConsoleAppender(new PatternLayout());
-		LOG.addAppender(ca);
-		LOG.setLevel(Level.INFO);
-		numberOfBookies = 3;
-	}
-	
-	public LocalBookKeeper(int numberOfBookies){
-		this();
-		this.numberOfBookies = numberOfBookies;
-		LOG.info("Running " + this.numberOfBookies + " bookie(s).");
-	}
-	
-	private final String HOSTPORT = "127.0.0.1:2181";
-	NIOServerCnxnFactory serverFactory;
-	ZooKeeperServer zks;
-	ZooKeeper zkc;
-	int ZooKeeperDefaultPort = 2181;
-	File ZkTmpDir;
-
-	//BookKeeper variables
-	File tmpDirs[];
-	BookieServer bs[];
-	Integer initialPort = 5000;
-
-	private void runZookeeper(int maxCC) throws IOException{
-		// create a ZooKeeper server(dataDir, dataLogDir, port)
-		LOG.info("Starting ZK server");
-		//ServerStats.registerAsConcrete();
-		//ClientBase.setupTestEnv();
-		ZkTmpDir = File.createTempFile("zookeeper", "test");
-        ZkTmpDir.delete();
-        ZkTmpDir.mkdir();
-		    
-		try {
-			zks = new ZooKeeperServer(ZkTmpDir, ZkTmpDir, ZooKeeperDefaultPort);
-			serverFactory =  new NIOServerCnxnFactory();
-			serverFactory.configure(new InetSocketAddress(ZooKeeperDefaultPort), maxCC);
-			serverFactory.startup(zks);
-		} catch (Exception e) {
-			LOG.fatal("Exception while instantiating ZooKeeper", e);
-		} 
-
-        boolean b = waitForServerUp(HOSTPORT, CONNECTION_TIMEOUT);
-        LOG.debug("ZooKeeper server up: " + b);
-	}
-	
-	private void initializeZookeeper() {
-		LOG.info("Instantiate ZK Client");
-		//initialize the zk client with values
-		try {
-			zkc = new ZooKeeper("127.0.0.1", ZooKeeperDefaultPort, new emptyWatcher());
-			zkc.create("/ledgers", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-			zkc.create("/ledgers/available", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-            // No need to create an entry for each requested bookie anymore as the 
-            // BookieServers will register themselves with ZooKeeper on startup.
-		} catch (KeeperException e) {
-			LOG.fatal("Exception while creating znodes", e);
-		} catch (InterruptedException e) {
-			LOG.fatal("Interrupted while creating znodes", e);
-		} catch (IOException e) {
-			LOG.fatal("Exception while creating znodes", e);
-		}		
-	}
-	private void runBookies() throws IOException{
-		LOG.info("Starting Bookie(s)");
-		// Create Bookie Servers (B1, B2, B3)
-		
-		tmpDirs = new File[numberOfBookies];		
-		bs = new BookieServer[numberOfBookies];
-		
-		for(int i = 0; i < numberOfBookies; i++){
-			tmpDirs[i] = File.createTempFile("bookie" + Integer.toString(i), "test");
-			tmpDirs[i].delete();
-			tmpDirs[i].mkdir();
-			
-			bs[i] = new BookieServer(initialPort + i, InetAddress.getLocalHost().getHostAddress() + ":"
-                    + ZooKeeperDefaultPort, tmpDirs[i], new File[]{tmpDirs[i]});
-			bs[i].start();
-		}		
-	}
-	
-	public static void main(String[] args) throws IOException, InterruptedException {
-		if(args.length < 1){
-			usage();
-			System.exit(-1);
-		}
-		LocalBookKeeper lb = new LocalBookKeeper(Integer.parseInt(args[0]));
-		lb.runZookeeper(1000);
-		lb.initializeZookeeper();
-		lb.runBookies();
-		while (true){
-			Thread.sleep(5000);
-		}
-	}
-
-	private static void usage() {
-		System.err.println("Usage: LocalBookKeeper number-of-bookies");	
-	}
-
-	/* Used for testing purposes only */
-	class emptyWatcher implements Watcher{
-		public void process(WatchedEvent event) {}
-	}
-	
-	public static boolean waitForServerUp(String hp, long timeout) {
-        long start = System.currentTimeMillis();
-        String split[] = hp.split(":");
-        String host = split[0];
-        int port = Integer.parseInt(split[1]);
-        while (true) {
-            try {
-                Socket sock = new Socket(host, port);
-                BufferedReader reader = null;
-                try {
-                    OutputStream outstream = sock.getOutputStream();
-                    outstream.write("stat".getBytes());
-                    outstream.flush();
-
-                    reader =
-                        new BufferedReader(
-                                new InputStreamReader(sock.getInputStream()));
-                    String line = reader.readLine();
-                    if (line != null && line.startsWith("Zookeeper version:")) {
-                        LOG.info("Server UP");
-                        return true;
-                    }
-                } finally {
-                    sock.close();
-                    if (reader != null) {
-                        reader.close();
-                    }
-                }
-            } catch (IOException e) {
-                // ignore as this is expected
-                LOG.info("server " + hp + " not up " + e);
-            }
-
-            if (System.currentTimeMillis() > start + timeout) {
-                break;
-            }
-            try {
-                Thread.sleep(250);
-            } catch (InterruptedException e) {
-                // ignore
-            }
-        }
-        return false;
-    }
-	
-}
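
As a quick usage sketch, assuming ports 2181 and 5000 upwards are free on the local machine, the class is normally driven through its main(), which starts an embedded ZooKeeper plus the requested number of bookies and then loops forever:

    // Equivalent to: java org.apache.bookkeeper.util.LocalBookKeeper 3
    LocalBookKeeper.main(new String[] { "3" });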

+ 0 - 54
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/util/Main.java

@@ -1,54 +0,0 @@
-package org.apache.bookkeeper.util;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.io.IOException;
-
-import org.apache.bookkeeper.proto.BookieClient;
-import org.apache.bookkeeper.proto.BookieServer;
-
-public class Main {
-
-    static void usage() {
-        System.err.println("USAGE: bookeeper client|bookie");
-    }
-
-    /**
-     * @param args
-     * @throws InterruptedException
-     * @throws IOException
-     */
-    public static void main(String[] args) throws IOException, InterruptedException {
-        if (args.length < 1 || !(args[0].equals("client") || args[0].equals("bookie"))) {
-            usage();
-            return;
-        }
-        String newArgs[] = new String[args.length - 1];
-        System.arraycopy(args, 1, newArgs, 0, newArgs.length);
-        if (args[0].equals("bookie")) {
-            BookieServer.main(newArgs);
-        } else {
-            BookieClient.main(newArgs);
-        }
-    }
-
-}
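
The dispatcher above simply strips the first argument and forwards the rest. A small sketch of that shuffle, with made-up argument values:

    String[] args = { "bookie", "3181", "127.0.0.1:2181" };   // hypothetical input
    String[] newArgs = new String[args.length - 1];
    System.arraycopy(args, 1, newArgs, 0, newArgs.length);
    // newArgs == { "3181", "127.0.0.1:2181" }, which would be handed to
    // BookieServer.main(newArgs); the values are placeholders, not the
    // documented BookieServer argument list.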

+ 0 - 38
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/util/MathUtils.java

@@ -1,38 +0,0 @@
-package org.apache.bookkeeper.util;
-
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Provides miscellaneous math functions that don't come standard.
- */
-public class MathUtils {
-
-    public static int signSafeMod(long dividend, int divisor){
-        int mod = (int) (dividend % divisor);
-        
-        if (mod < 0){
-            mod += divisor;
-        }
-        
-        return mod;
-        
-    }
-
-}
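
The reason signSafeMod exists is that Java's % operator can return a negative remainder for a negative dividend (such as a negative hashCode()), which would be unusable as an array index. A small sketch:

    int divisor = 8;
    long dividend = -3L;                                  // e.g. a negative hashCode()
    int raw  = (int) (dividend % divisor);                // -3 with plain %
    int safe = MathUtils.signSafeMod(dividend, divisor);  //  5, always in [0, divisor)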

+ 0 - 98
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/util/OrderedSafeExecutor.java

@@ -1,98 +0,0 @@
-package org.apache.bookkeeper.util;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.Random;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-
-/**
- * This class provides two things over the java {@link ScheduledExecutorService}.
- * 
- * 1. It takes {@link SafeRunnable} objects instead of plain Runnable objects.
- * This means that exceptions in scheduled tasks won't go unnoticed and will be
- * logged.
- * 
- * 2. It supports submitting tasks with an ordering key, so that tasks submitted
- * with the same key will always be executed in order, but tasks across
- * different keys can run unordered. This retains parallelism while providing the
- * basic amount of ordering we want (e.g., per ledger handle). Ordering is
- * achieved by hashing the key objects to threads by their {@link #hashCode()}
- * method.
- * 
- */
-public class OrderedSafeExecutor {
-    ExecutorService threads[];
-    Random rand = new Random();
-
-    public OrderedSafeExecutor(int numThreads) {
-        if (numThreads <= 0) {
-            throw new IllegalArgumentException();
-        }
-
-        threads = new ExecutorService[numThreads];
-        for (int i = 0; i < numThreads; i++) {
-            threads[i] = Executors.newSingleThreadExecutor();
-        }
-    }
-
-    ExecutorService chooseThread() {
-        // skip random # generation in this special case
-        if (threads.length == 1) {
-            return threads[0];
-        }
-
-        return threads[rand.nextInt(threads.length)];
-
-    }
-
-    ExecutorService chooseThread(Object orderingKey) {
-        // skip hashcode generation in this special case
-        if (threads.length == 1) {
-            return threads[0];
-        }
-
-        return threads[MathUtils.signSafeMod(orderingKey.hashCode(), threads.length)];
-
-    }
-
-    /**
-     * Submits a one-time task for execution on some thread.
-     */
-    public void submit(SafeRunnable r) {
-        chooseThread().submit(r);
-    }
-
-    /**
-     * Submits a one-time task for execution, with an ordering guarantee on the key.
-     *
-     * @param orderingKey key used to pick the executing thread
-     * @param r task to run
-     */
-    public void submitOrdered(Object orderingKey, SafeRunnable r) {
-        chooseThread(orderingKey).submit(r);
-    }
-
-    public void shutdown() {
-        for (int i = 0; i < threads.length; i++) {
-            threads[i].shutdown();
-        }
-    }
-
-}
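
A short sketch of the ordering guarantee described in the class comment: two tasks submitted with the same key (a ledger id here) are routed to the same single-threaded executor and therefore run in submission order, while tasks for other keys may run concurrently:

    OrderedSafeExecutor executor = new OrderedSafeExecutor(4);
    long ledgerId = 42L;
    executor.submitOrdered(ledgerId, new SafeRunnable() {
        public void safeRun() {
            // first operation for ledger 42
        }
    });
    executor.submitOrdered(ledgerId, new SafeRunnable() {
        public void safeRun() {
            // runs strictly after the first task for ledger 42
        }
    });
    executor.shutdown();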

+ 0 - 38
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/util/SafeRunnable.java

@@ -1,38 +0,0 @@
-package org.apache.bookkeeper.util;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.log4j.Logger;
-
-public abstract class SafeRunnable implements Runnable{
-
-    static final Logger logger = Logger.getLogger(SafeRunnable.class);
-    
-    @Override
-    public void run() {
-        try {
-            safeRun();
-        } catch (Throwable t) {
-            logger.fatal("Unexpected throwable caught ", t);
-        }
-    }
-    
-    public abstract void safeRun();
-    
-}
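
A tiny sketch of what the wrapper buys you: any Throwable escaping safeRun() is logged at FATAL level instead of silently killing the thread that ran the task:

    SafeRunnable task = new SafeRunnable() {
        public void safeRun() {
            throw new IllegalStateException("boom");   // logged by run(), not rethrown
        }
    };
    task.run();   // logs "Unexpected throwable caught" and returns normally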

+ 0 - 94
src/contrib/bookkeeper/src/java/org/apache/bookkeeper/util/StringUtils.java

@@ -1,94 +0,0 @@
-package org.apache.bookkeeper.util;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-
-/**
- * Provides utilities for parsing network addresses, ledger ids from node paths,
- * etc.
- * 
- */
-public class StringUtils {
-
-    /*
-     * Path to ledger metadata. ZooKeeper appends a sequence number to L.
-     */
-    static public final String prefix = "/ledgers/L";
-
-    /**
-     * Parses an address of the form host:port into an InetSocketAddress.
-     * 
-     * @param s
-     *            address string in host:port form
-     */
-
-    public static InetSocketAddress parseAddr(String s) throws IOException {
-
-        String parts[] = s.split(":");
-        if (parts.length != 2) {
-            throw new IOException(s + " does not have the form host:port");
-        }
-        int port;
-        try {
-            port = Integer.parseInt(parts[1]);
-        } catch (NumberFormatException e) {
-            throw new IOException(s + " does not have the form host:port");
-        }
-
-        InetSocketAddress addr = new InetSocketAddress(parts[0], port);
-        return addr;
-    }
-
-    public static StringBuilder addrToString(StringBuilder sb, InetSocketAddress addr) {
-        return sb.append(addr.getAddress().getHostAddress()).append(":").append(addr.getPort());
-    }
-
-    /**
-     * Formats ledger ID according to ZooKeeper rules
-     * 
-     * @param id
-     *            znode id
-     */
-    public static String getZKStringId(long id) {
-        return String.format("%010d", id);
-    }
-
-    /**
-     * Get the path of the ledger metadata znode for the given ledger id.
-     * 
-     * @param ledgerId
-     *            ledger id
-     * @return path of the ledger metadata node
-     */
-    public static String getLedgerNodePath(long ledgerId) {
-        return prefix + StringUtils.getZKStringId(ledgerId);
-    }
-
-    public static long getLedgerId(String nodeName) throws IOException {
-        long ledgerId;
-        try {
-            String parts[] = nodeName.split(prefix);
-            ledgerId = Long.parseLong(parts[parts.length - 1]);
-        } catch (NumberFormatException e) {
-            throw new IOException(e);
-        }
-        return ledgerId;
-    }
-
-}
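
A brief sketch of the helpers above, using a placeholder address and ledger id (parseAddr and getLedgerId throw IOException, which is left to the caller here):

    InetSocketAddress addr = StringUtils.parseAddr("127.0.0.1:3181");  // host:port form
    String hostPort = StringUtils.addrToString(new StringBuilder(), addr).toString();
    String path = StringUtils.getLedgerNodePath(7L);   // "/ledgers/L0000000007"
    long id = StringUtils.getLedgerId(path);           // 7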

+ 0 - 256
src/contrib/bookkeeper/test/org/apache/bookkeeper/test/AsyncLedgerOpsTest.java

@@ -1,256 +0,0 @@
-package org.apache.bookkeeper.test;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Enumeration;
-import java.util.Random;
-import java.util.Set;
-
-import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
-import org.apache.bookkeeper.client.LedgerEntry;
-import org.apache.bookkeeper.client.AsyncCallback.CloseCallback;
-import org.apache.bookkeeper.client.AsyncCallback.CreateCallback;
-import org.apache.bookkeeper.client.AsyncCallback.OpenCallback;
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.bookkeeper.client.AsyncCallback.ReadCallback;
-import org.apache.bookkeeper.client.BookKeeper.DigestType;
-import org.apache.log4j.Logger;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runners.Parameterized.Parameters;
-
-/**
- * This test exercises reads and writes, synchronous and asynchronous, of strings
- * and integers through a BookKeeper client. The test deployment uses a ZooKeeper
- * server and three bookies.
- * 
- */
-public class AsyncLedgerOpsTest extends BaseTestCase implements AddCallback, ReadCallback, CreateCallback,
-        CloseCallback, OpenCallback {
-    static Logger LOG = Logger.getLogger(AsyncLedgerOpsTest.class);
-
-    DigestType digestType;
-    
-    public AsyncLedgerOpsTest(DigestType digestType) {
-        super(3);
-        this.digestType = digestType;
-    }
-    
-    @Parameters
-    public static Collection<Object[]> configs(){
-        return Arrays.asList(new Object[][]{ {DigestType.MAC }, {DigestType.CRC32}});
-    }
-    
- 
-    byte[] ledgerPassword = "aaa".getBytes();
-    LedgerHandle lh, lh2;
-    long ledgerId;
-    Enumeration<LedgerEntry> ls;
-
-    // test related variables
-    int numEntriesToWrite = 20;
-    int maxInt = 2147483647;
-    Random rng; // Random Number Generator
-    ArrayList<byte[]> entries; // generated entries
-    ArrayList<Integer> entriesSize;
-
-    // Synchronization
-    SyncObj sync;
-    Set<Object> syncObjs;
-
-    class SyncObj {
-        int counter;
-        boolean value;
-
-        public SyncObj() {
-            counter = 0;
-            value = false;
-        }
-    }
-
-    class ControlObj {
-        LedgerHandle lh;
-
-        void setLh(LedgerHandle lh) {
-            this.lh = lh;
-        }
-
-        LedgerHandle getLh() {
-            return lh;
-        }
-    }
-
-    @Test
-    public void testAsyncCreateClose() throws IOException {
-        try {
-            
-            ControlObj ctx = new ControlObj();
-
-            synchronized (ctx) {
-                LOG.info("Going to create ledger asynchronously");
-                bkc.asyncCreateLedger(3, 2, digestType, ledgerPassword, this, ctx);
-
-                ctx.wait();
-            }
-
-            // bkc.initMessageDigest("SHA1");
-            LedgerHandle lh = ctx.getLh();
-            ledgerId = lh.getId();
-            LOG.info("Ledger ID: " + lh.getId());
-            for (int i = 0; i < numEntriesToWrite; i++) {
-                ByteBuffer entry = ByteBuffer.allocate(4);
-                entry.putInt(rng.nextInt(maxInt));
-                entry.position(0);
-
-                entries.add(entry.array());
-                entriesSize.add(entry.array().length);
-                lh.asyncAddEntry(entry.array(), this, sync);
-            }
-
-            // wait for all entries to be acknowledged
-            synchronized (sync) {
-                while (sync.counter < numEntriesToWrite) {
-                    LOG.debug("Entries counter = " + sync.counter);
-                    sync.wait();
-                }
-            }
-
-            LOG.info("*** WRITE COMPLETE ***");
-            // close ledger
-            synchronized (ctx) {
-                lh.asyncClose(this, ctx);
-                ctx.wait();
-            }
-
-            // *** WRITING PART COMPLETE // READ PART BEGINS ***
-
-            // open ledger
-            synchronized (ctx) {
-                bkc.asyncOpenLedger(ledgerId, digestType, ledgerPassword, this, ctx);
-                ctx.wait();
-            }
-            lh = ctx.getLh();
-
-            LOG.debug("Number of entries written: " + lh.getLastAddConfirmed());
-            assertTrue("Verifying number of entries written", lh.getLastAddConfirmed() == (numEntriesToWrite - 1));
-
-            // read entries
-            lh.asyncReadEntries(0, numEntriesToWrite - 1, this, sync);
-
-            synchronized (sync) {
-                while (sync.value == false) {
-                    sync.wait();
-                }
-            }
-
-            LOG.debug("*** READ COMPLETE ***");
-
-            // at this point, Enumeration<LedgerEntry> ls is filled with the returned
-            // values
-            int i = 0;
-            while (ls.hasMoreElements()) {
-                ByteBuffer origbb = ByteBuffer.wrap(entries.get(i));
-                Integer origEntry = origbb.getInt();
-                byte[] entry = ls.nextElement().getEntry();
-                ByteBuffer result = ByteBuffer.wrap(entry);
-                LOG.debug("Length of result: " + result.capacity());
-                LOG.debug("Original entry: " + origEntry);
-
-                Integer retrEntry = result.getInt();
-                LOG.debug("Retrieved entry: " + retrEntry);
-                assertTrue("Checking entry " + i + " for equality", origEntry.equals(retrEntry));
-                assertTrue("Checking entry " + i + " for size", entry.length == entriesSize.get(i).intValue());
-                i++;
-            }
-            assertTrue("Checking number of read entries", i == numEntriesToWrite);
-            lh.close();
-        } catch (InterruptedException e) {
-            LOG.error(e);
-            fail("InterruptedException");
-        }
-
-    }
-
-    public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) {
-        SyncObj x = (SyncObj) ctx;
-        synchronized (x) {
-            x.counter++;
-            x.notify();
-        }
-    }
-
-    public void readComplete(int rc, LedgerHandle lh, Enumeration<LedgerEntry> seq, Object ctx) {
-        ls = seq;
-        synchronized (sync) {
-            sync.value = true;
-            sync.notify();
-        }
-
-    }
-
-    public void createComplete(int rc, LedgerHandle lh, Object ctx) {
-        synchronized (ctx) {
-            ControlObj cobj = (ControlObj) ctx;
-            cobj.setLh(lh);
-            cobj.notify();
-        }
-    }
-
-    public void openComplete(int rc, LedgerHandle lh, Object ctx) {
-        synchronized (ctx) {
-            ControlObj cobj = (ControlObj) ctx;
-            cobj.setLh(lh);
-            cobj.notify();
-        }
-    }
-
-    public void closeComplete(int rc, LedgerHandle lh, Object ctx) {
-        synchronized (ctx) {
-            ControlObj cobj = (ControlObj) ctx;
-            cobj.notify();
-        }
-    }
-
-
-    @Before
-    @Override
-    public void setUp() throws Exception {
-        super.setUp();
-        rng = new Random(System.currentTimeMillis()); // Initialize the Random
-                                                      // Number Generator
-        entries = new ArrayList<byte[]>(); // initialize the entries list
-        entriesSize = new ArrayList<Integer>();
-        sync = new SyncObj(); // initialize the synchronization data structure
-    }
-
-    
-
-
-
-}

+ 0 - 176
src/contrib/bookkeeper/test/org/apache/bookkeeper/test/BaseTestCase.java

@@ -1,176 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.bookkeeper.test;
-
-import java.io.File;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
-import org.apache.bookkeeper.client.BookKeeper;
-import org.apache.bookkeeper.client.BookKeeper.DigestType;
-import org.apache.bookkeeper.proto.BookieServer;
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.ZooDefs.Ids;
-import org.apache.zookeeper.server.NIOServerCnxnFactory;
-import org.apache.zookeeper.server.ZooKeeperServer;
-import org.apache.zookeeper.test.ClientBase;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-
-import junit.framework.TestCase;
-
-@RunWith(Parameterized.class)
-public abstract class BaseTestCase extends TestCase {
-    static final Logger LOG = Logger.getLogger(BaseTestCase.class);
-    // ZooKeeper related variables
-    static final String HOSTPORT = "127.0.0.1:2181";
-    static Integer ZooKeeperDefaultPort = 2181;
-    ZooKeeperServer zks;
-    ZooKeeper zkc; // zookeeper client
-    NIOServerCnxnFactory serverFactory;
-    File ZkTmpDir;
-
-    // BookKeeper
-    List<File> tmpDirs = new ArrayList<File>();
-    List<BookieServer> bs = new ArrayList<BookieServer>();
-    Integer initialPort = 5000;
-    int numBookies;
-    BookKeeper bkc;
-
-    public BaseTestCase(int numBookies) {
-        this.numBookies = numBookies;
-    }
-    
-    @Parameters
-    public static Collection<Object[]> configs(){
-        return Arrays.asList(new Object[][]{ {DigestType.MAC }, {DigestType.CRC32}});
-    }
-
-
-    @Before
-    @Override
-    public void setUp() throws Exception {
-        try {
-        // create a ZooKeeper server(dataDir, dataLogDir, port)
-        LOG.debug("Running ZK server");
-        // ServerStats.registerAsConcrete();
-        ClientBase.setupTestEnv();
-        ZkTmpDir = File.createTempFile("zookeeper", "test");
-        ZkTmpDir.delete();
-        ZkTmpDir.mkdir();
-
-        zks = new ZooKeeperServer(ZkTmpDir, ZkTmpDir, ZooKeeperDefaultPort);
-        serverFactory = new NIOServerCnxnFactory();
-        serverFactory.configure(new InetSocketAddress(ZooKeeperDefaultPort), 100);
-        serverFactory.startup(zks);
-
-        boolean b = ClientBase.waitForServerUp(HOSTPORT, ClientBase.CONNECTION_TIMEOUT);
-
-        LOG.debug("Server up: " + b);
-
-        // create a zookeeper client
-        LOG.debug("Instantiate ZK Client");
-        zkc = new ZooKeeper("127.0.0.1", ZooKeeperDefaultPort, new emptyWatcher());
-
-        // initialize the zk client with values
-        zkc.create("/ledgers", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-        zkc.create("/ledgers/available", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-
-        // Create Bookie Servers (B1, B2, B3)
-        for (int i = 0; i < numBookies; i++) {
-            File f = File.createTempFile("bookie", "test");
-            tmpDirs.add(f);
-            f.delete();
-            f.mkdir();
-
-            BookieServer server = new BookieServer(initialPort + i, HOSTPORT, f, new File[] { f });
-            server.start();
-            bs.add(server);
-        }
-        zkc.close();
-        bkc = new BookKeeper("127.0.0.1");
-        } catch(Exception e) {
-            e.printStackTrace();
-            throw e;
-        }
-    }
-
-    @After
-    @Override
-    public void tearDown() throws Exception {
-        LOG.info("TearDown");
-
-        if (bkc != null) {
-            bkc.halt();
-        }
-        
-        for (BookieServer server : bs) {
-            server.shutdown();
-        }
-
-        for (File f : tmpDirs) {
-            cleanUpDir(f);
-        }
-
-        // shutdown ZK server
-        if (serverFactory != null) {
-            serverFactory.shutdown();
-            assertTrue("waiting for server down", ClientBase.waitForServerDown(HOSTPORT, ClientBase.CONNECTION_TIMEOUT));
-        }
-        // ServerStats.unregister();
-        cleanUpDir(ZkTmpDir);
-        
-
-    }
-
-    /* Clean up a directory recursively */
-    protected boolean cleanUpDir(File dir) {
-        if (dir.isDirectory()) {
-            LOG.info("Cleaning up " + dir.getName());
-            String[] children = dir.list();
-            for (String string : children) {
-                boolean success = cleanUpDir(new File(dir, string));
-                if (!success)
-                    return false;
-            }
-        }
-        // The directory is now empty so delete it
-        return dir.delete();
-    }
-
-    /* Used for testing purposes only */
-    class emptyWatcher implements Watcher {
-        public void process(WatchedEvent event) {
-        }
-    }
-
-}

+ 0 - 232
src/contrib/bookkeeper/test/org/apache/bookkeeper/test/BookieClientTest.java

@@ -1,232 +0,0 @@
-package org.apache.bookkeeper.test;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.io.File;
-import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import java.util.concurrent.Executors;
-
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBuffers;
-import org.jboss.netty.channel.socket.ClientSocketChannelFactory;
-import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
-import org.junit.Test;
-import org.apache.bookkeeper.client.BKException;
-import org.apache.bookkeeper.proto.BookieClient;
-import org.apache.bookkeeper.proto.BookieServer;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadEntryCallback;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback;
-import org.apache.bookkeeper.util.OrderedSafeExecutor;
-import org.apache.log4j.Logger;
-
-import junit.framework.TestCase;
-
-public class BookieClientTest extends TestCase {
-    static Logger LOG = Logger.getLogger(BookieClientTest.class);
-    BookieServer bs;
-    File tmpDir;
-    int port = 13645;
-    ClientSocketChannelFactory channelFactory;
-    OrderedSafeExecutor executor;
-
-    @Override
-    protected void setUp() throws Exception {
-        tmpDir = File.createTempFile("bookie", "test");
-        tmpDir.delete();
-        tmpDir.mkdir();
-        // Since this test does not rely on the BookKeeper client needing to
-        // know via ZooKeeper which bookies are available, pass in null for the
-        // zkServers parameter when constructing the BookieServer.
-        bs = new BookieServer(port, null, tmpDir, new File[] { tmpDir });
-        bs.start();
-        channelFactory = new NioClientSocketChannelFactory(Executors.newCachedThreadPool(), Executors
-                .newCachedThreadPool());
-        executor = new OrderedSafeExecutor(2);
-    }
-
-    @Override
-    protected void tearDown() throws Exception {
-        bs.shutdown();
-        recursiveDelete(tmpDir);
-        channelFactory.releaseExternalResources();
-        executor.shutdown();
-    }
-
-    private static void recursiveDelete(File dir) {
-        File children[] = dir.listFiles();
-        if (children != null) {
-            for (File child : children) {
-                recursiveDelete(child);
-            }
-        }
-        dir.delete();
-    }
-
-    static class ResultStruct {
-        int rc;
-        ByteBuffer entry;
-    }
-
-    ReadEntryCallback recb = new ReadEntryCallback() {
-
-        public void readEntryComplete(int rc, long ledgerId, long entryId, ChannelBuffer bb, Object ctx) {
-            ResultStruct rs = (ResultStruct) ctx;
-            synchronized (rs) {
-                rs.rc = rc;
-                if (bb != null) {
-                    bb.readerIndex(16);
-                    rs.entry = bb.toByteBuffer();
-                    rs.notifyAll();
-                }
-            }
-        }
-
-    };
-
-    WriteCallback wrcb = new WriteCallback() {
-        public void writeComplete(int rc, long ledgerId, long entryId, InetSocketAddress addr, Object ctx) {
-            if (ctx != null) {
-                synchronized (ctx) {
-                    ctx.notifyAll();
-                }
-            }
-        }
-    };
-
-    @Test
-    public void testWriteGaps() throws Exception {
-        final Object notifyObject = new Object();
-        byte[] passwd = new byte[20];
-        Arrays.fill(passwd, (byte) 'a');
-        InetSocketAddress addr = new InetSocketAddress("127.0.0.1", port);
-        ResultStruct arc = new ResultStruct();
-
-        BookieClient bc = new BookieClient(channelFactory, executor);
-        ChannelBuffer bb;
-        bb = createByteBuffer(1, 1, 1);
-        bc.addEntry(addr, 1, passwd, 1, bb, wrcb, null);
-        synchronized (arc) {
-            bc.readEntry(addr, 1, 1, recb, arc);
-            arc.wait(1000);
-            assertEquals(0, arc.rc);
-            assertEquals(1, arc.entry.getInt());
-        }
-        bb = createByteBuffer(2, 1, 2);
-        bc.addEntry(addr, 1, passwd, 2, bb, wrcb, null);
-        bb = createByteBuffer(3, 1, 3);
-        bc.addEntry(addr, 1, passwd, 3, bb, wrcb, null);
-        bb = createByteBuffer(5, 1, 5);
-        bc.addEntry(addr, 1, passwd, 5, bb, wrcb, null);
-        bb = createByteBuffer(7, 1, 7);
-        bc.addEntry(addr, 1, passwd, 7, bb, wrcb, null);
-        synchronized (notifyObject) {
-            bb = createByteBuffer(11, 1, 11);
-            bc.addEntry(addr, 1, passwd, 11, bb, wrcb, notifyObject);
-            notifyObject.wait();
-        }
-        synchronized (arc) {
-            bc.readEntry(addr, 1, 6, recb, arc);
-            arc.wait(1000);
-            assertEquals(BKException.Code.NoSuchEntryException, arc.rc);
-        }
-        synchronized (arc) {
-            bc.readEntry(addr, 1, 7, recb, arc);
-            arc.wait(1000);
-            assertEquals(0, arc.rc);
-            assertEquals(7, arc.entry.getInt());
-        }
-        synchronized (arc) {
-            bc.readEntry(addr, 1, 1, recb, arc);
-            arc.wait(1000);
-            assertEquals(0, arc.rc);
-            assertEquals(1, arc.entry.getInt());
-        }
-        synchronized (arc) {
-            bc.readEntry(addr, 1, 2, recb, arc);
-            arc.wait(1000);
-            assertEquals(0, arc.rc);
-            assertEquals(2, arc.entry.getInt());
-        }
-        synchronized (arc) {
-            bc.readEntry(addr, 1, 3, recb, arc);
-            arc.wait(1000);
-            assertEquals(0, arc.rc);
-            assertEquals(3, arc.entry.getInt());
-        }
-        synchronized (arc) {
-            bc.readEntry(addr, 1, 4, recb, arc);
-            arc.wait(1000);
-            assertEquals(BKException.Code.NoSuchEntryException, arc.rc);
-        }
-        synchronized (arc) {
-            bc.readEntry(addr, 1, 11, recb, arc);
-            arc.wait(1000);
-            assertEquals(0, arc.rc);
-            assertEquals(11, arc.entry.getInt());
-        }
-        synchronized (arc) {
-            bc.readEntry(addr, 1, 5, recb, arc);
-            arc.wait(1000);
-            assertEquals(0, arc.rc);
-            assertEquals(5, arc.entry.getInt());
-        }
-        synchronized (arc) {
-            bc.readEntry(addr, 1, 10, recb, arc);
-            arc.wait(1000);
-            assertEquals(BKException.Code.NoSuchEntryException, arc.rc);
-        }
-        synchronized (arc) {
-            bc.readEntry(addr, 1, 12, recb, arc);
-            arc.wait(1000);
-            assertEquals(BKException.Code.NoSuchEntryException, arc.rc);
-        }
-        synchronized (arc) {
-            bc.readEntry(addr, 1, 13, recb, arc);
-            arc.wait(1000);
-            assertEquals(BKException.Code.NoSuchEntryException, arc.rc);
-        }
-    }
-
-    private ChannelBuffer createByteBuffer(int i, long lid, long eid) {
-        ByteBuffer bb;
-        bb = ByteBuffer.allocate(4 + 16);
-        bb.putLong(lid);
-        bb.putLong(eid);
-        bb.putInt(i);
-        bb.flip();
-        return ChannelBuffers.wrappedBuffer(bb);
-    }
-
-    @Test
-    public void testNoLedger() throws Exception {
-        ResultStruct arc = new ResultStruct();
-        InetSocketAddress addr = new InetSocketAddress("127.0.0.1", port);
-        BookieClient bc = new BookieClient(channelFactory, executor);
-        synchronized (arc) {
-            bc.readEntry(addr, 2, 13, recb, arc);
-            arc.wait(1000);
-            assertEquals(BKException.Code.NoSuchEntryException, arc.rc);
-        }
-    }
-}
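
For readers puzzling over readerIndex(16) in the read callback and the 4 + 16 allocation in createByteBuffer: the test assumes each returned entry carries an 8-byte ledger id and an 8-byte entry id ahead of the payload. A standalone sketch of that layout:

    ByteBuffer bb = ByteBuffer.allocate(16 + 4);
    bb.putLong(1L);    // ledger id
    bb.putLong(7L);    // entry id
    bb.putInt(42);     // payload
    bb.flip();
    bb.position(16);               // skip the 16-byte header, as readEntryComplete does
    int payload = bb.getInt();     // 42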

+ 0 - 305
src/contrib/bookkeeper/test/org/apache/bookkeeper/test/BookieFailureTest.java

@@ -1,305 +0,0 @@
-package org.apache.bookkeeper.test;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Enumeration;
-import java.util.Random;
-import java.util.Set;
-
-import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
-import org.apache.bookkeeper.client.BKException;
-import org.apache.bookkeeper.client.BookKeeper;
-import org.apache.bookkeeper.client.LedgerEntry;
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.bookkeeper.client.AsyncCallback.ReadCallback;
-import org.apache.bookkeeper.client.BookKeeper.DigestType;
-import org.apache.bookkeeper.proto.BookieServer;
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * This test exercises reads and writes, synchronous and asynchronous, while
- * individual bookies fail. The test deployment uses a ZooKeeper server and four
- * bookies.
- * 
- */
-
-public class BookieFailureTest extends BaseTestCase implements AddCallback, ReadCallback {
-
-    // Depending on your taste, select the amount of logging
-    // by uncommenting one of the two lines below
-    // static Logger LOG = Logger.getRootLogger();
-    static Logger LOG = Logger.getLogger(BookieFailureTest.class);
-
-    byte[] ledgerPassword = "aaa".getBytes();
-    LedgerHandle lh, lh2;
-    long ledgerId;
-    Enumeration<LedgerEntry> ls;
-
-    // test related variables
-    int numEntriesToWrite = 200;
-    int maxInt = 2147483647;
-    Random rng; // Random Number Generator
-    ArrayList<byte[]> entries; // generated entries
-    ArrayList<Integer> entriesSize;
-    DigestType digestType;
-    
-    // Synchronization
-    SyncObj sync;
-    Set<Object> syncObjs;
-
-    class SyncObj {
-        int counter;
-        boolean value;
-
-        public SyncObj() {
-            counter = 0;
-            value = false;
-        }
-    }
-
-    public BookieFailureTest(DigestType digestType) {
-        super(4);
-        this.digestType = digestType;        
-    }
-    
-    /**
-     * Tests writes and reads when a bookie fails.
-     * 
-     * @throws {@link IOException}
-     */
-    @Test
-    public void testAsyncBK1() throws IOException {
-        LOG.info("#### BK1 ####");
-        auxTestReadWriteAsyncSingleClient(bs.get(0));
-    }
-    
-    @Test
-    public void testAsyncBK2() throws IOException {
-        LOG.info("#### BK2 ####");
-        auxTestReadWriteAsyncSingleClient(bs.get(1));
-    }
-
-    @Test
-    public void testAsyncBK3() throws IOException {
-        LOG.info("#### BK3 ####");
-        auxTestReadWriteAsyncSingleClient(bs.get(2));
-    }
-
-    @Test
-    public void testAsyncBK4() throws IOException {
-        LOG.info("#### BK4 ####");
-        auxTestReadWriteAsyncSingleClient(bs.get(3));
-    }
-    
-    @Test
-    public void testBookieRecovery() throws Exception{
-        bkc = new BookKeeper("127.0.0.1");
-        
-        //Shutdown all but 1 bookie
-        bs.get(0).shutdown();
-        bs.get(1).shutdown();
-        bs.get(2).shutdown();
-        
-        byte[] passwd = "blah".getBytes();
-        LedgerHandle lh = bkc.createLedger(1, 1,digestType, passwd);
-        
-        int numEntries = 100;
-        for (int i=0; i< numEntries; i++){
-            byte[] data = (""+i).getBytes();
-            lh.addEntry(data);
-        }
-        
-        bs.get(3).shutdown();
-        BookieServer server = new BookieServer(initialPort + 3, HOSTPORT, tmpDirs.get(3), new File[] { tmpDirs.get(3)});
-        server.start();
-        bs.set(3, server);
-
-        assertEquals(numEntries - 1 , lh.getLastAddConfirmed());
-        Enumeration<LedgerEntry> entries = lh.readEntries(0, lh.getLastAddConfirmed());
-        
-        int numScanned = 0;
-        while (entries.hasMoreElements()){
-            assertEquals((""+numScanned), new String(entries.nextElement().getEntry()));
-            numScanned++;
-        }
-        assertEquals(numEntries, numScanned);
-        
-        
-    }
-
-    void auxTestReadWriteAsyncSingleClient(BookieServer bs) throws IOException {
-        try {
-            // Create a BookKeeper client and a ledger
-            lh = bkc.createLedger(3, 2, digestType, ledgerPassword);
-
-            ledgerId = lh.getId();
-            LOG.info("Ledger ID: " + lh.getId());
-            for (int i = 0; i < numEntriesToWrite; i++) {
-                ByteBuffer entry = ByteBuffer.allocate(4);
-                entry.putInt(rng.nextInt(maxInt));
-                entry.position(0);
-
-                entries.add(entry.array());
-                entriesSize.add(entry.array().length);
-                lh.asyncAddEntry(entry.array(), this, sync);
-                
-            }
-            
-            LOG.info("Wrote " + numEntriesToWrite + " and now going to fail bookie.");
-            // Bookie fail
-            bs.shutdown();
-
-            // wait for all entries to be acknowledged
-            synchronized (sync) {
-                while (sync.counter < numEntriesToWrite) {
-                    LOG.debug("Entries counter = " + sync.counter);
-                    sync.wait();
-                }
-            }
-
-            LOG.debug("*** WRITE COMPLETE ***");
-            // close ledger
-            lh.close();
-
-            // *** WRITING PART COMPLETE // READ PART BEGINS ***
-
-            // open ledger
-            bkc.halt();
-            bkc = new BookKeeper("127.0.0.1");
-            lh = bkc.openLedger(ledgerId, digestType, ledgerPassword);
-            LOG.debug("Number of entries written: " + (lh.getLastAddConfirmed() + 1));
-            assertTrue("Verifying number of entries written", lh.getLastAddConfirmed() == (numEntriesToWrite - 1));
-
-            // read entries
-
-            lh.asyncReadEntries(0, numEntriesToWrite - 1, this, sync);
-
-            synchronized (sync) {
-                while (sync.value == false) {
-                    sync.wait(10000);
-                    assertTrue("Haven't received entries", sync.value);
-                }
-            }
-
-            LOG.debug("*** READ COMPLETE ***");
-
-            // at this point, Enumeration<LedgerEntry> ls is filled with the returned
-            // values
-            int i = 0;
-            while (ls.hasMoreElements()) {
-                ByteBuffer origbb = ByteBuffer.wrap(entries.get(i));
-                Integer origEntry = origbb.getInt();
-                byte[] entry = ls.nextElement().getEntry();
-                ByteBuffer result = ByteBuffer.wrap(entry);
-
-                Integer retrEntry = result.getInt();
-                LOG.debug("Retrieved entry: " + i);
-                assertTrue("Checking entry " + i + " for equality", origEntry.equals(retrEntry));
-                assertTrue("Checking entry " + i + " for size", entry.length == entriesSize.get(i).intValue());
-                i++;
-            }
-
-            assertTrue("Checking number of read entries", i == numEntriesToWrite);
-
-            LOG.info("Verified that entries are ok, and now closing ledger");
-            lh.close();
-        } catch (KeeperException e) {
-            LOG.error("Caught KeeperException", e);
-            fail(e.toString());
-        } catch (BKException e) {
-            LOG.error("Caught BKException", e);
-            fail(e.toString());
-        } catch (InterruptedException e) {
-            LOG.error("Caught InterruptedException", e);
-            fail(e.toString());
-        }
-
-    }
-
-    public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) {
-        if (rc != 0)
-            fail("Failed to write entry: " + entryId);
-        SyncObj x = (SyncObj) ctx;
-        synchronized (x) {
-            x.counter++;
-            x.notify();
-        }
-    }
-
-    public void readComplete(int rc, LedgerHandle lh, Enumeration<LedgerEntry> seq, Object ctx) {
-        if (rc != 0)
-            fail("Failed to write entry");
-        ls = seq;
-        synchronized (sync) {
-            sync.value = true;
-            sync.notify();
-        }
-
-    }
-
-    @Before
-    @Override
-    public void setUp() throws Exception {
-        super.setUp();
-
-        rng = new Random(System.currentTimeMillis()); // Initialize the Random
-                                                      // Number Generator
-        entries = new ArrayList<byte[]>(); // initialize the entries list
-        entriesSize = new ArrayList<Integer>();
-        sync = new SyncObj(); // initialize the synchronization data structure
-
-        zkc.close();
-    }
-
-
-    /* Clean up a directory recursively */
-    @Override
-    protected boolean cleanUpDir(File dir) {
-        if (dir.isDirectory()) {
-            LOG.info("Cleaning up " + dir.getName());
-            String[] children = dir.list();
-            for (String string : children) {
-                boolean success = cleanUpDir(new File(dir, string));
-                if (!success)
-                    return false;
-            }
-        }
-        // The directory is now empty so delete it
-        return dir.delete();
-    }
-
-    /* Used for testing purposes only */
-    class emptyWatcher implements Watcher {
-        public void process(WatchedEvent event) {
-        }
-    }
-
-}

+ 0 - 720
src/contrib/bookkeeper/test/org/apache/bookkeeper/test/BookieReadWriteTest.java

@@ -1,720 +0,0 @@
-package org.apache.bookkeeper.test;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.NoSuchFieldException;
-import java.lang.IllegalAccessException;
-import java.lang.reflect.Field;
-import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.Enumeration;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.Semaphore;
-
-
-import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
-import org.apache.bookkeeper.client.BKException;
-import org.apache.bookkeeper.client.BookKeeper;
-import org.apache.bookkeeper.client.LedgerEntry;
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.bookkeeper.client.AsyncCallback.ReadCallback;
-import org.apache.bookkeeper.client.BookKeeper.DigestType;
-import org.apache.bookkeeper.streaming.LedgerInputStream;
-import org.apache.bookkeeper.streaming.LedgerOutputStream;
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * This test exercises reads and writes, synchronous and asynchronous, of strings
- * and integers through a BookKeeper client. The test deployment uses a ZooKeeper
- * server and three bookies.
- * 
- */
-
-public class BookieReadWriteTest extends BaseTestCase implements AddCallback, ReadCallback {
-
-    // Depending on your taste, select the amount of logging
-    // by uncommenting one of the two lines below
-    // static Logger LOG = Logger.getRootLogger();
-    static Logger LOG = Logger.getLogger(BookieReadWriteTest.class);
-
-    byte[] ledgerPassword = "aaa".getBytes();
-    LedgerHandle lh, lh2;
-    long ledgerId;
-    Enumeration<LedgerEntry> ls;
-
-    // test related variables
-    int numEntriesToWrite = 200;
-    int maxInt = 2147483647;
-    Random rng; // Random Number Generator
-    ArrayList<byte[]> entries; // generated entries
-    ArrayList<Integer> entriesSize;
-    
-    DigestType digestType;
-    
-    public BookieReadWriteTest(DigestType digestType){
-        super(3);
-        this.digestType = digestType;
-    }
-    // Synchronization
-    SyncObj sync;
-    Set<Object> syncObjs;
-
-    class SyncObj {
-        volatile int counter;
-        boolean value;
-
-        public SyncObj() {
-            counter = 0;
-            value = false;
-        }
-    }
-
-    @Test
-    public void testOpenException() throws KeeperException, IOException, InterruptedException {
-        try {
-            lh = bkc.openLedger(0, digestType, ledgerPassword);
-            fail("Haven't thrown exception");
-        } catch (BKException e) {
-            LOG.warn("Successfully thrown and caught exception:", e);
-        }
-    }
-
-    /**
-     * Tests the streaming API for reading and writing.
-     *
-     * @throws IOException
-     * @throws KeeperException
-     */
-    @Test
-    public void testStreamingClients() throws IOException, KeeperException, BKException, InterruptedException {
-        bkc = new BookKeeper("127.0.0.1");
-        lh = bkc.createLedger(digestType, ledgerPassword);
-        // write a string so that we can
-        // create a buffer of single bytes
-        // and check for corner cases
-        String toWrite = "we need to check for this string to match " + "and for the record mahadev is the best";
-        LedgerOutputStream lout = new LedgerOutputStream(lh, 1);
-        byte[] b = toWrite.getBytes();
-        lout.write(b);
-        lout.close();
-        long lId = lh.getId();
-        lh.close();
-        // check for sanity
-        lh = bkc.openLedger(lId, digestType, ledgerPassword);
-        LedgerInputStream lin = new LedgerInputStream(lh, 1);
-        byte[] bread = new byte[b.length];
-        int read = 0;
-        while (read < b.length) {
-            read = read + lin.read(bread, read, b.length);
-        }
-
-        String newString = new String(bread);
-        assertTrue("these two should be the same", toWrite.equals(newString));
-        lin.close();
-        lh.close();
-        // create another ledger to write one byte at a time
-        lh = bkc.createLedger(digestType, ledgerPassword);
-        lout = new LedgerOutputStream(lh);
-        for (int i = 0; i < b.length; i++) {
-            lout.write(b[i]);
-        }
-        lout.close();
-        lId = lh.getId();
-        lh.close();
-        lh = bkc.openLedger(lId, digestType, ledgerPassword);
-        lin = new LedgerInputStream(lh);
-        bread = new byte[b.length];
-        read = 0;
-        while (read < b.length) {
-            read = read + lin.read(bread, read, b.length);
-        }
-        newString = new String(bread);
-        assertTrue("these two should be the same", toWrite.equals(newString));
-        lin.close();
-        lh.close();
-    }
-
-    @Test
-    public void testReadWriteAsyncSingleClient() throws IOException {
-        try {
-            // Create a BookKeeper client and a ledger
-            bkc = new BookKeeper("127.0.0.1");
-            lh = bkc.createLedger(digestType, ledgerPassword);
-            // bkc.initMessageDigest("SHA1");
-            ledgerId = lh.getId();
-            LOG.info("Ledger ID: " + lh.getId());
-            for (int i = 0; i < numEntriesToWrite; i++) {
-                ByteBuffer entry = ByteBuffer.allocate(4);
-                entry.putInt(rng.nextInt(maxInt));
-                entry.position(0);
-
-                entries.add(entry.array());
-                entriesSize.add(entry.array().length);
-                lh.asyncAddEntry(entry.array(), this, sync);
-            }
-
-            // wait for all entries to be acknowledged
-            synchronized (sync) {
-                while (sync.counter < numEntriesToWrite) {
-                    LOG.debug("Entries counter = " + sync.counter);
-                    sync.wait();
-                }
-            }
-
-            LOG.debug("*** WRITE COMPLETE ***");
-            // close ledger
-            lh.close();
-
-            // *** WRITING PART COMPLETE // READ PART BEGINS ***
-
-            // open ledger
-            lh = bkc.openLedger(ledgerId, digestType, ledgerPassword);
-            LOG.debug("Number of entries written: " + (lh.getLastAddConfirmed() + 1));
-            assertTrue("Verifying number of entries written", lh.getLastAddConfirmed() == (numEntriesToWrite - 1));
-
-            // read entries
-            lh.asyncReadEntries(0, numEntriesToWrite - 1, this, (Object) sync);
-
-            synchronized (sync) {
-                while (sync.value == false) {
-                    sync.wait();
-                }
-            }
-
-            LOG.debug("*** READ COMPLETE ***");
-
-            // at this point, Enumeration<LedgerEntry> ls is filled with the returned
-            // values
-            int i = 0;
-            while (ls.hasMoreElements()) {
-                ByteBuffer origbb = ByteBuffer.wrap(entries.get(i));
-                Integer origEntry = origbb.getInt();
-                byte[] entry = ls.nextElement().getEntry();
-                ByteBuffer result = ByteBuffer.wrap(entry);
-                LOG.debug("Length of result: " + result.capacity());
-                LOG.debug("Original entry: " + origEntry);
-
-                Integer retrEntry = result.getInt();
-                LOG.debug("Retrieved entry: " + retrEntry);
-                assertTrue("Checking entry " + i + " for equality", origEntry.equals(retrEntry));
-                assertTrue("Checking entry " + i + " for size", entry.length == entriesSize.get(i).intValue());
-                i++;
-            }
-            assertTrue("Checking number of read entries", i == numEntriesToWrite);
-
-            lh.close();
-        } catch (KeeperException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to ZooKeeper exception");
-        } catch (BKException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to BookKeeper exception");
-        } catch (InterruptedException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to interruption");
-        }
-    }
-
-    class ThrottleTestCallback implements ReadCallback {
-        int throttle;
-        
-        ThrottleTestCallback(int threshold){
-            this.throttle = threshold;
-        }
-        
-        public void readComplete(int rc, LedgerHandle lh, Enumeration<LedgerEntry> seq, Object ctx){
-            if(rc != BKException.Code.OK){
-                fail("Return code is not OK: " + rc);
-            }
-        
-            ls = seq;
-            synchronized(sync){
-                sync.counter += throttle;
-                sync.notify();
-            }
-            LOG.info("Current counter: " + sync.counter);
-        }
-    }
-    
-    /**
-     * Method for obtaining the available permits of a ledger handle
-     * using reflection to avoid adding a new public method to the
-     * class.
-     *
-     * @param lh the ledger handle to inspect
-     * @return the number of available permits on the handle's throttling semaphore
-     */
-    @SuppressWarnings("unchecked")
-    int getAvailablePermits(LedgerHandle lh) throws
-    NoSuchFieldException, IllegalAccessException
-    { 
-        Field field = LedgerHandle.class.getDeclaredField("opCounterSem"); 
-        field.setAccessible(true); 
-        return ((Semaphore)field.get(lh)).availablePermits(); 
-    }
-    
-    @Test
-    public void testReadWriteAsyncSingleClientThrottle() throws 
-    IOException, NoSuchFieldException, IllegalAccessException {
-        try {
-                       
-            Integer throttle = 100;
-            ThrottleTestCallback tcb = new ThrottleTestCallback(throttle);
-            // Create a BookKeeper client and a ledger
-            System.setProperty("throttle", throttle.toString());
-            bkc = new BookKeeper("127.0.0.1");
-            lh = bkc.createLedger(digestType, ledgerPassword);
-            // bkc.initMessageDigest("SHA1");
-            ledgerId = lh.getId();
-            LOG.info("Ledger ID: " + lh.getId());
-            
-            numEntriesToWrite = 8000; 
-            for (int i = 0; i < (numEntriesToWrite - 2000); i++) {
-                ByteBuffer entry = ByteBuffer.allocate(4);
-                entry.putInt(rng.nextInt(maxInt));
-                entry.position(0);
-
-                entries.add(entry.array());
-                entriesSize.add(entry.array().length);
-                lh.asyncAddEntry(entry.array(), this, sync);
-                /*
-                 * Check that the difference is no larger than the throttling threshold
-                 */
-                int testValue = getAvailablePermits(lh);
-                assertTrue("Difference is incorrect : " + i + ", " + sync.counter + ", " + testValue, testValue <= throttle);
-            }
-            
-
-            for (int i = 0; i < 2000; i++) {
-                ByteBuffer entry = ByteBuffer.allocate(4);
-                entry.putInt(rng.nextInt(maxInt));
-                entry.position(0);
-
-                entries.add(entry.array());
-                entriesSize.add(entry.array().length);
-                lh.asyncAddEntry(entry.array(), this, sync);
-                
-                /*
-                 * Check that the difference is no larger than the throttling threshold
-                 */
-                int testValue = getAvailablePermits(lh);
-                assertTrue("Difference is incorrect : " + i + ", " + sync.counter + ", " + testValue, testValue <= throttle);
-            }
-            
-            // wait for all entries to be acknowledged
-            synchronized (sync) {
-                while (sync.counter < numEntriesToWrite) {
-                    LOG.debug("Entries counter = " + sync.counter);
-                    sync.wait();
-                }
-            }
-
-            LOG.debug("*** WRITE COMPLETE ***");
-            // close ledger
-            lh.close();
-
-            // *** WRITING PART COMPLETE // READ PART BEGINS ***
-            
-            // open ledger
-            lh = bkc.openLedger(ledgerId, digestType, ledgerPassword);
-            LOG.debug("Number of entries written: " + (lh.getLastAddConfirmed() + 1));
-            assertTrue("Verifying number of entries written", lh.getLastAddConfirmed() == (numEntriesToWrite - 1));
-
-            // read entries
-            sync.counter = 0;
-            for (int i = 0; i < numEntriesToWrite; i+=throttle) {
-                lh.asyncReadEntries(i, i + throttle - 1, tcb, (Object) sync);
-                int testValue = getAvailablePermits(lh);
-                assertTrue("Difference is incorrect : " + i + ", " + sync.counter + ", " + testValue, testValue <= throttle);
-            }
-            
-            synchronized (sync) {
-                while (sync.counter < numEntriesToWrite) {
-                    LOG.info("Entries counter = " + sync.counter);
-                    sync.wait();
-                }
-            }
-
-            LOG.debug("*** READ COMPLETE ***");
-
-            lh.close();
-        } catch (KeeperException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to ZooKeeper exception");
-        } catch (BKException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to BookKeeper exception");
-        } catch (InterruptedException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to interruption");
-        }
-    }
-    
-    @Test
-    public void testSyncReadAsyncWriteStringsSingleClient() throws IOException {
-        LOG.info("TEST READ WRITE STRINGS MIXED SINGLE CLIENT");
-        String charset = "utf-8";
-        LOG.debug("Default charset: " + Charset.defaultCharset());
-        try {
-            // Create a BookKeeper client and a ledger
-            bkc = new BookKeeper("127.0.0.1");
-            lh = bkc.createLedger(digestType, ledgerPassword);
-            // bkc.initMessageDigest("SHA1");
-            ledgerId = lh.getId();
-            LOG.info("Ledger ID: " + lh.getId());
-            for (int i = 0; i < numEntriesToWrite; i++) {
-                int randomInt = rng.nextInt(maxInt);
-                byte[] entry = new String(Integer.toString(randomInt)).getBytes(charset);
-                entries.add(entry);
-                lh.asyncAddEntry(entry, this, sync);
-            }
-
-            // wait for all entries to be acknowledged
-            synchronized (sync) {
-                while (sync.counter < numEntriesToWrite) {
-                    LOG.debug("Entries counter = " + sync.counter);
-                    sync.wait();
-                }
-            }
-
-            LOG.debug("*** ASYNC WRITE COMPLETE ***");
-            // close ledger
-            lh.close();
-
-            // *** WRITING PART COMPLETED // READ PART BEGINS ***
-
-            // open ledger
-            lh = bkc.openLedger(ledgerId, digestType, ledgerPassword);
-            LOG.debug("Number of entries written: " + (lh.getLastAddConfirmed() + 1));
-            assertTrue("Verifying number of entries written", lh.getLastAddConfirmed() == (numEntriesToWrite - 1));
-
-            // read entries
-            ls = lh.readEntries(0, numEntriesToWrite - 1);
-
-            LOG.debug("*** SYNC READ COMPLETE ***");
-
-            // at this point, Enumeration<LedgerEntry> ls is filled with the returned
-            // values
-            int i = 0;
-            while (ls.hasMoreElements()) {
-                byte[] origEntryBytes = entries.get(i++);
-                byte[] retrEntryBytes = ls.nextElement().getEntry();
-
-                LOG.debug("Original byte entry size: " + origEntryBytes.length);
-                LOG.debug("Saved byte entry size: " + retrEntryBytes.length);
-
-                String origEntry = new String(origEntryBytes, charset);
-                String retrEntry = new String(retrEntryBytes, charset);
-
-                LOG.debug("Original entry: " + origEntry);
-                LOG.debug("Retrieved entry: " + retrEntry);
-
-                assertTrue("Checking entry " + i + " for equality", origEntry.equals(retrEntry));
-            }
-            assertTrue("Checking number of read entries", i == numEntriesToWrite);
-
-            lh.close();
-        } catch (KeeperException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to ZooKeeper exception");
-        } catch (BKException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to BookKeeper exception");
-        } catch (InterruptedException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to interruption");
-        }
-
-    }
-
-    @Test
-    public void testReadWriteSyncSingleClient() throws IOException {
-        try {
-            // Create a BookKeeper client and a ledger
-            bkc = new BookKeeper("127.0.0.1");
-            lh = bkc.createLedger(digestType, ledgerPassword);
-            // bkc.initMessageDigest("SHA1");
-            ledgerId = lh.getId();
-            LOG.info("Ledger ID: " + lh.getId());
-            for (int i = 0; i < numEntriesToWrite; i++) {
-                ByteBuffer entry = ByteBuffer.allocate(4);
-                entry.putInt(rng.nextInt(maxInt));
-                entry.position(0);
-                entries.add(entry.array());
-                lh.addEntry(entry.array());
-            }
-            lh.close();
-            lh = bkc.openLedger(ledgerId, digestType, ledgerPassword);
-            LOG.debug("Number of entries written: " + lh.getLastAddConfirmed());
-            assertTrue("Verifying number of entries written", lh.getLastAddConfirmed() == (numEntriesToWrite - 1));
-
-            ls = lh.readEntries(0, numEntriesToWrite - 1);
-            int i = 0;
-            while (ls.hasMoreElements()) {
-                ByteBuffer origbb = ByteBuffer.wrap(entries.get(i++));
-                Integer origEntry = origbb.getInt();
-                ByteBuffer result = ByteBuffer.wrap(ls.nextElement().getEntry());
-                LOG.debug("Length of result: " + result.capacity());
-                LOG.debug("Original entry: " + origEntry);
-
-                Integer retrEntry = result.getInt();
-                LOG.debug("Retrieved entry: " + retrEntry);
-                assertTrue("Checking entry " + i + " for equality", origEntry.equals(retrEntry));
-            }
-            lh.close();
-        } catch (KeeperException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to ZooKeeper exception");
-        } catch (BKException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to BookKeeper exception");
-        } catch (InterruptedException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to interruption");
-        }
-    }
-
-    @Test
-    public void testReadWriteZero() throws IOException {
-        try {
-            // Create a BookKeeper client and a ledger
-            bkc = new BookKeeper("127.0.0.1");
-            lh = bkc.createLedger(digestType, ledgerPassword);
-            // bkc.initMessageDigest("SHA1");
-            ledgerId = lh.getId();
-            LOG.info("Ledger ID: " + lh.getId());
-            for (int i = 0; i < numEntriesToWrite; i++) {
-                lh.addEntry(new byte[0]);
-            }
-
-            /*
-             * Write a non-zero entry
-             */
-            ByteBuffer entry = ByteBuffer.allocate(4);
-            entry.putInt(rng.nextInt(maxInt));
-            entry.position(0);
-            entries.add(entry.array());
-            lh.addEntry(entry.array());
-
-            lh.close();
-            lh = bkc.openLedger(ledgerId, digestType, ledgerPassword);
-            LOG.debug("Number of entries written: " + lh.getLastAddConfirmed());
-            assertTrue("Verifying number of entries written", lh.getLastAddConfirmed() == numEntriesToWrite);
-
-            ls = lh.readEntries(0, numEntriesToWrite - 1);
-            int i = 0;
-            while (ls.hasMoreElements()) {
-                ByteBuffer result = ByteBuffer.wrap(ls.nextElement().getEntry());
-                LOG.debug("Length of result: " + result.capacity());
-
-                assertTrue("Checking if entry " + i + " has zero bytes", result.capacity() == 0);
-                i++;
-            }
-            lh.close();
-        } catch (KeeperException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to ZooKeeper exception");
-        } catch (BKException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to BookKeeper exception");
-        } catch (InterruptedException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to interruption");
-        }
-    }
-
-    @Test
-    public void testMultiLedger() throws IOException {
-        try {
-            // Create a BookKeeper client and a ledger
-            bkc = new BookKeeper("127.0.0.1");
-            lh = bkc.createLedger(digestType, ledgerPassword);
-            lh2 = bkc.createLedger(digestType, ledgerPassword);
-
-            long ledgerId = lh.getId();
-            long ledgerId2 = lh2.getId();
-
-            // bkc.initMessageDigest("SHA1");
-            LOG.info("Ledger ID 1: " + lh.getId() + ", Ledger ID 2: " + lh2.getId());
-            for (int i = 0; i < numEntriesToWrite; i++) {
-                lh.addEntry(new byte[0]);
-                lh2.addEntry(new byte[0]);
-            }
-
-            lh.close();
-            lh2.close();
-
-            lh = bkc.openLedger(ledgerId, digestType, ledgerPassword);
-            lh2 = bkc.openLedger(ledgerId2, digestType, ledgerPassword);
-
-            LOG.debug("Number of entries written: " + lh.getLastAddConfirmed() + ", " + lh2.getLastAddConfirmed());
-            assertTrue("Verifying number of entries written lh (" + lh.getLastAddConfirmed() + ")", lh
-                    .getLastAddConfirmed() == (numEntriesToWrite - 1));
-            assertTrue("Verifying number of entries written lh2 (" + lh2.getLastAddConfirmed() + ")", lh2
-                    .getLastAddConfirmed() == (numEntriesToWrite - 1));
-
-            ls = lh.readEntries(0, numEntriesToWrite - 1);
-            int i = 0;
-            while (ls.hasMoreElements()) {
-                ByteBuffer result = ByteBuffer.wrap(ls.nextElement().getEntry());
-                LOG.debug("Length of result: " + result.capacity());
-
-                assertTrue("Checking if entry " + i + " has zero bytes", result.capacity() == 0);
-                i++;
-            }
-            lh.close();
-            ls = lh2.readEntries(0, numEntriesToWrite - 1);
-            i = 0;
-            while (ls.hasMoreElements()) {
-                ByteBuffer result = ByteBuffer.wrap(ls.nextElement().getEntry());
-                LOG.debug("Length of result: " + result.capacity());
-
-                assertTrue("Checking if entry " + i + " has zero bytes", result.capacity() == 0);
-                i++;
-            }
-            lh2.close();
-        } catch (KeeperException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to ZooKeeper exception");
-        } catch (BKException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to BookKeeper exception");
-        } catch (InterruptedException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to interruption");
-        }
-    }
-
-    @Test
-    public void testReadWriteAsyncLength() throws IOException {
-        try {
-            // Create a BookKeeper client and a ledger
-            bkc = new BookKeeper("127.0.0.1");
-            lh = bkc.createLedger(digestType, ledgerPassword);
-            // bkc.initMessageDigest("SHA1");
-            ledgerId = lh.getId();
-            LOG.info("Ledger ID: " + lh.getId());
-            for (int i = 0; i < numEntriesToWrite; i++) {
-                ByteBuffer entry = ByteBuffer.allocate(4);
-                entry.putInt(rng.nextInt(maxInt));
-                entry.position(0);
-
-                entries.add(entry.array());
-                entriesSize.add(entry.array().length);
-                lh.asyncAddEntry(entry.array(), this, sync);
-            }
-
-            // wait for all entries to be acknowledged
-            synchronized (sync) {
-                while (sync.counter < numEntriesToWrite) {
-                    LOG.debug("Entries counter = " + sync.counter);
-                    sync.wait();
-                }
-            }
-            long length = numEntriesToWrite * 4;
-            assertTrue("Ledger length before closing: " + lh.getLength(), lh.getLength() == length);
-            
-            LOG.debug("*** WRITE COMPLETE ***");
-            // close ledger
-            lh.close();
-
-            // *** WRITING PART COMPLETE // READ PART BEGINS ***
-
-            // open ledger
-            lh = bkc.openLedger(ledgerId, digestType, ledgerPassword);
-            assertTrue("Ledger length after opening: " + lh.getLength(), lh.getLength() == length);
-
-
-            lh.close();
-        } catch (KeeperException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to ZooKeeper exception");
-        } catch (BKException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to BookKeeper exception");
-        } catch (InterruptedException e) {
-            LOG.error("Test failed", e);
-            fail("Test failed due to interruption");
-        }
-    }
-    
-    
-    public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) {
-        if(rc != BKException.Code.OK) fail("Return code is not OK: " + rc);
-        
-        SyncObj x = (SyncObj) ctx;
-        
-        synchronized (x) {
-            x.counter++;
-            x.notify();
-        }
-    }
-
-    public void readComplete(int rc, LedgerHandle lh, Enumeration<LedgerEntry> seq, Object ctx) {
-        if(rc != BKException.Code.OK) fail("Return code is not OK: " + rc);
-        
-        ls = seq;
-
-        synchronized (sync) {
-            sync.value = true;
-            sync.notify();
-        }
-    }
-
-    @Before
-    public void setUp() throws Exception{
-        super.setUp();
-        rng = new Random(System.currentTimeMillis()); // Initialize the Random
-                                                      // Number Generator
-        entries = new ArrayList<byte[]>(); // initialize the entries list
-        entriesSize = new ArrayList<Integer>();
-        sync = new SyncObj(); // initialize the synchronization data structure
-        
-    }
-
-    /* Clean up a directory recursively */
-    protected boolean cleanUpDir(File dir) {
-        if (dir.isDirectory()) {
-            LOG.info("Cleaning up " + dir.getName());
-            String[] children = dir.list();
-            for (String string : children) {
-                boolean success = cleanUpDir(new File(dir, string));
-                if (!success)
-                    return false;
-            }
-        }
-        // The directory is now empty so delete it
-        return dir.delete();
-    }
-
-    /* Used for testing purposes; does nothing */
-    class emptyWatcher implements Watcher {
-        public void process(WatchedEvent event) {
-        }
-    }
-
-}
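
For orientation, the synchronous write/read cycle that the removed BookieReadWriteTest exercises reduces to roughly the sketch below. It is only a sketch against the pre-move client API shown in the deleted code; the ZooKeeper address, digest type, payloads, and class name are illustrative.

    import java.util.Enumeration;

    import org.apache.bookkeeper.client.BookKeeper;
    import org.apache.bookkeeper.client.BookKeeper.DigestType;
    import org.apache.bookkeeper.client.LedgerEntry;
    import org.apache.bookkeeper.client.LedgerHandle;

    public class SimpleWriteReadExample {
        public static void main(String[] args) throws Exception {
            byte[] passwd = "aaa".getBytes();
            // Connect to the ZooKeeper ensemble the bookies are registered with.
            BookKeeper bkc = new BookKeeper("127.0.0.1");

            // Write a handful of entries synchronously and seal the ledger.
            LedgerHandle lh = bkc.createLedger(DigestType.CRC32, passwd);
            long ledgerId = lh.getId();
            for (int i = 0; i < 10; i++) {
                lh.addEntry(("entry-" + i).getBytes());
            }
            lh.close();

            // Re-open the ledger and read everything back.
            LedgerHandle reader = bkc.openLedger(ledgerId, DigestType.CRC32, passwd);
            Enumeration<LedgerEntry> entries =
                reader.readEntries(0, reader.getLastAddConfirmed());
            while (entries.hasMoreElements()) {
                System.out.println(new String(entries.nextElement().getEntry()));
            }
            reader.close();
        }
    }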

+ 0 - 400
src/contrib/bookkeeper/test/org/apache/bookkeeper/test/BookieRecoveryTest.java

@@ -1,400 +0,0 @@
-package org.apache.bookkeeper.test;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Enumeration;
-import java.util.List;
-
-import org.apache.bookkeeper.client.BKException;
-import org.apache.bookkeeper.client.LedgerEntry;
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.bookkeeper.client.AsyncCallback.RecoverCallback;
-import org.apache.bookkeeper.client.BookKeeper.DigestType;
-import org.apache.bookkeeper.proto.BookieServer;
-import org.apache.bookkeeper.tools.BookKeeperTools;
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.Code;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * This class tests the bookie recovery admin functionality.
- */
-public class BookieRecoveryTest extends BaseTestCase {
-    static Logger LOG = Logger.getLogger(BookieRecoveryTest.class);
-
-    // Object used for synchronizing async method calls
-    class SyncObject {
-        boolean value;
-
-        public SyncObject() {
-            value = false;
-        }
-    }
-
-    // Object used for implementing the Bookie RecoverCallback for this jUnit
-    // test. This verifies that the operation completed successfully.
-    class BookieRecoverCallback implements RecoverCallback {
-        @Override
-        public void recoverComplete(int rc, Object ctx) {
-            LOG.info("Recovered bookie operation completed with rc: " + rc);
-            assertTrue(rc == Code.OK.intValue());
-            SyncObject sync = (SyncObject) ctx;
-            synchronized (sync) {
-                sync.value = true;
-                sync.notify();
-            }
-        }
-    }
-
-    // Objects to use for this jUnit test.
-    DigestType digestType;
-    SyncObject sync;
-    BookieRecoverCallback bookieRecoverCb;
-    BookKeeperTools bkTools;
-
-    // Constructor
-    public BookieRecoveryTest(DigestType digestType) {
-        super(3);
-        this.digestType = digestType;
-    }
-
-    @Before
-    @Override
-    public void setUp() throws Exception {
-        super.setUp();
-        // Set up the configuration properties needed.
-        System.setProperty("digestType", digestType.toString());
-        System.setProperty("passwd", "");
-        sync = new SyncObject();
-        bookieRecoverCb = new BookieRecoverCallback();
-        bkTools = new BookKeeperTools(HOSTPORT);
-    }
-
-    @After
-    @Override
-    public void tearDown() throws Exception {
-        // Release any resources used by the BookKeeperTools instance.
-        bkTools.shutdown();
-        super.tearDown();
-    }
-
-    /**
-     * Helper method to create a number of ledgers
-     * 
-     * @param numLedgers
-     *            Number of ledgers to create
-     * @return List of LedgerHandles for each of the ledgers created
-     * @throws BKException
-     * @throws KeeperException
-     * @throws IOException
-     * @throws InterruptedException
-     */
-    private List<LedgerHandle> createLedgers(int numLedgers) throws BKException, KeeperException, IOException,
-            InterruptedException {
-        List<LedgerHandle> lhs = new ArrayList<LedgerHandle>();
-        for (int i = 0; i < numLedgers; i++) {
-            lhs.add(bkc.createLedger(digestType, System.getProperty("passwd").getBytes()));
-        }
-        return lhs;
-    }
-
-    /**
-     * Helper method to write dummy ledger entries to all of the ledgers passed.
-     * 
-     * @param numEntries
-     *            Number of ledger entries to write for each ledger
-     * @param startEntryId
-     *            The first entry Id we're expecting to write for each ledger
-     * @param lhs
-     *            List of LedgerHandles for all ledgers to write entries to
-     * @throws BKException
-     * @throws InterruptedException
-     */
-    private void writeEntriestoLedgers(int numEntries, long startEntryId, List<LedgerHandle> lhs) throws BKException,
-            InterruptedException {
-        for (LedgerHandle lh : lhs) {
-            for (int i = 0; i < numEntries; i++) {
-                lh.addEntry(("LedgerId: " + lh.getId() + ", EntryId: " + (startEntryId + i)).getBytes());
-            }
-        }
-    }
-
-    /**
-     * Helper method to startup a new bookie server with the indicated port
-     * number
-     * 
-     * @param port
-     *            Port to start the new bookie server on
-     * @throws IOException
-     */
-    private void startNewBookie(int port)
-    throws IOException, InterruptedException {
-        File f = File.createTempFile("bookie", "test");
-        tmpDirs.add(f);
-        f.delete();
-        f.mkdir();
-        BookieServer server = new BookieServer(port, HOSTPORT, f, new File[] { f });
-        server.start();
-        bs.add(server);
-        while(!server.isRunning()){
-            Thread.sleep(500);
-        }
-        LOG.info("New bookie on port " + port + " has been created.");
-    }
-
-    /**
-     * Helper method to verify that we can read the recovered ledger entries.
-     * 
-     * @param numLedgers
-     *            Number of ledgers to verify
-     * @param startEntryId
-     *            Start Entry Id to read
-     * @param endEntryId
-     *            End Entry Id to read
-     * @throws BKException
-     * @throws InterruptedException
-     */
-    private void verifyRecoveredLedgers(int numLedgers, long startEntryId, long endEntryId) throws BKException,
-            InterruptedException {
-        // Get a set of LedgerHandles for all of the ledgers to verify
-        List<LedgerHandle> lhs = new ArrayList<LedgerHandle>();
-        for (int i = 0; i < numLedgers; i++) {
-            lhs.add(bkc.openLedger(i + 1, digestType, System.getProperty("passwd").getBytes()));
-        }
-        // Read the ledger entries to verify that they are all present and
-        // correct in the new bookie.
-        for (LedgerHandle lh : lhs) {
-            Enumeration<LedgerEntry> entries = lh.readEntries(startEntryId, endEntryId);
-            while (entries.hasMoreElements()) {
-                LedgerEntry entry = entries.nextElement();
-                assertTrue(new String(entry.getEntry()).equals("LedgerId: " + entry.getLedgerId() + ", EntryId: "
-                        + entry.getEntryId()));
-            }
-        }
-
-    }
-
-    /**
-     * This tests the asynchronous bookie recovery functionality by writing
-     * entries into 3 bookies, killing one bookie, starting up a new one to
-     * replace it, and then recovering the ledger entries from the killed bookie
-     * onto the new one. We'll verify that the entries stored on the killed
-     * bookie are properly copied over and restored onto the new one.
-     * 
-     * @throws Exception
-     */
-    @Test
-    public void testAsyncBookieRecoveryToSpecificBookie() throws Exception {
-        // Create the ledgers
-        int numLedgers = 3;
-        List<LedgerHandle> lhs = createLedgers(numLedgers);
-
-        // Write the entries for the ledgers with dummy values.
-        int numMsgs = 10;
-        writeEntriestoLedgers(numMsgs, 0, lhs);
-
-        // Shutdown the first bookie server
-        LOG.info("Finished writing all ledger entries so shutdown one of the bookies.");
-        bs.get(0).shutdown();
-        bs.remove(0);
-
-        // Startup a new bookie server
-        int newBookiePort = initialPort + numBookies;
-        startNewBookie(newBookiePort);
-
-        // Write some more entries for the ledgers so a new ensemble will be
-        // created for them.
-        writeEntriestoLedgers(numMsgs, 10, lhs);
-
-        // Call the async recover bookie method.
-        InetSocketAddress bookieSrc = new InetSocketAddress(InetAddress.getLocalHost().getHostAddress(), initialPort);
-        InetSocketAddress bookieDest = new InetSocketAddress(InetAddress.getLocalHost().getHostAddress(), newBookiePort);
-        LOG.info("Now recover the data on the killed bookie (" + bookieSrc + ") and replicate it to the new one ("
-                + bookieDest + ")");
-        // Initiate the sync object
-        sync.value = false;
-        bkTools.asyncRecoverBookieData(bookieSrc, bookieDest, bookieRecoverCb, sync);
-
-        // Wait for the async method to complete.
-        synchronized (sync) {
-            while (sync.value == false) {
-                sync.wait();
-            }
-        }
-
-        // Verify the recovered ledger entries are okay.
-        verifyRecoveredLedgers(numLedgers, 0, 2 * numMsgs - 1);
-    }
-
-    /**
-     * This tests the asynchronous bookie recovery functionality by writing
-     * entries into 3 bookies, killing one bookie, starting up a few new
-     * bookies, and then recovering the ledger entries from the killed bookie
-     * onto random available bookie servers. We'll verify that the entries
-     * stored on the killed bookie are properly copied over and restored onto
-     * the other bookies.
-     * 
-     * @throws Exception
-     */
-    @Test
-    public void testAsyncBookieRecoveryToRandomBookies() throws Exception {
-        // Create the ledgers
-        int numLedgers = 3;
-        List<LedgerHandle> lhs = createLedgers(numLedgers);
-
-        // Write the entries for the ledgers with dummy values.
-        int numMsgs = 10;
-        writeEntriestoLedgers(numMsgs, 0, lhs);
-
-        // Shutdown the first bookie server
-        LOG.info("Finished writing all ledger entries so shutdown one of the bookies.");
-        bs.get(0).shutdown();
-        bs.remove(0);
-
-        // Startup three new bookie servers
-        for (int i = 0; i < 3; i++) {
-            int newBookiePort = initialPort + numBookies + i;
-            startNewBookie(newBookiePort);
-        }
-
-        // Write some more entries for the ledgers so a new ensemble will be
-        // created for them.
-        writeEntriestoLedgers(numMsgs, 10, lhs);
-
-        // Call the async recover bookie method.
-        InetSocketAddress bookieSrc = new InetSocketAddress(InetAddress.getLocalHost().getHostAddress(), initialPort);
-        InetSocketAddress bookieDest = null;
-        LOG.info("Now recover the data on the killed bookie (" + bookieSrc
-                + ") and replicate it to a random available one");
-        // Initiate the sync object
-        sync.value = false;
-        bkTools.asyncRecoverBookieData(bookieSrc, bookieDest, bookieRecoverCb, sync);
-
-        // Wait for the async method to complete.
-        synchronized (sync) {
-            while (sync.value == false) {
-                sync.wait();
-            }
-        }
-
-        // Verify the recovered ledger entries are okay.
-        verifyRecoveredLedgers(numLedgers, 0, 2 * numMsgs - 1);
-    }
-
-    /**
-     * This tests the synchronous bookie recovery functionality by writing
-     * entries into 3 bookies, killing one bookie, starting up a new one to
-     * replace it, and then recovering the ledger entries from the killed bookie
-     * onto the new one. We'll verify that the entries stored on the killed
-     * bookie are properly copied over and restored onto the new one.
-     * 
-     * @throws Exception
-     */
-    @Test
-    public void testSyncBookieRecoveryToSpecificBookie() throws Exception {
-        // Create the ledgers
-        int numLedgers = 3;
-        List<LedgerHandle> lhs = createLedgers(numLedgers);
-
-        // Write the entries for the ledgers with dummy values.
-        int numMsgs = 10;
-        writeEntriestoLedgers(numMsgs, 0, lhs);
-
-        // Shutdown the first bookie server
-        LOG.info("Finished writing all ledger entries so shutdown one of the bookies.");
-        bs.get(0).shutdown();
-        bs.remove(0);
-
-        // Startup a new bookie server
-        int newBookiePort = initialPort + numBookies;
-        startNewBookie(newBookiePort);
-
-        // Write some more entries for the ledgers so a new ensemble will be
-        // created for them.
-        writeEntriestoLedgers(numMsgs, 10, lhs);
-
-        // Call the sync recover bookie method.
-        InetSocketAddress bookieSrc = new InetSocketAddress(InetAddress.getLocalHost().getHostAddress(), initialPort);
-        InetSocketAddress bookieDest = new InetSocketAddress(InetAddress.getLocalHost().getHostAddress(), newBookiePort);
-        LOG.info("Now recover the data on the killed bookie (" + bookieSrc + ") and replicate it to the new one ("
-                + bookieDest + ")");
-        bkTools.recoverBookieData(bookieSrc, bookieDest);
-
-        // Verify the recovered ledger entries are okay.
-        verifyRecoveredLedgers(numLedgers, 0, 2 * numMsgs - 1);
-    }
-
-    /**
-     * This tests the synchronous bookie recovery functionality by writing
-     * entries into 3 bookies, killing one bookie, starting up a few new
-     * bookies, and then recovering the ledger entries from the killed bookie
-     * onto random available bookie servers. We'll verify that the entries
-     * stored on the killed bookie are properly copied over and restored onto
-     * the other bookies.
-     * 
-     * @throws Exception
-     */
-    @Test
-    public void testSyncBookieRecoveryToRandomBookies() throws Exception {
-        // Create the ledgers
-        int numLedgers = 3;
-        List<LedgerHandle> lhs = createLedgers(numLedgers);
-
-        // Write the entries for the ledgers with dummy values.
-        int numMsgs = 10;
-        writeEntriestoLedgers(numMsgs, 0, lhs);
-
-        // Shutdown the first bookie server
-        LOG.info("Finished writing all ledger entries so shutdown one of the bookies.");
-        bs.get(0).shutdown();
-        bs.remove(0);
-
-        // Startup three new bookie servers
-        for (int i = 0; i < 3; i++) {
-            int newBookiePort = initialPort + numBookies + i;
-            startNewBookie(newBookiePort);
-        }
-
-        // Write some more entries for the ledgers so a new ensemble will be
-        // created for them.
-        writeEntriestoLedgers(numMsgs, 10, lhs);
-
-        // Call the sync recover bookie method.
-        InetSocketAddress bookieSrc = new InetSocketAddress(InetAddress.getLocalHost().getHostAddress(), initialPort);
-        InetSocketAddress bookieDest = null;
-        LOG.info("Now recover the data on the killed bookie (" + bookieSrc
-                + ") and replicate it to a random available one");
-        bkTools.recoverBookieData(bookieSrc, bookieDest);
-
-        // Verify the recovered ledger entries are okay.
-        verifyRecoveredLedgers(numLedgers, 0, 2 * numMsgs - 1);
-    }
-
-}
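
The admin-side recovery path that the removed BookieRecoveryTest drives comes down to the two BookKeeperTools calls sketched below. The connect string, bookie addresses, and class name are placeholders; passing null as the destination lets the tool pick available bookies at random, as the tests above do.

    import java.net.InetSocketAddress;
    import java.util.concurrent.CountDownLatch;

    import org.apache.bookkeeper.client.AsyncCallback.RecoverCallback;
    import org.apache.bookkeeper.tools.BookKeeperTools;

    public class BookieRecoveryExample {
        public static void main(String[] args) throws Exception {
            // The tools instance talks to the same ZooKeeper ensemble the bookies register with.
            BookKeeperTools bkTools = new BookKeeperTools("127.0.0.1:2181");

            InetSocketAddress deadBookie = new InetSocketAddress("127.0.0.1", 3181);
            InetSocketAddress targetBookie = null; // null: replicate onto random available bookies

            // Synchronous variant: returns once the dead bookie's ledger data is re-replicated.
            bkTools.recoverBookieData(deadBookie, targetBookie);

            // Asynchronous variant: the callback fires with a return code when recovery is done.
            final CountDownLatch done = new CountDownLatch(1);
            bkTools.asyncRecoverBookieData(deadBookie, targetBookie, new RecoverCallback() {
                public void recoverComplete(int rc, Object ctx) {
                    System.out.println("Recovery completed with rc=" + rc);
                    done.countDown();
                }
            }, null);
            done.await();

            bkTools.shutdown();
        }
    }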

+ 0 - 74
src/contrib/bookkeeper/test/org/apache/bookkeeper/test/CloseTest.java

@@ -1,74 +0,0 @@
-package org.apache.bookkeeper.test;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import org.junit.*;
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.bookkeeper.client.BookKeeper.DigestType;
-import org.apache.log4j.Logger;
-
-/**
- * This unit test tests closing ledgers sequentially. It creates 4 ledgers, then
- * writes 100 entries to each ledger and closes it.
- * 
- */
-
-public class CloseTest extends BaseTestCase{
-    static Logger LOG = Logger.getLogger(CloseTest.class);
-    DigestType digestType;
-
-    public CloseTest(DigestType digestType) {
-        super(3);
-        this.digestType = digestType;
-    }
-
-    @Test
-    public void testClose() throws Exception {
-
-        /*
-         * Create 4 ledgers.
-         */
-        int numLedgers = 4;
-        int numMsgs = 100;
-
-        LedgerHandle[] lh = new LedgerHandle[numLedgers];
-        for (int i = 0; i < numLedgers; i++) {
-            lh[i] = bkc.createLedger(digestType, "".getBytes());
-        }
-
-        String tmp = "BookKeeper is cool!";
-
-        /*
-         * Write 100 entries to each ledger.
-         */
-        for (int i = 0; i < numMsgs; i++) {
-            for (int j = 0; j < numLedgers; j++) {
-                lh[j].addEntry(tmp.getBytes());
-            }
-        }
-
-        for (int i = 0; i < numLedgers; i++) {
-
-            lh[i].close();
-        }
-    }
-}

+ 0 - 178
src/contrib/bookkeeper/test/org/apache/bookkeeper/test/ConcurrentLedgerTest.java

@@ -1,178 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-package org.apache.bookkeeper.test;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.bookkeeper.bookie.Bookie;
-import org.apache.bookkeeper.bookie.BookieException;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import junit.framework.TestCase;
-
-/**
- * Tests writing to concurrent ledgers
- */
-public class ConcurrentLedgerTest extends TestCase {
-    Bookie bookie;
-    File txnDir, ledgerDir;
-    int recvTimeout = 10000;
-    Semaphore throttle;
-    
-    @Override
-    @Before
-    public void setUp() throws IOException {
-        String txnDirName = System.getProperty("txnDir");
-        if (txnDirName != null) {
-            txnDir = new File(txnDirName);
-        }
-        String ledgerDirName = System.getProperty("ledgerDir");
-        if (ledgerDirName != null) {
-            ledgerDir = new File(ledgerDirName);
-        }
-        File tmpFile = File.createTempFile("book", ".txn", txnDir);
-        tmpFile.delete();
-        txnDir = new File(tmpFile.getParent(), tmpFile.getName()+".dir");
-        txnDir.mkdirs();
-        tmpFile = File.createTempFile("book", ".ledger", ledgerDir);
-        ledgerDir = new File(tmpFile.getParent(), tmpFile.getName()+".dir");
-        ledgerDir.mkdirs();
-        
-        bookie = new Bookie(5000, null, txnDir, new File[] {ledgerDir});
-    }
-    
-    static void recursiveDelete(File f) {
-        if (f.isFile()) {
-            f.delete();
-        } else {
-            for(File i: f.listFiles()) {
-                recursiveDelete(i);
-            }
-            f.delete();
-        }
-    }
-    
-    @Override
-    @After
-    public void tearDown() {
-        try {
-            bookie.shutdown();
-            recursiveDelete(txnDir);
-            recursiveDelete(ledgerDir);
-        } catch (InterruptedException e) {
-            e.printStackTrace();
-        }
-    }
-
-    byte zeros[] = new byte[16];
-
-    int iterations = 51;
-    {
-        String iterationsString = System.getProperty("iterations");
-        if (iterationsString != null) {
-            iterations = Integer.parseInt(iterationsString);
-        }
-    }
-    int iterationStep = 25;
-    {
-        String iterationsString = System.getProperty("iterationStep");
-        if (iterationsString != null) {
-            iterationStep = Integer.parseInt(iterationsString);
-        }
-    }
-    @Test
-    public void testConcurrentWrite() throws IOException, InterruptedException, BookieException {
-        int size = 1024;
-        int totalwrites = 128;
-        if (System.getProperty("totalwrites") != null) {
-            totalwrites = Integer.parseInt(System.getProperty("totalwrites"));
-        }
-        System.out.println("Running up to " + iterations + " iterations");
-        System.out.println("Total writes = " + totalwrites);
-        int ledgers;
-        for(ledgers = 1; ledgers <= iterations; ledgers += iterationStep) {
-            long duration = doWrites(ledgers, size, totalwrites);
-            System.out.println(totalwrites + " on " + ledgers + " took " + duration + " ms");
-        }
-        System.out.println("ledgers " + ledgers);
-        for(ledgers = 1; ledgers <= iterations; ledgers += iterationStep) {
-            long duration = doReads(ledgers, size, totalwrites);
-            System.out.println(ledgers + " read " + duration + " ms");
-        }
-    }
-
-    private long doReads(int ledgers, int size, int totalwrites)
-            throws IOException, InterruptedException, BookieException {
-        long start = System.currentTimeMillis();
-        for(int i = 1; i <= totalwrites/ledgers; i++) {
-            for(int j = 1; j <= ledgers; j++) {
-                ByteBuffer entry = bookie.readEntry(j, i);
-                // skip the ledger id and the entry id
-                entry.getLong();
-                entry.getLong();
-                assertEquals(j + "@" + i, j+2, entry.getLong());
-                assertEquals(j + "@" + i, i+3, entry.getLong());
-            }
-        }
-        long finish = System.currentTimeMillis();
-        return finish - start;
-    }
-    private long doWrites(int ledgers, int size, int totalwrites)
-            throws IOException, InterruptedException, BookieException {
-        throttle = new Semaphore(10000);
-        WriteCallback cb = new WriteCallback() {
-            @Override
-            public void writeComplete(int rc, long ledgerId, long entryId,
-                    InetSocketAddress addr, Object ctx) {
-                AtomicInteger counter = (AtomicInteger)ctx;
-                counter.getAndIncrement();
-                throttle.release();
-            }
-        };
-        AtomicInteger counter = new AtomicInteger();
-        long start = System.currentTimeMillis();
-        for(int i = 1; i <= totalwrites/ledgers; i++) {
-            for(int j = 1; j <= ledgers; j++) {
-                ByteBuffer bytes = ByteBuffer.allocate(size);
-                bytes.putLong(j);
-                bytes.putLong(i);
-                bytes.putLong(j+2);
-                bytes.putLong(i+3);
-                bytes.put(("This is ledger " + j + " entry " + i).getBytes());
-                bytes.position(0);
-                bytes.limit(bytes.capacity());
-                throttle.acquire();
-                bookie.addEntry(bytes, cb, counter, zeros);
-            }
-        }
-        long finish = System.currentTimeMillis();
-        return finish - start;
-    }
-}
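
The removed ConcurrentLedgerTest bypasses the client entirely and drives an embedded Bookie. A minimal sketch of that lower-level path follows; the port, directories, master key, and class name are placeholders chosen to mirror the test, not a recommended configuration.

    import java.io.File;
    import java.net.InetSocketAddress;
    import java.nio.ByteBuffer;

    import org.apache.bookkeeper.bookie.Bookie;
    import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback;

    public class EmbeddedBookieExample {
        public static void main(String[] args) throws Exception {
            File journalDir = new File("/tmp/bk-journal");
            File ledgerDir = new File("/tmp/bk-ledgers");
            journalDir.mkdirs();
            ledgerDir.mkdirs();

            // Standalone bookie: port 5000, no ZooKeeper registration (second argument null).
            Bookie bookie = new Bookie(5000, null, journalDir, new File[] { ledgerDir });

            WriteCallback cb = new WriteCallback() {
                public void writeComplete(int rc, long ledgerId, long entryId,
                                          InetSocketAddress addr, Object ctx) {
                    System.out.println("wrote " + ledgerId + ":" + entryId + ", rc=" + rc);
                }
            };

            // Entries handed to Bookie.addEntry start with the ledger id and entry id as longs.
            ByteBuffer entry = ByteBuffer.allocate(64);
            entry.putLong(1L);  // ledger id
            entry.putLong(0L);  // entry id
            entry.put("payload".getBytes());
            entry.position(0);
            entry.limit(entry.capacity());
            bookie.addEntry(entry, cb, null, new byte[16]); // last argument is the ledger's master key

            ByteBuffer read = bookie.readEntry(1L, 0L);     // returned buffer includes the two id longs
            System.out.println("read back " + read.remaining() + " bytes");

            bookie.shutdown();
        }
    }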

+ 0 - 163
src/contrib/bookkeeper/test/org/apache/bookkeeper/test/LedgerDeleteTest.java

@@ -1,163 +0,0 @@
-package org.apache.bookkeeper.test;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.io.File;
-
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.bookkeeper.client.BookKeeper.DigestType;
-import org.apache.bookkeeper.proto.BookieServer;
-import org.apache.log4j.Logger;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * This class tests the ledger delete functionality both from the BookKeeper
- * client and the server side.
- */
-public class LedgerDeleteTest extends BaseTestCase {
-    static Logger LOG = Logger.getLogger(LedgerDeleteTest.class);
-    DigestType digestType;
-
-    public LedgerDeleteTest(DigestType digestType) {
-        super(3);
-        this.digestType = digestType;
-    }
-
-    @Before
-    @Override
-    public void setUp() throws Exception {
-        // Set up the configuration properties needed.
-        System.setProperty("logSizeLimit", Long.toString(2 * 1024 * 1024L));
-        System.setProperty("gcWaitTime", "1000");
-        super.setUp();
-    }
-
-    /**
-     * Common method to create ledgers and write entries to them.
-     */
-    private LedgerHandle[] writeLedgerEntries(int numLedgers, int msgSize, int numMsgs) throws Exception {
-        // Create the ledgers
-        LedgerHandle[] lhs = new LedgerHandle[numLedgers];
-        for (int i = 0; i < numLedgers; i++) {
-            lhs[i] = bkc.createLedger(digestType, "".getBytes());
-        }
-
-        // Create a dummy message string to write as ledger entries
-        StringBuilder msgSB = new StringBuilder();
-        for (int i = 0; i < msgSize; i++) {
-            msgSB.append("a");
-        }
-        String msg = msgSB.toString();
-
-        // Write all of the entries for all of the ledgers
-        for (int i = 0; i < numMsgs; i++) {
-            for (int j = 0; j < numLedgers; j++) {
-                lhs[j].addEntry(msg.getBytes());
-            }
-        }
-
-        // Return the ledger handles to the inserted ledgers and entries
-        return lhs;
-    }
-
-    /**
-     * This test writes enough ledger entries to roll over the entry log file.
-     * It will then delete all of the ledgers from the client and let the
-     * server's EntryLogger garbage collector thread delete the initial entry
-     * log file.
-     * 
-     * @throws Exception
-     */
-    @Test
-    public void testLedgerDelete() throws Exception {
-        // Write enough ledger entries so that we roll over the initial entryLog (0.log)
-        LedgerHandle[] lhs = writeLedgerEntries(3, 1024, 1024);
-
-        // Delete all of these ledgers from the BookKeeper client
-        for (LedgerHandle lh : lhs) {
-            bkc.deleteLedger(lh.getId());
-        }
-        LOG.info("Finished deleting all ledgers so waiting for the GC thread to clean up the entryLogs");
-        Thread.sleep(2000);
-
-        // Verify that the first entry log (0.log) has been deleted from all of the Bookie Servers.
-        for (File ledgerDirectory : tmpDirs) {
-            for (File f : ledgerDirectory.listFiles()) {
-                assertFalse("Found the entry log file (0.log) that should have been deleted in ledgerDirectory: "
-                        + ledgerDirectory, f.isFile() && f.getName().equals("0.log"));
-            }
-        }
-    }
-
-    /**
-     * This test is similar to testLedgerDelete() except it will stop and
-     * restart the Bookie Servers after it has written out the ledger entries.
-     * On restart, there will be existing entry logs and ledger index files for
-     * the EntryLogger and LedgerCache to read and store into memory.
-     * 
-     * @throws Exception
-     */
-    @Test
-    public void testLedgerDeleteWithExistingEntryLogs() throws Exception {
-        // Write enough ledger entries so that we roll over the initial entryLog (0.log)
-        LedgerHandle[] lhs = writeLedgerEntries(3, 1024, 1024);
-
-        /*
-         * Shutdown the Bookie Servers and restart them using the same ledger
-         * directories. This will test the reading of pre-existing ledger index
-         * files in the LedgerCache during startup of a Bookie Server.
-         */
-        for (BookieServer server : bs) {
-            server.shutdown();
-        }
-        bs.clear();
-        int j = 0;
-        for (File f : tmpDirs) {
-            BookieServer server = new BookieServer(initialPort + j, HOSTPORT, f, new File[] { f });
-            server.start();
-            bs.add(server);
-            j++;
-        }
-
-        // Delete all of these ledgers from the BookKeeper client
-        for (LedgerHandle lh : lhs) {
-            bkc.deleteLedger(lh.getId());
-        }
-        LOG.info("Finished deleting all ledgers so waiting for the GC thread to clean up the entryLogs");
-        Thread.sleep(2000);
-
-        /*
-         * Verify that the first two entry logs ([0,1].log) have been deleted
-         * from all of the Bookie Servers. When we restart the servers in this
-         * test, a new entry log is created. We know then that the first two
-         * entry logs should be deleted.
-         */
-        for (File ledgerDirectory : tmpDirs) {
-            for (File f : ledgerDirectory.listFiles()) {
-                assertFalse("Found the entry log file ([0,1].log) that should have been deleted in ledgerDirectory: "
-                        + ledgerDirectory, f.isFile() && (f.getName().equals("0.log") || f.getName().equals("1.log")));
-            }
-        }
-    }
-
-}
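
On the client side, the deletion flow exercised above is a single call; a minimal sketch (connect string, digest type, and class name are illustrative) is:

    import org.apache.bookkeeper.client.BookKeeper;
    import org.apache.bookkeeper.client.BookKeeper.DigestType;
    import org.apache.bookkeeper.client.LedgerHandle;

    public class LedgerDeleteExample {
        public static void main(String[] args) throws Exception {
            BookKeeper bkc = new BookKeeper("127.0.0.1");

            // Create a ledger, write something, and seal it.
            LedgerHandle lh = bkc.createLedger(DigestType.CRC32, "".getBytes());
            lh.addEntry("to be deleted".getBytes());
            lh.close();

            // Deleting removes the ledger metadata; the bookies' garbage collection
            // thread later reclaims entry log files that no live ledger references.
            bkc.deleteLedger(lh.getId());
        }
    }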

+ 0 - 88
src/contrib/bookkeeper/test/org/apache/bookkeeper/test/LedgerRecoveryTest.java

@@ -1,88 +0,0 @@
-package org.apache.bookkeeper.test;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import org.junit.*;
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.bookkeeper.client.BookKeeper.DigestType;
-import org.apache.log4j.Logger;
-
-/**
- * This unit test tests ledger recovery.
- * 
- */
-
-public class LedgerRecoveryTest extends BaseTestCase {
-    static Logger LOG = Logger.getLogger(LedgerRecoveryTest.class);
-
-    DigestType digestType;
-
-    public LedgerRecoveryTest(DigestType digestType) {
-        super(3);
-        this.digestType = digestType;
-    }
-
-    private void testInternal(int numEntries) throws Exception {
-        /*
-         * Create ledger.
-         */
-        LedgerHandle beforelh = null;
-        beforelh = bkc.createLedger(digestType, "".getBytes());
-
-        String tmp = "BookKeeper is cool!";
-        for (int i = 0; i < numEntries; i++) {
-            beforelh.addEntry(tmp.getBytes());
-        }
-
-        long length = (long) (numEntries * tmp.length());
-        
-        /*
-         * Try to open ledger.
-         */
-        LedgerHandle afterlh = bkc.openLedger(beforelh.getId(), digestType, "".getBytes());
-
-        /*
-         * Check if it has recovered properly.
-         */
-        assertTrue("Has not recovered correctly: " + afterlh.getLastAddConfirmed(),
-                afterlh.getLastAddConfirmed() == numEntries - 1);       
-        assertTrue("Has not set the length correctly: " + afterlh.getLength() + ", " + length, 
-                afterlh.getLength() == length);
-    }
-    
-    @Test
-    public void testLedgerRecovery() throws Exception {
-        testInternal(100);
-     
-    }
-
-    @Test
-    public void testEmptyLedgerRecoveryOne() throws Exception{
-        testInternal(1);
-    }
-
-    @Test
-    public void testEmptyLedgerRecovery() throws Exception{
-        testInternal(0);
-    }
-
-}
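
The recovery test just removed asserts the core invariant of ledger recovery: reopening a ledger that was never cleanly closed must recover every acknowledged entry and the total ledger length. A sketch of that invariant under the same assumptions as the previous example (ZooKeeper address and digest type are illustrative):

    import org.apache.bookkeeper.client.BookKeeper;
    import org.apache.bookkeeper.client.BookKeeper.DigestType;
    import org.apache.bookkeeper.client.LedgerHandle;

    public class LedgerRecoverySketch {
        public static void main(String[] args) throws Exception {
            BookKeeper bkc = new BookKeeper("127.0.0.1:2181");   // address is an assumption
            LedgerHandle writer = bkc.createLedger(DigestType.CRC32, "".getBytes());
            byte[] payload = "BookKeeper is cool!".getBytes();
            int n = 100;
            for (int i = 0; i < n; i++) {
                writer.addEntry(payload);
            }
            // Open the ledger without closing the writer: openLedger() runs recovery,
            // so the new handle reports the last entry that was acknowledged.
            LedgerHandle reader = bkc.openLedger(writer.getId(), DigestType.CRC32, "".getBytes());
            assert reader.getLastAddConfirmed() == n - 1;
            assert reader.getLength() == (long) n * payload.length;
        }
    }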

+ 0 - 117
src/contrib/bookkeeper/test/org/apache/bookkeeper/test/LoopbackClient.java

@@ -1,117 +0,0 @@
-package org.apache.bookkeeper.test;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.net.InetSocketAddress;
-import java.io.IOException;
-import java.lang.InterruptedException;
-import java.util.Arrays;
-import java.util.concurrent.Executors;
-
-import org.apache.bookkeeper.proto.BookieClient;
-import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback;
-import org.apache.bookkeeper.util.OrderedSafeExecutor;
-import org.apache.log4j.Logger;
-import org.jboss.netty.buffer.ChannelBuffers;
-import org.jboss.netty.channel.socket.ClientSocketChannelFactory;
-import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
-
-/**
- * This class tests BookieClient. It just sends a new entry to itself.
- * 
- * 
- * 
- */
-
-class LoopbackClient implements WriteCallback {
-    Logger LOG = Logger.getLogger(LoopbackClient.class);
-    BookieClient client;
-    static int recvTimeout = 2000;
-    long begin = 0;
-    int limit;
-    OrderedSafeExecutor executor;
-
-    static class Counter {
-        int c;
-        int limit;
-
-        Counter(int limit) {
-            this.c = 0;
-            this.limit = limit;
-        }
-
-        synchronized void increment() {
-            if (++c == limit)
-                this.notify();
-        }
-    }
-
-    LoopbackClient(ClientSocketChannelFactory channelFactory, OrderedSafeExecutor executor, long begin, int limit) throws IOException {
-        this.client = new BookieClient(channelFactory, executor);
-        this.begin = begin;
-    }
-
-    void write(long ledgerId, long entry, byte[] data, InetSocketAddress addr, WriteCallback cb, Object ctx)
-            throws IOException, InterruptedException {
-        LOG.info("Ledger id: " + ledgerId + ", Entry: " + entry);
-        byte[] passwd = new byte[20];
-        Arrays.fill(passwd, (byte) 'a');
-
-        client.addEntry(addr, ledgerId, passwd, entry, ChannelBuffers.wrappedBuffer(data), cb, ctx);
-    }
-
-    public void writeComplete(int rc, long ledgerId, long entryId, InetSocketAddress addr, Object ctx) {
-        Counter counter = (Counter) ctx;
-        counter.increment();
-    }
-
-    public static void main(String args[]) {
-        byte[] data = new byte[Integer.parseInt(args[0])];
-        Integer limit = Integer.parseInt(args[1]);
-        Counter c = new Counter(limit);
-        long ledgerId = Long.valueOf("0").longValue();
-        long begin = System.currentTimeMillis();
-
-        LoopbackClient lb;
-        ClientSocketChannelFactory channelFactory = new NioClientSocketChannelFactory(Executors.newCachedThreadPool(), Executors
-                .newCachedThreadPool());
-        OrderedSafeExecutor executor = new OrderedSafeExecutor(2);
-        try {
-            InetSocketAddress addr = new InetSocketAddress("127.0.0.1", Integer.valueOf(args[2]).intValue());
-            lb = new LoopbackClient(channelFactory, executor, begin, limit.intValue());
-
-            for (int i = 0; i < limit; i++) {
-                lb.write(ledgerId, i, data, addr, lb, c);
-            }
-
-            synchronized (c) {
-                c.wait();
-                System.out.println("Time to write all entries: " + (System.currentTimeMillis() - begin));
-            }
-        } catch (IOException e) {
-            e.printStackTrace();
-        } catch (InterruptedException e) {
-            e.printStackTrace();
-        }
-    }
-
-}

+ 0 - 60
src/contrib/bookkeeper/test/org/apache/bookkeeper/test/NIOServerFactoryTest.java

@@ -1,60 +0,0 @@
-package org.apache.bookkeeper.test;
-
-/*
- * 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * 
- */
-
-import java.net.Socket;
-import java.nio.ByteBuffer;
-
-import org.apache.bookkeeper.proto.NIOServerFactory;
-import org.apache.bookkeeper.proto.NIOServerFactory.Cnxn;
-import org.apache.bookkeeper.proto.NIOServerFactory.PacketProcessor;
-import org.junit.Test;
-
-import junit.framework.TestCase;
-
-public class NIOServerFactoryTest extends TestCase {
-    PacketProcessor problemProcessor = new PacketProcessor() {
-
-        public void processPacket(ByteBuffer packet, Cnxn src) {
-            if (packet.getInt() == 1) {
-                throw new RuntimeException("Really bad thing happened");
-            }
-            src.sendResponse(new ByteBuffer[] { ByteBuffer.allocate(4) });
-        }
-
-    };
-
-    @Test
-    public void testProblemProcessor() throws Exception {
-        NIOServerFactory factory = new NIOServerFactory(22334, problemProcessor);
-        Socket s = new Socket("127.0.0.1", 22334);
-        s.setSoTimeout(5000);
-        try {
-            s.getOutputStream().write("\0\0\0\4\0\0\0\1".getBytes());
-            s.getOutputStream().write("\0\0\0\4\0\0\0\2".getBytes());
-            s.getInputStream().read();
-        } finally {
-            s.close();
-            factory.shutdown();
-        }
-    }
-}
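
The NIOServerFactory test removed above relies on the server's length-prefixed framing: every request is a 4-byte big-endian length followed by the payload, here a single int that the packet processor inspects. A sketch of the same exchange using DataOutputStream instead of hand-encoded byte strings, assuming a NIOServerFactory is already listening on port 22334 as in the test (everything beyond the port and payload values is illustrative):

    import java.io.DataOutputStream;
    import java.net.Socket;

    public class FramingSketch {
        public static void main(String[] args) throws Exception {
            try (Socket s = new Socket("127.0.0.1", 22334)) {
                s.setSoTimeout(5000);
                DataOutputStream out = new DataOutputStream(s.getOutputStream());
                out.writeInt(4);   // length prefix: the payload is one 4-byte int
                out.writeInt(2);   // payload value 2; the test's processor only throws on value 1
                out.flush();
                // Like the test, read a single byte just to confirm the server answered.
                int first = s.getInputStream().read();
                System.out.println("first response byte: " + first);
            }
        }
    }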

+ 0 - 202
src/contrib/hedwig/LICENSE.txt

@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.

+ 0 - 2
src/contrib/hedwig/NOTICE.txt

@@ -1,2 +0,0 @@
-Copyright (c) 2010 Yahoo! Inc.  All rights reserved.
-

+ 0 - 3
src/contrib/hedwig/README

@@ -1,3 +0,0 @@
-Hedwig is a large-scale pub/sub system built on top of ZooKeeper and BookKeeper.
-
-For documentation on building, setting up, and using Hedwig see the `doc` directory.

+ 0 - 73
src/contrib/hedwig/client/pom.xml

@@ -1,73 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hedwig</groupId>
-    <artifactId>hedwig</artifactId>
-    <version>1.0-SNAPSHOT</version>
-  </parent>
-  <properties>
-      <mainclass>org.apache.hedwig.client.App</mainclass>
-  </properties>
-  <groupId>org.apache.hedwig</groupId>
-  <artifactId>client</artifactId>
-  <packaging>jar</packaging>
-  <version>1.0-SNAPSHOT</version>
-  <name>client</name>
-  <url>http://maven.apache.org</url>
-  <dependencies>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <version>4.8.1</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hedwig</groupId>
-      <artifactId>protocol</artifactId>
-      <version>1.0-SNAPSHOT</version>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <version>1.2.14</version>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.jboss.netty</groupId>
-      <artifactId>netty</artifactId>
-      <version>3.1.2.GA</version>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>commons-configuration</groupId>
-      <artifactId>commons-configuration</artifactId>
-      <version>1.6</version>
-    </dependency>
-  </dependencies>
-  <repositories>
-    <repository>
-      <id>jboss.release</id>
-      <name>JBoss releases</name>
-      <url>http://repository.jboss.org/maven2</url>
-    </repository>
-  </repositories>
-</project>

+ 0 - 29
src/contrib/hedwig/client/src/main/cpp/Makefile.am

@@ -1,29 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-ACLOCAL_AMFLAGS = -I m4
-
-SUBDIRS = lib test
-
-library_includedir=$(includedir)/hedwig-0.1/hedwig
-library_include_HEADERS = inc/hedwig/callback.h inc/hedwig/client.h inc/hedwig/exceptions.h inc/hedwig/publish.h inc/hedwig/subscribe.h 
-
-pkgconfigdir = $(libdir)/pkgconfig
-nodist_pkgconfig_DATA = hedwig-0.1.pc
-
-EXTRA_DIST = $(DX_CONFIG) doc/html

+ 0 - 186
src/contrib/hedwig/client/src/main/cpp/aminclude.am

@@ -1,186 +0,0 @@
-# Copyright (C) 2004 Oren Ben-Kiki
-# This file is distributed under the same terms as the Automake macro files.
-
-# Generate automatic documentation using Doxygen. Goals and variable values
-# are controlled by the various DX_COND_??? conditionals set by autoconf.
-#
-# The provided goals are:
-# doxygen-doc: Generate all doxygen documentation.
-# doxygen-run: Run doxygen, which will generate some of the documentation
-#              (HTML, CHM, CHI, MAN, RTF, XML) but will not do the post
-#              processing required for the rest of it (PS, PDF, and some MAN).
-# doxygen-man: Rename some doxygen generated man pages.
-# doxygen-ps: Generate doxygen PostScript documentation.
-# doxygen-pdf: Generate doxygen PDF documentation.
-#
-# Note that by default these are not integrated into the automake goals. If
-# doxygen is used to generate man pages, you can achieve this integration by
-# setting man3_MANS to the list of man pages generated and then adding the
-# dependency:
-#
-#   $(man3_MANS): doxygen-doc
-#
-# This will cause make to run doxygen and generate all the documentation.
-#
-# The following variable is intended for use in Makefile.am:
-#
-# DX_CLEANFILES = everything to clean.
-#
-# This is usually added to MOSTLYCLEANFILES.
-
-## --------------------------------- ##
-## Format-independent Doxygen rules. ##
-## --------------------------------- ##
-
-if DX_COND_doc
-
-## ------------------------------- ##
-## Rules specific for HTML output. ##
-## ------------------------------- ##
-
-if DX_COND_html
-
-DX_CLEAN_HTML = @DX_DOCDIR@/html
-
-endif DX_COND_html
-
-## ------------------------------ ##
-## Rules specific for CHM output. ##
-## ------------------------------ ##
-
-if DX_COND_chm
-
-DX_CLEAN_CHM = @DX_DOCDIR@/chm
-
-if DX_COND_chi
-
-DX_CLEAN_CHI = @DX_DOCDIR@/@PACKAGE@.chi
-
-endif DX_COND_chi
-
-endif DX_COND_chm
-
-## ------------------------------ ##
-## Rules specific for MAN output. ##
-## ------------------------------ ##
-
-if DX_COND_man
-
-DX_CLEAN_MAN = @DX_DOCDIR@/man
-
-endif DX_COND_man
-
-## ------------------------------ ##
-## Rules specific for RTF output. ##
-## ------------------------------ ##
-
-if DX_COND_rtf
-
-DX_CLEAN_RTF = @DX_DOCDIR@/rtf
-
-endif DX_COND_rtf
-
-## ------------------------------ ##
-## Rules specific for XML output. ##
-## ------------------------------ ##
-
-if DX_COND_xml
-
-DX_CLEAN_XML = @DX_DOCDIR@/xml
-
-endif DX_COND_xml
-
-## ----------------------------- ##
-## Rules specific for PS output. ##
-## ----------------------------- ##
-
-if DX_COND_ps
-
-DX_CLEAN_PS = @DX_DOCDIR@/@PACKAGE@.ps
-
-DX_PS_GOAL = doxygen-ps
-
-doxygen-ps: @DX_DOCDIR@/@PACKAGE@.ps
-
-@DX_DOCDIR@/@PACKAGE@.ps: @DX_DOCDIR@/@PACKAGE@.tag
-	cd @DX_DOCDIR@/latex; \
-	rm -f *.aux *.toc *.idx *.ind *.ilg *.log *.out; \
-	$(DX_LATEX) refman.tex; \
-	$(MAKEINDEX_PATH) refman.idx; \
-	$(DX_LATEX) refman.tex; \
-	countdown=5; \
-	while $(DX_EGREP) 'Rerun (LaTeX|to get cross-references right)' \
-	                  refman.log > /dev/null 2>&1 \
-	   && test $$countdown -gt 0; do \
-	    $(DX_LATEX) refman.tex; \
-	    countdown=`expr $$countdown - 1`; \
-	done; \
-	$(DX_DVIPS) -o ../@PACKAGE@.ps refman.dvi
-
-endif DX_COND_ps
-
-## ------------------------------ ##
-## Rules specific for PDF output. ##
-## ------------------------------ ##
-
-if DX_COND_pdf
-
-DX_CLEAN_PDF = @DX_DOCDIR@/@PACKAGE@.pdf
-
-DX_PDF_GOAL = doxygen-pdf
-
-doxygen-pdf: @DX_DOCDIR@/@PACKAGE@.pdf
-
-@DX_DOCDIR@/@PACKAGE@.pdf: @DX_DOCDIR@/@PACKAGE@.tag
-	cd @DX_DOCDIR@/latex; \
-	rm -f *.aux *.toc *.idx *.ind *.ilg *.log *.out; \
-	$(DX_PDFLATEX) refman.tex; \
-	$(DX_MAKEINDEX) refman.idx; \
-	$(DX_PDFLATEX) refman.tex; \
-	countdown=5; \
-	while $(DX_EGREP) 'Rerun (LaTeX|to get cross-references right)' \
-	                  refman.log > /dev/null 2>&1 \
-	   && test $$countdown -gt 0; do \
-	    $(DX_PDFLATEX) refman.tex; \
-	    countdown=`expr $$countdown - 1`; \
-	done; \
-	mv refman.pdf ../@PACKAGE@.pdf
-
-endif DX_COND_pdf
-
-## ------------------------------------------------- ##
-## Rules specific for LaTeX (shared for PS and PDF). ##
-## ------------------------------------------------- ##
-
-if DX_COND_latex
-
-DX_CLEAN_LATEX = @DX_DOCDIR@/latex
-
-endif DX_COND_latex
-
-.PHONY: doxygen-run doxygen-doc $(DX_PS_GOAL) $(DX_PDF_GOAL)
-
-.INTERMEDIATE: doxygen-run $(DX_PS_GOAL) $(DX_PDF_GOAL)
-
-doxygen-run: @DX_DOCDIR@/@PACKAGE@.tag
-
-doxygen-doc: doxygen-run $(DX_PS_GOAL) $(DX_PDF_GOAL)
-
-@DX_DOCDIR@/@PACKAGE@.tag: $(DX_CONFIG) $(pkginclude_HEADERS)
-	rm -rf @DX_DOCDIR@
-	$(DX_ENV) $(DX_DOXYGEN) $(srcdir)/$(DX_CONFIG)
-
-DX_CLEANFILES = \
-    @DX_DOCDIR@/@PACKAGE@.tag \
-    -r \
-    $(DX_CLEAN_HTML) \
-    $(DX_CLEAN_CHM) \
-    $(DX_CLEAN_CHI) \
-    $(DX_CLEAN_MAN) \
-    $(DX_CLEAN_RTF) \
-    $(DX_CLEAN_XML) \
-    $(DX_CLEAN_PS) \
-    $(DX_CLEAN_PDF) \
-    $(DX_CLEAN_LATEX)
-
-endif DX_COND_doc

+ 0 - 1252
src/contrib/hedwig/client/src/main/cpp/c-doc.Doxyfile

@@ -1,1252 +0,0 @@
-# Doxyfile 1.4.7
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project
-#
-# All text after a hash (#) is considered a comment and will be ignored
-# The format is:
-#       TAG = value [value, ...]
-# For lists items can also be appended using:
-#       TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ")
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded 
-# by quotes) that should identify the project.
-
-PROJECT_NAME = $(PROJECT)-$(VERSION)
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number. 
-# This could be handy for archiving the generated documentation or 
-# if some version control system is used.
-
-PROJECT_NUMBER = 
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) 
-# base path where the generated documentation will be put. 
-# If a relative path is entered, it will be relative to the location 
-# where doxygen was started. If left blank the current directory will be used.
-
-OUTPUT_DIRECTORY = $(DOCDIR)
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 
-# 4096 sub-directories (in 2 levels) under the output directory of each output 
-# format and will distribute the generated files over these directories. 
-# Enabling this option can be useful when feeding doxygen a huge amount of 
-# source files, where putting all generated files in the same directory would 
-# otherwise cause performance problems for the file system.
-
-CREATE_SUBDIRS = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all 
-# documentation generated by doxygen is written. Doxygen will use this 
-# information to generate all constant output in the proper language. 
-# The default language is English, other supported languages are: 
-# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, 
-# Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese, 
-# Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian, 
-# Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, 
-# Swedish, and Ukrainian.
-
-OUTPUT_LANGUAGE = English
-
-# This tag can be used to specify the encoding used in the generated output. 
-# The encoding is not always determined by the language that is chosen, 
-# but also whether or not the output is meant for Windows or non-Windows users. 
-# In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES 
-# forces the Windows encoding (this is the default for the Windows binary), 
-# whereas setting the tag to NO uses a Unix-style encoding (the default for 
-# all platforms other than Windows).
-
-USE_WINDOWS_ENCODING = NO
-
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will 
-# include brief member descriptions after the members that are listed in 
-# the file and class documentation (similar to JavaDoc). 
-# Set to NO to disable this.
-
-BRIEF_MEMBER_DESC = YES
-
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend 
-# the brief description of a member or function before the detailed description. 
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the 
-# brief descriptions will be completely suppressed.
-
-REPEAT_BRIEF = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator 
-# that is used to form the text in various listings. Each string 
-# in this list, if found as the leading text of the brief description, will be 
-# stripped from the text and the result after processing the whole list, is 
-# used as the annotated text. Otherwise, the brief description is used as-is. 
-# If left blank, the following values are used ("$name" is automatically 
-# replaced with the name of the entity): "The $name class" "The $name widget" 
-# "The $name file" "is" "provides" "specifies" "contains" 
-# "represents" "a" "an" "the"
-
-ABBREVIATE_BRIEF = 
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then 
-# Doxygen will generate a detailed section even if there is only a brief 
-# description.
-
-ALWAYS_DETAILED_SEC = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all 
-# inherited members of a class in the documentation of that class as if those 
-# members were ordinary class members. Constructors, destructors and assignment 
-# operators of the base classes will not be shown.
-
-INLINE_INHERITED_MEMB = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full 
-# path before files name in the file list and in the header files. If set 
-# to NO the shortest path that makes the file name unique will be used.
-
-FULL_PATH_NAMES = YES
-
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag 
-# can be used to strip a user-defined part of the path. Stripping is 
-# only done if one of the specified strings matches the left-hand part of 
-# the path. The tag can be used to show relative paths in the file list. 
-# If left blank the directory from which doxygen is run is used as the 
-# path to strip.
-
-STRIP_FROM_PATH = 
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of 
-# the path mentioned in the documentation of a class, which tells 
-# the reader which header file to include in order to use a class. 
-# If left blank only the name of the header file containing the class 
-# definition is used. Otherwise one should specify the include paths that 
-# are normally passed to the compiler using the -I flag.
-
-STRIP_FROM_INC_PATH = 
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter 
-# (but less readable) file names. This can be useful if your file system
-# doesn't support long names, as on DOS, Mac, or CD-ROM.
-
-SHORT_NAMES = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen 
-# will interpret the first line (until the first dot) of a JavaDoc-style 
-# comment as the brief description. If set to NO, the JavaDoc 
-# comments will behave just like the Qt-style comments (thus requiring an 
-# explicit @brief command for a brief description).
-
-JAVADOC_AUTOBRIEF = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen 
-# treat a multi-line C++ special comment block (i.e. a block of //! or /// 
-# comments) as a brief description. This used to be the default behaviour. 
-# The new default is to treat a multi-line C++ comment block as a detailed 
-# description. Set this tag to YES if you prefer the old behaviour instead.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the DETAILS_AT_TOP tag is set to YES then Doxygen 
-# will output the detailed description near the top, like JavaDoc.
-# If set to NO, the detailed description appears after the member 
-# documentation.
-
-DETAILS_AT_TOP = NO
-
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented 
-# member inherits the documentation from any documented member that it 
-# re-implements.
-
-INHERIT_DOCS = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce 
-# a new page for each member. If set to NO, the documentation of a member will 
-# be part of the file/class/namespace that contains it.
-
-SEPARATE_MEMBER_PAGES = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab. 
-# Doxygen uses this value to replace tabs by spaces in code fragments.
-
-TAB_SIZE = 8
-
-# This tag can be used to specify a number of aliases that acts 
-# as commands in the documentation. An alias has the form "name=value". 
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to 
-# put the command \sideeffect (or @sideeffect) in the documentation, which 
-# will result in a user-defined paragraph with heading "Side Effects:". 
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES = 
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C 
-# sources only. Doxygen will then generate output that is more tailored for C. 
-# For instance, some of the names that are used will be different. The list 
-# of all members will be omitted, etc.
-
-OPTIMIZE_OUTPUT_FOR_C = YES
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java 
-# sources only. Doxygen will then generate output that is more tailored for Java. 
-# For instance, namespaces will be presented as packages, qualified scopes 
-# will look different, etc.
-
-OPTIMIZE_OUTPUT_JAVA = NO
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to 
-# include (a tag file for) the STL sources as input, then you should 
-# set this tag to YES in order to let doxygen match function declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. 
-# func(std::string) {}). This also makes the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-
-BUILTIN_STL_SUPPORT = NO
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC 
-# tag is set to YES, then doxygen will reuse the documentation of the first 
-# member in the group (if any) for the other members of the group. By default 
-# all members of a group must be documented explicitly.
-
-DISTRIBUTE_GROUP_DOC = NO
-
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of 
-# the same type (for instance a group of public functions) to be put as a 
-# subgroup of that type (e.g. under the Public Functions section). Set it to 
-# NO to prevent subgrouping. Alternatively, this can be done per class using 
-# the \nosubgrouping command.
-
-SUBGROUPING = YES
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in 
-# documentation are documented, even if no documentation was available. 
-# Private class members and static file members will be hidden unless 
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
-
-EXTRACT_ALL = NO
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class 
-# will be included in the documentation.
-
-EXTRACT_PRIVATE = NO
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file 
-# will be included in the documentation.
-
-EXTRACT_STATIC = YES
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) 
-# defined locally in source files will be included in the documentation. 
-# If set to NO only classes defined in header files are included.
-
-EXTRACT_LOCAL_CLASSES = YES
-
-# This flag is only useful for Objective-C code. When set to YES local 
-# methods, which are defined in the implementation section but not in 
-# the interface are included in the documentation. 
-# If set to NO (the default) only methods in the interface are included.
-
-EXTRACT_LOCAL_METHODS = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all 
-# undocumented members of documented classes, files or namespaces. 
-# If set to NO (the default) these members will be included in the 
-# various overviews, but no documentation section is generated. 
-# This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_MEMBERS = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all 
-# undocumented classes that are normally visible in the class hierarchy. 
-# If set to NO (the default) these classes will be included in the various 
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_CLASSES = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all 
-# friend (class|struct|union) declarations. 
-# If set to NO (the default) these declarations will be included in the 
-# documentation.
-
-HIDE_FRIEND_COMPOUNDS = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any 
-# documentation blocks found inside the body of a function. 
-# If set to NO (the default) these blocks will be appended to the 
-# function's detailed documentation block.
-
-HIDE_IN_BODY_DOCS = NO
-
-# The INTERNAL_DOCS tag determines if documentation 
-# that is typed after a \internal command is included. If the tag is set 
-# to NO (the default) then the documentation will be excluded. 
-# Set it to YES to include the internal documentation.
-
-INTERNAL_DOCS = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate 
-# file names in lower-case letters. If set to YES upper-case letters are also 
-# allowed. This is useful if you have classes or files whose names only differ 
-# in case and if your file system supports case sensitive file names. Windows 
-# and Mac users are advised to set this option to NO.
-
-CASE_SENSE_NAMES = YES
-
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen 
-# will show members with their full class and namespace scopes in the 
-# documentation. If set to YES the scope will be hidden.
-
-HIDE_SCOPE_NAMES = NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen 
-# will put a list of the files that are included by a file in the documentation 
-# of that file.
-
-SHOW_INCLUDE_FILES = NO
-
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] 
-# is inserted in the documentation for inline members.
-
-INLINE_INFO = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen 
-# will sort the (detailed) documentation of file and class members 
-# alphabetically by member name. If set to NO the members will appear in 
-# declaration order.
-
-SORT_MEMBER_DOCS = YES
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the 
-# brief documentation of file, namespace and class members alphabetically 
-# by member name. If set to NO (the default) the members will appear in 
-# declaration order.
-
-SORT_BRIEF_DOCS = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be 
-# sorted by fully-qualified names, including namespaces. If set to 
-# NO (the default), the class list will be sorted only by class name, 
-# not including the namespace part. 
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the 
-# alphabetical list.
-
-SORT_BY_SCOPE_NAME = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or 
-# disable (NO) the todo list. This list is created by putting \todo 
-# commands in the documentation.
-
-GENERATE_TODOLIST = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or 
-# disable (NO) the test list. This list is created by putting \test 
-# commands in the documentation.
-
-GENERATE_TESTLIST = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or 
-# disable (NO) the bug list. This list is created by putting \bug 
-# commands in the documentation.
-
-GENERATE_BUGLIST = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or 
-# disable (NO) the deprecated list. This list is created by putting 
-# \deprecated commands in the documentation.
-
-GENERATE_DEPRECATEDLIST = YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional 
-# documentation sections, marked by \if sectionname ... \endif.
-
-ENABLED_SECTIONS = 
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines 
-# the initial value of a variable or define consists of for it to appear in 
-# the documentation. If the initializer consists of more lines than specified 
-# here it will be hidden. Use a value of 0 to hide initializers completely. 
-# The appearance of the initializer of individual variables and defines in the 
-# documentation can be controlled using \showinitializer or \hideinitializer 
-# command in the documentation regardless of this setting.
-
-MAX_INITIALIZER_LINES = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated 
-# at the bottom of the documentation of classes and structs. If set to YES the 
-# list will mention the files that were used to generate the documentation.
-
-SHOW_USED_FILES = YES
-
-# If the sources in your project are distributed over multiple directories 
-# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy 
-# in the documentation. The default is NO.
-
-SHOW_DIRECTORIES = NO
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that 
-# doxygen should invoke to get the current version for each file (typically from the 
-# version control system). Doxygen will invoke the program by executing (via 
-# popen()) the command <command> <input-file>, where <command> is the value of 
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file 
-# provided by doxygen. Whatever the program writes to standard output 
-# is used as the file version. See the manual for examples.
-
-FILE_VERSION_FILTER = 
-
-#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated 
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
-
-QUIET = NO
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are 
-# generated by doxygen. Possible values are YES and NO. If left blank 
-# NO is used.
-
-WARNINGS = YES
-
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings 
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will 
-# automatically be disabled.
-
-WARN_IF_UNDOCUMENTED = YES
-
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for 
-# potential errors in the documentation, such as not documenting some 
-# parameters in a documented function, or documenting parameters that 
-# don't exist or using markup commands wrongly.
-
-WARN_IF_DOC_ERROR = YES
-
-# This WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters 
-# or return value. If set to NO (the default) doxygen will only warn about 
-# wrong or incomplete parameter documentation, but not about the absence of 
-# documentation.
-
-WARN_NO_PARAMDOC = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that 
-# doxygen can produce. The string should contain the $file, $line, and $text 
-# tags, which will be replaced by the file and line number from which the 
-# warning originated and the warning text. Optionally the format may contain 
-# $version, which will be replaced by the version of the file (if it could 
-# be obtained via FILE_VERSION_FILTER)
-
-WARN_FORMAT = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning 
-# and error messages should be written. If left blank the output is written 
-# to stderr.
-
-WARN_LOGFILE = 
-
-#---------------------------------------------------------------------------
-# configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag can be used to specify the files and/or directories that contain 
-# documented source files. You may enter file names like "myfile.cpp" or 
-# directories like "/usr/src/myproject". Separate the files or directories 
-# with spaces.
-
-INPUT = inc/ lib/
-
-# If the value of the INPUT tag contains directories, you can use the 
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp 
-# and *.h) to filter out the source-files in the directories. If left 
-# blank the following patterns are tested: 
-# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx 
-# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py
-
-FILE_PATTERNS = 
-
-# The RECURSIVE tag can be used to specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO. 
-# If left blank NO is used.
-
-RECURSIVE = NO
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a 
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-
-EXCLUDE = 
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix filesystem feature) are excluded 
-# from the input.
-
-EXCLUDE_SYMLINKS = NO
-
-# If the value of the INPUT tag contains directories, you can use the 
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude 
-# certain files from those directories. Note that the wildcards are matched 
-# against the file with absolute path, so to exclude all test directories 
-# for example use the pattern */test/*
-
-EXCLUDE_PATTERNS = 
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or 
-# directories that contain example code fragments that are included (see 
-# the \include command).
-
-EXAMPLE_PATH = 
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the 
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp 
-# and *.h) to filter out the source-files in the directories. If left 
-# blank all files are included.
-
-EXAMPLE_PATTERNS = 
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be 
-# searched for input files to be used with the \include or \dontinclude 
-# commands irrespective of the value of the RECURSIVE tag. 
-# Possible values are YES and NO. If left blank NO is used.
-
-EXAMPLE_RECURSIVE = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or 
-# directories that contain image that are included in the documentation (see 
-# the \image command).
-
-IMAGE_PATH = 
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should 
-# invoke to filter for each input file. Doxygen will invoke the filter program 
-# by executing (via popen()) the command <filter> <input-file>, where <filter> 
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an 
-# input file. Doxygen will then use the output that the filter program writes 
-# to standard output.  If FILTER_PATTERNS is specified, this tag will be 
-# ignored.
-
-INPUT_FILTER = 
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern 
-# basis.  Doxygen will compare the file name with each pattern and apply the 
-# filter if there is a match.  The filters are a list of the form: 
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further 
-# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER 
-# is applied to all files.
-
-FILTER_PATTERNS = 
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using 
-# INPUT_FILTER) will be used to filter the input files when producing source 
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
-
-FILTER_SOURCE_FILES = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will 
-# be generated. Documented entities will be cross-referenced with these sources. 
-# Note: To get rid of all source code in the generated output, make sure also 
-# VERBATIM_HEADERS is set to NO.
-
-SOURCE_BROWSER = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body 
-# of functions and classes directly in the documentation.
-
-INLINE_SOURCES = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct 
-# doxygen to hide any special comment blocks from generated source code 
-# fragments. Normal C and C++ comments will always remain visible.
-
-STRIP_CODE_COMMENTS = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES (the default) 
-# then for each documented function all documented 
-# functions referencing it will be listed.
-
-REFERENCED_BY_RELATION = YES
-
-# If the REFERENCES_RELATION tag is set to YES (the default) 
-# then for each documented function all documented entities 
-# called/used by that function will be listed.
-
-REFERENCES_RELATION = YES
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code.  Otherwise they will link to the documentation.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code 
-# will point to the HTML generated by the htags(1) tool instead of doxygen 
-# built-in source browser. The htags tool is part of GNU's global source 
-# tagging system (see http://www.gnu.org/software/global/global.html). You 
-# will need version 4.8.6 or higher.
-
-USE_HTAGS = NO
-
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen 
-# will generate a verbatim copy of the header file for each class for 
-# which an include is specified. Set to NO to disable this.
-
-VERBATIM_HEADERS = YES
-
-#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index 
-# of all compounds will be generated. Enable this if the project 
-# contains a lot of classes, structs, unions or interfaces.
-
-ALPHABETICAL_INDEX = NO
-
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then 
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns 
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX = 5
-
-# In case all classes in a project start with a common prefix, all 
-# classes will be put under the same header in the alphabetical index. 
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that 
-# should be ignored while generating the index headers.
-
-IGNORE_PREFIX = 
-
-#---------------------------------------------------------------------------
-# configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will 
-# generate HTML output.
-
-GENERATE_HTML = $(GENERATE_HTML)
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. 
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
-# put in front of it. If left blank `html' will be used as the default path.
-
-HTML_OUTPUT = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for 
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank 
-# doxygen will generate files with .html extension.
-
-HTML_FILE_EXTENSION = .html
-
-# The HTML_HEADER tag can be used to specify a personal HTML header for 
-# each generated HTML page. If it is left blank doxygen will generate a 
-# standard header.
-
-HTML_HEADER = 
-
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for 
-# each generated HTML page. If it is left blank doxygen will generate a 
-# standard footer.
-
-HTML_FOOTER = 
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading 
-# style sheet that is used by each HTML page. It can be used to 
-# fine-tune the look of the HTML output. If the tag is left blank doxygen 
-# will generate a default style sheet. Note that doxygen will try to copy 
-# the style sheet file to the HTML output directory, so don't put your own 
-# stylesheet in the HTML output directory as well, or it will be erased!
-
-HTML_STYLESHEET = 
-
-# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, 
-# files or namespaces will be aligned in HTML using tables. If set to 
-# NO a bullet list will be used.
-
-HTML_ALIGN_MEMBERS = YES
-
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files 
-# will be generated that can be used as input for tools like the 
-# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) 
-# of the generated HTML documentation.
-
-GENERATE_HTMLHELP = $(GENERATE_HTMLHELP)
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can 
-# be used to specify the file name of the resulting .chm file. You 
-# can add a path in front of the file if the result should not be 
-# written to the html output directory.
-
-CHM_FILE = ../$(PROJECT).chm
-
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can 
-# be used to specify the location (absolute path including file name) of 
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run 
-# the HTML help compiler on the generated index.hhp.
-
-HHC_LOCATION = $(HHC_PATH)
-
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag 
-# controls if a separate .chi index file is generated (YES) or that 
-# it should be included in the master .chm file (NO).
-
-GENERATE_CHI = $(GENERATE_CHI)
-
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag 
-# controls whether a binary table of contents is generated (YES) or a 
-# normal table of contents (NO) in the .chm file.
-
-BINARY_TOC = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members 
-# to the contents of the HTML help documentation and to the tree view.
-
-TOC_EXPAND = NO
-
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index at 
-# top of each HTML page. The value NO (the default) enables the index and 
-# the value YES disables it.
-
-DISABLE_INDEX = NO
-
-# This tag can be used to set the number of enum values (range [1..20]) 
-# that doxygen will group on one line in the generated HTML documentation.
-
-ENUM_VALUES_PER_LINE = 4
-
-# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
-# generated containing a tree-like index structure (just like the one that 
-# is generated for HTML Help). For this to work a browser that supports 
-# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, 
-# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are 
-# probably better off using the HTML help feature.
-
-GENERATE_TREEVIEW = NO
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be 
-# used to set the initial width (in pixels) of the frame in which the tree 
-# is shown.
-
-TREEVIEW_WIDTH = 250
-
-#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will 
-# generate Latex output.
-
-GENERATE_LATEX = $(GENERATE_LATEX)
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. 
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
-# put in front of it. If left blank `latex' will be used as the default path.
-
-LATEX_OUTPUT = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be 
-# invoked. If left blank `latex' will be used as the default command name.
-
-LATEX_CMD_NAME = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to 
-# generate index for LaTeX. If left blank `makeindex' will be used as the 
-# default command name.
-
-MAKEINDEX_CMD_NAME = makeindex
-
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact 
-# LaTeX documents. This may be useful for small projects and may help to 
-# save some trees in general.
-
-COMPACT_LATEX = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used 
-# by the printer. Possible values are: a4, a4wide, letter, legal and 
-# executive. If left blank a4wide will be used.
-
-PAPER_TYPE = $(PAPER_SIZE)
-
-# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX 
-# packages that should be included in the LaTeX output.
-
-EXTRA_PACKAGES = 
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for 
-# the generated latex document. The header should contain everything until 
-# the first chapter. If it is left blank doxygen will generate a 
-# standard header. Notice: only use this tag if you know what you are doing!
-
-LATEX_HEADER = 
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated 
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will 
-# contain links (just like the HTML output) instead of page references. 
-# This makes the output suitable for online browsing using a pdf viewer.
-
-PDF_HYPERLINKS = NO
-
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of 
-# plain latex in the generated Makefile. Set this option to YES to get a 
-# higher quality PDF documentation.
-
-USE_PDFLATEX = $(GENERATE_PDF)
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode 
-# command to the generated LaTeX files. This will instruct LaTeX to keep 
-# running if errors occur, instead of asking the user for help. 
-# This option is also used when generating formulas in HTML.
-
-LATEX_BATCHMODE = NO
-
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not 
-# include the index chapters (such as File Index, Compound Index, etc.) 
-# in the output.
-
-LATEX_HIDE_INDICES = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output 
-# The RTF output is optimized for Word 97 and may not look very pretty with 
-# other RTF readers or editors.
-
-GENERATE_RTF = $(GENERATE_RTF)
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. 
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
-# put in front of it. If left blank `rtf' will be used as the default path.
-
-RTF_OUTPUT = rtf
-
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact 
-# RTF documents. This may be useful for small projects and may help to 
-# save some trees in general.
-
-COMPACT_RTF = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated 
-# will contain hyperlink fields. The RTF file will 
-# contain links (just like the HTML output) instead of page references. 
-# This makes the output suitable for online browsing using WORD or other 
-# programs which support those fields. 
-# Note: wordpad (write) and others do not support links.
-
-RTF_HYPERLINKS = NO
-
-# Load stylesheet definitions from file. Syntax is similar to doxygen's 
-# config file, i.e. a series of assignments. You only have to provide 
-# replacements, missing definitions are set to their default value.
-
-RTF_STYLESHEET_FILE = 
-
-# Set optional variables used in the generation of an rtf document. 
-# Syntax is similar to doxygen's config file.
-
-RTF_EXTENSIONS_FILE = 
-
-#---------------------------------------------------------------------------
-# configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will 
-# generate man pages
-
-GENERATE_MAN = $(GENERATE_MAN)
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put. 
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
-# put in front of it. If left blank `man' will be used as the default path.
-
-MAN_OUTPUT = man
-
-# The MAN_EXTENSION tag determines the extension that is added to 
-# the generated man pages (default is the subroutine's section .3)
-
-MAN_EXTENSION = .3
-
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output, 
-# then it will generate one additional man file for each entity 
-# documented in the real man page(s). These additional files 
-# only source the real man page, but without them the man command 
-# would be unable to find the correct page. The default is NO.
-
-MAN_LINKS = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES Doxygen will 
-# generate an XML file that captures the structure of 
-# the code including all documentation.
-
-GENERATE_XML = $(GENERATE_XML)
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put. 
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
-# put in front of it. If left blank `xml' will be used as the default path.
-
-XML_OUTPUT = xml
-
-# The XML_SCHEMA tag can be used to specify an XML schema, 
-# which can be used by a validating XML parser to check the 
-# syntax of the XML files.
-
-XML_SCHEMA = 
-
-# The XML_DTD tag can be used to specify an XML DTD, 
-# which can be used by a validating XML parser to check the 
-# syntax of the XML files.
-
-XML_DTD = 
-
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will 
-# dump the program listings (including syntax highlighting 
-# and cross-referencing information) to the XML output. Note that 
-# enabling this will significantly increase the size of the XML output.
-
-XML_PROGRAMLISTING = YES
-
-#---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will 
-# generate an AutoGen Definitions (see autogen.sf.net) file 
-# that captures the structure of the code including all 
-# documentation. Note that this feature is still experimental 
-# and incomplete at the moment.
-
-GENERATE_AUTOGEN_DEF = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will 
-# generate a Perl module file that captures the structure of 
-# the code including all documentation. Note that this 
-# feature is still experimental and incomplete at the 
-# moment.
-
-GENERATE_PERLMOD = NO
-
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate 
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able 
-# to generate PDF and DVI output from the Perl module output.
-
-PERLMOD_LATEX = NO
-
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be 
-# nicely formatted so it can be parsed by a human reader.  This is useful 
-# if you want to understand what is going on.  On the other hand, if this 
-# tag is set to NO the size of the Perl module output will be much smaller 
-# and Perl will parse it just the same.
-
-PERLMOD_PRETTY = YES
-
-# The names of the make variables in the generated doxyrules.make file 
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. 
-# This is useful so different doxyrules.make files included by the same 
-# Makefile don't overwrite each other's variables.
-
-PERLMOD_MAKEVAR_PREFIX = 
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor   
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will 
-# evaluate all C-preprocessor directives found in the sources and include 
-# files.
-
-ENABLE_PREPROCESSING = YES
-
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro 
-# names in the source code. If set to NO (the default) only conditional 
-# compilation will be performed. Macro expansion can be done in a controlled 
-# way by setting EXPAND_ONLY_PREDEF to YES.
-
-MACRO_EXPANSION = NO
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES 
-# then the macro expansion is limited to the macros specified with the 
-# PREDEFINED and EXPAND_AS_DEFINED tags.
-
-EXPAND_ONLY_PREDEF = NO
-
-# If the SEARCH_INCLUDES tag is set to YES (the default) the include files 
-# in the INCLUDE_PATH (see below) will be searched if a #include is found.
-
-SEARCH_INCLUDES = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that 
-# contain include files that are not input files but should be processed by 
-# the preprocessor.
-
-INCLUDE_PATH = 
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard 
-# patterns (like *.h and *.hpp) to filter out the header-files in the 
-# directories. If left blank, the patterns specified with FILE_PATTERNS will 
-# be used.
-
-INCLUDE_FILE_PATTERNS = 
-
-# The PREDEFINED tag can be used to specify one or more macro names that 
-# are defined before the preprocessor is started (similar to the -D option of 
-# gcc). The argument of the tag is a list of macros of the form: name 
-# or name=definition (no spaces). If the definition and the = are 
-# omitted =1 is assumed. To prevent a macro definition from being 
-# undefined via #undef or recursively expanded use the := operator 
-# instead of the = operator.
-
-PREDEFINED = 
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then 
-# this tag can be used to specify a list of macro names that should be expanded. 
-# The macro definition that is found in the sources will be used. 
-# Use the PREDEFINED tag if you want to use a different macro definition.
-
-EXPAND_AS_DEFINED = 
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then 
-# doxygen's preprocessor will remove all function-like macros that are alone 
-# on a line, have an all uppercase name, and do not end with a semicolon. Such 
-# function macros are typically used for boiler-plate code, and will confuse 
-# the parser if not removed.
-
-SKIP_FUNCTION_MACROS = YES
-
-#---------------------------------------------------------------------------
-# Configuration::additions related to external references   
-#---------------------------------------------------------------------------
-
-# The TAGFILES option can be used to specify one or more tagfiles. 
-# Optionally an initial location of the external documentation 
-# can be added for each tagfile. The format of a tag file without 
-# this location is as follows: 
-#   TAGFILES = file1 file2 ... 
-# Adding location for the tag files is done as follows: 
-#   TAGFILES = file1=loc1 "file2 = loc2" ... 
-# where "loc1" and "loc2" can be relative or absolute paths or 
-# URLs. If a location is present for each tag, the installdox tool 
-# does not have to be run to correct the links.
-# Note that each tag file must have a unique name
-# (where the name does NOT include the path).
-# If a tag file is not located in the directory in which doxygen 
-# is run, you must also specify the path to the tagfile here.
-
-TAGFILES = 
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create 
-# a tag file that is based on the input files it reads.
-
-GENERATE_TAGFILE = $(DOCDIR)/$(PROJECT).tag
-
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed 
-# in the class index. If set to NO only the inherited external classes 
-# will be listed.
-
-ALLEXTERNALS = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed 
-# in the modules index. If set to NO, only the current project's groups will 
-# be listed.
-
-EXTERNAL_GROUPS = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script 
-# interpreter (i.e. the result of `which perl').
-
-PERL_PATH = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool   
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will 
-# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base 
-# or super classes. Setting the tag to NO turns the diagrams off. Note that 
-# this option is superseded by the HAVE_DOT option below. This is only a 
-# fallback. It is recommended to install and use dot, since it yields more 
-# powerful graphs.
-
-CLASS_DIAGRAMS = YES
-
-# If set to YES, the inheritance and collaboration graphs will hide 
-# inheritance and usage relations if the target is undocumented 
-# or is not a class.
-
-HIDE_UNDOC_RELATIONS = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is 
-# available from the path. This tool is part of Graphviz, a graph visualization 
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section 
-# have no effect if this option is set to NO (the default)
-
-HAVE_DOT = $(HAVE_DOT)
-
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen 
-# will generate a graph for each documented class showing the direct and 
-# indirect inheritance relations. Setting this tag to YES will force 
-# the CLASS_DIAGRAMS tag to NO.
-
-CLASS_GRAPH = YES
-
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen 
-# will generate a graph for each documented class showing the direct and 
-# indirect implementation dependencies (inheritance, containment, and 
-# class references variables) of the class with other documented classes.
-
-COLLABORATION_GRAPH = YES
-
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen 
-# will generate a graph for groups, showing the direct groups dependencies
-
-GROUP_GRAPHS = YES
-
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and 
-# collaboration diagrams in a style similar to the OMG's Unified Modeling 
-# Language.
-
-UML_LOOK = NO
-
-# If set to YES, the inheritance and collaboration graphs will show the 
-# relations between templates and their instances.
-
-TEMPLATE_RELATIONS = NO
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT 
-# tags are set to YES then doxygen will generate a graph for each documented 
-# file showing the direct and indirect include dependencies of the file with 
-# other documented files.
-
-INCLUDE_GRAPH = YES
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and 
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each 
-# documented header file showing the documented files that directly or 
-# indirectly include this file.
-
-INCLUDED_BY_GRAPH = YES
-
-# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will 
-# generate a call dependency graph for every global function or class method. 
-# Note that enabling this option will significantly increase the time of a run. 
-# So in most cases it will be better to enable call graphs for selected 
-# functions only using the \callgraph command.
-
-CALL_GRAPH = NO
-
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then doxygen will 
-# generate a caller dependency graph for every global function or class method. 
-# Note that enabling this option will significantly increase the time of a run. 
-# So in most cases it will be better to enable caller graphs for selected 
-# functions only using the \callergraph command.
-
-CALLER_GRAPH = NO
-
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen 
-# will show a graphical hierarchy of all classes instead of a textual one.
-
-GRAPHICAL_HIERARCHY = YES
-
-# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES 
-# then doxygen will show the dependencies a directory has on other directories 
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
-
-DIRECTORY_GRAPH = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images 
-# generated by dot. Possible values are png, jpg, or gif
-# If left blank png will be used.
-
-DOT_IMAGE_FORMAT = png
-
-# The tag DOT_PATH can be used to specify the path where the dot tool can be 
-# found. If left blank, it is assumed the dot tool can be found in the path.
-
-DOT_PATH = $(DOT_PATH)
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that 
-# contain dot files that are included in the documentation (see the 
-# \dotfile command).
-
-DOTFILE_DIRS = 
-
-# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width 
-# (in pixels) of the graphs generated by dot. If a graph becomes larger than 
-# this value, doxygen will try to truncate the graph, so that it fits within 
-# the specified constraint. Beware that most browsers cannot cope with very 
-# large images.
-
-MAX_DOT_GRAPH_WIDTH = 1024
-
-# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height 
-# (in pixels) of the graphs generated by dot. If a graph becomes larger than 
-# this value, doxygen will try to truncate the graph, so that it fits within 
-# the specified constraint. Beware that most browsers cannot cope with very 
-# large images.
-
-MAX_DOT_GRAPH_HEIGHT = 1024
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the 
-# graphs generated by dot. A depth value of 3 means that only nodes reachable 
-# from the root by following a path via at most 3 edges will be shown. Nodes 
-# that lay further from the root node will be omitted. Note that setting this 
-# option to 1 or 2 may greatly reduce the computation time needed for large 
-# code bases. Also note that a graph may be further truncated if the graph's 
-# image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH 
-# and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default), 
-# the graph is not depth-constrained.
-
-MAX_DOT_GRAPH_DEPTH = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent 
-# background. This is disabled by default, which results in a white background. 
-# Warning: Depending on the platform used, enabling this option may lead to 
-# badly anti-aliased labels on the edges of a graph (i.e. they become hard to 
-# read).
-
-DOT_TRANSPARENT = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output 
-# files in one run (i.e. multiple -o and -T options on the command line). This 
-# makes dot run faster, but since only newer versions of dot (>1.8.10) 
-# support this, this feature is disabled by default.
-
-DOT_MULTI_TARGETS = NO
-
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will 
-# generate a legend page explaining the meaning of the various boxes and 
-# arrows in the dot generated graphs.
-
-GENERATE_LEGEND = YES
-
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will 
-# remove the intermediate dot files that are used to generate 
-# the various graphs.
-
-DOT_CLEANUP = YES
-
-#---------------------------------------------------------------------------
-# Configuration::additions related to the search engine   
-#---------------------------------------------------------------------------
-
-# The SEARCHENGINE tag specifies whether or not a search engine should be 
-# used. If set to NO the values of all tags below this one will be ignored.
-
-SEARCHENGINE = NO

+ 0 - 56
src/contrib/hedwig/client/src/main/cpp/config.h.in

@@ -1,56 +0,0 @@
-/* config.h.in.  Generated from configure.ac by autoheader.  */
-
-/* Define to 1 if you have the <dlfcn.h> header file. */
-#undef HAVE_DLFCN_H
-
-/* Define to 1 if you have the <inttypes.h> header file. */
-#undef HAVE_INTTYPES_H
-
-/* Define to 1 if you have the <memory.h> header file. */
-#undef HAVE_MEMORY_H
-
-/* Define to 1 if you have the <stdint.h> header file. */
-#undef HAVE_STDINT_H
-
-/* Define to 1 if you have the <stdlib.h> header file. */
-#undef HAVE_STDLIB_H
-
-/* Define to 1 if you have the <strings.h> header file. */
-#undef HAVE_STRINGS_H
-
-/* Define to 1 if you have the <string.h> header file. */
-#undef HAVE_STRING_H
-
-/* Define to 1 if you have the <sys/stat.h> header file. */
-#undef HAVE_SYS_STAT_H
-
-/* Define to 1 if you have the <sys/types.h> header file. */
-#undef HAVE_SYS_TYPES_H
-
-/* Define to 1 if you have the <unistd.h> header file. */
-#undef HAVE_UNISTD_H
-
-/* Define to the sub-directory in which libtool stores uninstalled libraries.
-   */
-#undef LT_OBJDIR
-
-/* Define to the address where bug reports for this package should be sent. */
-#undef PACKAGE_BUGREPORT
-
-/* Define to the full name of this package. */
-#undef PACKAGE_NAME
-
-/* Define to the full name and version of this package. */
-#undef PACKAGE_STRING
-
-/* Define to the one symbol short name of this package. */
-#undef PACKAGE_TARNAME
-
-/* Define to the home page for this package. */
-#undef PACKAGE_URL
-
-/* Define to the version of this package. */
-#undef PACKAGE_VERSION
-
-/* Define to 1 if you have the ANSI C header files. */
-#undef STDC_HEADERS

+ 0 - 40
src/contrib/hedwig/client/src/main/cpp/configure.ac

@@ -1,40 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-AC_INIT([Hedwig C++ Client], [0.1], [zookeeper-dev@hadoop.apache.org], [hedwig-cpp], [http://hadoop.apache.org/zookeeper//])
-
-AC_PREREQ([2.59])
-AM_INIT_AUTOMAKE([1.9 no-define foreign])
-AC_CONFIG_HEADERS([config.h])
-AC_PROG_CXX
-AC_LANG([C++])
-AC_CONFIG_FILES([Makefile lib/Makefile test/Makefile hedwig-0.1.pc])
-AC_PROG_LIBTOOL
-AC_CONFIG_MACRO_DIR([m4])
-PKG_CHECK_MODULES([DEPS], [liblog4cxx protobuf cppunit])
-AX_BOOST_BASE
-AX_BOOST_ASIO	  
-AX_BOOST_THREAD
-
-DX_HTML_FEATURE(ON)
-DX_INIT_DOXYGEN(hedwig-c++, c-doc.Doxyfile, doc)
-
-CXXFLAGS="$CXXFLAGS -Wall"
-
-AC_OUTPUT
-

+ 0 - 30
src/contrib/hedwig/client/src/main/cpp/hedwig-0.1.pc.in

@@ -1,30 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
-
-Name: something
-Description: Some library.
-Requires: 
-Version: @PACKAGE_VERSION@
-Libs: -L${libdir} -lhedwig01
-Cflags: -I${includedir}/hedwig-0.1 -I${libdir}/hedwig-0.1/include
-

+ 0 - 45
src/contrib/hedwig/client/src/main/cpp/inc/hedwig/callback.h

@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef HEDWIG_CALLBACK_H
-#define HEDWIG_CALLBACK_H
-
-#include <string>
-#include <hedwig/exceptions.h>
-#include <hedwig/protocol.h>
-#include <tr1/memory>
-
-namespace Hedwig {
-  class OperationCallback {
-  public:
-    virtual void operationComplete() = 0;
-    virtual void operationFailed(const std::exception& exception) = 0;
-    
-    virtual ~OperationCallback() {};
-  };
-  typedef std::tr1::shared_ptr<OperationCallback> OperationCallbackPtr;
-
-  class MessageHandlerCallback {
-  public:
-    virtual void consume(const std::string& topic, const std::string& subscriberId, const Message& msg, OperationCallbackPtr& callback) = 0;
-    
-    virtual ~MessageHandlerCallback() {};
-  };
-  typedef std::tr1::shared_ptr<MessageHandlerCallback> MessageHandlerCallbackPtr;
-}
-
-#endif
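
For context, a minimal sketch of how the callback interfaces removed above could be implemented by application code. The class names are illustrative and not part of the original tree, and it assumes the protobuf-generated Hedwig::Message exposes a body() accessor.

    #include <iostream>
    #include <string>
    #include <hedwig/callback.h>

    // Illustrative OperationCallback: log success or failure of an async operation.
    class LoggingOpCallback : public Hedwig::OperationCallback {
    public:
      virtual void operationComplete() {
        std::cout << "operation complete" << std::endl;
      }
      virtual void operationFailed(const std::exception& e) {
        std::cerr << "operation failed: " << e.what() << std::endl;
      }
    };

    // Illustrative MessageHandlerCallback: print each delivered message, then
    // signal through the supplied callback that it has been handled.
    class PrintingHandler : public Hedwig::MessageHandlerCallback {
    public:
      virtual void consume(const std::string& topic, const std::string& subscriberId,
                           const Hedwig::Message& msg, Hedwig::OperationCallbackPtr& callback) {
        std::cout << topic << "/" << subscriberId << ": " << msg.body() << std::endl;
        callback->operationComplete();
      }
    };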

+ 0 - 80
src/contrib/hedwig/client/src/main/cpp/inc/hedwig/client.h

@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef HEDWIG_CLIENT_H
-#define HEDWIG_CLIENT_H
-
-#include <string>
-#include <tr1/memory>
-
-#include <hedwig/subscribe.h>
-#include <hedwig/publish.h>
-#include <hedwig/exceptions.h>
-#include <boost/noncopyable.hpp>
-#include <boost/shared_ptr.hpp>
-
-namespace Hedwig {
-
-  class ClientImpl;
-  typedef boost::shared_ptr<ClientImpl> ClientImplPtr;
-
-  class Configuration {
-  public:
-    static const std::string DEFAULT_SERVER;
-    static const std::string MESSAGE_CONSUME_RETRY_WAIT_TIME;
-    static const std::string SUBSCRIBER_CONSUME_RETRY_WAIT_TIME;
-    static const std::string MAX_MESSAGE_QUEUE_SIZE;
-    static const std::string RECONNECT_SUBSCRIBE_RETRY_WAIT_TIME;
-    static const std::string SYNC_REQUEST_TIMEOUT;
-    static const std::string SUBSCRIBER_AUTOCONSUME;
-
-  public:
-    Configuration() {};
-    virtual int getInt(const std::string& key, int defaultVal) const = 0;
-    virtual const std::string get(const std::string& key, const std::string& defaultVal) const = 0;
-    virtual bool getBool(const std::string& key, bool defaultVal) const = 0;
-
-    virtual ~Configuration() {}
-  };
-
-  /** 
-      Main Hedwig client class. This class is used to acquire an instance of the Subscriber or Publisher.
-  */
-  class Client : private boost::noncopyable {
-  public: 
-    Client(const Configuration& conf);
-
-    /**
-       Retrieve the subscriber object
-    */
-    Subscriber& getSubscriber();
-
-    /**
-       Retrieve the publisher object
-    */
-    Publisher& getPublisher();
-
-    ~Client();
-
-  private:
-    ClientImplPtr clientimpl;
-  };
-
- 
-};
-
-#endif
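
A minimal sketch of constructing a Client from the header above, assuming the built-in defaults are acceptable. DefaultConfiguration is illustrative and not part of the original tree.

    #include <string>
    #include <hedwig/client.h>

    // Illustrative Configuration that simply falls back to the supplied defaults.
    class DefaultConfiguration : public Hedwig::Configuration {
    public:
      virtual int getInt(const std::string&, int defaultVal) const { return defaultVal; }
      virtual const std::string get(const std::string&, const std::string& defaultVal) const { return defaultVal; }
      virtual bool getBool(const std::string&, bool defaultVal) const { return defaultVal; }
    };

    int main() {
      DefaultConfiguration conf;                         // must outlive the Client
      Hedwig::Client client(conf);
      Hedwig::Publisher& pub = client.getPublisher();    // references owned by the client
      Hedwig::Subscriber& sub = client.getSubscriber();
      (void)pub; (void)sub;
      return 0;
    }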

+ 0 - 51
src/contrib/hedwig/client/src/main/cpp/inc/hedwig/exceptions.h

@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef HEDWIG_EXCEPTION_H
-#define HEDWIG_EXCEPTION_H
-
-#include <exception>
-
-namespace Hedwig {
-
-  class ClientException : public std::exception { };
-
-  class ClientTimeoutException : public ClientException {};
-
-  class ServiceDownException : public ClientException {};
-  class CannotConnectException : public ClientException {};
-  class UnexpectedResponseException : public ClientException {};
-  class OomException : public ClientException {};
-  class UnknownRequestException : public ClientException {};
-  class InvalidRedirectException : public ClientException {};
-
-  class PublisherException : public ClientException { };
-  
-
-  class SubscriberException : public ClientException { };
-  class AlreadySubscribedException : public SubscriberException {};
-  class NotSubscribedException : public SubscriberException {};
-
-  class ConfigurationException : public ClientException { };
-  class InvalidPortException : public ConfigurationException {};
-  class HostResolutionException : public ClientException {};
-  
-  class InvalidStateException : public ClientException {};
-  class ShuttingDownException : public InvalidStateException {};
-};
-
-#endif
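
Since every exception above derives from ClientException (and ultimately std::exception), callers can catch narrowly or broadly; a small illustrative sketch, with the helper name invented for the example:

    #include <iostream>
    #include <string>
    #include <hedwig/exceptions.h>
    #include <hedwig/publish.h>

    // Illustrative error handling: specific failures first, the common base last.
    void publishOrReport(Hedwig::Publisher& pub, const std::string& topic, const std::string& msg) {
      try {
        pub.publish(topic, msg);
      } catch (const Hedwig::ClientTimeoutException&) {
        std::cerr << "publish timed out" << std::endl;
      } catch (const Hedwig::ServiceDownException&) {
        std::cerr << "hub is unavailable" << std::endl;
      } catch (const Hedwig::ClientException&) {
        std::cerr << "publish failed" << std::endl;
      }
    }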

+ 0 - 61
src/contrib/hedwig/client/src/main/cpp/inc/hedwig/publish.h

@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef HEDWIG_PUBLISH_H
-#define HEDWIG_PUBLISH_H
-
-#include <string>
-
-#include <hedwig/exceptions.h>
-#include <hedwig/callback.h>
-#include <hedwig/protocol.h>
-#include <boost/noncopyable.hpp>
-
-namespace Hedwig {
-
-  /**
-     Interface for publishing to a hedwig instance.
-  */
-  class Publisher : private boost::noncopyable {
-  public:
-    /**
-       Publish a message for a topic, and block until we receive an ACK response from the hedwig server.
-       
-       @param topic Topic to publish to.
-       @param message Data to publish for topic.
-    */
-    virtual void publish(const std::string& topic, const std::string& message) = 0;
-    
-    /** 
-	Asynchronously publish message for topic. 
-	
-	@code
-	OperationCallbackPtr callback(new MyCallback());
-	pub.asyncPublish(callback);
-	@endcode
-
-	@param topic Topic to publish to.
-	@param message Data to publish to topic
-	@param callback Callback which will be used to report success or failure. Success is only reported once the server replies with an ACK response to the publication.
-    */
-    virtual void asyncPublish(const std::string& topic, const std::string& message, const OperationCallbackPtr& callback) = 0;
-    
-    virtual ~Publisher() {}
-  };
-};
-
-#endif
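
A short sketch of both publish styles declared above. PrintResult is an illustrative OperationCallback, not part of the original tree.

    #include <string>
    #include <hedwig/publish.h>
    #include <hedwig/callback.h>

    // Illustrative throwaway callback for the asynchronous path.
    class PrintResult : public Hedwig::OperationCallback {
    public:
      virtual void operationComplete() { /* hub ACKed the message */ }
      virtual void operationFailed(const std::exception&) { /* log or retry */ }
    };

    void publishBoth(Hedwig::Publisher& pub) {
      // Blocking: returns only after the hub acknowledges the message.
      pub.publish("my-topic", "hello, synchronously");

      // Non-blocking: the ACK (or an error) is reported through the callback.
      Hedwig::OperationCallbackPtr cb(new PrintResult());
      pub.asyncPublish("my-topic", "hello, asynchronously", cb);
    }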

+ 0 - 52
src/contrib/hedwig/client/src/main/cpp/inc/hedwig/subscribe.h

@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef HEDWIG_SUBSCRIBE_H
-#define HEDWIG_SUBSCRIBE_H
-
-#include <string>
-
-#include <hedwig/exceptions.h>
-#include <hedwig/callback.h>
-#include <hedwig/protocol.h>
-#include <boost/noncopyable.hpp>
-
-namespace Hedwig {
-
-  /**
-     Interface for subscribing to a hedwig instance. 
-  */
-  class Subscriber : private boost::noncopyable {
-  public:
-    virtual void subscribe(const std::string& topic, const std::string& subscriberId, const SubscribeRequest::CreateOrAttach mode) = 0;
-    virtual void asyncSubscribe(const std::string& topic, const std::string& subscriberId, const SubscribeRequest::CreateOrAttach mode, const OperationCallbackPtr& callback) = 0;
-    
-    virtual void unsubscribe(const std::string& topic, const std::string& subscriberId) = 0;
-    virtual void asyncUnsubscribe(const std::string& topic, const std::string& subscriberId, const OperationCallbackPtr& callback) = 0;  
-
-    virtual void consume(const std::string& topic, const std::string& subscriberId, const MessageSeqId& messageSeqId) = 0;
-
-    virtual void startDelivery(const std::string& topic, const std::string& subscriberId, const MessageHandlerCallbackPtr& callback) = 0;
-    virtual void stopDelivery(const std::string& topic, const std::string& subscriberId) = 0;
-
-    virtual void closeSubscription(const std::string& topic, const std::string& subscriberId) = 0;
-
-    virtual ~Subscriber() {}
-  };
-};
-
-#endif
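
A sketch of attaching a subscriber and starting delivery with the interface above. EchoHandler and the topic/subscriber names are illustrative, and the enum value assumes the protobuf-generated SubscribeRequest defines CREATE_OR_ATTACH.

    #include <string>
    #include <hedwig/subscribe.h>
    #include <hedwig/callback.h>

    // Illustrative handler: acknowledge every delivered message as handled.
    class EchoHandler : public Hedwig::MessageHandlerCallback {
    public:
      virtual void consume(const std::string& topic, const std::string& subscriberId,
                           const Hedwig::Message& msg, Hedwig::OperationCallbackPtr& callback) {
        // ... process msg ...
        callback->operationComplete();
      }
    };

    void attachAndListen(Hedwig::Subscriber& sub) {
      // Create the subscription if needed, otherwise attach to the existing one.
      sub.subscribe("my-topic", "subscriber-1", Hedwig::SubscribeRequest::CREATE_OR_ATTACH);

      // Messages are pushed to the handler until stopDelivery is called.
      Hedwig::MessageHandlerCallbackPtr handler(new EchoHandler());
      sub.startDelivery("my-topic", "subscriber-1", handler);
    }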

+ 0 - 32
src/contrib/hedwig/client/src/main/cpp/lib/Makefile.am

@@ -1,32 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-PROTODEF = ../../../../../protocol/src/main/protobuf/PubSubProtocol.proto
-
-lib_LTLIBRARIES = libhedwig01.la
-libhedwig01_la_SOURCES = protocol.cpp channel.cpp client.cpp util.cpp clientimpl.cpp publisherimpl.cpp subscriberimpl.cpp eventdispatcher.cpp data.cpp
-libhedwig01_la_CPPFLAGS = -I$(top_srcdir)/inc $(DEPS_CFLAGS)
-libhedwig01_la_LIBADD = $(DEPS_LIBS) $(BOOST_CPPFLAGS) 
-libhedwig01_la_LDFLAGS = -no-undefined $(BOOST_ASIO_LIB) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB)
-
-protocol.cpp: $(PROTODEF)
-	protoc --cpp_out=. -I`dirname $(PROTODEF)` $(PROTODEF)
-	sed "s/PubSubProtocol.pb.h/hedwig\/protocol.h/" PubSubProtocol.pb.cc > protocol.cpp
-	rm PubSubProtocol.pb.cc
-	mv PubSubProtocol.pb.h $(top_srcdir)/inc/hedwig/protocol.h
-

+ 0 - 420
src/contrib/hedwig/client/src/main/cpp/lib/channel.cpp

@@ -1,420 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#include <arpa/inet.h>
-#include <netinet/tcp.h>
-#include <poll.h>
-#include <iostream>
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <errno.h>
-#include <vector>
-#include <utility>
-#include <deque>
-#include "channel.h"
-#include "util.h"
-#include "clientimpl.h"
-
-#include <log4cxx/logger.h>
-#include <google/protobuf/io/zero_copy_stream_impl.h>
-
-static log4cxx::LoggerPtr logger(log4cxx::Logger::getLogger("hedwig."__FILE__));
-
-using namespace Hedwig;
-
-DuplexChannel::DuplexChannel(EventDispatcher& dispatcher, const HostAddress& addr, 
-			     const Configuration& cfg, const ChannelHandlerPtr& handler)
-  : dispatcher(dispatcher), address(addr), handler(handler), 
-    socket(dispatcher.getService()), instream(&in_buf), copy_buf(NULL), copy_buf_length(0),
-    state(UNINITIALISED), receiving(false), sending(false)
-{
-  LOG4CXX_DEBUG(logger, "Creating DuplexChannel(" << this << ")");
-}
-
-/*static*/ void DuplexChannel::connectCallbackHandler(DuplexChannelPtr channel,
-						      const boost::system::error_code& error) {
-  LOG4CXX_DEBUG(logger,"DuplexChannel::connectCallbackHandler error(" << error 
-		<< ") channel(" << channel.get() << ")");
-
-  if (error) {
-    channel->channelDisconnected(ChannelConnectException());
-    channel->setState(DEAD);
-    return;
-  }
-
-  channel->setState(CONNECTED);
-
-  boost::system::error_code ec;
-  boost::asio::ip::tcp::no_delay option(true);
-
-  channel->socket.set_option(option, ec);
-  if (ec) {
-    channel->channelDisconnected(ChannelSetupException());
-    channel->setState(DEAD);
-    return;
-  } 
-  
-  channel->startSending();
-  channel->startReceiving();
-}
-
-void DuplexChannel::connect() {  
-  setState(CONNECTING);
-
-  boost::asio::ip::tcp::endpoint endp(boost::asio::ip::address_v4(address.ip()), address.port());
-  boost::system::error_code error = boost::asio::error::host_not_found;
-
-  socket.async_connect(endp, boost::bind(&DuplexChannel::connectCallbackHandler, 
-					 shared_from_this(), 
-					 boost::asio::placeholders::error)); 
-}
-
-/*static*/ void DuplexChannel::messageReadCallbackHandler(DuplexChannelPtr channel, 
-							  std::size_t message_size,
-							  const boost::system::error_code& error, 
-							  std::size_t bytes_transferred) {
-  LOG4CXX_DEBUG(logger, "DuplexChannel::messageReadCallbackHandler " << error << ", " 
-		<< bytes_transferred << " channel(" << channel.get() << ")");
-		  
-  if (error) {
-    LOG4CXX_ERROR(logger, "Invalid read error (" << error << ") bytes_transferred (" 
-		  << bytes_transferred << ") channel(" << channel.get() << ")");
-    channel->channelDisconnected(ChannelReadException());
-    return;
-  }
-
-  if (channel->copy_buf_length < message_size) {
-    channel->copy_buf_length = message_size;
-    channel->copy_buf = (char*)realloc(channel->copy_buf, channel->copy_buf_length);
-    if (channel->copy_buf == NULL) {
-      LOG4CXX_ERROR(logger, "Error allocating buffer. channel(" << channel.get() << ")");
-      return;
-    }
-  }
-  
-  channel->instream.read(channel->copy_buf, message_size);
-  PubSubResponsePtr response(new PubSubResponse());
-  bool err = response->ParseFromArray(channel->copy_buf, message_size);
-
-
-  if (!err) {
-    LOG4CXX_ERROR(logger, "Error parsing message. channel(" << channel.get() << ")");
-
-    channel->channelDisconnected(ChannelReadException());
-    return;
-  } else {
-    LOG4CXX_DEBUG(logger,  "channel(" << channel.get() << ") : " << channel->in_buf.size() 
-		  << " bytes left in buffer");
-  }
-
-  ChannelHandlerPtr h;
-  {
-    boost::shared_lock<boost::shared_mutex> lock(channel->destruction_lock);
-    if (channel->handler.get()) {
-      h = channel->handler;
-    }
-  }
-  if (h.get()) {
-    h->messageReceived(channel, response);
-  }
-
-  DuplexChannel::readSize(channel);
-}
-
-/*static*/ void DuplexChannel::sizeReadCallbackHandler(DuplexChannelPtr channel, 
-						       const boost::system::error_code& error, 
-						       std::size_t bytes_transferred) {
-  LOG4CXX_DEBUG(logger, "DuplexChannel::sizeReadCallbackHandler " << error << ", " 
-		<< bytes_transferred << " channel(" << channel.get() << ")");
-
-  if (error) {
-    LOG4CXX_ERROR(logger, "Invalid read error (" << error << ") bytes_transferred (" 
-		  << bytes_transferred << ") channel(" << channel.get() << ")");
-    channel->channelDisconnected(ChannelReadException());
-    return;
-  }
-  
-  if (channel->in_buf.size() < sizeof(uint32_t)) {
-    LOG4CXX_ERROR(logger, "Not enough data in stream. Must have been an error reading. " 
-		  << " Closing channel(" << channel.get() << ")");
-    channel->channelDisconnected(ChannelReadException());
-    return;
-  }
-
-  uint32_t size;
-  std::istream is(&channel->in_buf);
-  is.read((char*)&size, sizeof(uint32_t));
-  size = ntohl(size);
-
-  int toread = size - channel->in_buf.size();
-  LOG4CXX_DEBUG(logger, " size of incoming message " << size << ", currently in buffer " 
-		<< channel->in_buf.size() << " channel(" << channel.get() << ")");
-  if (toread <= 0) {
-    DuplexChannel::messageReadCallbackHandler(channel, size, error, 0);
-  } else {
-    boost::asio::async_read(channel->socket, channel->in_buf,
-			    boost::asio::transfer_at_least(toread),
-			    boost::bind(&DuplexChannel::messageReadCallbackHandler, 
-					channel, size,
-					boost::asio::placeholders::error, 
-					boost::asio::placeholders::bytes_transferred));
-  }
-}
-
-/*static*/ void DuplexChannel::readSize(DuplexChannelPtr channel) {
-  if (!channel->isReceiving()) {
-    return;
-  }
-
-  int toread = sizeof(uint32_t) - channel->in_buf.size();
-  LOG4CXX_DEBUG(logger, " size of incoming message " << sizeof(uint32_t) 
-		<< ", currently in buffer " << channel->in_buf.size() 
-		<< " channel(" << channel.get() << ")");
-
-  if (toread < 0) {
-    DuplexChannel::sizeReadCallbackHandler(channel, boost::system::error_code(), 0);
-  } else {
-    //  in_buf_size.prepare(sizeof(uint32_t));
-    boost::asio::async_read(channel->socket, channel->in_buf, 
-			    boost::asio::transfer_at_least(sizeof(uint32_t)),
-			    boost::bind(&DuplexChannel::sizeReadCallbackHandler, 
-					channel, 
-					boost::asio::placeholders::error, 
-					boost::asio::placeholders::bytes_transferred));
-  }
-}
-
-void DuplexChannel::startReceiving() {
-  LOG4CXX_DEBUG(logger, "DuplexChannel::startReceiving channel(" << this << ") currently receiving = " << receiving);
-  
-  boost::lock_guard<boost::mutex> lock(receiving_lock);
-  if (receiving) {
-    return;
-  } 
-  receiving = true;
-  
-  DuplexChannel::readSize(shared_from_this());
-}
-
-bool DuplexChannel::isReceiving() {
-  return receiving;
-}
-
-void DuplexChannel::stopReceiving() {
-  LOG4CXX_DEBUG(logger, "DuplexChannel::stopReceiving channel(" << this << ")");
-  
-  boost::lock_guard<boost::mutex> lock(receiving_lock);
-  receiving = false;
-}
-
-void DuplexChannel::startSending() {
-  {
-    boost::shared_lock<boost::shared_mutex> lock(state_lock);
-    if (state != CONNECTED) {
-      return;
-    }
-  }
-
-  boost::lock_guard<boost::mutex> lock(sending_lock);
-  if (sending) {
-    return;
-  }
-  LOG4CXX_DEBUG(logger, "DuplexChannel::startSending channel(" << this << ")");
-  
-  WriteRequest w;
-  { 
-    boost::lock_guard<boost::mutex> lock(write_lock);
-    if (write_queue.empty()) {
-      return;
-    }
-    w = write_queue.front();
-    write_queue.pop_front();
-  }
-
-  sending = true;
-
-  std::ostream os(&out_buf);
-  uint32_t size = htonl(w.first->ByteSize());
-  os.write((char*)&size, sizeof(uint32_t));
-  
-  bool err = w.first->SerializeToOstream(&os);
-  if (!err) {
-    w.second->operationFailed(ChannelWriteException());
-    channelDisconnected(ChannelWriteException());
-    return;
-  }
-
-  boost::asio::async_write(socket, out_buf, 
-			   boost::bind(&DuplexChannel::writeCallbackHandler, 
-				       shared_from_this(), 
-				       w.second,
-				       boost::asio::placeholders::error, 
-				       boost::asio::placeholders::bytes_transferred));
-}
-
-
-const HostAddress& DuplexChannel::getHostAddress() const {
-  return address;
-}
-
-void DuplexChannel::channelDisconnected(const std::exception& e) {
-  setState(DEAD);
-  
-  {
-    boost::lock_guard<boost::mutex> lock(write_lock);
-    while (!write_queue.empty()) {
-      WriteRequest w = write_queue.front();
-      write_queue.pop_front();
-      w.second->operationFailed(e);
-    }
-  }
-
-  ChannelHandlerPtr h;
-  {
-    boost::shared_lock<boost::shared_mutex> lock(destruction_lock);
-    if (handler.get()) {
-      h = handler;
-    }
-  }
-  if (h.get()) {
-    h->channelDisconnected(shared_from_this(), e);
-  }
-}
-
-void DuplexChannel::kill() {
-  LOG4CXX_DEBUG(logger, "Killing duplex channel (" << this << ")");
-    
-  bool connected = false;
-  {
-    boost::shared_lock<boost::shared_mutex> statelock(state_lock);
-    connected = (state == CONNECTING || state == CONNECTED);
-  }
-
-  boost::lock_guard<boost::shared_mutex> lock(destruction_lock);
-  if (connected) {
-    setState(DEAD);
-    
-    socket.cancel();
-    socket.shutdown(boost::asio::ip::tcp::socket::shutdown_both);
-    socket.close();
-  }
-  handler = ChannelHandlerPtr(); // clear the handler in case it ever referenced the channel
-}
-
-DuplexChannel::~DuplexChannel() {
-  /** If we are going away, fail all transactions that haven't been completed */
-  failAllTransactions();
-  kill();
-  free(copy_buf);
-  copy_buf = NULL;
-  copy_buf_length = 0;
-
-  LOG4CXX_DEBUG(logger, "Destroying DuplexChannel(" << this << ")");
-}
-
-/*static*/ void DuplexChannel::writeCallbackHandler(DuplexChannelPtr channel, OperationCallbackPtr callback,
-						    const boost::system::error_code& error, 
-						    std::size_t bytes_transferred) {
-  LOG4CXX_DEBUG(logger, "DuplexChannel::writeCallbackHandler " << error << ", " 
-		<< bytes_transferred << " channel(" << channel.get() << ")");
-
-  if (error) {
-    callback->operationFailed(ChannelWriteException());
-    channel->channelDisconnected(ChannelWriteException());
-    return;
-  }
-
-  callback->operationComplete();
-
-  channel->out_buf.consume(bytes_transferred);
-
-  {
-    boost::lock_guard<boost::mutex> lock(channel->sending_lock);
-    channel->sending = false;
-  }
-
-  channel->startSending();
-}
-
-void DuplexChannel::writeRequest(const PubSubRequestPtr& m, const OperationCallbackPtr& callback) {
-  LOG4CXX_DEBUG(logger, "DuplexChannel::writeRequest channel(" << this << ") txnid(" 
-		<< m->txnid() << ") shouldClaim("<< m->has_shouldclaim() << ", " 
-		<< m->shouldclaim() << ")");
-
-  {
-    boost::shared_lock<boost::shared_mutex> lock(state_lock);
-    if (state != CONNECTED && state != CONNECTING) {
-      LOG4CXX_ERROR(logger,"Tried to write transaction [" << m->txnid() << "] to a channel [" 
-		    << this << "] which is " << (state == DEAD ? "DEAD" : "UNINITIALISED"));
-      callback->operationFailed(UninitialisedChannelException());
-    }
-  }
-
-  { 
-    boost::lock_guard<boost::mutex> lock(write_lock);
-    WriteRequest w(m, callback);
-    write_queue.push_back(w);
-  }
-
-  startSending();
-}
-
-/**
-   Store the transaction data for a request.
-*/
-void DuplexChannel::storeTransaction(const PubSubDataPtr& data) {
-  LOG4CXX_DEBUG(logger, "Storing txnid(" << data->getTxnId() << ") for channel(" << this << ")");
-
-  boost::lock_guard<boost::mutex> lock(txnid2data_lock);
-  txnid2data[data->getTxnId()] = data;
-}
-
-/**
-   Give the transaction back to the caller. 
-*/
-PubSubDataPtr DuplexChannel::retrieveTransaction(long txnid) {
-  boost::lock_guard<boost::mutex> lock(txnid2data_lock);
-
-  PubSubDataPtr data = txnid2data[txnid];
-  txnid2data.erase(txnid);
-  if (data == NULL) {
-    LOG4CXX_ERROR(logger, "Transaction txnid(" << txnid 
-		  << ") doesn't exist in channel (" << this << ")");
-  }
-
-  return data;
-}
-
-void DuplexChannel::failAllTransactions() {
-  boost::lock_guard<boost::mutex> lock(txnid2data_lock);
-  for (TransactionMap::iterator iter = txnid2data.begin(); iter != txnid2data.end(); ++iter) {
-    PubSubDataPtr& data = (*iter).second;
-    data->getCallback()->operationFailed(ChannelDiedException());
-  }
-  txnid2data.clear();
-}
-
-void DuplexChannel::setState(State s) {
-  boost::lock_guard<boost::shared_mutex> lock(state_lock);
-  state = s;
-}
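
For reference, the framing that startSending and the read handlers above implement is a 4-byte network-order length prefix followed by the serialized protobuf. A standalone sketch of the same framing; the helper functions are illustrative and not part of the original tree.

    #include <arpa/inet.h>   // htonl / ntohl
    #include <string.h>
    #include <stdint.h>
    #include <string>
    #include <hedwig/protocol.h>

    // Frame a request the way DuplexChannel::startSending does:
    // 4-byte big-endian length, then the protobuf-encoded body.
    std::string frame(const Hedwig::PubSubRequest& req) {
      std::string body;
      req.SerializeToString(&body);
      uint32_t size = htonl(body.size());
      std::string out(reinterpret_cast<const char*>(&size), sizeof(uint32_t));
      out.append(body);
      return out;
    }

    // Undo the framing, mirroring sizeReadCallbackHandler/messageReadCallbackHandler.
    bool unframe(const std::string& in, Hedwig::PubSubResponse* resp) {
      if (in.size() < sizeof(uint32_t)) return false;
      uint32_t size;
      memcpy(&size, in.data(), sizeof(uint32_t));
      size = ntohl(size);
      if (in.size() < sizeof(uint32_t) + size) return false;
      return resp->ParseFromArray(in.data() + sizeof(uint32_t), size);
    }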

+ 0 - 156
src/contrib/hedwig/client/src/main/cpp/lib/channel.h

@@ -1,156 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef HEDWIG_CHANNEL_H
-#define HEDWIG_CHANNEL_H
-
-#include <hedwig/protocol.h>
-#include <hedwig/callback.h>
-#include <hedwig/client.h>
-#include "util.h"
-#include "data.h"
-#include "eventdispatcher.h"
-
-#include <tr1/memory>
-#include <tr1/unordered_map>
-
-#include <google/protobuf/io/zero_copy_stream_impl.h>
-
-#include <boost/shared_ptr.hpp>
-#include <boost/enable_shared_from_this.hpp>
-
-#include <boost/asio/ip/tcp.hpp>
-#include <boost/thread/mutex.hpp>
-#include <boost/thread/shared_mutex.hpp>
-
-namespace Hedwig {
-  class ChannelException : public std::exception { };
-  class UninitialisedChannelException : public ChannelException {};
-
-  class ChannelConnectException : public ChannelException {};
-  class CannotCreateSocketException : public ChannelConnectException {};
-  class ChannelSetupException : public ChannelConnectException {};
-  class ChannelNotConnectedException : public ChannelConnectException {};
-
-  class ChannelDiedException : public ChannelException {};
-
-  class ChannelWriteException : public ChannelException {};
-  class ChannelReadException : public ChannelException {};
-  class ChannelThreadException : public ChannelException {};
-
-  class DuplexChannel;
-  typedef boost::shared_ptr<DuplexChannel> DuplexChannelPtr;
-
-  class ChannelHandler {
-  public:
-    virtual void messageReceived(const DuplexChannelPtr& channel, const PubSubResponsePtr& m) = 0;
-    virtual void channelConnected(const DuplexChannelPtr& channel) = 0;
-
-    virtual void channelDisconnected(const DuplexChannelPtr& channel, const std::exception& e) = 0;
-    virtual void exceptionOccurred(const DuplexChannelPtr& channel, const std::exception& e) = 0;
-
-    virtual ~ChannelHandler() {}
-  };
-
-  typedef boost::shared_ptr<ChannelHandler> ChannelHandlerPtr;
-
-
-  class DuplexChannel : public boost::enable_shared_from_this<DuplexChannel> {
-  public:
-    DuplexChannel(EventDispatcher& dispatcher, const HostAddress& addr, 
-		  const Configuration& cfg, const ChannelHandlerPtr& handler);
-    static void connectCallbackHandler(DuplexChannelPtr channel, 
-				       const boost::system::error_code& error);
-    void connect();
-
-    static void writeCallbackHandler(DuplexChannelPtr channel, OperationCallbackPtr callback, 
-				     const boost::system::error_code& error, 
-				     std::size_t bytes_transferred);
-    void writeRequest(const PubSubRequestPtr& m, const OperationCallbackPtr& callback);
-    
-    const HostAddress& getHostAddress() const;
-
-    void storeTransaction(const PubSubDataPtr& data);
-    PubSubDataPtr retrieveTransaction(long txnid);
-    void failAllTransactions();
-
-    static void sizeReadCallbackHandler(DuplexChannelPtr channel, 
-					const boost::system::error_code& error, 
-					std::size_t bytes_transferred);
-    static void messageReadCallbackHandler(DuplexChannelPtr channel, std::size_t messagesize, 
-					   const boost::system::error_code& error, 
-					   std::size_t bytes_transferred);
-    static void readSize(DuplexChannelPtr channel);
-
-    void startReceiving();
-    bool isReceiving();
-    void stopReceiving();
-    
-    void startSending();
-
-    void channelDisconnected(const std::exception& e);
-    virtual void kill();
-
-    virtual ~DuplexChannel();
-  private:
-    enum State { UNINITIALISED, CONNECTING, CONNECTED, DEAD };
-
-    void setState(State s);
-
-    EventDispatcher& dispatcher;
-
-    HostAddress address;
-    ChannelHandlerPtr handler;
-
-    boost::asio::ip::tcp::socket socket;
-    boost::asio::streambuf in_buf;
-    std::istream instream;
-    
-    // only exists because protobuf doesn't play nicely with streams (if the buffer holds more than one message length, it tries to read it all)
-    char* copy_buf;
-    std::size_t copy_buf_length;
-
-    boost::asio::streambuf out_buf;
-    
-    typedef std::pair<PubSubRequestPtr, OperationCallbackPtr> WriteRequest;
-    boost::mutex write_lock;
-    std::deque<WriteRequest> write_queue;
-
-    State state;
-    boost::shared_mutex state_lock;
-
-    bool receiving;
-    boost::mutex receiving_lock;
-    
-    bool sending;
-    boost::mutex sending_lock;
-
-    typedef std::tr1::unordered_map<long, PubSubDataPtr> TransactionMap;
-
-    TransactionMap txnid2data;
-    boost::mutex txnid2data_lock;
-    boost::shared_mutex destruction_lock;
-  };
-  
-
-  struct DuplexChannelPtrHash : public std::unary_function<DuplexChannelPtr, size_t> {
-    size_t operator()(const Hedwig::DuplexChannelPtr& channel) const {
-      return reinterpret_cast<size_t>(channel.get());
-    }
-  };
-};
-#endif
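
ChannelHandler is the only extension point a DuplexChannel exposes: one virtual per channel event. A minimal handler that just logs the response, assuming the removed headers are still on the include path (the header path below matches this tree):

    #include <iostream>
    #include "channel.h"   // Hedwig::DuplexChannel, Hedwig::ChannelHandler from this removed tree

    class LoggingHandler : public Hedwig::ChannelHandler {
    public:
      virtual void messageReceived(const Hedwig::DuplexChannelPtr& channel,
                                   const Hedwig::PubSubResponsePtr& m) {
        std::cout << "response for txnid " << m->txnid()
                  << " status " << m->statuscode() << std::endl;
      }
      virtual void channelConnected(const Hedwig::DuplexChannelPtr& channel) {}
      virtual void channelDisconnected(const Hedwig::DuplexChannelPtr& channel,
                                       const std::exception& e) {}
      virtual void exceptionOccurred(const Hedwig::DuplexChannelPtr& channel,
                                     const std::exception& e) {}
    };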

+ 0 - 57
src/contrib/hedwig/client/src/main/cpp/lib/client.cpp

@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <hedwig/client.h>
-#include <memory>
-
-#include "clientimpl.h"
-#include <log4cxx/logger.h>
-
-static log4cxx::LoggerPtr logger(log4cxx::Logger::getLogger("hedwig."__FILE__));
-
-using namespace Hedwig;
-
-const std::string Configuration::DEFAULT_SERVER = "hedwig.cpp.default_server";
-const std::string Configuration::MESSAGE_CONSUME_RETRY_WAIT_TIME = "hedwig.cpp.message_consume_retry_wait_time";
-const std::string Configuration::SUBSCRIBER_CONSUME_RETRY_WAIT_TIME = "hedwig.cpp.subscriber_consume_retry_wait_time";
-const std::string Configuration::MAX_MESSAGE_QUEUE_SIZE = "hedwig.cpp.max_msgqueue_size";
-const std::string Configuration::RECONNECT_SUBSCRIBE_RETRY_WAIT_TIME = "hedwig.cpp.reconnect_subscribe_retry_wait_time";
-const std::string Configuration::SYNC_REQUEST_TIMEOUT = "hedwig.cpp.sync_request_timeout";
-const std::string Configuration::SUBSCRIBER_AUTOCONSUME = "hedwig.cpp.subscriber_autoconsume";
-
-Client::Client(const Configuration& conf) {
-  LOG4CXX_DEBUG(logger, "Client::Client (" << this << ")");
-
-  clientimpl = ClientImpl::Create( conf );
-}
-
-Subscriber& Client::getSubscriber() {
-  return clientimpl->getSubscriber();
-}
-
-Publisher& Client::getPublisher() {
-  return clientimpl->getPublisher();
-}
-
-Client::~Client() {
-  LOG4CXX_DEBUG(logger, "Client::~Client (" << this << ")");
-
-  clientimpl->Destroy();
-}
-
-
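
client.cpp is the thin public facade: the constructor creates the ClientImpl and starts its dispatcher thread, and the destructor tears both down again. A minimal usage sketch; how the Configuration instance is built is not shown in the removed sources, so it is taken as given here:

    #include <hedwig/client.h>

    void publishOnce(const Hedwig::Configuration& conf) {
      Hedwig::Client client(conf);                         // starts the event dispatcher
      client.getPublisher().publish("my-topic", "hello");  // blocks until acked or the sync timeout
    }                                                      // ~Client() kills all open channels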

+ 0 - 376
src/contrib/hedwig/client/src/main/cpp/lib/clientimpl.cpp

@@ -1,376 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "clientimpl.h"
-#include "channel.h"
-#include "publisherimpl.h"
-#include "subscriberimpl.h"
-#include <log4cxx/logger.h>
-
-static log4cxx::LoggerPtr logger(log4cxx::Logger::getLogger("hedwig."__FILE__));
-
-using namespace Hedwig;
-
-const std::string DEFAULT_SERVER_DEFAULT_VAL = "";
-
-void SyncOperationCallback::wait() {
-  boost::unique_lock<boost::mutex> lock(mut);
-  while(response==PENDING) {
-    if (cond.timed_wait(lock, boost::posix_time::milliseconds(timeout)) == false) {
-      LOG4CXX_ERROR(logger, "Timeout waiting for operation to complete " << this);
-
-      response = TIMEOUT;
-    }
-  }
-}
-
-void SyncOperationCallback::operationComplete() {
-  if (response == TIMEOUT) {
-    LOG4CXX_ERROR(logger, "operationCompleted successfully after timeout " << this);
-    return;
-  }
-
-  {
-    boost::lock_guard<boost::mutex> lock(mut);
-    response = SUCCESS;
-  }
-  cond.notify_all();
-}
-
-void SyncOperationCallback::operationFailed(const std::exception& exception) {
-  if (response == TIMEOUT) {
-    LOG4CXX_ERROR(logger, "operationCompleted unsuccessfully after timeout " << this);
-    return;
-  }
-
-  {
-    boost::lock_guard<boost::mutex> lock(mut);
-    
-    if (typeid(exception) == typeid(ChannelConnectException)) {
-      response = NOCONNECT;
-    } else if (typeid(exception) == typeid(ServiceDownException)) {
-      response = SERVICEDOWN;
-    } else if (typeid(exception) == typeid(AlreadySubscribedException)) {
-      response = ALREADY_SUBSCRIBED;
-    } else if (typeid(exception) == typeid(NotSubscribedException)) {
-      response = NOT_SUBSCRIBED;
-    } else {
-      response = UNKNOWN;
-    }
-  }
-  cond.notify_all();
-}
-
-void SyncOperationCallback::throwExceptionIfNeeded() {
-  switch (response) {
-  case SUCCESS:
-    break;
-  case NOCONNECT:
-    throw CannotConnectException();
-    break;
-  case SERVICEDOWN:
-    throw ServiceDownException();
-    break;
-  case ALREADY_SUBSCRIBED:
-    throw AlreadySubscribedException();
-    break;
-  case NOT_SUBSCRIBED:
-    throw NotSubscribedException();
-    break;
-  case TIMEOUT:
-    throw ClientTimeoutException();
-    break;
-  default:
-    throw ClientException();
-    break;
-  }
-}
-
-HedwigClientChannelHandler::HedwigClientChannelHandler(const ClientImplPtr& client) 
-  : client(client){
-}
-
-void HedwigClientChannelHandler::messageReceived(const DuplexChannelPtr& channel, const PubSubResponsePtr& m) {
-  LOG4CXX_DEBUG(logger, "Message received txnid(" << m->txnid() << ") status(" 
-		<< m->statuscode() << ")");
-  if (m->has_message()) {
-    LOG4CXX_ERROR(logger, "Subscription response, ignore for now");
-    return;
-  }
-  
-  PubSubDataPtr data = channel->retrieveTransaction(m->txnid()); 
-  /* you now have ownership of data, don't leave this function without deleting it or
-     palming it off to someone else */
-
-  if (data == NULL) {
-    return;
-  }
-
-  if (m->statuscode() == NOT_RESPONSIBLE_FOR_TOPIC) {
-    client->redirectRequest(channel, data, m);
-    return;
-  }
-
-  switch (data->getType()) {
-  case PUBLISH:
-    client->getPublisherImpl().messageHandler(m, data);
-    break;
-  case SUBSCRIBE:
-  case UNSUBSCRIBE:
-    client->getSubscriberImpl().messageHandler(m, data);
-    break;
-  default:
-    LOG4CXX_ERROR(logger, "Unimplemented request type " << data->getType());
-    break;
-  }
-}
-
-
-void HedwigClientChannelHandler::channelConnected(const DuplexChannelPtr& channel) {
-  // do nothing 
-}
-
-void HedwigClientChannelHandler::channelDisconnected(const DuplexChannelPtr& channel, const std::exception& e) {
-  LOG4CXX_ERROR(logger, "Channel disconnected");
-
-  client->channelDied(channel);
-}
-
-void HedwigClientChannelHandler::exceptionOccurred(const DuplexChannelPtr& channel, const std::exception& e) {
-  LOG4CXX_ERROR(logger, "Exception occurred" << e.what());
-}
-
-ClientTxnCounter::ClientTxnCounter() : counter(0) 
-{
-}
-
-ClientTxnCounter::~ClientTxnCounter() {
-}
-
-/**
-Increment the transaction counter and return the new value.
-
-@returns the next transaction id
-*/
-long ClientTxnCounter::next() {  // would be nice to remove lock from here, look more into it
-  boost::lock_guard<boost::mutex> lock(mutex);
-
-  long next= ++counter; 
-
-  return next;
-}
-
-ClientImplPtr ClientImpl::Create(const Configuration& conf) {
-  ClientImplPtr impl(new ClientImpl(conf));
-  LOG4CXX_DEBUG(logger, "Creating Clientimpl " << impl);
-
-  impl->dispatcher.start();
-
-  return impl;
-}
-
-void ClientImpl::Destroy() {
-  LOG4CXX_DEBUG(logger, "destroying Clientimpl " << this);
-
-  dispatcher.stop();
-  {
-    boost::lock_guard<boost::shared_mutex> lock(allchannels_lock);
-    
-    shuttingDownFlag = true;
-    for (ChannelMap::iterator iter = allchannels.begin(); iter != allchannels.end(); ++iter ) {
-      (*iter)->kill();
-    }  
-    allchannels.clear();
-  }
-
-  /* destruction of the maps will clean up any items they hold */
-  
-  if (subscriber != NULL) {
-    delete subscriber;
-    subscriber = NULL;
-  }
-  if (publisher != NULL) {
-    delete publisher;
-    publisher = NULL;
-  }
-}
-
-ClientImpl::ClientImpl(const Configuration& conf) 
-  : conf(conf), publisher(NULL), subscriber(NULL), counterobj(), shuttingDownFlag(false)
-{
-}
-
-Subscriber& ClientImpl::getSubscriber() {
-  return getSubscriberImpl();
-}
-
-Publisher& ClientImpl::getPublisher() {
-  return getPublisherImpl();
-}
-    
-SubscriberImpl& ClientImpl::getSubscriberImpl() {
-  if (subscriber == NULL) {
-    boost::lock_guard<boost::mutex> lock(subscribercreate_lock);
-    if (subscriber == NULL) {
-      subscriber = new SubscriberImpl(shared_from_this());
-    }
-  }
-  return *subscriber;
-}
-
-PublisherImpl& ClientImpl::getPublisherImpl() {
-  if (publisher == NULL) { 
-    boost::lock_guard<boost::mutex> lock(publishercreate_lock);
-    if (publisher == NULL) {
-      publisher = new PublisherImpl(shared_from_this());
-    }
-  }
-  return *publisher;
-}
-
-ClientTxnCounter& ClientImpl::counter() {
-  return counterobj;
-}
-
-void ClientImpl::redirectRequest(const DuplexChannelPtr& channel, PubSubDataPtr& data, const PubSubResponsePtr& response) {
-  HostAddress oldhost = channel->getHostAddress();
-  data->addTriedServer(oldhost);
-  
-  HostAddress h = HostAddress::fromString(response->statusmsg());
-  if (data->hasTriedServer(h)) {
-    LOG4CXX_ERROR(logger, "We've been told to try request [" << data->getTxnId() << "] with [" 
-		  << h.getAddressString()<< "] by " << oldhost.getAddressString() 
-		  << " but we've already tried that. Failing operation");
-    data->getCallback()->operationFailed(InvalidRedirectException());
-    return;
-  }
-  LOG4CXX_DEBUG(logger, "We've been told  [" << data->getTopic() << "] is on [" << h.getAddressString() 
-		<< "] by [" << oldhost.getAddressString() << "]. Redirecting request " << data->getTxnId());
-  data->setShouldClaim(true);
-
-  setHostForTopic(data->getTopic(), h);
-  DuplexChannelPtr newchannel;
-  try {
-    if (data->getType() == SUBSCRIBE) {
-      SubscriberClientChannelHandlerPtr handler(new SubscriberClientChannelHandler(shared_from_this(), 
-										   this->getSubscriberImpl(), data));
-      newchannel = createChannel(data->getTopic(), handler);
-      handler->setChannel(newchannel);
-      getSubscriberImpl().doSubscribe(newchannel, data, handler);
-    } else if (data->getType() == PUBLISH) {
-      newchannel = getChannel(data->getTopic());
-      getPublisherImpl().doPublish(newchannel, data);
-    } else {
-      newchannel = getChannel(data->getTopic());
-      getSubscriberImpl().doUnsubscribe(newchannel, data);
-    }
-  } catch (ShuttingDownException& e) {
-    return; // no point in redirecting if we're shutting down
-  }
-}
-
-ClientImpl::~ClientImpl() {
-  LOG4CXX_DEBUG(logger, "deleting Clientimpl " << this);
-}
-
-DuplexChannelPtr ClientImpl::createChannel(const std::string& topic, const ChannelHandlerPtr& handler) {
-  // get the host address
-  // create a channel to the host
-  HostAddress addr = topic2host[topic];
-  if (addr.isNullHost()) {
-    addr = HostAddress::fromString(conf.get(Configuration::DEFAULT_SERVER, DEFAULT_SERVER_DEFAULT_VAL));
-    setHostForTopic(topic, addr);
-  }
-
-  DuplexChannelPtr channel(new DuplexChannel(dispatcher, addr, conf, handler));
-
-  boost::lock_guard<boost::shared_mutex> lock(allchannels_lock);
-  if (shuttingDownFlag) {
-    channel->kill();
-    throw ShuttingDownException();
-  }
-  channel->connect();
-
-  allchannels.insert(channel);
-  LOG4CXX_DEBUG(logger, "(create) All channels size: " << allchannels.size());
-
-  return channel;
-}
-
-DuplexChannelPtr ClientImpl::getChannel(const std::string& topic) {
-  HostAddress addr = topic2host[topic];
-  if (addr.isNullHost()) {
-    addr = HostAddress::fromString(conf.get(Configuration::DEFAULT_SERVER, DEFAULT_SERVER_DEFAULT_VAL));
-    setHostForTopic(topic, addr);
-  }  
-  DuplexChannelPtr channel = host2channel[addr];
-
-  if (channel.get() == 0) {
-    LOG4CXX_DEBUG(logger, " No channel for topic, creating new channel.get() " << channel.get() << " addr " << addr.getAddressString());
-    ChannelHandlerPtr handler(new HedwigClientChannelHandler(shared_from_this()));
-    channel = createChannel(topic, handler);
-
-    boost::lock_guard<boost::shared_mutex> lock(host2channel_lock);
-    host2channel[addr] = channel;
-  } 
-
-  return channel;
-}
-
-void ClientImpl::setHostForTopic(const std::string& topic, const HostAddress& host) {
-  boost::lock_guard<boost::shared_mutex> lock(topic2host_lock);
-  topic2host[topic] = host;
-}
-
-bool ClientImpl::shuttingDown() const {
-  return shuttingDownFlag;
-}
-
-/**
-   A channel has just died. Remove it so we never give it to any other publisher or subscriber.
-   
-   This does not delete the channel. Some publishers or subscribers may still hold it; they will
-   receive an error when they try to do anything with it.
-*/
-void ClientImpl::channelDied(const DuplexChannelPtr& channel) {
-  if (shuttingDownFlag) {
-    return;
-  }
-
-  boost::lock_guard<boost::shared_mutex> h2tlock(host2topics_lock);
-  boost::lock_guard<boost::shared_mutex> h2clock(host2channel_lock);
-  boost::lock_guard<boost::shared_mutex> t2hlock(topic2host_lock);
-  boost::lock_guard<boost::shared_mutex> aclock(allchannels_lock);
-  // get host
-  HostAddress addr = channel->getHostAddress();
-  
-  for (Host2TopicsMap::iterator iter = host2topics.find(addr); iter != host2topics.end(); ++iter) {
-    topic2host.erase((*iter).second);
-  }
-  host2topics.erase(addr);
-  host2channel.erase(addr);
-
-  allchannels.erase(channel); // channel should be deleted here
-}
-
-const Configuration& ClientImpl::getConfiguration() {
-  return conf;
-}
-
-boost::asio::io_service& ClientImpl::getService() {
-  return dispatcher.getService();
-}
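
getChannel() above resolves a topic in two steps: topic2host (falling back to the configured default server) and then host2channel (creating and caching a DuplexChannel on a miss). The same two-level cache, reduced to plain strings so it stands on its own (all names illustrative, locking omitted for brevity):

    #include <memory>
    #include <string>
    #include <unordered_map>

    struct Channel { std::string host; };
    typedef std::shared_ptr<Channel> ChannelPtr;

    class ChannelCache {
    public:
      explicit ChannelCache(const std::string& defaultHost) : defaultHost_(defaultHost) {}

      ChannelPtr channelForTopic(const std::string& topic) {
        std::string& host = topic2host_[topic];
        if (host.empty()) host = defaultHost_;   // same fallback as the removed code
        ChannelPtr& ch = host2channel_[host];
        if (!ch) {                               // miss: create (and, in the real code, connect)
          ch.reset(new Channel());
          ch->host = host;
        }
        return ch;
      }
    private:
      std::string defaultHost_;
      std::unordered_map<std::string, std::string> topic2host_;
      std::unordered_map<std::string, ChannelPtr> host2channel_;
    };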

+ 0 - 150
src/contrib/hedwig/client/src/main/cpp/lib/clientimpl.h

@@ -1,150 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef HEDWIG_CLIENT_IMPL_H
-#define HEDWIG_CLIENT_IMPL_H
-
-#include <hedwig/client.h>
-#include <hedwig/protocol.h>
-
-#include <boost/asio.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/enable_shared_from_this.hpp>
-#include <boost/thread/mutex.hpp>
-#include <boost/thread/condition_variable.hpp>
-
-#include <tr1/unordered_map>
-#include <list>
-
-#include "util.h"
-#include "channel.h"
-#include "data.h"
-#include "eventdispatcher.h"
-
-namespace Hedwig {
-  const int DEFAULT_SYNC_REQUEST_TIMEOUT = 5000;
-
-  class SyncOperationCallback : public OperationCallback {
-  public:
-  SyncOperationCallback(int timeout) : response(PENDING), timeout(timeout) {}
-    virtual void operationComplete();
-    virtual void operationFailed(const std::exception& exception);
-    
-    void wait();
-    void throwExceptionIfNeeded();
-    
-  private:
-    enum { 
-      PENDING, 
-      SUCCESS,
-      NOCONNECT,
-      SERVICEDOWN,
-      NOT_SUBSCRIBED,
-      ALREADY_SUBSCRIBED,
-      TIMEOUT,
-      UNKNOWN
-    } response;
-    
-    boost::condition_variable cond;
-    boost::mutex mut;
-    int timeout;
-  };
-
-  class HedwigClientChannelHandler : public ChannelHandler {
-  public:
-    HedwigClientChannelHandler(const ClientImplPtr& client);
-    
-    virtual void messageReceived(const DuplexChannelPtr& channel, const PubSubResponsePtr& m);
-    virtual void channelConnected(const DuplexChannelPtr& channel);
-    virtual void channelDisconnected(const DuplexChannelPtr& channel, const std::exception& e);
-    virtual void exceptionOccurred(const DuplexChannelPtr& channel, const std::exception& e);
-    
-  protected:
-    const ClientImplPtr client;
-  };
-  
-  class PublisherImpl;
-  class SubscriberImpl;
-  
-  /**
-     Implementation of the hedwig client. This class takes care of globals such as the topic->host map and the transaction id counter.
-  */
-  class ClientImpl : public boost::enable_shared_from_this<ClientImpl> {
-  public:
-    static ClientImplPtr Create(const Configuration& conf);
-    void Destroy();
-
-    Subscriber& getSubscriber();
-    Publisher& getPublisher();
-
-    ClientTxnCounter& counter();
-
-    void redirectRequest(const DuplexChannelPtr& channel, PubSubDataPtr& data, const PubSubResponsePtr& response);
-
-    const HostAddress& getHostForTopic(const std::string& topic);
-
-    //DuplexChannelPtr getChannelForTopic(const std::string& topic, OperationCallback& callback);
-    //DuplexChannelPtr createChannelForTopic(const std::string& topic, ChannelHandlerPtr& handler, OperationCallback& callback);
-    DuplexChannelPtr createChannel(const std::string& topic, const ChannelHandlerPtr& handler);    
-    DuplexChannelPtr getChannel(const std::string& topic);
-
-    void setHostForTopic(const std::string& topic, const HostAddress& host);
-
-    void setChannelForHost(const HostAddress& address, const DuplexChannelPtr& channel);
-    void channelDied(const DuplexChannelPtr& channel);
-    bool shuttingDown() const;
-    
-    SubscriberImpl& getSubscriberImpl();
-    PublisherImpl& getPublisherImpl();
-
-    const Configuration& getConfiguration();
-    boost::asio::io_service& getService();
-
-    ~ClientImpl();
-  private:
-    ClientImpl(const Configuration& conf);
-
-    const Configuration& conf;
-
-    boost::mutex publishercreate_lock;
-    PublisherImpl* publisher;
-
-    boost::mutex subscribercreate_lock;
-    SubscriberImpl* subscriber;
-
-    ClientTxnCounter counterobj;
-
-    EventDispatcher dispatcher;
-    
-    typedef std::tr1::unordered_multimap<HostAddress, std::string, HostAddressHash > Host2TopicsMap;
-    Host2TopicsMap host2topics;
-    boost::shared_mutex host2topics_lock;
-
-    std::tr1::unordered_map<HostAddress, DuplexChannelPtr, HostAddressHash > host2channel;
-    boost::shared_mutex host2channel_lock;
-    std::tr1::unordered_map<std::string, HostAddress> topic2host;
-    boost::shared_mutex topic2host_lock;
-
-    typedef std::tr1::unordered_set<DuplexChannelPtr, DuplexChannelPtrHash > ChannelMap;
-    ChannelMap allchannels;
-    boost::shared_mutex allchannels_lock;
-
-    bool shuttingDownFlag;
-  };
-};
-#endif
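
SyncOperationCallback is how the blocking publish/subscribe/unsubscribe calls are layered over the async ones: the caller blocks on a condition variable until the callback fires or the configured timeout passes, then the recorded status is turned into an exception. The same wait-with-timeout pattern in standard C++ (the removed code uses boost threads and a richer status enum):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <stdexcept>

    class SyncWaiter {
    public:
      explicit SyncWaiter(int timeoutMs) : state_(PENDING), timeoutMs_(timeoutMs) {}

      void complete() { set(SUCCESS); }   // called from the async callback
      void fail()     { set(FAILED); }

      void waitAndThrow() {               // called by the blocking wrapper
        std::unique_lock<std::mutex> lock(mu_);
        bool done = cond_.wait_for(lock, std::chrono::milliseconds(timeoutMs_),
                                   [this] { return state_ != PENDING; });
        if (!done) throw std::runtime_error("timed out");        // ClientTimeoutException upstream
        if (state_ == FAILED) throw std::runtime_error("operation failed");
      }
    private:
      enum State { PENDING, SUCCESS, FAILED };
      void set(State s) {
        { std::lock_guard<std::mutex> lock(mu_); state_ = s; }
        cond_.notify_all();
      }
      State state_;
      std::mutex mu_;
      std::condition_variable cond_;
      int timeoutMs_;
    };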

+ 0 - 166
src/contrib/hedwig/client/src/main/cpp/lib/data.cpp

@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <hedwig/protocol.h>
-#include "data.h"
-
-#include <log4cxx/logger.h>
-
-static log4cxx::LoggerPtr logger(log4cxx::Logger::getLogger("hedwig."__FILE__));
-
-using namespace Hedwig;
-
-PubSubDataPtr PubSubData::forPublishRequest(long txnid, const std::string& topic, const std::string& body, const OperationCallbackPtr& callback) {
-  PubSubDataPtr ptr(new PubSubData());
-  ptr->type = PUBLISH;
-  ptr->txnid = txnid;
-  ptr->topic = topic;
-  ptr->body = body;
-  ptr->callback = callback;
-  return ptr;
-}
-
-PubSubDataPtr PubSubData::forSubscribeRequest(long txnid, const std::string& subscriberid, const std::string& topic, const OperationCallbackPtr& callback, SubscribeRequest::CreateOrAttach mode) {
-  PubSubDataPtr ptr(new PubSubData());
-  ptr->type = SUBSCRIBE;
-  ptr->txnid = txnid;
-  ptr->subscriberid = subscriberid;
-  ptr->topic = topic;
-  ptr->callback = callback;
-  ptr->mode = mode;
-  return ptr;  
-}
-
-PubSubDataPtr PubSubData::forUnsubscribeRequest(long txnid, const std::string& subscriberid, const std::string& topic, const OperationCallbackPtr& callback) {
-  PubSubDataPtr ptr(new PubSubData());
-  ptr->type = UNSUBSCRIBE;
-  ptr->txnid = txnid;
-  ptr->subscriberid = subscriberid;
-  ptr->topic = topic;
-  ptr->callback = callback;
-  return ptr;  
-}
-
-PubSubDataPtr PubSubData::forConsumeRequest(long txnid, const std::string& subscriberid, const std::string& topic, const MessageSeqId msgid) {
-  PubSubDataPtr ptr(new PubSubData());
-  ptr->type = CONSUME;
-  ptr->txnid = txnid;
-  ptr->subscriberid = subscriberid;
-  ptr->topic = topic;
-  ptr->msgid = msgid;
-  return ptr;  
-}
-
-PubSubData::PubSubData() : shouldClaim(false) {  
-}
-
-PubSubData::~PubSubData() {
-}
-
-OperationType PubSubData::getType() const {
-  return type;
-}
-
-long PubSubData::getTxnId() const {
-  return txnid;
-}
-
-const std::string& PubSubData::getTopic() const {
-  return topic;
-}
-
-const std::string& PubSubData::getBody() const {
-  return body;
-}
-
-const MessageSeqId PubSubData::getMessageSeqId() const {
-  return msgid;
-}
-
-const PubSubRequestPtr PubSubData::getRequest() {
-  PubSubRequestPtr request(new Hedwig::PubSubRequest());
-  request->set_protocolversion(Hedwig::VERSION_ONE);
-  request->set_type(type);
-  request->set_txnid(txnid);
-  if (shouldClaim) {
-    request->set_shouldclaim(shouldClaim);
-  }
-  request->set_topic(topic);
-    
-  if (type == PUBLISH) {
-    LOG4CXX_DEBUG(logger, "Creating publish request");
-
-    Hedwig::PublishRequest* pubreq = request->mutable_publishrequest();
-    Hedwig::Message* msg = pubreq->mutable_msg();
-    msg->set_body(body);
-  } else if (type == SUBSCRIBE) {
-    LOG4CXX_DEBUG(logger, "Creating subscribe request");
-
-    Hedwig::SubscribeRequest* subreq = request->mutable_subscriberequest();
-    subreq->set_subscriberid(subscriberid);
-    subreq->set_createorattach(mode);
-  } else if (type == CONSUME) {
-    LOG4CXX_DEBUG(logger, "Creating consume request");
-
-    Hedwig::ConsumeRequest* conreq = request->mutable_consumerequest();
-    conreq->set_subscriberid(subscriberid);
-    conreq->mutable_msgid()->CopyFrom(msgid);
-  } else if (type == UNSUBSCRIBE) {
-    LOG4CXX_DEBUG(logger, "Creating unsubscribe request");
-    
-    Hedwig::UnsubscribeRequest* unsubreq = request->mutable_unsubscriberequest();
-    unsubreq->set_subscriberid(subscriberid);    
-  } else {
-    LOG4CXX_ERROR(logger, "Tried to create a request message for the wrong type [" << type << "]");
-    throw UnknownRequestException();
-  }
-
-  return request;
-}
-
-void PubSubData::setShouldClaim(bool shouldClaim) {
-  this->shouldClaim = shouldClaim;
-}
-
-void PubSubData::addTriedServer(HostAddress& h) {
-  triedservers.insert(h);
-}
-
-bool PubSubData::hasTriedServer(HostAddress& h) {
-  return triedservers.count(h) > 0;
-}
-
-void PubSubData::clearTriedServers() {
-  triedservers.clear();
-}
-
-OperationCallbackPtr& PubSubData::getCallback() {
-  return callback;
-}
-
-void PubSubData::setCallback(const OperationCallbackPtr& callback) {
-  this->callback = callback;
-}
-
-const std::string& PubSubData::getSubscriberId() const {
-  return subscriberid;
-}
-
-SubscribeRequest::CreateOrAttach PubSubData::getMode() const {
-  return mode;
-}
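
PubSubData is a small request builder: the static for*Request factories capture the arguments, and getRequest() assembles the wire-level PubSubRequest protobuf when the channel is ready to write it. A short usage sketch against the header listed next (data.h), assuming the removed tree is still buildable:

    #include "data.h"   // PubSubData, ClientTxnCounter from this removed tree

    Hedwig::PubSubRequestPtr buildPublish(Hedwig::ClientTxnCounter& txns,
                                          const Hedwig::OperationCallbackPtr& cb) {
      Hedwig::PubSubDataPtr data =
          Hedwig::PubSubData::forPublishRequest(txns.next(), "my-topic", "payload", cb);
      return data->getRequest();   // PUBLISH request with txnid, topic and message body set
    }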

+ 0 - 99
src/contrib/hedwig/client/src/main/cpp/lib/data.h

@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef DATA_H
-#define DATA_H
-
-#include <hedwig/protocol.h>
-#include <hedwig/callback.h>
-
-#include <pthread.h>
-#include <tr1/unordered_set>
-#include "util.h"
-#include <boost/shared_ptr.hpp>
-#include <boost/thread/mutex.hpp>
-
-namespace Hedwig {
-  /**
-     Simple counter for transaction ids from the client
-  */
-  class ClientTxnCounter {
-  public:
-    ClientTxnCounter();
-    ~ClientTxnCounter();
-    long next();
-    
-  private:
-    long counter;
-    boost::mutex mutex;
-  };
-
-  class PubSubData;
-  typedef boost::shared_ptr<PubSubData> PubSubDataPtr;
-  typedef boost::shared_ptr<PubSubRequest> PubSubRequestPtr;
-  typedef boost::shared_ptr<PubSubResponse> PubSubResponsePtr;
-
-  /**
-     Data structure to hold information about requests and build request messages.
-     Used to store requests which may need to be resent to another server. 
-   */
-  class PubSubData {
-  public:
-    // to be used for publish
-    static PubSubDataPtr forPublishRequest(long txnid, const std::string& topic, const std::string& body, const OperationCallbackPtr& callback);
-    static PubSubDataPtr forSubscribeRequest(long txnid, const std::string& subscriberid, const std::string& topic, const OperationCallbackPtr& callback, SubscribeRequest::CreateOrAttach mode);
-    static PubSubDataPtr forUnsubscribeRequest(long txnid, const std::string& subscriberid, const std::string& topic, const OperationCallbackPtr& callback);
-    static PubSubDataPtr forConsumeRequest(long txnid, const std::string& subscriberid, const std::string& topic, const MessageSeqId msgid);
-
-    ~PubSubData();
-
-    OperationType getType() const;
-    long getTxnId() const;
-    const std::string& getSubscriberId() const;
-    const std::string& getTopic() const;
-    const std::string& getBody() const;
-    const MessageSeqId getMessageSeqId() const;
-
-    void setShouldClaim(bool shouldClaim);
-
-    const PubSubRequestPtr getRequest();
-    void setCallback(const OperationCallbackPtr& callback);
-    OperationCallbackPtr& getCallback();
-    SubscribeRequest::CreateOrAttach getMode() const;
-
-    void addTriedServer(HostAddress& h);
-    bool hasTriedServer(HostAddress& h);
-    void clearTriedServers();
-  private:
-
-    PubSubData();
-    
-    OperationType type;
-    long txnid;
-    std::string subscriberid;
-    std::string topic;
-    std::string body;
-    bool shouldClaim;
-    OperationCallbackPtr callback;
-    SubscribeRequest::CreateOrAttach mode;
-    MessageSeqId msgid;
-    std::tr1::unordered_set<HostAddress, HostAddressHash > triedservers;
-  };
-  
-};
-#endif

+ 0 - 72
src/contrib/hedwig/client/src/main/cpp/lib/eventdispatcher.cpp

@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "eventdispatcher.h"
-
-#include <log4cxx/logger.h>
-
-static log4cxx::LoggerPtr logger(log4cxx::Logger::getLogger("hedwig."__FILE__));
-
-using namespace Hedwig;
-
-EventDispatcher::EventDispatcher() : service(), dummy_work(NULL), t(NULL) {
-}
-
-void EventDispatcher::run_forever() {
-  LOG4CXX_DEBUG(logger, "Starting event dispatcher");
-
-  while (true) {
-    try {
-      service.run();
-      break;
-    } catch (std::exception &e) {
-    LOG4CXX_ERROR(logger, "Exception in dispatch handler. " << e.what());
-    }
-  }
-  LOG4CXX_DEBUG(logger, "Event dispatcher done");
-}
-
-void EventDispatcher::start() {
-  if (t) {
-    return;
-  }
-  dummy_work = new boost::asio::io_service::work(service);
-  t = new boost::thread(boost::bind(&EventDispatcher::run_forever, this));
-}
-
-void EventDispatcher::stop() {
-  if (!t) {
-    return;
-  }
-  delete dummy_work;
-  dummy_work = NULL;
-  
-  service.stop();
-  
-  t->join();
-  delete t;
-  t = NULL;
-}
-
-EventDispatcher::~EventDispatcher() {
-  delete dummy_work;
-}
-
-boost::asio::io_service& EventDispatcher::getService() {
-  return service;
-}
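
EventDispatcher is the usual single-threaded boost::asio setup: one io_service, a dummy work object so run() does not return while the queue is empty, and a background thread that calls run(). A condensed sketch of the same lifecycle (pre-1.66 asio API, as used above):

    #include <boost/asio.hpp>
    #include <boost/thread.hpp>
    #include <memory>

    class Dispatcher {
    public:
      void start() {
        if (thread_) return;
        work_.reset(new boost::asio::io_service::work(service_));  // keeps run() from returning
        // the removed code additionally catches and logs exceptions thrown out of run()
        thread_.reset(new boost::thread([this]() { service_.run(); }));
      }
      void stop() {
        if (!thread_) return;
        work_.reset();          // allow run() to drain and return
        service_.stop();
        thread_->join();
        thread_.reset();
      }
      boost::asio::io_service& service() { return service_; }
    private:
      boost::asio::io_service service_;
      std::unique_ptr<boost::asio::io_service::work> work_;
      std::unique_ptr<boost::thread> thread_;
    };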

+ 0 - 44
src/contrib/hedwig/client/src/main/cpp/lib/eventdispatcher.h

@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef EVENTDISPATCHER_H
-#define EVENTDISPATCHER_H
-
-#include <boost/asio.hpp>
-#include <boost/thread.hpp>
-
-namespace Hedwig {
-  class EventDispatcher {
-  public:  
-    EventDispatcher();
-    ~EventDispatcher();
-    
-    void start();
-    void stop();
-    
-    boost::asio::io_service& getService();
-    
-  private:
-    void run_forever();
-
-    boost::asio::io_service service;
-    boost::asio::io_service::work* dummy_work;
-    boost::thread* t;
-  };
-}
-
-#endif

+ 0 - 27
src/contrib/hedwig/client/src/main/cpp/lib/exceptions.cpp

@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <hedwig/exceptions.h>
-#include <stdlib.h>
-#include <string.h>
-
-using namespace Hedwig;
-
-
-
-  

+ 0 - 83
src/contrib/hedwig/client/src/main/cpp/lib/publisherimpl.cpp

@@ -1,83 +0,0 @@
- /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "publisherimpl.h"
-#include "channel.h"
-
-#include <log4cxx/logger.h>
-
-static log4cxx::LoggerPtr logger(log4cxx::Logger::getLogger("hedwig."__FILE__));
-
-using namespace Hedwig;
-
-PublishWriteCallback::PublishWriteCallback(const ClientImplPtr& client, const PubSubDataPtr& data) : client(client), data(data) {}
-
-void PublishWriteCallback::operationComplete() {
-  LOG4CXX_DEBUG(logger, "Successfully wrote transaction: " << data->getTxnId());
-}
-
-void PublishWriteCallback::operationFailed(const std::exception& exception) {
-  LOG4CXX_ERROR(logger, "Error writing to publisher " << exception.what());
-  
-  data->getCallback()->operationFailed(exception);
-}
-
-PublisherImpl::PublisherImpl(const ClientImplPtr& client) 
-  : client(client) {
-}
-
-void PublisherImpl::publish(const std::string& topic, const std::string& message) {
-  SyncOperationCallback* cb = new SyncOperationCallback(client->getConfiguration().getInt(Configuration::SYNC_REQUEST_TIMEOUT, 
-											  DEFAULT_SYNC_REQUEST_TIMEOUT));
-  OperationCallbackPtr callback(cb);
-  asyncPublish(topic, message, callback);
-  cb->wait();
-  
-  cb->throwExceptionIfNeeded();  
-}
-
-void PublisherImpl::asyncPublish(const std::string& topic, const std::string& message, const OperationCallbackPtr& callback) {
-  // use release after callback to release the channel after the callback is called
-  PubSubDataPtr data = PubSubData::forPublishRequest(client->counter().next(), topic, message, callback);
-  
-  DuplexChannelPtr channel = client->getChannel(topic);
-  doPublish(channel, data);
-}
-
-void PublisherImpl::doPublish(const DuplexChannelPtr& channel, const PubSubDataPtr& data) {
-  channel->storeTransaction(data);
-  
-  OperationCallbackPtr writecb(new PublishWriteCallback(client, data));
-  channel->writeRequest(data->getRequest(), writecb);
-}
-
-void PublisherImpl::messageHandler(const PubSubResponsePtr& m, const PubSubDataPtr& txn) {
-  switch (m->statuscode()) {
-  case SUCCESS:
-    txn->getCallback()->operationComplete();
-    break;
-  case SERVICE_DOWN:
-    LOG4CXX_ERROR(logger, "Server responsed with SERVICE_DOWN for " << txn->getTxnId());
-    txn->getCallback()->operationFailed(ServiceDownException());
-    break;
-  default:
-    LOG4CXX_ERROR(logger, "Unexpected response " << m->statuscode() << " for " << txn->getTxnId());
-    txn->getCallback()->operationFailed(UnexpectedResponseException());
-    break;
-  }
-}
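
publish() above is just asyncPublish() plus a SyncOperationCallback; callers that want the non-blocking path supply their own OperationCallback. A minimal caller-side sketch, with the callback interface inferred from its uses in these sources (the real declaration lives in hedwig/callback.h, not shown here):

    #include <hedwig/client.h>
    #include <hedwig/callback.h>
    #include <iostream>

    // operationComplete/operationFailed signatures inferred from the callbacks above
    class PrintingCallback : public Hedwig::OperationCallback {
    public:
      virtual void operationComplete() {
        std::cout << "publish acked" << std::endl;
      }
      virtual void operationFailed(const std::exception& e) {
        std::cout << "publish failed: " << e.what() << std::endl;
      }
    };

    void publishAsync(Hedwig::Client& client) {
      Hedwig::OperationCallbackPtr cb(new PrintingCallback());
      client.getPublisher().asyncPublish("my-topic", "payload", cb);  // returns immediately
    }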

+ 0 - 54
src/contrib/hedwig/client/src/main/cpp/lib/publisherimpl.h

@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef PUBLISHER_IMPL_H
-#define PUBLISHER_IMPL_H
-
-#include <hedwig/publish.h>
-#include <hedwig/callback.h>
-#include "clientimpl.h"
-
-namespace Hedwig {
-  class PublishWriteCallback : public OperationCallback {
-  public:
-    PublishWriteCallback(const ClientImplPtr& client, const PubSubDataPtr& data);
-
-    void operationComplete();
-    void operationFailed(const std::exception& exception);
-  private:
-    ClientImplPtr client;
-    PubSubDataPtr data;
-  };
-
-  class PublisherImpl : public Publisher {
-  public:
-    PublisherImpl(const ClientImplPtr& client);
-
-    void publish(const std::string& topic, const std::string& message);
-    void asyncPublish(const std::string& topic, const std::string& message, const OperationCallbackPtr& callback);
-    
-    void messageHandler(const PubSubResponsePtr& m, const PubSubDataPtr& txn);
-
-    void doPublish(const DuplexChannelPtr& channel, const PubSubDataPtr& data);
-
-  private:
-    ClientImplPtr client;
-  };
-
-};
-
-#endif

+ 0 - 434
src/contrib/hedwig/client/src/main/cpp/lib/subscriberimpl.cpp

@@ -1,434 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "subscriberimpl.h"
-#include "util.h"
-#include "channel.h"
-
-#include <boost/asio.hpp>
-#include <boost/date_time/posix_time/posix_time.hpp>
-
-#include <log4cxx/logger.h>
-
-static log4cxx::LoggerPtr logger(log4cxx::Logger::getLogger("hedwig."__FILE__));
-
-using namespace Hedwig;
-const int DEFAULT_MESSAGE_CONSUME_RETRY_WAIT_TIME = 5000;
-const int DEFAULT_SUBSCRIBER_CONSUME_RETRY_WAIT_TIME = 5000;
-const int DEFAULT_MAX_MESSAGE_QUEUE_SIZE = 10;
-const int DEFAULT_RECONNECT_SUBSCRIBE_RETRY_WAIT_TIME = 5000;
-const bool DEFAULT_SUBSCRIBER_AUTOCONSUME = true;
-
-SubscriberWriteCallback::SubscriberWriteCallback(const ClientImplPtr& client, const PubSubDataPtr& data) : client(client), data(data) {}
-
-void SubscriberWriteCallback::operationComplete() {
-  LOG4CXX_DEBUG(logger, "Successfully wrote subscribe transaction: " << data->getTxnId());
-}
-
-void SubscriberWriteCallback::operationFailed(const std::exception& exception) {
-  LOG4CXX_ERROR(logger, "Error writing to subscriber " << exception.what());
-  
-  //remove txn from channel pending list
-  data->getCallback()->operationFailed(exception);
-  client->getSubscriberImpl().closeSubscription(data->getTopic(), data->getSubscriberId());
-}
-
-UnsubscribeWriteCallback::UnsubscribeWriteCallback(const ClientImplPtr& client, const PubSubDataPtr& data) : client(client), data(data) {}
-
-void UnsubscribeWriteCallback::operationComplete() {
-  LOG4CXX_DEBUG(logger, "Successfully wrote unsubscribe transaction: " << data->getTxnId());
-}
-
-void UnsubscribeWriteCallback::operationFailed(const std::exception& exception) {
-  data->getCallback()->operationFailed(exception);
-}
-  
-ConsumeWriteCallback::ConsumeWriteCallback(const ClientImplPtr& client, const PubSubDataPtr& data) 
-  : client(client), data(data) {
-}
-
-ConsumeWriteCallback::~ConsumeWriteCallback() {
-}
-
-/* static */ void ConsumeWriteCallback::timerComplete(const ClientImplPtr& client, const PubSubDataPtr& data,
-						      const boost::system::error_code& error) {
-  if (error) {
-    // shutting down
-    return;
-  }
-
-  client->getSubscriberImpl().consume(data->getTopic(), data->getSubscriberId(), data->getMessageSeqId());
-}
-
-
-void ConsumeWriteCallback::operationComplete() {
-  LOG4CXX_DEBUG(logger, "Successfully wrote consume transaction: " << data->getTxnId());
-}
-
-void ConsumeWriteCallback::operationFailed(const std::exception& exception) {
-  int retrywait = client->getConfiguration().getInt(Configuration::MESSAGE_CONSUME_RETRY_WAIT_TIME, 
-						    DEFAULT_MESSAGE_CONSUME_RETRY_WAIT_TIME);
-  LOG4CXX_ERROR(logger, "Error writing consume transaction: " << data->getTxnId() << " error: " << exception.what() 
-		<< " retrying in " << retrywait << " milliseconds");
-
-  boost::asio::deadline_timer t(client->getService(), boost::posix_time::milliseconds(retrywait));
-
-  t.async_wait(boost::bind(&ConsumeWriteCallback::timerComplete, client, data, boost::asio::placeholders::error));  
-}
-
-SubscriberConsumeCallback::SubscriberConsumeCallback(const ClientImplPtr& client, 
-						     const SubscriberClientChannelHandlerPtr& handler, 
-						     const PubSubDataPtr& data, const PubSubResponsePtr& m) 
-  : client(client), handler(handler), data(data), m(m)
-{
-}
-
-void SubscriberConsumeCallback::operationComplete() {
-  LOG4CXX_DEBUG(logger, "ConsumeCallback::operationComplete " << data->getTopic() << " - " << data->getSubscriberId());
-
-  if (client->getConfiguration().getBool(Configuration::SUBSCRIBER_AUTOCONSUME, DEFAULT_SUBSCRIBER_AUTOCONSUME)) {
-    client->getSubscriber().consume(data->getTopic(), data->getSubscriberId(), m->message().msgid());
-  }
-}
-
-/* static */ void SubscriberConsumeCallback::timerComplete(const SubscriberClientChannelHandlerPtr handler, 
-							   const PubSubResponsePtr m, 
-							   const boost::system::error_code& error) {
-  if (error) {
-    return;
-  }
-  handler->messageReceived(handler->getChannel(), m);
-}
-
-void SubscriberConsumeCallback::operationFailed(const std::exception& exception) {
-  LOG4CXX_ERROR(logger, "ConsumeCallback::operationFailed  " << data->getTopic() << " - " << data->getSubscriberId());
-  
-  int retrywait = client->getConfiguration().getInt(Configuration::SUBSCRIBER_CONSUME_RETRY_WAIT_TIME,
-						    DEFAULT_SUBSCRIBER_CONSUME_RETRY_WAIT_TIME);
-
-  LOG4CXX_ERROR(logger, "Error passing message to client transaction: " << data->getTxnId() << " error: " << exception.what() 
-		<< " retrying in " << retrywait << " milliseconds");
-
-  boost::asio::deadline_timer t(client->getService(), boost::posix_time::milliseconds(retrywait));
-
-  t.async_wait(boost::bind(&SubscriberConsumeCallback::timerComplete, handler, m, boost::asio::placeholders::error));  
-}
-
-SubscriberReconnectCallback::SubscriberReconnectCallback(const ClientImplPtr& client, const PubSubDataPtr& origData) 
-  : client(client), origData(origData) {
-}
-
-void SubscriberReconnectCallback::operationComplete() {
-}
-
-void SubscriberReconnectCallback::operationFailed(const std::exception& exception) {
-  LOG4CXX_ERROR(logger, "Error writing to new subscriber. Channel should pick this up disconnect the channel and try to connect again " << exception.what());
-
-}
-
-SubscriberClientChannelHandler::SubscriberClientChannelHandler(const ClientImplPtr& client, SubscriberImpl& subscriber, const PubSubDataPtr& data)
-  : HedwigClientChannelHandler(client), subscriber(subscriber), origData(data), closed(false), should_wait(true)  {
-  LOG4CXX_DEBUG(logger, "Creating SubscriberClientChannelHandler " << this);
-}
-
-SubscriberClientChannelHandler::~SubscriberClientChannelHandler() {
-  LOG4CXX_DEBUG(logger, "Cleaning up SubscriberClientChannelHandler " << this);
-}
-
-void SubscriberClientChannelHandler::messageReceived(const DuplexChannelPtr& channel, const PubSubResponsePtr& m) {
-  if (m->has_message()) {
-    LOG4CXX_DEBUG(logger, "Message received (topic:" << origData->getTopic() << ", subscriberId:" << origData->getSubscriberId() << ")");
-
-    if (this->handler.get()) {
-      OperationCallbackPtr callback(new SubscriberConsumeCallback(client, shared_from_this(), origData, m));
-      this->handler->consume(origData->getTopic(), origData->getSubscriberId(), m->message(), callback);
-    } else {
-      queue.push_back(m);
-      if (queue.size() >= (std::size_t)client->getConfiguration().getInt(Configuration::MAX_MESSAGE_QUEUE_SIZE,
-									 DEFAULT_MAX_MESSAGE_QUEUE_SIZE)) {
-	channel->stopReceiving();
-      }
-    }
-  } else {
-    HedwigClientChannelHandler::messageReceived(channel, m);
-  }
-}
-
-void SubscriberClientChannelHandler::close() {
-  closed = true;
-
-  if (channel) {
-    channel->kill();
-  }
-}
-
-/*static*/ void SubscriberClientChannelHandler::reconnectTimerComplete(const SubscriberClientChannelHandlerPtr handler,
-								       const DuplexChannelPtr channel, const std::exception e, 
-								       const boost::system::error_code& error) {
-  if (error) {
-    return;
-  }
-  handler->should_wait = false;
-  handler->channelDisconnected(channel, e);
-}
-
-void SubscriberClientChannelHandler::channelDisconnected(const DuplexChannelPtr& channel, const std::exception& e) {
-  // has subscription been closed
-  if (closed) {
-    return;
-  }
-
-  // Clean up the channel from all maps
-  client->channelDied(channel);
-  if (client->shuttingDown()) {
-    return;
-  }
-
-  if (should_wait) {
-    int retrywait = client->getConfiguration().getInt(Configuration::RECONNECT_SUBSCRIBE_RETRY_WAIT_TIME,
-						      DEFAULT_RECONNECT_SUBSCRIBE_RETRY_WAIT_TIME);
-    
-    boost::asio::deadline_timer t(client->getService(), boost::posix_time::milliseconds(retrywait));
-    t.async_wait(boost::bind(&SubscriberClientChannelHandler::reconnectTimerComplete, shared_from_this(), 
-			     channel, e, boost::asio::placeholders::error));  
-    return;
-  }
-  should_wait = true;
-
-  // setup pubsub data for reconnection attempt
-  origData->clearTriedServers();
-  OperationCallbackPtr newcallback(new SubscriberReconnectCallback(client, origData));
-  origData->setCallback(newcallback);
-
-  // Create a new handler for the new channel
-  SubscriberClientChannelHandlerPtr newhandler(new SubscriberClientChannelHandler(client, subscriber, origData));  
-  ChannelHandlerPtr baseptr = newhandler;
-  
-  DuplexChannelPtr newchannel = client->createChannel(origData->getTopic(), baseptr);
-  newhandler->setChannel(newchannel);
-  handoverDelivery(newhandler);
-  
-  // remove record of the failed channel from the subscriber
-  client->getSubscriberImpl().closeSubscription(origData->getTopic(), origData->getSubscriberId());
-  
-  // resubscribe on the new channel
-  client->getSubscriberImpl().doSubscribe(newchannel, origData, newhandler);
-}
-
-void SubscriberClientChannelHandler::startDelivery(const MessageHandlerCallbackPtr& handler) {
-  this->handler = handler;
-  
-  while (!queue.empty()) {    
-    PubSubResponsePtr m = queue.front();
-    queue.pop_front();
-
-    OperationCallbackPtr callback(new SubscriberConsumeCallback(client, shared_from_this(), origData, m));
-
-    this->handler->consume(origData->getTopic(), origData->getSubscriberId(), m->message(), callback);
-  }
-  channel->startReceiving();
-}
-
-void SubscriberClientChannelHandler::stopDelivery() {
-  channel->stopReceiving();
-
-  this->handler = MessageHandlerCallbackPtr();
-}
-
-
-void SubscriberClientChannelHandler::handoverDelivery(const SubscriberClientChannelHandlerPtr& newHandler) {
-  LOG4CXX_DEBUG(logger, "Messages in queue " << queue.size());
-  MessageHandlerCallbackPtr handler = this->handler;
-  stopDelivery(); // resets old handler
-  newHandler->startDelivery(handler);
-}
-
-void SubscriberClientChannelHandler::setChannel(const DuplexChannelPtr& channel) {
-  this->channel = channel;
-}
-
-DuplexChannelPtr& SubscriberClientChannelHandler::getChannel() {
-  return channel;
-}
-
-SubscriberImpl::SubscriberImpl(const ClientImplPtr& client) 
-  : client(client) 
-{
-}
-
-SubscriberImpl::~SubscriberImpl() 
-{
-  LOG4CXX_DEBUG(logger, "deleting subscriber" << this);
-}
-
-
-void SubscriberImpl::subscribe(const std::string& topic, const std::string& subscriberId, const SubscribeRequest::CreateOrAttach mode) {
-  SyncOperationCallback* cb = new SyncOperationCallback(client->getConfiguration().getInt(Configuration::SYNC_REQUEST_TIMEOUT, 
-											  DEFAULT_SYNC_REQUEST_TIMEOUT));
-  OperationCallbackPtr callback(cb);
-  asyncSubscribe(topic, subscriberId, mode, callback);
-  cb->wait();
-  
-  cb->throwExceptionIfNeeded();  
-}
-
-void SubscriberImpl::asyncSubscribe(const std::string& topic, const std::string& subscriberId, const SubscribeRequest::CreateOrAttach mode, const OperationCallbackPtr& callback) {
-  PubSubDataPtr data = PubSubData::forSubscribeRequest(client->counter().next(), subscriberId, topic, callback, mode);
-
-  SubscriberClientChannelHandlerPtr handler(new SubscriberClientChannelHandler(client, *this, data));
-  ChannelHandlerPtr baseptr = handler;
-
-  DuplexChannelPtr channel = client->createChannel(topic, handler);
-  handler->setChannel(channel);
-  doSubscribe(channel, data, handler);
-}
-
-void SubscriberImpl::doSubscribe(const DuplexChannelPtr& channel, const PubSubDataPtr& data, const SubscriberClientChannelHandlerPtr& handler) {
-  channel->storeTransaction(data);
-
-  OperationCallbackPtr writecb(new SubscriberWriteCallback(client, data));
-  channel->writeRequest(data->getRequest(), writecb);
-
-  boost::lock_guard<boost::shared_mutex> lock(topicsubscriber2handler_lock);
-  TopicSubscriber t(data->getTopic(), data->getSubscriberId());
-  SubscriberClientChannelHandlerPtr oldhandler = topicsubscriber2handler[t];
-  if (oldhandler != NULL) {
-    oldhandler->handoverDelivery(handler);
-  }
-  topicsubscriber2handler[t] = handler;
-  
-  LOG4CXX_DEBUG(logger, "Set topic subscriber for topic(" << data->getTopic() << ") subscriberId(" << data->getSubscriberId() << ") to " << handler.get() << " topicsubscriber2topic(" << &topicsubscriber2handler << ")");
-}
-
-void SubscriberImpl::unsubscribe(const std::string& topic, const std::string& subscriberId) {
-  SyncOperationCallback* cb = new SyncOperationCallback(client->getConfiguration().getInt(Configuration::SYNC_REQUEST_TIMEOUT, 
-											  DEFAULT_SYNC_REQUEST_TIMEOUT));
-  OperationCallbackPtr callback(cb);
-  asyncUnsubscribe(topic, subscriberId, callback);
-  cb->wait();
-  
-  cb->throwExceptionIfNeeded();
-}
-
-void SubscriberImpl::asyncUnsubscribe(const std::string& topic, const std::string& subscriberId, const OperationCallbackPtr& callback) {
-  closeSubscription(topic, subscriberId);
-
-  PubSubDataPtr data = PubSubData::forUnsubscribeRequest(client->counter().next(), subscriberId, topic, callback);
-  
-  DuplexChannelPtr channel = client->getChannel(topic);
-  doUnsubscribe(channel, data);
-}
-
-void SubscriberImpl::doUnsubscribe(const DuplexChannelPtr& channel, const PubSubDataPtr& data) {
-  channel->storeTransaction(data);
-  OperationCallbackPtr writecb(new UnsubscribeWriteCallback(client, data));
-  channel->writeRequest(data->getRequest(), writecb);
-}
-
-void SubscriberImpl::consume(const std::string& topic, const std::string& subscriberId, const MessageSeqId& messageSeqId) {
-  TopicSubscriber t(topic, subscriberId);
-  
-  boost::shared_lock<boost::shared_mutex> lock(topicsubscriber2handler_lock);
-  SubscriberClientChannelHandlerPtr handler = topicsubscriber2handler[t];
-
-  if (handler.get() == 0) {
-    LOG4CXX_ERROR(logger, "Cannot consume. Bad handler for topic(" << topic << ") subscriberId(" << subscriberId << ") topicsubscriber2topic(" << &topicsubscriber2handler << ")");
-    return;
-  }
-
-  DuplexChannelPtr channel = handler->getChannel();
-  if (channel.get() == 0) {
-    LOG4CXX_ERROR(logger, "Trying to consume a message on a topic/subscriber pair that don't have a channel. Something fishy going on. Topic: " << topic << " SubscriberId: " << subscriberId << " MessageSeqId: " << messageSeqId.localcomponent());
-  }
-  
-  PubSubDataPtr data = PubSubData::forConsumeRequest(client->counter().next(), subscriberId, topic, messageSeqId);  
-  OperationCallbackPtr writecb(new ConsumeWriteCallback(client, data));
-  channel->writeRequest(data->getRequest(), writecb);
-}
-
-void SubscriberImpl::startDelivery(const std::string& topic, const std::string& subscriberId, const MessageHandlerCallbackPtr& callback) {
-  TopicSubscriber t(topic, subscriberId);
-
-  boost::shared_lock<boost::shared_mutex> lock(topicsubscriber2handler_lock);
-  SubscriberClientChannelHandlerPtr handler = topicsubscriber2handler[t];
-
-  if (handler.get() == 0) {
-    LOG4CXX_ERROR(logger, "Trying to start deliver on a non existant handler topic = " << topic << ", subscriber = " << subscriberId);
-  }
-  handler->startDelivery(callback);
-}
-
-void SubscriberImpl::stopDelivery(const std::string& topic, const std::string& subscriberId) {
-  TopicSubscriber t(topic, subscriberId);
-  
-  boost::shared_lock<boost::shared_mutex> lock(topicsubscriber2handler_lock);
-  SubscriberClientChannelHandlerPtr handler = topicsubscriber2handler[t];
-
-  if (handler.get() == 0) {
-    LOG4CXX_ERROR(logger, "Trying to start deliver on a non existant handler topic = " << topic << ", subscriber = " << subscriberId);
-  }
-  handler->stopDelivery();
-}
-
-void SubscriberImpl::closeSubscription(const std::string& topic, const std::string& subscriberId) {
-  LOG4CXX_DEBUG(logger, "closeSubscription (" << topic << ",  " << subscriberId << ")");
-
-  TopicSubscriber t(topic, subscriberId);
-
-  SubscriberClientChannelHandlerPtr handler;
-  {
-    boost::lock_guard<boost::shared_mutex> lock(topicsubscriber2handler_lock);
-    handler = topicsubscriber2handler[t];
-    topicsubscriber2handler.erase(t);
-  }
-  
-  if (handler.get() != 0) {
-    handler->close();
-  }
-}
-
-/**
-   takes ownership of txn
-*/
-void SubscriberImpl::messageHandler(const PubSubResponsePtr& m, const PubSubDataPtr& txn) {
-  if (!txn.get()) {
-    LOG4CXX_ERROR(logger, "Invalid transaction");
-    return;
-  }
-
-  LOG4CXX_DEBUG(logger, "message received with status " << m->statuscode());
-
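-  // Map the status code returned by the server for this transaction onto the
-  // client-side callback: SUCCESS completes it, anything else fails it with
-  // the corresponding exception.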
-  switch (m->statuscode()) {
-  case SUCCESS:
-    txn->getCallback()->operationComplete();
-    break;
-  case SERVICE_DOWN:
-    txn->getCallback()->operationFailed(ServiceDownException());
-    break;
-  case CLIENT_ALREADY_SUBSCRIBED:
-  case TOPIC_BUSY:
-    txn->getCallback()->operationFailed(AlreadySubscribedException());
-    break;
-  case CLIENT_NOT_SUBSCRIBED:
-    txn->getCallback()->operationFailed(NotSubscribedException());
-    break;
-  default:
-    txn->getCallback()->operationFailed(UnexpectedResponseException());
-    break;
-  }
-}

+ 0 - 166
src/contrib/hedwig/client/src/main/cpp/lib/subscriberimpl.h

@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef SUBSCRIBE_IMPL_H
-#define SUBSCRIBE_IMPL_H
-
-#include <hedwig/subscribe.h>
-#include <hedwig/callback.h>
-#include "clientimpl.h"
-#include <utility>
-#include <tr1/memory>
-#include <tr1/unordered_map>
-#include <deque>
-
-#include <boost/shared_ptr.hpp>
-#include <boost/enable_shared_from_this.hpp>
-#include <boost/thread/shared_mutex.hpp>
-
-namespace Hedwig {
-  class SubscriberWriteCallback : public OperationCallback {
-  public:
-    SubscriberWriteCallback(const ClientImplPtr& client, const PubSubDataPtr& data);
-
-    void operationComplete();
-    void operationFailed(const std::exception& exception);
-  private:
-    const ClientImplPtr client;
-    const PubSubDataPtr data;
-  };
-  
-  class UnsubscribeWriteCallback : public OperationCallback {
-  public:
-    UnsubscribeWriteCallback(const ClientImplPtr& client, const PubSubDataPtr& data);
-
-    void operationComplete();
-    void operationFailed(const std::exception& exception);
-  private:
-    const ClientImplPtr client;
-    const PubSubDataPtr data;
-  };
-
-  class ConsumeWriteCallback : public OperationCallback {
-  public:
-    ConsumeWriteCallback(const ClientImplPtr& client, const PubSubDataPtr& data);
-    ~ConsumeWriteCallback();
-
-    void operationComplete();
-    void operationFailed(const std::exception& exception);
-    
-    static void timerComplete(const ClientImplPtr& client, const PubSubDataPtr& data, const boost::system::error_code& error);
-  private:
-    const ClientImplPtr client;
-    const PubSubDataPtr data;
-  };
-
-  class SubscriberReconnectCallback : public OperationCallback {
-  public: 
-    SubscriberReconnectCallback(const ClientImplPtr& client, const PubSubDataPtr& origData);
-
-    void operationComplete();
-    void operationFailed(const std::exception& exception);
-  private:
-    const ClientImplPtr client;
-    const PubSubDataPtr origData;
-  };
-
-  class SubscriberClientChannelHandler;
-  typedef boost::shared_ptr<SubscriberClientChannelHandler> SubscriberClientChannelHandlerPtr;
-
-  class SubscriberConsumeCallback : public OperationCallback {
-  public: 
-    SubscriberConsumeCallback(const ClientImplPtr& client, const SubscriberClientChannelHandlerPtr& handler, const PubSubDataPtr& data, const PubSubResponsePtr& m);
-
-    void operationComplete();
-    void operationFailed(const std::exception& exception);
-    static void timerComplete(const SubscriberClientChannelHandlerPtr handler, 
-			      const PubSubResponsePtr m, 
-			      const boost::system::error_code& error);
-
-  private:
-    const ClientImplPtr client;
-    const SubscriberClientChannelHandlerPtr handler;
-    
-    const PubSubDataPtr data;
-    const PubSubResponsePtr m;
-  };
-
-  class SubscriberClientChannelHandler : public HedwigClientChannelHandler, 
-					 public boost::enable_shared_from_this<SubscriberClientChannelHandler> {
-  public: 
-    SubscriberClientChannelHandler(const ClientImplPtr& client, SubscriberImpl& subscriber, const PubSubDataPtr& data);
-    ~SubscriberClientChannelHandler();
-
-    void messageReceived(const DuplexChannelPtr& channel, const PubSubResponsePtr& m);
-    void channelDisconnected(const DuplexChannelPtr& channel, const std::exception& e);
-
-    void startDelivery(const MessageHandlerCallbackPtr& handler);
-    void stopDelivery();
-
-    void handoverDelivery(const SubscriberClientChannelHandlerPtr& newHandler);
-
-    void setChannel(const DuplexChannelPtr& channel);
-    DuplexChannelPtr& getChannel();
-
-    static void reconnectTimerComplete(const SubscriberClientChannelHandlerPtr handler, const DuplexChannelPtr channel, const std::exception e, 
-				       const boost::system::error_code& error);
-
-    void close();
-  private:
-
-    SubscriberImpl& subscriber;
-    std::deque<PubSubResponsePtr> queue;
-    
-    MessageHandlerCallbackPtr handler;
-    PubSubDataPtr origData;
-    DuplexChannelPtr channel;
-    bool closed;
-    bool should_wait;
-  };
-
-  class SubscriberImpl : public Subscriber {
-  public:
-    SubscriberImpl(const ClientImplPtr& client);
-    ~SubscriberImpl();
-
-    void subscribe(const std::string& topic, const std::string& subscriberId, const SubscribeRequest::CreateOrAttach mode);
-    void asyncSubscribe(const std::string& topic, const std::string& subscriberId, const SubscribeRequest::CreateOrAttach mode, const OperationCallbackPtr& callback);
-    
-    void unsubscribe(const std::string& topic, const std::string& subscriberId);
-    void asyncUnsubscribe(const std::string& topic, const std::string& subscriberId, const OperationCallbackPtr& callback);
-
-    void consume(const std::string& topic, const std::string& subscriberId, const MessageSeqId& messageSeqId);
-
-    void startDelivery(const std::string& topic, const std::string& subscriberId, const MessageHandlerCallbackPtr& callback);
-    void stopDelivery(const std::string& topic, const std::string& subscriberId);
-
-    void closeSubscription(const std::string& topic, const std::string& subscriberId);
-
-    void messageHandler(const PubSubResponsePtr& m, const PubSubDataPtr& txn);
-
-    void doSubscribe(const DuplexChannelPtr& channel, const PubSubDataPtr& data, const SubscriberClientChannelHandlerPtr& handler);
-    void doUnsubscribe(const DuplexChannelPtr& channel, const PubSubDataPtr& data);
-
-  private:
-    const ClientImplPtr client;
-    
-    std::tr1::unordered_map<TopicSubscriber, SubscriberClientChannelHandlerPtr, TopicSubscriberHash > topicsubscriber2handler;
-    boost::shared_mutex topicsubscriber2handler_lock;	    
-  };
-
-};
-
-#endif

+ 0 - 141
src/contrib/hedwig/client/src/main/cpp/lib/util.cpp

@@ -1,141 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <string>
-
-#include <netdb.h>
-#include <errno.h>
-#include "util.h"
-#include "channel.h"
-#include <log4cxx/logger.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-
-static log4cxx::LoggerPtr logger(log4cxx::Logger::getLogger("hedwig."__FILE__));
-
-using namespace Hedwig;
-
-#define MAX_HOSTNAME_LENGTH 256
-const std::string UNINITIALISED_HOST("UNINITIALISED HOST");
-
-const int DEFAULT_PORT = 4080;
-const int DEFAULT_SSL_PORT = 9876;
-
-HostAddress::HostAddress() : initialised(false), address_str() {
-  memset(&socket_addr, 0, sizeof(struct sockaddr_in));
-}
-
-HostAddress::~HostAddress() {
-}
-
-bool HostAddress::isNullHost() const {
-  return !initialised;
-}
-
-bool HostAddress::operator==(const HostAddress& other) const {
-  return (other.ip() == ip() && other.port() == port());
-}
-
-const std::string& HostAddress::getAddressString() const {
-  if (!isNullHost()) {
-    return address_str;
-  } else {
-    return UNINITIALISED_HOST;
-  }
-}
-   
-uint32_t HostAddress::ip() const {
-  return ntohl(socket_addr.sin_addr.s_addr);
-}
-
-uint16_t HostAddress::port() const {
-  return ntohs(socket_addr.sin_port);
-}
-
-const struct sockaddr_in& HostAddress::socketAddress() const {
-  return socket_addr;
-}
-
-
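-/**
-   Parse an address string of the form "host[:port[:ssl_port]]". Ports that are
-   not given fall back to DEFAULT_PORT and DEFAULT_SSL_PORT, and the host part
-   is resolved with getaddrinfo.
-*/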
-void HostAddress::parse_string() {
-  char* url = strdup(address_str.c_str());
-
-  if (url == NULL) {
-    LOG4CXX_ERROR(logger, "You seems to be out of memory");
-    throw OomException();
-  }
-  int port = DEFAULT_PORT;
-  int sslport = DEFAULT_SSL_PORT;
-
-  char *colon = strchr(url, ':');
-  if (colon) {
-    *colon = 0;
-    colon++;
-    
-    char* sslcolon = strchr(colon, ':');
-    if (sslcolon) {
-      *sslcolon = 0;
-      sslcolon++;
-      
-      sslport = strtol(sslcolon, NULL, 10);
-      if (sslport == 0) {
-        LOG4CXX_ERROR(logger, "Invalid SSL port given: [" << sslcolon << "]");
-	free((void*)url);
-	throw InvalidPortException();
-      }
-    }
-    
-    port = strtol(colon, NULL, 10);
-    if (port == 0) {
-      LOG4CXX_ERROR(logger, "Invalid port given: [" << colon << "]");
-      free((void*)url);
-      throw InvalidPortException();
-    }
-  }
-
-  int err = 0;
-  
-  struct addrinfo *addr;
-  struct addrinfo hints;
-
-  memset(&hints, 0, sizeof(struct addrinfo));
-  hints.ai_family = AF_INET;
-
-  err = getaddrinfo(url, NULL, &hints, &addr);
-  if (err != 0) {
-    LOG4CXX_ERROR(logger, "Couldn't resolve host [" << url << "]:" << hstrerror(err));
-    free((void*)url);
-    throw HostResolutionException();
-  }
-
-  sockaddr_in* sa_ptr = (sockaddr_in*)addr->ai_addr;
-  socket_addr = *sa_ptr;
-  socket_addr.sin_port = htons(port); 
-  //socket_addr.sin_family = AF_INET;
-
-  free((void*)url);
-  freeaddrinfo(addr);
-}
-
-HostAddress HostAddress::fromString(std::string str) {
-  HostAddress h;
-  h.address_str = str;
-  h.parse_string();
-  h.initialised = true;
-  return h;
-}
-

+ 0 - 86
src/contrib/hedwig/client/src/main/cpp/lib/util.h

@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef HEDWIG_UTIL_H
-#define HEDWIG_UTIL_H
-
-#include <sys/socket.h>
-#include <netinet/in.h>
-#include <hedwig/exceptions.h>
-#include <hedwig/callback.h>
-#include <list>
-#include <iostream>
-#include <utility>
-#include <tr1/functional>
-#include <semaphore.h>
-#include <pthread.h>
-
-namespace Hedwig {
-  typedef std::pair<const std::string, const std::string> TopicSubscriber;
-
-  /**
-     Representation of a hosts address
-  */
-  class HostAddress {
-  public:
-    HostAddress();
-    ~HostAddress();
-
-    bool operator==(const HostAddress& other) const;
-    
-    bool isNullHost() const;
-    const std::string& getAddressString() const;
-    uint32_t ip() const;
-    uint16_t port() const;
-    const sockaddr_in& socketAddress() const;
-
-    static HostAddress fromString(std::string host);
-
-  private:
-
-    void parse_string();
-    
-    bool initialised;
-    std::string address_str;
-    struct sockaddr_in socket_addr;
-  };
-
-  /**
-     Hash a host address. Takes the least significant 16 bits of the address and the 16 bits of the
-     port and packs them into one 32-bit number. While collisions are theoretically possible, they
-     shouldn't happen in practice as the hedwig servers should be in the same subnet.
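-     For example, an address whose low 16 bits are 0x0203, listening on port
-     4080 (0x0ff0), hashes to (0x0203 << 16) | 0x0ff0 = 0x02030ff0.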
-  */
-  struct HostAddressHash : public std::unary_function<Hedwig::HostAddress, size_t> {
-    size_t operator()(const Hedwig::HostAddress& address) const {
-        return (address.ip() << 16) | (address.port());
-    }
-  };
-
-
-  /**
-     Hash a topic/subscriber pair by hashing the concatenation of the topic and subscriber id.
-  */
-  struct TopicSubscriberHash : public std::unary_function<Hedwig::TopicSubscriber, size_t> {
-    size_t operator()(const Hedwig::TopicSubscriber& topicsub) const {
-      std::string fullstr = topicsub.first + topicsub.second;
-      return std::tr1::hash<std::string>()(fullstr);
-    }
-  };
-};
-
-#endif

+ 0 - 49
src/contrib/hedwig/client/src/main/cpp/log4cpp.conf

@@ -1,49 +0,0 @@
-#
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# 
-#
-
-log4j.appender.rootAppender=org.apache.log4j.ConsoleAppender
-log4j.appender.rootAppender.layout=org.apache.log4j.BasicLayout
-
-#log4j.appender.hedwig=org.apache.log4j.RollingFileAppender
-log4j.appender.hedwig=org.apache.log4j.ConsoleAppender
-#log4j.appender.hedwig.fileName=./testLog.log
-log4j.appender.hedwig.layout=org.apache.log4j.PatternLayout
-log4j.appender.hedwig.layout.ConversionPattern=[%d{%H:%M:%S.%l}] %t %c %p - %m%n
-log4j.appender.hedwig.layout=org.apache.log4j.PatternLayout
-log4j.appender.hedwig.layout.ConversionPattern=%.5m%n
-
-log4j.appender.hedwigtest=org.apache.log4j.ConsoleAppender
-#log4j.appender.hedwig.fileName=./testLog.log
-log4j.appender.hedwigtest.layout=org.apache.log4j.PatternLayout
-log4j.appender.hedwigtest.layout.ConversionPattern=[%d{%H:%M:%S.%l}] %c %p - %m%n
-log4j.appender.hedwigtest.layout=org.apache.log4j.PatternLayout
-log4j.appender.hedwigtest.layout.ConversionPattern=%.5m%n
-
-# category
-log4j.category.hedwig=DEBUG, hedwig
-log4j.rootCategory=DEBUG
-
-log4j.category.hedwig.channel=ERROR
-log4j.category.hedwig.util=ERROR
-log4j.category.hedwigtest.servercontrol=ERROR
-
-log4j.category.hedwigtest=DEBUG, hedwigtest
-log4j.rootCategory=DEBUG

+ 0 - 111
src/contrib/hedwig/client/src/main/cpp/m4/ax_boost_asio.m4

@@ -1,111 +0,0 @@
-# ===========================================================================
-#       http://www.gnu.org/software/autoconf-archive/ax_boost_asio.html
-# ===========================================================================
-#
-# SYNOPSIS
-#
-#   AX_BOOST_ASIO
-#
-# DESCRIPTION
-#
-#   Test for Asio library from the Boost C++ libraries. The macro requires a
-#   preceding call to AX_BOOST_BASE. Further documentation is available at
-#   <http://randspringer.de/boost/index.html>.
-#
-#   This macro calls:
-#
-#     AC_SUBST(BOOST_ASIO_LIB)
-#
-#   And sets:
-#
-#     HAVE_BOOST_ASIO
-#
-# LICENSE
-#
-#   Copyright (c) 2008 Thomas Porschberg <thomas@randspringer.de>
-#   Copyright (c) 2008 Pete Greenwell <pete@mu.org>
-#
-#   Copying and distribution of this file, with or without modification, are
-#   permitted in any medium without royalty provided the copyright notice
-#   and this notice are preserved. This file is offered as-is, without any
-#   warranty.
-
-#serial 9
-
-AC_DEFUN([AX_BOOST_ASIO],
-[
-	AC_ARG_WITH([boost-asio],
-	AS_HELP_STRING([--with-boost-asio@<:@=special-lib@:>@],
-                   [use the ASIO library from boost - it is possible to specify a certain library for the linker
-                        e.g. --with-boost-asio=boost_system-gcc41-mt-1_34 ]),
-        [
-        if test "$withval" = "no"; then
-			want_boost="no"
-        elif test "$withval" = "yes"; then
-            want_boost="yes"
-            ax_boost_user_asio_lib=""
-        else
-		    want_boost="yes"
-        	ax_boost_user_asio_lib="$withval"
-		fi
-        ],
-        [want_boost="yes"]
-	)
-
-	if test "x$want_boost" = "xyes"; then
-        AC_REQUIRE([AC_PROG_CC])
-		CPPFLAGS_SAVED="$CPPFLAGS"
-		CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
-		export CPPFLAGS
-
-		LDFLAGS_SAVED="$LDFLAGS"
-		LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
-		export LDFLAGS
-
-        AC_CACHE_CHECK(whether the Boost::ASIO library is available,
-					   ax_cv_boost_asio,
-        [AC_LANG_PUSH([C++])
-		 AC_COMPILE_IFELSE(AC_LANG_PROGRAM([[ @%:@include <boost/asio.hpp>
-											]],
-                                  [[
-
-                                    boost::asio::io_service io;
-                                    boost::system::error_code timer_result;
-                                    boost::asio::deadline_timer t(io);
-                                    t.cancel();
-                                    io.run_one();
-									return 0;
-                                   ]]),
-                             ax_cv_boost_asio=yes, ax_cv_boost_asio=no)
-         AC_LANG_POP([C++])
-		])
-		if test "x$ax_cv_boost_asio" = "xyes"; then
-			AC_DEFINE(HAVE_BOOST_ASIO,,[define if the Boost::ASIO library is available])
-			BN=boost_system
-            if test "x$ax_boost_user_asio_lib" = "x"; then
-				for ax_lib in $BN $BN-$CC $BN-$CC-mt $BN-$CC-mt-s $BN-$CC-s \
-                              lib$BN lib$BN-$CC lib$BN-$CC-mt lib$BN-$CC-mt-s lib$BN-$CC-s \
-                              $BN-mgw $BN-mgw $BN-mgw-mt $BN-mgw-mt-s $BN-mgw-s ; do
-				    AC_CHECK_LIB($ax_lib, main, [BOOST_ASIO_LIB="-l$ax_lib" AC_SUBST(BOOST_ASIO_LIB) link_asio="yes" break],
-                                 [link_asio="no"])
-  				done
-            else
-               for ax_lib in $ax_boost_user_asio_lib $BN-$ax_boost_user_asio_lib; do
-				      AC_CHECK_LIB($ax_lib, main,
-                                   [BOOST_ASIO_LIB="-l$ax_lib" AC_SUBST(BOOST_ASIO_LIB) link_asio="yes" break],
-                                   [link_asio="no"])
-                  done
-
-            fi
-            if test "x$ax_lib" = "x"; then
-                AC_MSG_ERROR(Could not find a version of the library!)
-            fi
-			if test "x$link_asio" = "xno"; then
-				AC_MSG_ERROR(Could not link against $ax_lib !)
-			fi
-		fi
-
-		CPPFLAGS="$CPPFLAGS_SAVED"
-    	LDFLAGS="$LDFLAGS_SAVED"
-	fi
-])

+ 0 - 252
src/contrib/hedwig/client/src/main/cpp/m4/ax_boost_base.m4

@@ -1,252 +0,0 @@
-# ===========================================================================
-#       http://www.gnu.org/software/autoconf-archive/ax_boost_base.html
-# ===========================================================================
-#
-# SYNOPSIS
-#
-#   AX_BOOST_BASE([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
-#
-# DESCRIPTION
-#
-#   Test for the Boost C++ libraries of a particular version (or newer)
-#
-#   If no path to the installed boost library is given, the macro searches
-#   under /usr, /usr/local, /opt and /opt/local and evaluates the
-#   $BOOST_ROOT environment variable. Further documentation is available at
-#   <http://randspringer.de/boost/index.html>.
-#
-#   This macro calls:
-#
-#     AC_SUBST(BOOST_CPPFLAGS) / AC_SUBST(BOOST_LDFLAGS)
-#
-#   And sets:
-#
-#     HAVE_BOOST
-#
-# LICENSE
-#
-#   Copyright (c) 2008 Thomas Porschberg <thomas@randspringer.de>
-#   Copyright (c) 2009 Peter Adolphs
-#
-#   Copying and distribution of this file, with or without modification, are
-#   permitted in any medium without royalty provided the copyright notice
-#   and this notice are preserved. This file is offered as-is, without any
-#   warranty.
-
-#serial 17
-
-AC_DEFUN([AX_BOOST_BASE],
-[
-AC_ARG_WITH([boost],
-  [AS_HELP_STRING([--with-boost@<:@=ARG@:>@],
-    [use Boost library from a standard location (ARG=yes),
-     from the specified location (ARG=<path>),
-     or disable it (ARG=no)
-     @<:@ARG=yes@:>@ ])],
-    [
-    if test "$withval" = "no"; then
-        want_boost="no"
-    elif test "$withval" = "yes"; then
-        want_boost="yes"
-        ac_boost_path=""
-    else
-        want_boost="yes"
-        ac_boost_path="$withval"
-    fi
-    ],
-    [want_boost="yes"])
-
-
-AC_ARG_WITH([boost-libdir],
-        AS_HELP_STRING([--with-boost-libdir=LIB_DIR],
-        [Force given directory for boost libraries. Note that this will overwrite library path detection, so use this parameter only if default library detection fails and you know exactly where your boost libraries are located.]),
-        [
-        if test -d "$withval"
-        then
-                ac_boost_lib_path="$withval"
-        else
-                AC_MSG_ERROR(--with-boost-libdir expected directory name)
-        fi
-        ],
-        [ac_boost_lib_path=""]
-)
-
-if test "x$want_boost" = "xyes"; then
-    boost_lib_version_req=ifelse([$1], ,1.20.0,$1)
-    boost_lib_version_req_shorten=`expr $boost_lib_version_req : '\([[0-9]]*\.[[0-9]]*\)'`
-    boost_lib_version_req_major=`expr $boost_lib_version_req : '\([[0-9]]*\)'`
-    boost_lib_version_req_minor=`expr $boost_lib_version_req : '[[0-9]]*\.\([[0-9]]*\)'`
-    boost_lib_version_req_sub_minor=`expr $boost_lib_version_req : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'`
-    if test "x$boost_lib_version_req_sub_minor" = "x" ; then
-        boost_lib_version_req_sub_minor="0"
-        fi
-    WANT_BOOST_VERSION=`expr $boost_lib_version_req_major \* 100000 \+  $boost_lib_version_req_minor \* 100 \+ $boost_lib_version_req_sub_minor`
-    AC_MSG_CHECKING(for boostlib >= $boost_lib_version_req)
-    succeeded=no
-
-    dnl On x86_64 systems check for system libraries in both lib64 and lib.
-    dnl The former is specified by FHS, but e.g. Debian does not adhere to
-    dnl this (as it raises problems for generic multi-arch support).
-    dnl The last entry in the list is chosen by default when no libraries
-    dnl are found, e.g. when only header-only libraries are installed!
-    libsubdirs="lib"
-    if test `uname -m` = x86_64; then
-        libsubdirs="lib64 lib lib64"
-    fi
-
-    dnl first we check the system location for boost libraries
-    dnl this location is chosen if boost libraries are installed with the --layout=system option
-    dnl or if you install boost with RPM
-    if test "$ac_boost_path" != ""; then
-        BOOST_LDFLAGS="-L$ac_boost_path/$libsubdir"
-        BOOST_CPPFLAGS="-I$ac_boost_path/include"
-    elif test "$cross_compiling" != yes; then
-        for ac_boost_path_tmp in /usr /usr/local /opt /opt/local ; do
-            if test -d "$ac_boost_path_tmp/include/boost" && test -r "$ac_boost_path_tmp/include/boost"; then
-                for libsubdir in $libsubdirs ; do
-                    if ls "$ac_boost_path_tmp/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
-                done
-                BOOST_LDFLAGS="-L$ac_boost_path_tmp/$libsubdir"
-                BOOST_CPPFLAGS="-I$ac_boost_path_tmp/include"
-                break;
-            fi
-        done
-    fi
-
-    dnl overwrite ld flags if we have required special directory with
-    dnl --with-boost-libdir parameter
-    if test "$ac_boost_lib_path" != ""; then
-       BOOST_LDFLAGS="-L$ac_boost_lib_path"
-    fi
-
-    CPPFLAGS_SAVED="$CPPFLAGS"
-    CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
-    export CPPFLAGS
-
-    LDFLAGS_SAVED="$LDFLAGS"
-    LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
-    export LDFLAGS
-
-    AC_REQUIRE([AC_PROG_CXX])
-    AC_LANG_PUSH(C++)
-        AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
-    @%:@include <boost/version.hpp>
-    ]], [[
-    #if BOOST_VERSION >= $WANT_BOOST_VERSION
-    // Everything is okay
-    #else
-    #  error Boost version is too old
-    #endif
-    ]])],[
-        AC_MSG_RESULT(yes)
-    succeeded=yes
-    found_system=yes
-        ],[
-        ])
-    AC_LANG_POP([C++])
-
-
-
-    dnl if we found no boost with system layout we search for boost libraries
-    dnl built and installed without the --layout=system option or for a staged(not installed) version
-    if test "x$succeeded" != "xyes"; then
-        _version=0
-        if test "$ac_boost_path" != ""; then
-            if test -d "$ac_boost_path" && test -r "$ac_boost_path"; then
-                for i in `ls -d $ac_boost_path/include/boost-* 2>/dev/null`; do
-                    _version_tmp=`echo $i | sed "s#$ac_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'`
-                    V_CHECK=`expr $_version_tmp \> $_version`
-                    if test "$V_CHECK" = "1" ; then
-                        _version=$_version_tmp
-                    fi
-                    VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'`
-                    BOOST_CPPFLAGS="-I$ac_boost_path/include/boost-$VERSION_UNDERSCORE"
-                done
-            fi
-        else
-            if test "$cross_compiling" != yes; then
-                for ac_boost_path in /usr /usr/local /opt /opt/local ; do
-                    if test -d "$ac_boost_path" && test -r "$ac_boost_path"; then
-                        for i in `ls -d $ac_boost_path/include/boost-* 2>/dev/null`; do
-                            _version_tmp=`echo $i | sed "s#$ac_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'`
-                            V_CHECK=`expr $_version_tmp \> $_version`
-                            if test "$V_CHECK" = "1" ; then
-                                _version=$_version_tmp
-                                best_path=$ac_boost_path
-                            fi
-                        done
-                    fi
-                done
-
-                VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'`
-                BOOST_CPPFLAGS="-I$best_path/include/boost-$VERSION_UNDERSCORE"
-                if test "$ac_boost_lib_path" = ""; then
-                    for libsubdir in $libsubdirs ; do
-                        if ls "$best_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
-                    done
-                    BOOST_LDFLAGS="-L$best_path/$libsubdir"
-                fi
-            fi
-
-            if test "x$BOOST_ROOT" != "x"; then
-                for libsubdir in $libsubdirs ; do
-                    if ls "$BOOST_ROOT/stage/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
-                done
-                if test -d "$BOOST_ROOT" && test -r "$BOOST_ROOT" && test -d "$BOOST_ROOT/stage/$libsubdir" && test -r "$BOOST_ROOT/stage/$libsubdir"; then
-                    version_dir=`expr //$BOOST_ROOT : '.*/\(.*\)'`
-                    stage_version=`echo $version_dir | sed 's/boost_//' | sed 's/_/./g'`
-                        stage_version_shorten=`expr $stage_version : '\([[0-9]]*\.[[0-9]]*\)'`
-                    V_CHECK=`expr $stage_version_shorten \>\= $_version`
-                    if test "$V_CHECK" = "1" -a "$ac_boost_lib_path" = "" ; then
-                        AC_MSG_NOTICE(We will use a staged boost library from $BOOST_ROOT)
-                        BOOST_CPPFLAGS="-I$BOOST_ROOT"
-                        BOOST_LDFLAGS="-L$BOOST_ROOT/stage/$libsubdir"
-                    fi
-                fi
-            fi
-        fi
-
-        CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
-        export CPPFLAGS
-        LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
-        export LDFLAGS
-
-        AC_LANG_PUSH(C++)
-            AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
-        @%:@include <boost/version.hpp>
-        ]], [[
-        #if BOOST_VERSION >= $WANT_BOOST_VERSION
-        // Everything is okay
-        #else
-        #  error Boost version is too old
-        #endif
-        ]])],[
-            AC_MSG_RESULT(yes)
-        succeeded=yes
-        found_system=yes
-            ],[
-            ])
-        AC_LANG_POP([C++])
-    fi
-
-    if test "$succeeded" != "yes" ; then
-        if test "$_version" = "0" ; then
-            AC_MSG_NOTICE([[We could not detect the boost libraries (version $boost_lib_version_req_shorten or higher). If you have a staged boost library (still not installed) please specify \$BOOST_ROOT in your environment and do not give a PATH to --with-boost option.  If you are sure you have boost installed, then check your version number looking in <boost/version.hpp>. See http://randspringer.de/boost for more documentation.]])
-        else
-            AC_MSG_NOTICE([Your boost libraries seem to be too old (version $_version).])
-        fi
-        # execute ACTION-IF-NOT-FOUND (if present):
-        ifelse([$3], , :, [$3])
-    else
-        AC_SUBST(BOOST_CPPFLAGS)
-        AC_SUBST(BOOST_LDFLAGS)
-        AC_DEFINE(HAVE_BOOST,,[define if the Boost library is available])
-        # execute ACTION-IF-FOUND (if present):
-        ifelse([$2], , :, [$2])
-    fi
-
-    CPPFLAGS="$CPPFLAGS_SAVED"
-    LDFLAGS="$LDFLAGS_SAVED"
-fi
-
-])

+ 0 - 149
src/contrib/hedwig/client/src/main/cpp/m4/ax_boost_thread.m4

@@ -1,149 +0,0 @@
-# ===========================================================================
-#      http://www.gnu.org/software/autoconf-archive/ax_boost_thread.html
-# ===========================================================================
-#
-# SYNOPSIS
-#
-#   AX_BOOST_THREAD
-#
-# DESCRIPTION
-#
-#   Test for Thread library from the Boost C++ libraries. The macro requires
-#   a preceding call to AX_BOOST_BASE. Further documentation is available at
-#   <http://randspringer.de/boost/index.html>.
-#
-#   This macro calls:
-#
-#     AC_SUBST(BOOST_THREAD_LIB)
-#
-#   And sets:
-#
-#     HAVE_BOOST_THREAD
-#
-# LICENSE
-#
-#   Copyright (c) 2009 Thomas Porschberg <thomas@randspringer.de>
-#   Copyright (c) 2009 Michael Tindal
-#
-#   Copying and distribution of this file, with or without modification, are
-#   permitted in any medium without royalty provided the copyright notice
-#   and this notice are preserved. This file is offered as-is, without any
-#   warranty.
-
-#serial 17
-
-AC_DEFUN([AX_BOOST_THREAD],
-[
-	AC_ARG_WITH([boost-thread],
-	AS_HELP_STRING([--with-boost-thread@<:@=special-lib@:>@],
-                   [use the Thread library from boost - it is possible to specify a certain library for the linker
-                        e.g. --with-boost-thread=boost_thread-gcc-mt ]),
-        [
-        if test "$withval" = "no"; then
-			want_boost="no"
-        elif test "$withval" = "yes"; then
-            want_boost="yes"
-            ax_boost_user_thread_lib=""
-        else
-		    want_boost="yes"
-        	ax_boost_user_thread_lib="$withval"
-		fi
-        ],
-        [want_boost="yes"]
-	)
-
-	if test "x$want_boost" = "xyes"; then
-        AC_REQUIRE([AC_PROG_CC])
-        AC_REQUIRE([AC_CANONICAL_BUILD])
-		CPPFLAGS_SAVED="$CPPFLAGS"
-		CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
-		export CPPFLAGS
-
-		LDFLAGS_SAVED="$LDFLAGS"
-		LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
-		export LDFLAGS
-
-        AC_CACHE_CHECK(whether the Boost::Thread library is available,
-					   ax_cv_boost_thread,
-        [AC_LANG_PUSH([C++])
-			 CXXFLAGS_SAVE=$CXXFLAGS
-
-			 if test "x$build_os" = "xsolaris" ; then
-  				 CXXFLAGS="-pthreads $CXXFLAGS"
-			 elif test "x$build_os" = "xming32" ; then
-				 CXXFLAGS="-mthreads $CXXFLAGS"
-			 else
-				CXXFLAGS="-pthread $CXXFLAGS"
-			 fi
-			 AC_COMPILE_IFELSE(AC_LANG_PROGRAM([[@%:@include <boost/thread/thread.hpp>]],
-                                   [[boost::thread_group thrds;
-                                   return 0;]]),
-                   ax_cv_boost_thread=yes, ax_cv_boost_thread=no)
-			 CXXFLAGS=$CXXFLAGS_SAVE
-             AC_LANG_POP([C++])
-		])
-		if test "x$ax_cv_boost_thread" = "xyes"; then
-           if test "x$build_os" = "xsolaris" ; then
-			  BOOST_CPPFLAGS="-pthreads $BOOST_CPPFLAGS"
-		   elif test "x$build_os" = "xming32" ; then
-			  BOOST_CPPFLAGS="-mthreads $BOOST_CPPFLAGS"
-		   else
-			  BOOST_CPPFLAGS="-pthread $BOOST_CPPFLAGS"
-		   fi
-
-			AC_SUBST(BOOST_CPPFLAGS)
-
-			AC_DEFINE(HAVE_BOOST_THREAD,,[define if the Boost::Thread library is available])
-            BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'`
-
-			LDFLAGS_SAVE=$LDFLAGS
-                        case "x$build_os" in
-                          *bsd* )
-                               LDFLAGS="-pthread $LDFLAGS"
-                          break;
-                          ;;
-                        esac
-            if test "x$ax_boost_user_thread_lib" = "x"; then
-                for libextension in `ls $BOOSTLIBDIR/libboost_thread*.so* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_thread.*\)\.so.*$;\1;'` `ls $BOOSTLIBDIR/libboost_thread*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_thread.*\)\.a*$;\1;'`; do
-                     ax_lib=${libextension}
-				    AC_CHECK_LIB($ax_lib, exit,
-                                 [BOOST_THREAD_LIB="-l$ax_lib"; AC_SUBST(BOOST_THREAD_LIB) link_thread="yes"; break],
-                                 [link_thread="no"])
-  				done
-                if test "x$link_thread" != "xyes"; then
-                for libextension in `ls $BOOSTLIBDIR/boost_thread*.dll* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^\(boost_thread.*\)\.dll.*$;\1;'` `ls $BOOSTLIBDIR/boost_thread*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^\(boost_thread.*\)\.a*$;\1;'` ; do
-                     ax_lib=${libextension}
-				    AC_CHECK_LIB($ax_lib, exit,
-                                 [BOOST_THREAD_LIB="-l$ax_lib"; AC_SUBST(BOOST_THREAD_LIB) link_thread="yes"; break],
-                                 [link_thread="no"])
-  				done
-                fi
-
-            else
-               for ax_lib in $ax_boost_user_thread_lib boost_thread-$ax_boost_user_thread_lib; do
-				      AC_CHECK_LIB($ax_lib, exit,
-                                   [BOOST_THREAD_LIB="-l$ax_lib"; AC_SUBST(BOOST_THREAD_LIB) link_thread="yes"; break],
-                                   [link_thread="no"])
-                  done
-
-            fi
-            if test "x$ax_lib" = "x"; then
-                AC_MSG_ERROR(Could not find a version of the library!)
-            fi
-			if test "x$link_thread" = "xno"; then
-				AC_MSG_ERROR(Could not link against $ax_lib !)
-                        else
-                           case "x$build_os" in
-                              *bsd* )
-			        BOOST_LDFLAGS="-pthread $BOOST_LDFLAGS"
-                              break;
-                              ;;
-                           esac
-
-			fi
-		fi
-
-		CPPFLAGS="$CPPFLAGS_SAVED"
-    	LDFLAGS="$LDFLAGS_SAVED"
-	fi
-])

+ 0 - 533
src/contrib/hedwig/client/src/main/cpp/m4/ax_doxygen.m4

@@ -1,533 +0,0 @@
-# ===========================================================================
-#      http://www.gnu.org/software/autoconf-archive/ax_prog_doxygen.html
-# ===========================================================================
-#
-# SYNOPSIS
-#
-#   DX_INIT_DOXYGEN(PROJECT-NAME, DOXYFILE-PATH, [OUTPUT-DIR])
-#   DX_DOXYGEN_FEATURE(ON|OFF)
-#   DX_DOT_FEATURE(ON|OFF)
-#   DX_HTML_FEATURE(ON|OFF)
-#   DX_CHM_FEATURE(ON|OFF)
-#   DX_CHI_FEATURE(ON|OFF)
-#   DX_MAN_FEATURE(ON|OFF)
-#   DX_RTF_FEATURE(ON|OFF)
-#   DX_XML_FEATURE(ON|OFF)
-#   DX_PDF_FEATURE(ON|OFF)
-#   DX_PS_FEATURE(ON|OFF)
-#
-# DESCRIPTION
-#
-#   The DX_*_FEATURE macros control the default setting for the given
-#   Doxygen feature. Supported features are 'DOXYGEN' itself, 'DOT' for
-#   generating graphics, 'HTML' for plain HTML, 'CHM' for compressed HTML
-#   help (for MS users), 'CHI' for generating a separate .chi file alongside the
-#   .chm file, and 'MAN', 'RTF', 'XML', 'PDF' and 'PS' for the appropriate
-#   output formats. The environment variable DOXYGEN_PAPER_SIZE may be
-#   specified to override the default 'a4wide' paper size.
-#
-#   By default, HTML, PDF and PS documentation is generated as this seems to
-#   be the most popular and portable combination. MAN pages created by
-#   Doxygen are usually problematic, though by picking an appropriate subset
-#   and doing some massaging they might be better than nothing. CHM and RTF
-#   are specific for MS (note that you can't generate both HTML and CHM at
-#   the same time). The XML is rather useless unless you apply specialized
-#   post-processing to it.
-#
-#   The macros mainly control the default state of the feature. The user can
-#   override the default by specifying --enable or --disable. The macros
-#   ensure that contradictory flags are not given (e.g.,
-#   --enable-doxygen-html and --enable-doxygen-chm,
-#   --enable-doxygen-anything with --disable-doxygen, etc.) Finally, each
-#   feature will be automatically disabled (with a warning) if the required
-#   programs are missing.
-#
-#   Once all the feature defaults have been specified, call DX_INIT_DOXYGEN
-#   with the following parameters: a one-word name for the project for use
-#   as a filename base etc., an optional configuration file name (the
-#   default is 'Doxyfile', the same as Doxygen's default), and an optional
-#   output directory name (the default is 'doxygen-doc').
-#
-#   Automake Support
-#
-#   The following is a template aminclude.am file for use with Automake.
-#   Make targets and variables values are controlled by the various
-#   DX_COND_* conditionals set by autoconf.
-#
-#   The provided targets are:
-#
-#     doxygen-doc: Generate all doxygen documentation.
-#
-#     doxygen-run: Run doxygen, which will generate some of the
-#                  documentation (HTML, CHM, CHI, MAN, RTF, XML)
-#                  but will not do the post processing required
-#                  for the rest of it (PS, PDF, and some MAN).
-#
-#     doxygen-man: Rename some doxygen generated man pages.
-#
-#     doxygen-ps:  Generate doxygen PostScript documentation.
-#
-#     doxygen-pdf: Generate doxygen PDF documentation.
-#
-#   Note that by default these are not integrated into the automake targets.
-#   If doxygen is used to generate man pages, you can achieve this
-#   integration by setting man3_MANS to the list of man pages generated and
-#   then adding the dependency:
-#
-#     $(man3_MANS): doxygen-doc
-#
-#   This will cause make to run doxygen and generate all the documentation.
-#
-#   The following variable is intended for use in Makefile.am:
-#
-#     DX_CLEANFILES = everything to clean.
-#
-#   Then add this variable to MOSTLYCLEANFILES.
-#
-#     ----- begin aminclude.am -------------------------------------
-#
-#     ## --------------------------------- ##
-#     ## Format-independent Doxygen rules. ##
-#     ## --------------------------------- ##
-#
-#     if DX_COND_doc
-#
-#     ## ------------------------------- ##
-#     ## Rules specific for HTML output. ##
-#     ## ------------------------------- ##
-#
-#     if DX_COND_html
-#
-#     DX_CLEAN_HTML = @DX_DOCDIR@/html
-#
-#     endif DX_COND_html
-#
-#     ## ------------------------------ ##
-#     ## Rules specific for CHM output. ##
-#     ## ------------------------------ ##
-#
-#     if DX_COND_chm
-#
-#     DX_CLEAN_CHM = @DX_DOCDIR@/chm
-#
-#     if DX_COND_chi
-#
-#     DX_CLEAN_CHI = @DX_DOCDIR@/@PACKAGE@.chi
-#
-#     endif DX_COND_chi
-#
-#     endif DX_COND_chm
-#
-#     ## ------------------------------ ##
-#     ## Rules specific for MAN output. ##
-#     ## ------------------------------ ##
-#
-#     if DX_COND_man
-#
-#     DX_CLEAN_MAN = @DX_DOCDIR@/man
-#
-#     endif DX_COND_man
-#
-#     ## ------------------------------ ##
-#     ## Rules specific for RTF output. ##
-#     ## ------------------------------ ##
-#
-#     if DX_COND_rtf
-#
-#     DX_CLEAN_RTF = @DX_DOCDIR@/rtf
-#
-#     endif DX_COND_rtf
-#
-#     ## ------------------------------ ##
-#     ## Rules specific for XML output. ##
-#     ## ------------------------------ ##
-#
-#     if DX_COND_xml
-#
-#     DX_CLEAN_XML = @DX_DOCDIR@/xml
-#
-#     endif DX_COND_xml
-#
-#     ## ----------------------------- ##
-#     ## Rules specific for PS output. ##
-#     ## ----------------------------- ##
-#
-#     if DX_COND_ps
-#
-#     DX_CLEAN_PS = @DX_DOCDIR@/@PACKAGE@.ps
-#
-#     DX_PS_GOAL = doxygen-ps
-#
-#     doxygen-ps: @DX_DOCDIR@/@PACKAGE@.ps
-#
-#     @DX_DOCDIR@/@PACKAGE@.ps: @DX_DOCDIR@/@PACKAGE@.tag
-#         cd @DX_DOCDIR@/latex; \
-#         rm -f *.aux *.toc *.idx *.ind *.ilg *.log *.out; \
-#         $(DX_LATEX) refman.tex; \
-#         $(MAKEINDEX_PATH) refman.idx; \
-#         $(DX_LATEX) refman.tex; \
-#         countdown=5; \
-#         while $(DX_EGREP) 'Rerun (LaTeX|to get cross-references right)' \
-#                           refman.log > /dev/null 2>&1 \
-#            && test $$countdown -gt 0; do \
-#             $(DX_LATEX) refman.tex; \
-#             countdown=`expr $$countdown - 1`; \
-#         done; \
-#         $(DX_DVIPS) -o ../@PACKAGE@.ps refman.dvi
-#
-#     endif DX_COND_ps
-#
-#     ## ------------------------------ ##
-#     ## Rules specific for PDF output. ##
-#     ## ------------------------------ ##
-#
-#     if DX_COND_pdf
-#
-#     DX_CLEAN_PDF = @DX_DOCDIR@/@PACKAGE@.pdf
-#
-#     DX_PDF_GOAL = doxygen-pdf
-#
-#     doxygen-pdf: @DX_DOCDIR@/@PACKAGE@.pdf
-#
-#     @DX_DOCDIR@/@PACKAGE@.pdf: @DX_DOCDIR@/@PACKAGE@.tag
-#         cd @DX_DOCDIR@/latex; \
-#         rm -f *.aux *.toc *.idx *.ind *.ilg *.log *.out; \
-#         $(DX_PDFLATEX) refman.tex; \
-#         $(DX_MAKEINDEX) refman.idx; \
-#         $(DX_PDFLATEX) refman.tex; \
-#         countdown=5; \
-#         while $(DX_EGREP) 'Rerun (LaTeX|to get cross-references right)' \
-#                           refman.log > /dev/null 2>&1 \
-#            && test $$countdown -gt 0; do \
-#             $(DX_PDFLATEX) refman.tex; \
-#             countdown=`expr $$countdown - 1`; \
-#         done; \
-#         mv refman.pdf ../@PACKAGE@.pdf
-#
-#     endif DX_COND_pdf
-#
-#     ## ------------------------------------------------- ##
-#     ## Rules specific for LaTeX (shared for PS and PDF). ##
-#     ## ------------------------------------------------- ##
-#
-#     if DX_COND_latex
-#
-#     DX_CLEAN_LATEX = @DX_DOCDIR@/latex
-#
-#     endif DX_COND_latex
-#
-#     .PHONY: doxygen-run doxygen-doc $(DX_PS_GOAL) $(DX_PDF_GOAL)
-#
-#     .INTERMEDIATE: doxygen-run $(DX_PS_GOAL) $(DX_PDF_GOAL)
-#
-#     doxygen-run: @DX_DOCDIR@/@PACKAGE@.tag
-#
-#     doxygen-doc: doxygen-run $(DX_PS_GOAL) $(DX_PDF_GOAL)
-#
-#     @DX_DOCDIR@/@PACKAGE@.tag: $(DX_CONFIG) $(pkginclude_HEADERS)
-#         rm -rf @DX_DOCDIR@
-#         $(DX_ENV) $(DX_DOXYGEN) $(srcdir)/$(DX_CONFIG)
-#
-#     DX_CLEANFILES = \
-#         @DX_DOCDIR@/@PACKAGE@.tag \
-#         -r \
-#         $(DX_CLEAN_HTML) \
-#         $(DX_CLEAN_CHM) \
-#         $(DX_CLEAN_CHI) \
-#         $(DX_CLEAN_MAN) \
-#         $(DX_CLEAN_RTF) \
-#         $(DX_CLEAN_XML) \
-#         $(DX_CLEAN_PS) \
-#         $(DX_CLEAN_PDF) \
-#         $(DX_CLEAN_LATEX)
-#
-#     endif DX_COND_doc
-#
-#     ----- end aminclude.am ---------------------------------------
-#
-# LICENSE
-#
-#   Copyright (c) 2009 Oren Ben-Kiki <oren@ben-kiki.org>
-#
-#   Copying and distribution of this file, with or without modification, are
-#   permitted in any medium without royalty provided the copyright notice
-#   and this notice are preserved. This file is offered as-is, without any
-#   warranty.
-
-#serial 10
-
-## ----------##
-## Defaults. ##
-## ----------##
-
-DX_ENV=""
-AC_DEFUN([DX_FEATURE_doc],  ON)
-AC_DEFUN([DX_FEATURE_dot],  ON)
-AC_DEFUN([DX_FEATURE_man],  OFF)
-AC_DEFUN([DX_FEATURE_html], ON)
-AC_DEFUN([DX_FEATURE_chm],  OFF)
-AC_DEFUN([DX_FEATURE_chi],  OFF)
-AC_DEFUN([DX_FEATURE_rtf],  OFF)
-AC_DEFUN([DX_FEATURE_xml],  OFF)
-AC_DEFUN([DX_FEATURE_pdf],  ON)
-AC_DEFUN([DX_FEATURE_ps],   ON)
-
-## --------------- ##
-## Private macros. ##
-## --------------- ##
-
-# DX_ENV_APPEND(VARIABLE, VALUE)
-# ------------------------------
-# Append VARIABLE="VALUE" to DX_ENV for invoking doxygen.
-AC_DEFUN([DX_ENV_APPEND], [AC_SUBST([DX_ENV], ["$DX_ENV $1='$2'"])])
-
-# DX_DIRNAME_EXPR
-# ---------------
-# Expand into a shell expression that prints the directory part of a path.
-AC_DEFUN([DX_DIRNAME_EXPR],
-         [[expr ".$1" : '\(\.\)[^/]*$' \| "x$1" : 'x\(.*\)/[^/]*$']])
-
-# DX_IF_FEATURE(FEATURE, IF-ON, IF-OFF)
-# -------------------------------------
-# Expands according to the M4 (static) status of the feature.
-AC_DEFUN([DX_IF_FEATURE], [ifelse(DX_FEATURE_$1, ON, [$2], [$3])])
-
-# DX_REQUIRE_PROG(VARIABLE, PROGRAM)
-# ----------------------------------
-# Require the specified program to be found for the DX_CURRENT_FEATURE to work.
-AC_DEFUN([DX_REQUIRE_PROG], [
-AC_PATH_TOOL([$1], [$2])
-if test "$DX_FLAG_[]DX_CURRENT_FEATURE$$1" = 1; then
-    AC_MSG_WARN([$2 not found - will not DX_CURRENT_DESCRIPTION])
-    AC_SUBST(DX_FLAG_[]DX_CURRENT_FEATURE, 0)
-fi
-])
-
-# DX_TEST_FEATURE(FEATURE)
-# ------------------------
-# Expand to a shell expression testing whether the feature is active.
-AC_DEFUN([DX_TEST_FEATURE], [test "$DX_FLAG_$1" = 1])
-
-# DX_CHECK_DEPEND(REQUIRED_FEATURE, REQUIRED_STATE)
-# -------------------------------------------------
-# Verify that a required feature has the right state before trying to turn on
-# the DX_CURRENT_FEATURE.
-AC_DEFUN([DX_CHECK_DEPEND], [
-test "$DX_FLAG_$1" = "$2" \
-|| AC_MSG_ERROR([doxygen-DX_CURRENT_FEATURE ifelse([$2], 1,
-                            requires, contradicts) doxygen-DX_CURRENT_FEATURE])
-])
-
-# DX_CLEAR_DEPEND(FEATURE, REQUIRED_FEATURE, REQUIRED_STATE)
-# ----------------------------------------------------------
-# Turn off the DX_CURRENT_FEATURE if the required feature is off.
-AC_DEFUN([DX_CLEAR_DEPEND], [
-test "$DX_FLAG_$1" = "$2" || AC_SUBST(DX_FLAG_[]DX_CURRENT_FEATURE, 0)
-])
-
-# DX_ARG_ABLE(FEATURE, DESCRIPTION,
-#                CHECK_DEPEND, CLEAR_DEPEND,
-#                REQUIRE, DO-IF-ON, DO-IF-OFF)
-# --------------------------------------------
-# Parse the command-line option controlling a feature. CHECK_DEPEND is called
-# if the user explicitly turns the feature on (and invokes DX_CHECK_DEPEND),
-# otherwise CLEAR_DEPEND is called to turn off the default state if a required
-# feature is disabled (using DX_CLEAR_DEPEND). REQUIRE performs additional
-# requirement tests (DX_REQUIRE_PROG). Finally, an automake flag is set and
-# DO-IF-ON or DO-IF-OFF are called according to the final state of the feature.
-AC_DEFUN([DX_ARG_ABLE], [
-    AC_DEFUN([DX_CURRENT_FEATURE], [$1])
-    AC_DEFUN([DX_CURRENT_DESCRIPTION], [$2])
-    AC_ARG_ENABLE(doxygen-$1,
-                  [AS_HELP_STRING(DX_IF_FEATURE([$1], [--disable-doxygen-$1],
-                                                      [--enable-doxygen-$1]),
-                                  DX_IF_FEATURE([$1], [don't $2], [$2]))],
-                  [
-case "$enableval" in
-#(
-y|Y|yes|Yes|YES)
-    AC_SUBST([DX_FLAG_$1], 1)
-    $3
-;; #(
-n|N|no|No|NO)
-    AC_SUBST([DX_FLAG_$1], 0)
-;; #(
-*)
-    AC_MSG_ERROR([invalid value '$enableval' given to doxygen-$1])
-;;
-esac
-], [
-AC_SUBST([DX_FLAG_$1], [DX_IF_FEATURE([$1], 1, 0)])
-$4
-])
-if DX_TEST_FEATURE([$1]); then
-    $5
-    :
-fi
-if DX_TEST_FEATURE([$1]); then
-    AM_CONDITIONAL(DX_COND_$1, :)
-    $6
-    :
-else
-    AM_CONDITIONAL(DX_COND_$1, false)
-    $7
-    :
-fi
-])
-
-## -------------- ##
-## Public macros. ##
-## -------------- ##
-
-# DX_XXX_FEATURE(DEFAULT_STATE)
-# -----------------------------
-AC_DEFUN([DX_DOXYGEN_FEATURE], [AC_DEFUN([DX_FEATURE_doc],  [$1])])
-AC_DEFUN([DX_MAN_FEATURE],     [AC_DEFUN([DX_FEATURE_man],  [$1])])
-AC_DEFUN([DX_HTML_FEATURE],    [AC_DEFUN([DX_FEATURE_html], [$1])])
-AC_DEFUN([DX_CHM_FEATURE],     [AC_DEFUN([DX_FEATURE_chm],  [$1])])
-AC_DEFUN([DX_CHI_FEATURE],     [AC_DEFUN([DX_FEATURE_chi],  [$1])])
-AC_DEFUN([DX_RTF_FEATURE],     [AC_DEFUN([DX_FEATURE_rtf],  [$1])])
-AC_DEFUN([DX_XML_FEATURE],     [AC_DEFUN([DX_FEATURE_xml],  [$1])])
-AC_DEFUN([DX_XML_FEATURE],     [AC_DEFUN([DX_FEATURE_xml],  [$1])])
-AC_DEFUN([DX_PDF_FEATURE],     [AC_DEFUN([DX_FEATURE_pdf],  [$1])])
-AC_DEFUN([DX_PS_FEATURE],      [AC_DEFUN([DX_FEATURE_ps],   [$1])])
-
-# DX_INIT_DOXYGEN(PROJECT, [CONFIG-FILE], [OUTPUT-DOC-DIR])
-# ---------------------------------------------------------
-# PROJECT also serves as the base name for the documentation files.
-# The default CONFIG-FILE is "Doxyfile" and OUTPUT-DOC-DIR is "doxygen-doc".
-AC_DEFUN([DX_INIT_DOXYGEN], [
-
-# Files:
-AC_SUBST([DX_PROJECT], [$1])
-AC_SUBST([DX_CONFIG], [ifelse([$2], [], Doxyfile, [$2])])
-AC_SUBST([DX_DOCDIR], [ifelse([$3], [], doxygen-doc, [$3])])
-
-# Environment variables used inside doxygen.cfg:
-DX_ENV_APPEND(SRCDIR, $srcdir)
-DX_ENV_APPEND(PROJECT, $DX_PROJECT)
-DX_ENV_APPEND(DOCDIR, $DX_DOCDIR)
-DX_ENV_APPEND(VERSION, $PACKAGE_VERSION)
-
-# Doxygen itself:
-DX_ARG_ABLE(doc, [generate any doxygen documentation],
-            [],
-            [],
-            [DX_REQUIRE_PROG([DX_DOXYGEN], doxygen)
-             DX_REQUIRE_PROG([DX_PERL], perl)],
-            [DX_ENV_APPEND(PERL_PATH, $DX_PERL)])
-
-# Dot for graphics:
-DX_ARG_ABLE(dot, [generate graphics for doxygen documentation],
-            [DX_CHECK_DEPEND(doc, 1)],
-            [DX_CLEAR_DEPEND(doc, 1)],
-            [DX_REQUIRE_PROG([DX_DOT], dot)],
-            [DX_ENV_APPEND(HAVE_DOT, YES)
-             DX_ENV_APPEND(DOT_PATH, [`DX_DIRNAME_EXPR($DX_DOT)`])],
-            [DX_ENV_APPEND(HAVE_DOT, NO)])
-
-# Man pages generation:
-DX_ARG_ABLE(man, [generate doxygen manual pages],
-            [DX_CHECK_DEPEND(doc, 1)],
-            [DX_CLEAR_DEPEND(doc, 1)],
-            [],
-            [DX_ENV_APPEND(GENERATE_MAN, YES)],
-            [DX_ENV_APPEND(GENERATE_MAN, NO)])
-
-# RTF file generation:
-DX_ARG_ABLE(rtf, [generate doxygen RTF documentation],
-            [DX_CHECK_DEPEND(doc, 1)],
-            [DX_CLEAR_DEPEND(doc, 1)],
-            [],
-            [DX_ENV_APPEND(GENERATE_RTF, YES)],
-            [DX_ENV_APPEND(GENERATE_RTF, NO)])
-
-# XML file generation:
-DX_ARG_ABLE(xml, [generate doxygen XML documentation],
-            [DX_CHECK_DEPEND(doc, 1)],
-            [DX_CLEAR_DEPEND(doc, 1)],
-            [],
-            [DX_ENV_APPEND(GENERATE_XML, YES)],
-            [DX_ENV_APPEND(GENERATE_XML, NO)])
-
-# (Compressed) HTML help generation:
-DX_ARG_ABLE(chm, [generate doxygen compressed HTML help documentation],
-            [DX_CHECK_DEPEND(doc, 1)],
-            [DX_CLEAR_DEPEND(doc, 1)],
-            [DX_REQUIRE_PROG([DX_HHC], hhc)],
-            [DX_ENV_APPEND(HHC_PATH, $DX_HHC)
-             DX_ENV_APPEND(GENERATE_HTML, YES)
-             DX_ENV_APPEND(GENERATE_HTMLHELP, YES)],
-            [DX_ENV_APPEND(GENERATE_HTMLHELP, NO)])
-
-# Separate CHI file generation.
-DX_ARG_ABLE(chi, [generate doxygen separate compressed HTML help index file],
-            [DX_CHECK_DEPEND(chm, 1)],
-            [DX_CLEAR_DEPEND(chm, 1)],
-            [],
-            [DX_ENV_APPEND(GENERATE_CHI, YES)],
-            [DX_ENV_APPEND(GENERATE_CHI, NO)])
-
-# Plain HTML pages generation:
-DX_ARG_ABLE(html, [generate doxygen plain HTML documentation],
-            [DX_CHECK_DEPEND(doc, 1) DX_CHECK_DEPEND(chm, 0)],
-            [DX_CLEAR_DEPEND(doc, 1) DX_CLEAR_DEPEND(chm, 0)],
-            [],
-            [DX_ENV_APPEND(GENERATE_HTML, YES)],
-            [DX_TEST_FEATURE(chm) || DX_ENV_APPEND(GENERATE_HTML, NO)])
-
-# PostScript file generation:
-DX_ARG_ABLE(ps, [generate doxygen PostScript documentation],
-            [DX_CHECK_DEPEND(doc, 1)],
-            [DX_CLEAR_DEPEND(doc, 1)],
-            [DX_REQUIRE_PROG([DX_LATEX], latex)
-             DX_REQUIRE_PROG([DX_MAKEINDEX], makeindex)
-             DX_REQUIRE_PROG([DX_DVIPS], dvips)
-             DX_REQUIRE_PROG([DX_EGREP], egrep)])
-
-# PDF file generation:
-DX_ARG_ABLE(pdf, [generate doxygen PDF documentation],
-            [DX_CHECK_DEPEND(doc, 1)],
-            [DX_CLEAR_DEPEND(doc, 1)],
-            [DX_REQUIRE_PROG([DX_PDFLATEX], pdflatex)
-             DX_REQUIRE_PROG([DX_MAKEINDEX], makeindex)
-             DX_REQUIRE_PROG([DX_EGREP], egrep)])
-
-# LaTeX generation for PS and/or PDF:
-if DX_TEST_FEATURE(ps) || DX_TEST_FEATURE(pdf); then
-    AM_CONDITIONAL(DX_COND_latex, :)
-    DX_ENV_APPEND(GENERATE_LATEX, YES)
-else
-    AM_CONDITIONAL(DX_COND_latex, false)
-    DX_ENV_APPEND(GENERATE_LATEX, NO)
-fi
-
-# Paper size for PS and/or PDF:
-AC_ARG_VAR(DOXYGEN_PAPER_SIZE,
-           [a4wide (default), a4, letter, legal or executive])
-case "$DOXYGEN_PAPER_SIZE" in
-#(
-"")
-    AC_SUBST(DOXYGEN_PAPER_SIZE, "")
-;; #(
-a4wide|a4|letter|legal|executive)
-    DX_ENV_APPEND(PAPER_SIZE, $DOXYGEN_PAPER_SIZE)
-;; #(
-*)
-    AC_MSG_ERROR([unknown DOXYGEN_PAPER_SIZE='$DOXYGEN_PAPER_SIZE'])
-;;
-esac
-
-#For debugging:
-#echo DX_FLAG_doc=$DX_FLAG_doc
-#echo DX_FLAG_dot=$DX_FLAG_dot
-#echo DX_FLAG_man=$DX_FLAG_man
-#echo DX_FLAG_html=$DX_FLAG_html
-#echo DX_FLAG_chm=$DX_FLAG_chm
-#echo DX_FLAG_chi=$DX_FLAG_chi
-#echo DX_FLAG_rtf=$DX_FLAG_rtf
-#echo DX_FLAG_xml=$DX_FLAG_xml
-#echo DX_FLAG_pdf=$DX_FLAG_pdf
-#echo DX_FLAG_ps=$DX_FLAG_ps
-#echo DX_ENV=$DX_ENV
-])

+ 0 - 49
src/contrib/hedwig/client/src/main/cpp/scripts/log4cxx.conf

@@ -1,49 +0,0 @@
-#
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# 
-#
-
-log4j.appender.rootAppender=org.apache.log4j.ConsoleAppender
-log4j.appender.rootAppender.layout=org.apache.log4j.BasicLayout
-
-#log4j.appender.hedwig=org.apache.log4j.RollingFileAppender
-log4j.appender.hedwig=org.apache.log4j.ConsoleAppender
-#log4j.appender.hedwig.fileName=./testLog.log
-log4j.appender.hedwig.layout=org.apache.log4j.PatternLayout
-log4j.appender.hedwig.layout.ConversionPattern=[%d{%H:%M:%S.%l}] %t %c %p - %m%n
-log4j.appender.hedwig.layout=org.apache.log4j.PatternLayout
-log4j.appender.hedwig.layout.ConversionPattern=%.5m%n
-
-log4j.appender.hedwigtest=org.apache.log4j.ConsoleAppender
-#log4j.appender.hedwig.fileName=./testLog.log
-log4j.appender.hedwigtest.layout=org.apache.log4j.PatternLayout
-log4j.appender.hedwigtest.layout.ConversionPattern=[%d{%H:%M:%S.%l}] %c %p - %m%n
-log4j.appender.hedwigtest.layout=org.apache.log4j.PatternLayout
-log4j.appender.hedwigtest.layout.ConversionPattern=%.5m%n
-
-# category
-log4j.category.hedwig=DEBUG, hedwig
-log4j.rootCategory=DEBUG
-
-#log4j.category.hedwig.channel=ERROR
-log4j.category.hedwig.util=ERROR
-log4j.category.hedwigtest.servercontrol=ERROR
-
-log4j.category.hedwigtest=DEBUG, hedwigtest
-log4j.rootCategory=DEBUG

+ 0 - 64
src/contrib/hedwig/client/src/main/cpp/scripts/network-delays.sh

@@ -1,64 +0,0 @@
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-setup_delays() {
-
-    UNAME=`uname -s`
-
-    echo "Setting delay to ${1}ms"
-    case "$UNAME" in
-	Darwin|FreeBSD)
-	    sudo ipfw pipe 1 config delay ${1}ms
-	    sudo ipfw add pipe 1 dst-port 12349 
-	    sudo ipfw add pipe 1 dst-port 12350
-	    sudo ipfw add pipe 1 src-port 12349 
-	    sudo ipfw add pipe 1 src-port 12350 
-            ;;
-	Linux)
-	    sudo tc qdisc add dev lo root handle 1: prio
-	    sudo tc qdisc add dev lo parent 1:3 handle 30: netem delay ${1}ms 
-	    sudo tc filter add dev lo protocol ip parent 1:0 prio 3 u32 match ip dport 12349 0xffff flowid 1:3
-	    sudo tc filter add dev lo protocol ip parent 1:0 prio 3 u32 match ip dport 12350 0xffff flowid 1:3
-	    sudo tc filter add dev lo protocol ip parent 1:0 prio 3 u32 match ip sport 12349 0xffff flowid 1:3
-	    sudo tc filter add dev lo protocol ip parent 1:0 prio 3 u32 match ip sport 12350 0xffff flowid 1:3
-	    ;;
-	*)
-	    echo "Unknown system type, $UNAME, only Linux, Darwin & FreeBSD supported"
-	    ;;
-    esac
-}
-
-clear_delays() {
-    UNAME=`uname -s`
-
-    case "$UNAME" in
-	Darwin|FreeBSD)
-	    echo "Flushing ipfw"
-	    sudo ipfw -f -q flush
-            ;;
-	Linux)
-	    echo "Clearing delay"
-	    sudo tc qdisc del dev lo root
-	    ;;
-	*)
-	    echo "Unknown system type, $UNAME, only Linux, Darwin & FreeBSD supported"
-	    ;;
-    esac
-}
-
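The deleted helper above injects artificial latency on the Hedwig test ports (12349/12350), using ipfw on Darwin/FreeBSD and tc/netem on Linux. A hedged usage sketch, assuming a hypothetical test binary name, would be:

    # Hedged sketch only: ./hedwigtest is a hypothetical test binary, not a
    # name taken from this repository; the functions are the ones defined in
    # the deleted script above.
    . ./network-delays.sh
    setup_delays 100      # add 100 ms of delay on ports 12349/12350
    ./hedwigtest          # run a latency-sensitive test under the delay
    clear_delays          # remove the ipfw pipe / tc qdisc again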

+ 0 - 49
src/contrib/hedwig/client/src/main/cpp/scripts/server-control.sh

@@ -1,49 +0,0 @@
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-HEDWIGBASE=../../../../..
-
-HEDWIGJAR=`ls $HEDWIGBASE/server/target/server-*-with-dependencies.jar`
-if [ ! $? -eq 0 ]; then
-    echo -e "\n\nCould not find server-VERSION-with-dependencies.jar.\nYou need to build the java part of hedwig.\nRun mvn package in the toplevel hedwig directory.\n\n"
-    exit 1;
-fi
-
-HEDWIGSERVERTESTS=$HEDWIGBASE/server/target/test-classes/
-if [ ! -e $HEDWIGSERVERTESTS ]; then
-    echo -e "\n\nThe hedwig java server tests need to be built.\n\n"
-    exit 1;
-fi
-
-export CP=.:$HEDWIGJAR:$HEDWIGSERVERTESTS
-
-start_control_server() {
-    if [ -e server-control.pid ]; then
-	kill -9 `cat server-control.pid`
-	rm server-control.pid
-    fi
-    java -cp $CP  -Dlog4j.configuration=log4j.properties org.apache.hedwig.ServerControlDaemon  <&-  1> servercontrol.out  2>&1  &
-    echo $! > server-control.pid
-    sleep 5
-}
-
-stop_control_server() {
-    kill -9 `cat server-control.pid`
-    rm server-control.pid
-}

Some files were not shown because too many files changed in this diff