
HADOOP-3111. Remove HBase from Hadoop contrib

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/branch-0.16@642088 13f79535-47bb-0310-9956-ffa450edef68
Jim Kellerman, 17 years ago
parent commit 0d8cabdba0
100 changed files with 2 additions and 24386 deletions
  1. CHANGES.txt (+2 -0)
  2. build.xml (+0 -2)
  3. src/contrib/hbase/CHANGES.txt (+0 -434)
  4. src/contrib/hbase/NOTICE.txt (+0 -10)
  5. src/contrib/hbase/README.txt (+0 -1)
  6. src/contrib/hbase/bin/hbase (+0 -245)
  7. src/contrib/hbase/bin/hbase-config.sh (+0 -100)
  8. src/contrib/hbase/bin/hbase-daemon.sh (+0 -157)
  9. src/contrib/hbase/bin/hbase-daemons.sh (+0 -46)
  10. src/contrib/hbase/bin/regionservers.sh (+0 -78)
  11. src/contrib/hbase/bin/start-hbase.sh (+0 -45)
  12. src/contrib/hbase/bin/stop-hbase.sh (+0 -33)
  13. src/contrib/hbase/build-webapps.xml (+0 -78)
  14. src/contrib/hbase/build.xml (+0 -176)
  15. src/contrib/hbase/conf/hbase-default.xml (+0 -239)
  16. src/contrib/hbase/conf/hbase-env.sh (+0 -34)
  17. src/contrib/hbase/conf/hbase-site.xml (+0 -25)
  18. src/contrib/hbase/conf/regionservers (+0 -1)
  19. src/contrib/hbase/lib/commons-math-1.1.jar (BIN)
  20. src/contrib/hbase/lib/jline-0.9.91.jar (BIN)
  21. src/contrib/hbase/lib/libthrift-r746.jar (BIN)
  22. src/contrib/hbase/lib/lucene-core-2.2.0.jar (BIN)
  23. src/contrib/hbase/src/examples/thrift/DemoClient.cpp (+0 -238)
  24. src/contrib/hbase/src/examples/thrift/DemoClient.java (+0 -276)
  25. src/contrib/hbase/src/examples/thrift/DemoClient.rb (+0 -178)
  26. src/contrib/hbase/src/examples/thrift/Makefile (+0 -18)
  27. src/contrib/hbase/src/examples/thrift/README.txt (+0 -15)
  28. src/contrib/hbase/src/java/org/apache/hadoop/hbase/BloomFilterDescriptor.java (+0 -215)
  29. src/contrib/hbase/src/java/org/apache/hadoop/hbase/CacheFlushListener.java (+0 -36)
  30. src/contrib/hbase/src/java/org/apache/hadoop/hbase/Chore.java (+0 -98)
  31. src/contrib/hbase/src/java/org/apache/hadoop/hbase/DroppedSnapshotException.java (+0 -32)
  32. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java (+0 -280)
  33. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseAdmin.java (+0 -546)
  34. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java (+0 -51)
  35. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java (+0 -352)
  36. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnection.java (+0 -95)
  37. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java (+0 -769)
  38. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConstants.java (+0 -184)
  39. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HInternalScannerInterface.java (+0 -35)
  40. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java (+0 -628)
  41. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogEdit.java (+0 -138)
  42. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java (+0 -159)
  43. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java (+0 -3262)
  44. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMasterInterface.java (+0 -114)
  45. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMasterRegionInterface.java (+0 -55)
  46. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java (+0 -401)
  47. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java (+0 -214)
  48. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (+0 -1920)
  49. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java (+0 -333)
  50. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInterface.java (+0 -229)
  51. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionLocation.java (+0 -94)
  52. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (+0 -1770)
  53. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HScannerInterface.java (+0 -55)
  54. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java (+0 -179)
  55. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerInfo.java (+0 -158)
  56. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerLoad.java (+0 -136)
  57. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (+0 -2532)
  58. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java (+0 -878)
  59. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java (+0 -353)
  60. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java (+0 -1222)
  61. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java (+0 -259)
  62. src/contrib/hbase/src/java/org/apache/hadoop/hbase/InvalidColumnNameException.java (+0 -41)
  63. src/contrib/hbase/src/java/org/apache/hadoop/hbase/LeaseListener.java (+0 -34)
  64. src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java (+0 -377)
  65. src/contrib/hbase/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java (+0 -307)
  66. src/contrib/hbase/src/java/org/apache/hadoop/hbase/LockException.java (+0 -41)
  67. src/contrib/hbase/src/java/org/apache/hadoop/hbase/LogRollListener.java (+0 -29)
  68. src/contrib/hbase/src/java/org/apache/hadoop/hbase/MasterNotRunningException.java (+0 -41)
  69. src/contrib/hbase/src/java/org/apache/hadoop/hbase/NoServerForRegionException.java (+0 -42)
  70. src/contrib/hbase/src/java/org/apache/hadoop/hbase/NotServingRegionException.java (+0 -44)
  71. src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionServerRunningException.java (+0 -44)
  72. src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionUnavailableListener.java (+0 -44)
  73. src/contrib/hbase/src/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java (+0 -106)
  74. src/contrib/hbase/src/java/org/apache/hadoop/hbase/Shell.java (+0 -141)
  75. src/contrib/hbase/src/java/org/apache/hadoop/hbase/TableExistsException.java (+0 -38)
  76. src/contrib/hbase/src/java/org/apache/hadoop/hbase/TableNotDisabledException.java (+0 -41)
  77. src/contrib/hbase/src/java/org/apache/hadoop/hbase/TableNotFoundException.java (+0 -37)
  78. src/contrib/hbase/src/java/org/apache/hadoop/hbase/UnknownScannerException.java (+0 -42)
  79. src/contrib/hbase/src/java/org/apache/hadoop/hbase/WrongRegionException.java (+0 -42)
  80. src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java (+0 -67)
  81. src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java (+0 -41)
  82. src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java (+0 -159)
  83. src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java (+0 -340)
  84. src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java (+0 -125)
  85. src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java (+0 -313)
  86. src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java (+0 -148)
  87. src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java (+0 -181)
  88. src/contrib/hbase/src/java/org/apache/hadoop/hbase/generated/master/hql_jsp.java (+0 -93)
  89. src/contrib/hbase/src/java/org/apache/hadoop/hbase/generated/master/master_jsp.java (+0 -169)
  90. src/contrib/hbase/src/java/org/apache/hadoop/hbase/generated/regionserver/regionserver_jsp.java (+0 -100)
  91. src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/AlterCommand.java (+0 -248)
  92. src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/BasicCommand.java (+0 -100)
  93. src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/ClearCommand.java (+0 -66)
  94. src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/Command.java (+0 -45)
  95. src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/CommandFactory.java (+0 -27)
  96. src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/CreateCommand.java (+0 -93)
  97. src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/DeleteCommand.java (+0 -131)
  98. src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/DescCommand.java (+0 -90)
  99. src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/DisableCommand.java (+0 -68)
  100. src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/DropCommand.java (+0 -80)

+ 2 - 0
CHANGES.txt

@@ -55,6 +55,8 @@ Release 0.16.2 - Unreleased
     available to 0.16. Clearly, this fix was not checked into trunk.
     (Alejandro Abdelnur and Mukund Madhugiri via cdouglas)
 
+    HADOOP-3111. Remove HBase from Hadoop contrib
+
 Release 0.16.1 - 2008-03-13
 
   INCOMPATIBLE CHANGES

+ 0 - 2
build.xml

@@ -677,7 +677,6 @@
 
 
     	<packageset dir="src/contrib/streaming/src/java"/>
     	<packageset dir="src/contrib/data_join/src/java"/>
-    	<packageset dir="src/contrib/hbase/src/java"/>
 
         <link href="${javadoc.link.java}"/>
 
@@ -694,7 +693,6 @@
 
        <group title="contrib: Streaming" packages="org.apache.hadoop.streaming*"/>
        <group title="contrib: DataJoin" packages="org.apache.hadoop.contrib.utils.join*"/>
-       <group title="contrib: HBase" packages="org.apache.hadoop.hbase*"/>
 
     </javadoc>
   </target>

+ 0 - 434
src/contrib/hbase/CHANGES.txt

@@ -1,434 +0,0 @@
-HBase Change Log
-
-Release 0.16.0 - 2008-02-04
-
-  INCOMPATIBLE CHANGES
-   HADOOP-2056 A table with row keys containing colon fails to split regions
-   HADOOP-2079 Fix generated HLog, HRegion names
-   HADOOP-2495 Minor performance improvements: Slim-down BatchOperation, etc. 
-   HADOOP-2506 Remove the algebra package
-   HADOOP-2519 Performance improvements: Customized RPC serialization
-   HADOOP-2478 Restructure how HBase lays out files in the file system (phase 1)
-               (test input data)
-   HADOOP-2478 Restructure how HBase lays out files in the file system (phase 2)
-               Includes migration tool org.apache.hadoop.hbase.util.Migrate
-   HADOOP-2558 org.onelab.filter.BloomFilter class uses 8X the memory it should
-               be using
-
-  NEW FEATURES
-    HADOOP-2061 Add new Base64 dialects
-    HADOOP-2084 Add a LocalHBaseCluster
-    HADOOP-2068 RESTful interface (Bryan Duxbury via Stack)
-    HADOOP-2316 Run REST servlet outside of master
-                (Bryan Duxbury & Stack)
-    HADOOP-1550 No means of deleting a'row' (Bryan Duxbuery via Stack)
-    HADOOP-2384 Delete all members of a column family on a specific row
-                (Bryan Duxbury via Stack)
-    HADOOP-2395 Implement "ALTER TABLE ... CHANGE column" operation
-                (Bryan Duxbury via Stack)
-    HADOOP-2240 Truncate for hbase (Edward Yoon via Stack)
-    HADOOP-2389 Provide multiple language bindings for HBase (Thrift)
-                (David Simpson via Stack)
-
-  OPTIMIZATIONS
-   HADOOP-2479 Save on number of Text object creations
-   HADOOP-2485 Make mapfile index interval configurable (Set default to 32
-               instead of 128)
-   HADOOP-2553 Don't make Long objects calculating hbase type hash codes
-   HADOOP-2377 Holding open MapFile.Readers is expensive, so use less of them
-   HADOOP-2407 Keeping MapFile.Reader open is expensive: Part 2
-   HADOOP-2533 Performance: Scanning, just creating MapWritable in next
-               consumes >20% CPU
-   HADOOP-2443 Keep lazy cache of regions in client rather than an
-               'authoritative' list (Bryan Duxbury via Stack)
-   HADOOP-2600 Performance: HStore.getRowKeyAtOrBefore should use
-               MapFile.Reader#getClosest (before)
-               (Bryan Duxbury via Stack)
-
-  BUG FIXES
-   HADOOP-2059 In tests, exceptions in min dfs shutdown should not fail test
-               (e.g. nightly #272)
-   HADOOP-2064 TestSplit assertion and NPE failures (Patch build #952 and #953)
-   HADOOP-2124 Use of `hostname` does not work on Cygwin in some cases
-   HADOOP-2083 TestTableIndex failed in #970 and #956
-   HADOOP-2109 Fixed race condition in processing server lease timeout.
-   HADOOP-2137 hql.jsp : The character 0x19 is not valid
-   HADOOP-2109 Fix another race condition in processing dead servers,
-               Fix error online meta regions: was using region name and not
-               startKey as key for map.put. Change TestRegionServerExit to
-               always kill the region server for the META region. This makes
-               the test more deterministic and getting META reassigned was
-               problematic.
-   HADOOP-2155 Method expecting HBaseConfiguration throws NPE when given Configuration
-   HADOOP-2156 BufferUnderflowException for un-named HTableDescriptors
-   HADOOP-2161 getRow() is orders of magnitudes slower than get(), even on rows
-               with one column (Clint Morgan and Stack)
-   HADOOP-2040 Hudson hangs AFTER test has finished
-   HADOOP-2274 Excess synchronization introduced by HADOOP-2139 negatively
-               impacts performance
-   HADOOP-2196 Fix how hbase sits in hadoop 'package' product
-   HADOOP-2276 Address regression caused by HADOOP-2274, fix HADOOP-2173 (When
-               the master times out a region servers lease, the region server
-               may not restart)
-   HADOOP-2253 getRow can return HBASE::DELETEVAL cells
-               (Bryan Duxbury via Stack)
-   HADOOP-2295 Fix assigning a region to multiple servers
-   HADOOP-2234 TableInputFormat erroneously aggregates map values
-   HADOOP-2308 null regioninfo breaks meta scanner
-   HADOOP-2304 Abbreviated symbol parsing error of dir path in jar command
-               (Edward Yoon via Stack)
-   HADOOP-2320 Committed TestGet2 is managled (breaks build).
-   HADOOP-2322 getRow(row, TS) client interface not properly connected
-   HADOOP-2309 ConcurrentModificationException doing get of all region start keys
-   HADOOP-2321 TestScanner2 does not release resources which sometimes cause the
-               test to time out
-   HADOOP-2315 REST servlet doesn't treat / characters in row key correctly
-               (Bryan Duxbury via Stack)
-   HADOOP-2332 Meta table data selection in Hbase Shell
-               (Edward Yoon via Stack)
-   HADOOP-2347 REST servlet not thread safe but run in a threaded manner
-               (Bryan Duxbury via Stack)
-   HADOOP-2365 Result of HashFunction.hash() contains all identical values
-   HADOOP-2362 Leaking hdfs file handle on region split
-   HADOOP-2338 Fix NullPointerException in master server.
-   HADOOP-2380 REST servlet throws NPE when any value node has an empty string
-               (Bryan Duxbury via Stack)
-   HADOOP-2350 Scanner api returns null row names, or skips row names if
-               different column families do not have entries for some rows
-   HADOOP-2283 AlreadyBeingCreatedException (Was: Stuck replay of failed
-               regionserver edits)
-   HADOOP-2392 TestRegionServerExit has new failure mode since HADOOP-2338
-   HADOOP-2324 Fix assertion failures in TestTableMapReduce
-   HADOOP-2396 NPE in HMaster.cancelLease
-   HADOOP-2397 The only time that a meta scanner should try to recover a log is
-               when the master is starting
-   HADOOP-2417 Fix critical shutdown problem introduced by HADOOP-2338
-   HADOOP-2418 Fix assertion failures in TestTableMapReduce, TestTableIndex,
-               and TestTableJoinMapReduce
-   HADOOP-2414 Fix ArrayIndexOutOfBoundsException in bloom filters.
-   HADOOP-2430 Master will not shut down if there are no active region servers
-   HADOOP-2199 Add tools for going from hregion filename to region name in logs
-   HADOOP-2441 Fix build failures in TestHBaseCluster
-   HADOOP-2451 End key is incorrectly assigned in many region splits
-   HADOOP-2455 Error in Help-string of CREATE command (Edward Yoon via Stack)
-   HADOOP-2465 When split parent regions are cleaned up, not all the columns are
-               deleted
-   HADOOP-2468 TestRegionServerExit failed in Hadoop-Nightly #338
-   HADOOP-2467 scanner truncates resultset when > 1 column families
-   HADOOP-2503 REST Insert / Select encoding issue (Bryan Duxbury via Stack)
-   HADOOP-2505 formatter classes missing apache license
-   HADOOP-2504 REST servlet method for deleting a scanner was not properly
-               mapped (Bryan Duxbury via Stack)
-   HADOOP-2507 REST servlet does not properly base64 row keys and column names
-               (Bryan Duxbury via Stack)
-   HADOOP-2530 Missing type in new hbase custom RPC serializer
-   HADOOP-2490 Failure in nightly #346 (Added debugging of hudson failures).
-   HADOOP-2558 fixes for build up on hudson (part 1, part 2, part 3, part 4)
-   HADOOP-2500 Unreadable region kills region servers
-   HADOOP-2579 Initializing a new HTable object against a nonexistent table
-               throws a NoServerForRegionException instead of a
-               TableNotFoundException when a different table has been created
-               previously (Bryan Duxbury via Stack)
-   HADOOP-2587 Splits blocked by compactions cause region to be offline for
-               duration of compaction. 
-   HADOOP-2592 Scanning, a region can let out a row that its not supposed
-               to have
-   HADOOP-2493 hbase will split on row when the start and end row is the
-               same cause data loss (Bryan Duxbury via Stack)
-   HADOOP-2629 Shell digests garbage without complaint
-   HADOOP-2619 Compaction errors after a region splits
-   HADOOP-2621 Memcache flush flushing every 60 secs with out considering
-               the max memcache size
-   HADOOP-2584 Web UI displays an IOException instead of the Tables
-   HADOOP-2650 Remove Writables.clone and use WritableUtils.clone from
-               hadoop instead
-   HADOOP-2668 Documentation and improved logging so fact that hbase now
-               requires migration comes as less of a surprise
-   HADOOP-2686 Removed tables stick around in .META.
-   HADOOP-2688 IllegalArgumentException processing a shutdown stops
-               server going down and results in millions of lines of output
-   HADOOP-2706 HBase Shell crash
-   HADOOP-2712 under load, regions won't split
-   HADOOP-2675 Options not passed to rest/thrift
-   HADOOP-2722 Prevent unintentional thread exit in region server and master
-   HADOOP-2718 Copy Constructor HBaseConfiguration(Configuration) will override
-               hbase configurations if argumant is not an instance of
-               HBaseConfiguration.
-   HADOOP-2753 Back out 2718; programmatic config works but hbase*xml conf
-               is overridden
-   HADOOP-2718 Copy Constructor HBaseConfiguration(Configuration) will override
-               hbase configurations if argumant is not an instance of
-               HBaseConfiguration (Put it back again).
-   HADOOP-2631 2443 breaks HTable.getStartKeys when there is more than one
-               table or table you are enumerating isn't the first table
-   Delete empty file: src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/
-               TableOutputCollector.java per Nigel Daley
-   HADOOP-2731 Under load, regions become extremely large and eventually cause
-               region servers to become unresponsive
-   HADOOP-2773 Master marks region offline when it is recovering from a region
-               server death
-
-  IMPROVEMENTS
-   HADOOP-2401 Add convenience put method that takes writable
-               (Johan Oskarsson via Stack)
-   HADOOP-2074 Simple switch to enable DEBUG level-logging in hbase
-   HADOOP-2088 Make hbase runnable in $HADOOP_HOME/build(/contrib/hbase)
-   HADOOP-2126 Use Bob Jenkins' hash for bloom filters
-   HADOOP-2157 Make Scanners implement Iterable
-   HADOOP-2176 Htable.deleteAll documentation is ambiguous
-   HADOOP-2139 (phase 1) Increase parallelism in region servers.
-   HADOOP-2267 [Hbase Shell] Change the prompt's title from 'hbase' to 'hql'.
-               (Edward Yoon via Stack)
-   HADOOP-2139 (phase 2) Make region server more event driven
-   HADOOP-2289 Useless efforts of looking for the non-existant table in select
-               command.
-               (Edward Yoon via Stack)
-   HADOOP-2257 Show a total of all requests and regions on the web ui
-               (Paul Saab via Stack)
-   HADOOP-2261 HTable.abort no longer throws exception if there is no active update.
-   HADOOP-2287 Make hbase unit tests take less time to complete.
-   HADOOP-2262 Retry n times instead of n**2 times.
-   HADOOP-1608 Relational Algrebra Operators
-               (Edward Yoon via Stack)
-   HADOOP-2198 HTable should have method to return table metadata
-   HADOOP-2296 hbase shell: phantom columns show up from select command
-   HADOOP-2297 System.exit() Handling in hbase shell jar command
-               (Edward Yoon via Stack)
-   HADOOP-2224 Add HTable.getRow(ROW, ts)
-               (Bryan Duxbury via Stack)
-   HADOOP-2339 Delete command with no WHERE clause
-               (Edward Yoon via Stack)
-   HADOOP-2299 Support inclusive scans (Bryan Duxbury via Stack)
-   HADOOP-2333 Client side retries happen at the wrong level
-   HADOOP-2357 Compaction cleanup; less deleting + prevent possible file leaks
-   HADOOP-2392 TestRegionServerExit has new failure mode since HADOOP-2338
-   HADOOP-2370 Allow column families with an unlimited number of versions
-               (Edward Yoon via Stack)
-   HADOOP-2047 Add an '--master=X' and '--html' command-line parameters to shell
-               (Edward Yoon via Stack)
-   HADOOP-2351 If select command returns no result, it doesn't need to show the
-               header information (Edward Yoon via Stack)
-   HADOOP-2285 Add being able to shutdown regionservers (Dennis Kubes via Stack)
-   HADOOP-2458 HStoreFile.writeSplitInfo should just call 
-               HStoreFile.Reference.write
-   HADOOP-2471 Add reading/writing MapFile to PerformanceEvaluation suite
-   HADOOP-2522 Separate MapFile benchmark from PerformanceEvaluation
-               (Tom White via Stack)
-   HADOOP-2502 Insert/Select timestamp, Timestamp data type in HQL
-               (Edward Yoon via Stack)
-   HADOOP-2450 Show version (and svn revision) in hbase web ui
-   HADOOP-2472 Range selection using filter (Edward Yoon via Stack)
-   HADOOP-2548 Make TableMap and TableReduce generic
-               (Frederik Hedberg via Stack)
-   HADOOP-2557 Shell count function (Edward Yoon via Stack)
-   HADOOP-2589 Change an classes/package name from Shell to hql
-               (Edward Yoon via Stack)
-   HADOOP-2545 hbase rest server should be started with hbase-daemon.sh
-   HADOOP-2525 Same 2 lines repeated 11 million times in HMaster log upon
-               HMaster shutdown
-   HADOOP-2616 hbase not spliting when the total size of region reaches max
-               region size * 1.5
-   HADOOP-2643 Make migration tool smarter.
-   
-Release 0.15.1
-Branch 0.15
-
-  INCOMPATIBLE CHANGES
-    HADOOP-1931 Hbase scripts take --ARG=ARG_VALUE when should be like hadoop
-                and do ---ARG ARG_VALUE
-
-  NEW FEATURES
-    HADOOP-1768 FS command using Hadoop FsShell operations
-                (Edward Yoon via Stack)
-    HADOOP-1784 Delete: Fix scanners and gets so they work properly in presence
-                of deletes. Added a deleteAll to remove all cells equal to or
-                older than passed timestamp.  Fixed compaction so deleted cells
-                do not make it out into compacted output.  Ensure also that
-                versions > column max are dropped compacting.
-    HADOOP-1720 Addition of HQL (Hbase Query Language) support in Hbase Shell.
-                The old shell syntax has been replaced by HQL, a small SQL-like
-                set of operators, for creating, altering, dropping, inserting,
-                deleting, and selecting, etc., data in hbase.
-                (Inchul Song and Edward Yoon via Stack)
-    HADOOP-1913 Build a Lucene index on an HBase table
-                (Ning Li via Stack)
-    HADOOP-1957 Web UI with report on cluster state and basic browsing of tables
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-    HADOOP-1527 Region server won't start because logdir exists
-    HADOOP-1723 If master asks region server to shut down, by-pass return of
-                shutdown message
-    HADOOP-1729 Recent renaming or META tables breaks hbase shell
-    HADOOP-1730 unexpected null value causes META scanner to exit (silently)
-    HADOOP-1747 On a cluster, on restart, regions multiply assigned
-    HADOOP-1776 Fix for sporadic compaction failures closing and moving
-                compaction result
-    HADOOP-1780 Regions are still being doubly assigned
-    HADOOP-1797 Fix NPEs in MetaScanner constructor
-    HADOOP-1799 Incorrect classpath in binary version of Hadoop
-    HADOOP-1805 Region server hang on exit
-    HADOOP-1785 TableInputFormat.TableRecordReader.next has a bug
-                (Ning Li via Stack)
-    HADOOP-1800 output should default utf8 encoding
-    HADOOP-1801 When hdfs is yanked out from under hbase, hbase should go down gracefully
-    HADOOP-1813 OOME makes zombie of region server
-    HADOOP-1814	TestCleanRegionServerExit fails too often on Hudson
-    HADOOP-1820 Regionserver creates hlogs without bound
-                (reverted 2007/09/25) (Fixed 2007/09/30)
-    HADOOP-1821 Replace all String.getBytes() with String.getBytes("UTF-8")
-    HADOOP-1832 listTables() returns duplicate tables
-    HADOOP-1834 Scanners ignore timestamp passed on creation
-    HADOOP-1847 Many HBase tests do not fail well.
-    HADOOP-1847 Many HBase tests do not fail well. (phase 2)
-    HADOOP-1870 Once file system failure has been detected, don't check it again
-                and get on with shutting down the hbase cluster.
-    HADOOP-1888 NullPointerException in HMemcacheScanner (reprise)
-    HADOOP-1903 Possible data loss if Exception happens between snapshot and
-                flush to disk.
-    HADOOP-1920 Wrapper scripts broken when hadoop in one location and hbase in
-                another
-    HADOOP-1923, HADOOP-1924 a) tests fail sporadically because set up and tear
-                 down is inconsistent b) TestDFSAbort failed in nightly #242
-    HADOOP-1929 Add hbase-default.xml to hbase jar
-    HADOOP-1941 StopRowFilter throws NPE when passed null row
-    HADOOP-1966 Make HBase unit tests more reliable in the Hudson environment.
-    HADOOP-1975 HBase tests failing with java.lang.NumberFormatException
-    HADOOP-1990 Regression test instability affects nightly and patch builds
-    HADOOP-1996 TestHStoreFile fails on windows if run multiple times
-    HADOOP-1937 When the master times out a region server's lease, it is too 
-                aggressive in reclaiming the server's log.
-    HADOOP-2004 webapp hql formatting bugs 
-    HADOOP_2011 Make hbase daemon scripts take args in same order as hadoop
-                daemon scripts
-    HADOOP-2017 TestRegionServerAbort failure in patch build #903 and
-                nightly #266
-    HADOOP-2029 TestLogRolling fails too often in patch and nightlies
-    HADOOP-2038 TestCleanRegionExit failed in patch build #927
-
-  IMPROVEMENTS
-    HADOOP-1737 Make HColumnDescriptor data publically members settable
-    HADOOP-1746 Clean up findbugs warnings
-    HADOOP-1757 Bloomfilters: single argument constructor, use enum for bloom
-                filter types
-    HADOOP-1760 Use new MapWritable and SortedMapWritable classes from
-                org.apache.hadoop.io
-    HADOOP-1793 (Phase 1) Remove TestHClient (Phase2) remove HClient.
-    HADOOP-1794 Remove deprecated APIs
-    HADOOP-1802 Startup scripts should wait until hdfs as cleared 'safe mode'
-    HADOOP-1833 bin/stop_hbase.sh returns before it completes
-                (Izaak Rubin via Stack) 
-    HADOOP-1835 Updated Documentation for HBase setup/installation
-                (Izaak Rubin via Stack)
-    HADOOP-1868 Make default configuration more responsive
-    HADOOP-1884 Remove useless debugging log messages from hbase.mapred
-    HADOOP-1856 Add Jar command to hbase shell using Hadoop RunJar util
-                (Edward Yoon via Stack)
-    HADOOP-1928 Have master pass the regionserver the filesystem to use
-    HADOOP-1789 Output formatting
-    HADOOP-1960 If a region server cannot talk to the master before its lease
-                times out, it should shut itself down
-    HADOOP-2035 Add logo to webapps
-
-
-Below are the list of changes before 2007-08-18
-
-  1. HADOOP-1384. HBase omnibus patch. (jimk, Vuk Ercegovac, and Michael Stack)
-  2. HADOOP-1402. Fix javadoc warnings in hbase contrib. (Michael Stack)
-  3. HADOOP-1404. HBase command-line shutdown failing (Michael Stack)
-  4. HADOOP-1397. Replace custom hbase locking with 
-     java.util.concurrent.locks.ReentrantLock (Michael Stack)
-  5. HADOOP-1403. HBase reliability - make master and region server more fault
-     tolerant.
-  6. HADOOP-1418. HBase miscellaneous: unit test for HClient, client to do
-     'Performance Evaluation', etc.
-  7. HADOOP-1420, HADOOP-1423. Findbugs changes, remove reference to removed 
-     class HLocking.
-  8. HADOOP-1424. TestHBaseCluster fails with IllegalMonitorStateException. Fix
-     regression introduced by HADOOP-1397.
-  9. HADOOP-1426. Make hbase scripts executable + add test classes to CLASSPATH.
- 10. HADOOP-1430. HBase shutdown leaves regionservers up.
- 11. HADOOP-1392. Part1: includes create/delete table; enable/disable table;
-     add/remove column.
- 12. HADOOP-1392. Part2: includes table compaction by merging adjacent regions
-     that have shrunk in size.
- 13. HADOOP-1445 Support updates across region splits and compactions
- 14. HADOOP-1460 On shutdown IOException with complaint 'Cannot cancel lease
-     that is not held'
- 15. HADOOP-1421 Failover detection, split log files.
-     For the files modified, also clean up javadoc, class, field and method 
-     visibility (HADOOP-1466)
- 16. HADOOP-1479 Fix NPE in HStore#get if store file only has keys < passed key.
- 17. HADOOP-1476 Distributed version of 'Performance Evaluation' script
- 18. HADOOP-1469 Asychronous table creation
- 19. HADOOP-1415 Integrate BSD licensed bloom filter implementation.
- 20. HADOOP-1465 Add cluster stop/start scripts for hbase
- 21. HADOOP-1415 Provide configurable per-column bloom filters - part 2.
- 22. HADOOP-1498. Replace boxed types with primitives in many places.
- 23. HADOOP-1509.  Made methods/inner classes in HRegionServer and HClient protected
-     instead of private for easier extension. Also made HRegion and HRegionInfo public too.
-     Added an hbase-default.xml property for specifying what HRegionInterface extension to use
-     for proxy server connection. (James Kennedy via Jim Kellerman)
- 24. HADOOP-1534. [hbase] Memcache scanner fails if start key not present
- 25. HADOOP-1537. Catch exceptions in testCleanRegionServerExit so we can see
-     what is failing.
- 26. HADOOP-1543 [hbase] Add HClient.tableExists
- 27. HADOOP-1519 [hbase] map/reduce interface for HBase.  (Vuk Ercegovac and
-     Jim Kellerman)
- 28. HADOOP-1523 Hung region server waiting on write locks 
- 29. HADOOP-1560 NPE in MiniHBaseCluster on Windows
- 30. HADOOP-1531 Add RowFilter to HRegion.HScanner
-     Adds a row filtering interface and two implemenentations: A page scanner,
-     and a regex row/column-data matcher. (James Kennedy via Stack)
- 31. HADOOP-1566 Key-making utility
- 32. HADOOP-1415 Provide configurable per-column bloom filters. 
-     HADOOP-1466 Clean up visibility and javadoc issues in HBase.
- 33. HADOOP-1538 Provide capability for client specified time stamps in HBase
-     HADOOP-1466 Clean up visibility and javadoc issues in HBase.
- 34. HADOOP-1589 Exception handling in HBase is broken over client server connections
- 35. HADOOP-1375 a simple parser for hbase (Edward Yoon via Stack)
- 36. HADOOP-1600 Update license in HBase code
- 37. HADOOP-1589 Exception handling in HBase is broken over client server
- 38. HADOOP-1574 Concurrent creates of a table named 'X' all succeed
- 39. HADOOP-1581 Un-openable tablename bug
- 40. HADOOP-1607 [shell] Clear screen command (Edward Yoon via Stack)
- 41. HADOOP-1614 [hbase] HClient does not protect itself from simultaneous updates
- 42. HADOOP-1468 Add HBase batch update to reduce RPC overhead
- 43. HADOOP-1616 Sporadic TestTable failures
- 44. HADOOP-1615 Replacing thread notification-based queue with 
-     java.util.concurrent.BlockingQueue in HMaster, HRegionServer
- 45. HADOOP-1606 Updated implementation of RowFilterSet, RowFilterInterface
-     (Izaak Rubin via Stack)
- 46. HADOOP-1579 Add new WhileMatchRowFilter and StopRowFilter filters
-    (Izaak Rubin via Stack)
- 47. HADOOP-1637 Fix to HScanner to Support Filters, Add Filter Tests to
-     TestScanner2 (Izaak Rubin via Stack)
- 48. HADOOP-1516 HClient fails to readjust when ROOT or META redeployed on new
-     region server
- 49. HADOOP-1646 RegionServer OOME's under sustained, substantial loading by
-     10 concurrent clients
- 50. HADOOP-1468 Add HBase batch update to reduce RPC overhead (restrict batches
-     to a single row at a time)
- 51. HADOOP-1528 HClient for multiple tables (phase 1) (James Kennedy & JimK)
- 52. HADOOP-1528 HClient for multiple tables (phase 2) all HBase client side code
-     (except TestHClient and HBaseShell) have been converted to use the new client
-     side objects (HTable/HBaseAdmin/HConnection) instead of HClient.
- 53. HADOOP-1528 HClient for multiple tables - expose close table function
- 54. HADOOP-1466 Clean up warnings, visibility and javadoc issues in HBase.
- 55. HADOOP-1662 Make region splits faster
- 56. HADOOP-1678 On region split, master should designate which host should 
-     serve daughter splits. Phase 1: Master balances load for new regions and
-     when a region server fails.
- 57. HADOOP-1678 On region split, master should designate which host should 
-     serve daughter splits. Phase 2: Master assigns children of split region
-     instead of HRegionServer serving both children.
- 58. HADOOP-1710 All updates should be batch updates
- 59. HADOOP-1711 HTable API should use interfaces instead of concrete classes as
-     method parameters and return values
- 60. HADOOP-1644 Compactions should not block updates
- 60. HADOOP-1672 HBase Shell should use new client classes
-     (Edward Yoon via Stack).
- 61. HADOOP-1709 Make HRegionInterface more like that of HTable
-     HADOOP-1725 Client find of table regions should not include offlined, split parents
-

+ 0 - 10
src/contrib/hbase/NOTICE.txt

@@ -1,10 +0,0 @@
-This product includes software developed by The Apache Software
-Foundation (http://www.apache.org/).
-
-In addition, this product includes software developed by:
-
-European Commission project OneLab (http://www.one-lab.org)
-
-Udanax (http://www.udanax.org)
-
-Facebook, Inc. (http://developers.facebook.com/thrift/ -- Page includes the Thrift Software License)

+ 0 - 1
src/contrib/hbase/README.txt

@@ -1 +0,0 @@
-See http://wiki.apache.org/lucene-hadoop/Hbase

+ 0 - 245
src/contrib/hbase/bin/hbase

@@ -1,245 +0,0 @@
-#! /usr/bin/env bash
-#
-#/**
-# * Copyright 2007 The Apache Software Foundation
-# *
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-# 
-# The hbase command script.  Based on the hadoop command script putting
-# in hbase classes, libs and configurations ahead of hadoop's.
-#
-# TODO: Narrow the amount of duplicated code.
-#
-# Environment Variables:
-#
-#   JAVA_HOME        The java implementation to use.  Overrides JAVA_HOME.
-#
-#   HBASE_HEAPSIZE   The maximum amount of heap to use, in MB. 
-#                    Default is 1000.
-#
-#   HBASE_OPTS       Extra Java runtime options.
-#
-#   HBASE_CONF_DIR   Alternate conf dir. Default is ${HBASE_HOME}/conf.
-#
-#   HADOOP_CONF_DIR  Alternate conf dir. Default is ${HADOOP_HOME}/conf.
-#
-#   HADOOP_HOME      Hadoop home directory.
-#
-#   HADOOP_ROOT_LOGGER The root appender. Default is INFO,console
-#
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-# This will set HBASE_HOME, HADOOP_HOME, etc.
-. "$bin"/hbase-config.sh
-
-cygwin=false
-case "`uname`" in
-CYGWIN*) cygwin=true;;
-esac
-
-# if no args specified, show usage
-if [ $# = 0 ]; then
-  echo "Usage: hbase [--hadoop=hadoopdir] <command>"
-  echo "where <command> is one of:"
-  echo "  shell            run the Hbase shell"
-  echo "  master           run an Hbase HMaster node" 
-  echo "  regionserver     run an Hbase HRegionServer node" 
-  echo "  rest             run an Hbase REST server" 
-  echo "  thrift           run an Hbase Thrift server" 
-  echo "  migrate          upgrade an hbase.rootdir"
-  echo " or"
-  echo "  CLASSNAME        run the class named CLASSNAME"
-  echo "Most commands print help when invoked w/o parameters."
-  exit 1
-fi
-
-# get arguments
-COMMAND=$1
-shift
-
-# Source the hadoop-env.sh.  Will have JAVA_HOME defined. There is no
-# hbase-env.sh as yet.
-if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
-  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
-fi
-
-# some Java parameters
-if [ "$JAVA_HOME" != "" ]; then
-  #echo "run java in $JAVA_HOME"
-  JAVA_HOME=$JAVA_HOME
-fi
-  
-if [ "$JAVA_HOME" = "" ]; then
-  echo "Error: JAVA_HOME is not set."
-  exit 1
-fi
-
-JAVA=$JAVA_HOME/bin/java
-JAVA_HEAP_MAX=-Xmx1000m 
-
-# check envvars which might override default args
-if [ "$HBASE_HEAPSIZE" != "" ]; then
-  #echo "run with heapsize $HBASE_HEAPSIZE"
-  JAVA_HEAP_MAX="-Xmx""$HBASE_HEAPSIZE""m"
-  #echo $JAVA_HEAP_MAX
-fi
-
-# CLASSPATH initially contains $HBASE_CONF_DIR
-# Add HADOOP_CONF_DIR if its been defined.
-if [ ! "$HADOOP_CONF_DIR" = "" ]; then
-    CLASSPATH="${CLASSPATH}:${HADOOP_CONF_DIR}"
-fi
-CLASSPATH="${CLASSPATH}:${HBASE_CONF_DIR}"
-CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
-
-# for developers, add hbase and hadoop classes to CLASSPATH
-if [ -d "$HADOOP_HOME/build/contrib/hbase/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/contrib/hbase/classes
-fi
-if [ -d "$HADOOP_HOME/build/contrib/hbase/test" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/contrib/hbase/test
-fi
-if [ -d "$HADOOP_HOME/build/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/classes
-fi
-if [ -d "$HADOOP_HOME/build/contrib/hbase/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/contrib/hbase/
-fi
-if [ -d "$HADOOP_HOME/build/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build
-fi
-if [ -d "$HADOOP_HOME/build/test/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/test/classes
-fi
-
-# so that filenames w/ spaces are handled correctly in loops below
-IFS=
-
-# for releases, add core hbase, hadoop jar & webapps to CLASSPATH
-# Look in two places for our hbase jar.
-for f in $HBASE_HOME/hadoop-*-hbase*.jar; do
-  if [ -f $f ]; then
-    CLASSPATH=${CLASSPATH}:$f;
-  fi
-done
-for f in $HADOOP_HOME/contrib/hadoop-*-hbase*.jar; do
-  if [ -f $f ]; then
-    CLASSPATH=${CLASSPATH}:$f;
-  fi
-done
-if [ -d "$HADOOP_HOME/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HOME
-fi
-for f in $HADOOP_HOME/hadoop-*.jar; do
-  CLASSPATH=${CLASSPATH}:$f;
-done
-
-# add hbase and hadoop libs to CLASSPATH
-for f in $HBASE_HOME/lib/*.jar; do
-  CLASSPATH=${CLASSPATH}:$f;
-done
-for f in $HADOOP_HOME/lib/*.jar; do
-  CLASSPATH=${CLASSPATH}:$f;
-done
-
-for f in $HADOOP_HOME/lib/jetty-ext/*.jar; do
-  CLASSPATH=${CLASSPATH}:$f;
-done
-
-
-# default log directory & file
-# TODO: Should we log to hadoop or under hbase?
-if [ "$HADOOP_LOG_DIR" = "" ]; then
-  HADOOP_LOG_DIR="$HADOOP_HOME/logs"
-fi
-if [ "$HADOOP_LOGFILE" = "" ]; then
-  HADOOP_LOGFILE='hbase.log'
-fi
-
-# cygwin path translation
-if $cygwin; then
-  CLASSPATH=`cygpath -p -w "$CLASSPATH"`
-  HADOOP_HOME=`cygpath -d "$HADOOP_HOME"`
-  HBASE_HOME=`cygpath -d "$HBASE_HOME"`
-  HADOOP_LOG_DIR=`cygpath -d "$HADOOP_LOG_DIR"`
-fi
-
-# TODO: Can this be put into separate script so don't have to duplicate
-# hadoop command script code?
-# setup 'java.library.path' for native-hadoop code if necessary
-JAVA_LIBRARY_PATH=''
-if [ -d "${HADOOP_HOME}/build/native" -o -d "${HADOOP_HOME}/lib/native" ]; then
-  JAVA_PLATFORM=`CLASSPATH=${CLASSPATH} ${JAVA} org.apache.hadoop.util.PlatformName | sed -e "s/ /_/g"`
-  
-  if [ -d "$HADOOP_HOME/build/native" ]; then
-    JAVA_LIBRARY_PATH=${HADOOP_HOME}/build/native/${JAVA_PLATFORM}/lib
-  fi
-  
-  if [ -d "${HADOOP_HOME}/lib/native" ]; then
-    if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-      JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_HOME}/lib/native/${JAVA_PLATFORM}
-    else
-      JAVA_LIBRARY_PATH=${HADOOP_HOME}/lib/native/${JAVA_PLATFORM}
-    fi
-  fi
-fi
-
-# cygwin path translation
-if $cygwin; then
-  JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
-fi
- 
-# restore ordinary behaviour
-unset IFS
-
-# figure out which class to run
-if [ "$COMMAND" = "shell" ] ; then
-  CLASS='org.apache.hadoop.hbase.Shell'
-elif [ "$COMMAND" = "master" ] ; then
-  CLASS='org.apache.hadoop.hbase.HMaster'
-elif [ "$COMMAND" = "regionserver" ] ; then
-  CLASS='org.apache.hadoop.hbase.HRegionServer'
-elif [ "$COMMAND" = "rest" ] ; then
-  CLASS='org.apache.hadoop.hbase.rest.Dispatcher'
-elif [ "$COMMAND" = "thrift" ] ; then
-  CLASS='org.apache.hadoop.hbase.thrift.ThriftServer'
-elif [ "$COMMAND" = "migrate" ] ; then
-  CLASS='org.apache.hadoop.hbase.util.Migrate'
-else
-  CLASS=$COMMAND
-fi
-
-# Have JVM dump heap if we run out of memory.  Files will be 'launch directory'
-# and are named like the following: java_pid21612.hprof. Apparently it doesn't
-# 'cost' to have this flag enabled. Its a 1.6 flag only. See:
-# http://blogs.sun.com/alanb/entry/outofmemoryerror_looks_a_bit_better 
-HBASE_OPTS="$HBASE_OPTS -XX:+HeapDumpOnOutOfMemoryError"
-HBASE_OPTS="$HBASE_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
-HBASE_OPTS="$HBASE_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE"
-HBASE_OPTS="$HBASE_OPTS -Dhadoop.home.dir=$HADOOP_HOME"
-HBASE_OPTS="$HBASE_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
-HBASE_OPTS="$HBASE_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
-HBASE_OPTS="$HBASE_OPTS -Dhbase.home.dir=$HBASE_HOME"
-if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-  HBASE_OPTS="$HBASE_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-fi  
-
-# run it
-exec "$JAVA" $JAVA_HEAP_MAX $HBASE_OPTS -classpath "$CLASSPATH" $CLASS "$@"

+ 0 - 100
src/contrib/hbase/bin/hbase-config.sh

@@ -1,100 +0,0 @@
-#
-#/**
-# * Copyright 2007 The Apache Software Foundation
-# *
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# included in all the hbase scripts with source command
-# should not be executable directly
-# also should not be passed any arguments, since we need original $*
-# Modelled after $HADOOP_HOME/bin/hadoop-env.sh.
-
-# resolve links - $0 may be a softlink
-
-this="$0"
-while [ -h "$this" ]; do
-  ls=`ls -ld "$this"`
-  link=`expr "$ls" : '.*-> \(.*\)$'`
-  if expr "$link" : '.*/.*' > /dev/null; then
-    this="$link"
-  else
-    this=`dirname "$this"`/"$link"
-  fi
-done
-
-# convert relative path to absolute path
-bin=`dirname "$this"`
-script=`basename "$this"`
-bin=`cd "$bin"; pwd`
-this="$bin/$script"
-
-# the root of the hbase installation
-export HBASE_HOME=`dirname "$this"`/..
-
-#check to see if the conf dir or hadoop home are given as an optional arguments
-while [ $# -gt 1 ]
-do
-  if [ "--config" = "$1" ]
-  then
-    shift
-    confdir=$1
-    shift
-    HADOOP_CONF_DIR=$confdir
-  elif [ "--hbaseconfig" = "$1" ]
-  then
-    shift
-    confdir=$1
-    shift
-    HBASE_CONF_DIR=$confdir
-  elif [ "--hadoop" = "$1" ]
-  then
-    shift
-    home=$1
-    shift
-    HADOOP_HOME=$home
-  elif [ "--hosts" = "$1" ]
-  then
-    shift
-    hosts=$1
-    shift
-    HBASE_REGIONSERVERS=$hosts
-  else
-    # Presume we are at end of options and break
-    break
-  fi
-done
- 
-# If no hadoop home specified, then we assume its above this directory.
-# Can be in one of two places.  If we've been packaged, then it'll be
-# two levels above us.  If we are running from src at src/contrib/hbase
-# or from the build directory at build/contrib/hbase, then its three
-# levels up.  Look for the hadoop script.
-if [ "$HADOOP_HOME" = "" ]; then
-    if [ -f "$HBASE_HOME/../../bin/hadoop" ]; then
-      HADOOP_HOME="$HBASE_HOME/../../"
-    else
-      HADOOP_HOME="$HBASE_HOME/../../../"
-    fi
-fi
-
-# Allow alternate hadoop conf dir location.
-HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_HOME/conf}"
-# Allow alternate hbase conf dir location.
-HBASE_CONF_DIR="${HBASE_CONF_DIR:-$HBASE_HOME/conf}"
-# List of hbase regions servers.
-HBASE_REGIONSERVERS="${HBASE_REGIONSERVERS:-$HBASE_CONF_DIR/regionservers}"

+ 0 - 157
src/contrib/hbase/bin/hbase-daemon.sh

@@ -1,157 +0,0 @@
-#!/usr/bin/env bash
-#
-#/**
-# * Copyright 2007 The Apache Software Foundation
-# *
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-# 
-# Runs a Hadoop hbase command as a daemon.
-#
-# Environment Variables
-#
-#   HADOOP_CONF_DIR  Alternate conf dir. Default is ${HADOOP_HOME}/conf.
-#   HBASE_CONF_DIR  Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
-#   HADOOP_LOG_DIR   Where log files are stored.  PWD by default.
-#   HADOOP_PID_DIR   The pid files are stored. /tmp by default.
-#   HADOOP_IDENT_STRING   A string representing this instance of hadoop. $USER by default
-#   HADOOP_NICENESS The scheduling priority for daemons. Defaults to 0.
-#
-# Modelled after $HADOOP_HOME/bin/hadoop-daemon.sh
-
-usage="Usage: hbase-daemon.sh [--config <hadoop-conf-dir>]\
- [--hbaseconfig <hbase-conf-dir>] (start|stop) <hbase-command> \
- <args...>"
-
-# if no args specified, show usage
-if [ $# -le 1 ]; then
-  echo $usage
-  exit 1
-fi
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. "$bin"/hbase-config.sh
-
-# get arguments
-startStop=$1
-shift
-
-command=$1
-shift
-
-hbase_rotate_log ()
-{
-    log=$1;
-    num=5;
-    if [ -n "$2" ]; then
-    num=$2
-    fi
-    if [ -f "$log" ]; then # rotate logs
-    while [ $num -gt 1 ]; do
-        prev=`expr $num - 1`
-        [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
-        num=$prev
-    done
-    mv "$log" "$log.$num";
-    fi
-}
-
-if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
-  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
-fi
-if [ -f "${HBASE_CONF_DIR}/hbase-env.sh" ]; then
-  . "${HBASE_CONF_DIR}/hbase-env.sh"
-fi
-
-# get log directory
-if [ "$HADOOP_LOG_DIR" = "" ]; then
-  export HADOOP_LOG_DIR="$HADOOP_HOME/logs"
-fi
-mkdir -p "$HADOOP_LOG_DIR"
-
-if [ "$HADOOP_PID_DIR" = "" ]; then
-  HADOOP_PID_DIR=/tmp
-fi
-
-if [ "$HADOOP_IDENT_STRING" = "" ]; then
-  export HADOOP_IDENT_STRING="$USER"
-fi
-
-# some variables
-export HADOOP_LOGFILE=hbase-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
-export HADOOP_ROOT_LOGGER="INFO,DRFA"
-log=$HADOOP_LOG_DIR/hbase-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out  
-pid=$HADOOP_PID_DIR/hbase-$HADOOP_IDENT_STRING-$command.pid
-
-# Set default scheduling priority
-if [ "$HADOOP_NICENESS" = "" ]; then
-    export HADOOP_NICENESS=0
-fi
-
-case $startStop in
-
-  (start)
-    if [ -f $pid ]; then
-      if kill -0 `cat $pid` > /dev/null 2>&1; then
-        echo $command running as process `cat $pid`.  Stop it first.
-        exit 1
-      fi
-    fi
-
-    hbase_rotate_log $log
-    echo starting $command, logging to $log
-    nohup nice -n $HADOOP_NICENESS "$HBASE_HOME"/bin/hbase \
-        --hadoop "${HADOOP_HOME}" \
-        --config "${HADOOP_CONF_DIR}" --hbaseconfig "${HBASE_CONF_DIR}" \
-        $command $startStop "$@" > "$log" 2>&1 < /dev/null &
-    echo $! > $pid
-    sleep 1; head "$log"
-    ;;
-
-  (stop)
-    if [ -f $pid ]; then
-      if kill -0 `cat $pid` > /dev/null 2>&1; then
-        echo -n stopping $command
-        if [ "$command" = "master" ]; then
-          nohup nice -n $HADOOP_NICENESS "$HBASE_HOME"/bin/hbase \
-              --hadoop "${HADOOP_HOME}" \
-              --config "${HADOOP_CONF_DIR}" --hbaseconfig "${HBASE_CONF_DIR}" \
-              $command $startStop "$@" > "$log" 2>&1 < /dev/null &
-        else
-          kill `cat $pid` > /dev/null 2>&1
-        fi
-        while kill -0 `cat $pid` > /dev/null 2>&1; do
-          echo -n "."
-          sleep 1;
-        done
-        echo
-      else
-        echo no $command to stop
-      fi
-    else
-      echo no $command to stop
-    fi
-    ;;
-
-  (*)
-    echo $usage
-    exit 1
-    ;;
-
-esac

+ 0 - 46
src/contrib/hbase/bin/hbase-daemons.sh

@@ -1,46 +0,0 @@
-#!/usr/bin/env bash
-#
-#/**
-# * Copyright 2007 The Apache Software Foundation
-# *
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-# 
-# Run a Hadoop hbase command on all slave hosts.
-# Modelled after $HADOOP_HOME/bin/hadoop-daemons.sh
-
-usage="Usage: hbase-daemons.sh [--hadoop <hadoop-home>]
- [--config <hadoop-confdir>] [--hbase <hbase-home>]\
- [--hbaseconfig <hbase-confdir>] [--hosts regionserversfile]\
- [start|stop] command args..."
-
-# if no args specified, show usage
-if [ $# -le 1 ]; then
-  echo $usage
-  exit 1
-fi
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. $bin/hbase-config.sh
-
-exec "$bin/regionservers.sh" --config "${HADOOP_CONF_DIR}" \
- --hbaseconfig "${HBASE_CONF_DIR}" --hadoop "${HADOOP_HOME}" \
- cd "${HBASE_HOME}" \; \
- "$bin/hbase-daemon.sh" --config "${HADOOP_CONF_DIR}" \
- --hbaseconfig "${HBASE_CONF_DIR}" --hadoop "${HADOOP_HOME}" "$@"

+ 0 - 78
src/contrib/hbase/bin/regionservers.sh

@@ -1,78 +0,0 @@
-#!/usr/bin/env bash
-#
-#/**
-# * Copyright 2007 The Apache Software Foundation
-# *
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-# 
-# Run a shell command on all regionserver hosts.
-#
-# Environment Variables
-#
-#   HBASE_REGIONSERVERS    File naming remote hosts.
-#     Default is ${HADOOP_CONF_DIR}/regionservers
-#   HADOOP_CONF_DIR  Alternate conf dir. Default is ${HADOOP_HOME}/conf.
-#   HBASE_CONF_DIR  Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
-#   HADOOP_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
-#   HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
-#
-# Modelled after $HADOOP_HOME/bin/slaves.sh.
-
-usage="Usage: regionservers [--config <hadoop-confdir>]\
- [--hbaseconfig <hbase-confdir>] command..."
-
-# if no args specified, show usage
-if [ $# -le 0 ]; then
-  echo $usage
-  exit 1
-fi
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. "$bin"/hbase-config.sh
-
-# If the regionservers file is specified in the command line,
-# then it takes precedence over the definition in 
-# hbase-env.sh. Save it here.
-HOSTLIST=$HBASE_REGIONSERVERS
-
-if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
-  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
-fi
-if [ -f "${HBASE_CONF_DIR}/hbase-env.sh" ]; then
-  . "${HBASE_CONF_DIR}/hbase-env.sh"
-fi
-
-if [ "$HOSTLIST" = "" ]; then
-  if [ "$HBASE_REGIONSERVERS" = "" ]; then
-    export HOSTLIST="${HBASE_CONF_DIR}/regionservers"
-  else
-    export HOSTLIST="${HBASE_REGIONSERVERS}"
-  fi
-fi
-
-for regionserver in `cat "$HOSTLIST"`; do
- ssh $HADOOP_SSH_OPTS $regionserver $"${@// /\\ }" \
-   2>&1 | sed "s/^/$regionserver: /" &
- if [ "$HADOOP_SLAVE_SLEEP" != "" ]; then
-   sleep $HADOOP_SLAVE_SLEEP
- fi
-done
-
-wait

+ 0 - 45
src/contrib/hbase/bin/start-hbase.sh

@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#/**
-# * Copyright 2007 The Apache Software Foundation
-# *
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Modelled after $HADOOP_HOME/bin/start-hbase.sh.
-
-# Start hadoop hbase daemons.
-# Run this on master node.
-usage="Usage: start-hbase.sh"
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. "$bin"/hbase-config.sh
-
-# start hbase daemons
-"${HADOOP_HOME}"/bin/hadoop dfsadmin -safemode wait
-errCode=$?
-if [ $errCode -ne 0 ]
-then
-  exit $errCode
-fi
-"$bin"/hbase-daemon.sh --config "${HADOOP_CONF_DIR}" \
-    --hbaseconfig "${HBASE_CONF_DIR}" start master 
-"$bin"/hbase-daemons.sh --config "${HADOOP_CONF_DIR}" \
-    --hbaseconfig "${HBASE_CONF_DIR}" --hadoop "${HADOOP_HOME}" \
-    --hosts "${HBASE_REGIONSERVERS}" start regionserver

+ 0 - 33
src/contrib/hbase/bin/stop-hbase.sh

@@ -1,33 +0,0 @@
-#!/usr/bin/env bash
-#
-#/**
-# * Copyright 2007 The Apache Software Foundation
-# *
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Modelled after $HADOOP_HOME/bin/stop-dfs.sh.
-
-# Stop hadoop hbase daemons.  Run this on master node.
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. "$bin"/hbase-config.sh
-
-"$bin"/hbase-daemon.sh --config "${HADOOP_CONF_DIR}" \
-    --hbaseconfig "${HBASE_CONF_DIR}" stop master

+ 0 - 78
src/contrib/hbase/build-webapps.xml

@@ -1,78 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!--
-This is a script to compile the jsp for the hbase webapps.  Currently, 
-generation of java classes from jsp is done manually and the produced
-java classes are then checked in.  We do it this way keeping all to do
-with jsp in a separate build file because trying to build jsp inline
-we trip over the 'famous' commons-logging classloader problem:
-
-  org.apache.commons.logging.LogConfigurationException: Invalid
-    class loader hierarchy.  You have more than one version of
-    'org.apache.commons.logging.Log' visible, which is not allowed.
-        
-See http://www.qos.ch/logging/classloader.jsp.  It's apparently addressed
-by later versions of jasper (using later versions of
-jasper in this hbase subproject alone is not sufficient), so when hadoop
-goes to jetty6 (HADOOP-1650), we should be able to integrate jsp compiling into 
-the general compile (see http://issues.apache.org/bugzilla/show_bug.cgi?id=36968).
-
-In the meantime, if the jsps change, just check in the product of this script:
-the generated java classes and the web.xml.  To run, do the following:
-
-    $ ant -f build-webapps.xml
--->
-<project name="build.hbase.jsp" default="jspc">
-  <property name="lib.dir" value="${basedir}/lib" />
-  <property name="hadoop.root" location="${basedir}/../../../"/>
-  <property name="src.webapps" value="${basedir}/src/webapps" />
-  <property name="generated.webapps.src"
-    value="${basedir}/src/java"/>
-   
-  <target name="jspc" >
-    <path id="jspc.classpath">
-      <fileset dir="${lib.dir}">
-        <include name="commons-el*jar" />
-      </fileset>
-      <fileset dir="${hadoop.root}/lib/jetty-ext/">
-        <include name="*jar" />
-      </fileset>
-      <fileset dir="${hadoop.root}/lib/">
-        <include name="servlet-api*jar" />
-        <include name="commons-logging*jar" />
-      </fileset>
-    </path>
-    <taskdef classname="org.apache.jasper.JspC" name="jspcompiler" >
-      <classpath refid="jspc.classpath"/>
-    </taskdef>
-    <jspcompiler
-     uriroot="${src.webapps}/master"
-     outputdir="${generated.webapps.src}"
-     package="org.apache.hadoop.hbase.generated.master"
-     webxml="${src.webapps}/master/WEB-INF/web.xml">
-    </jspcompiler>
-    <jspcompiler
-     uriroot="${src.webapps}/regionserver"
-     outputdir="${generated.webapps.src}"
-     package="org.apache.hadoop.hbase.generated.regionserver"
-     webxml="${src.webapps}/regionserver/WEB-INF/web.xml">
-    </jspcompiler>
-  </target>
-</project>

+ 0 - 176
src/contrib/hbase/build.xml

@@ -1,176 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project name="hbase" default="jar">
-  <import file="../build-contrib.xml"/>
-
-  <property name="build.webapps" value="${build.dir}/webapps"/>
-  <property name="build.lib" value="${build.dir}/lib"/>
-  <property name="build.conf" value="${build.dir}/conf"/>
-  <property name="build.bin" value="${build.dir}/bin"/>
-  <property name="src.webapps" value="${basedir}/src/webapps" />
-
-  <target name="init">
-    <antcall target="hadoopbuildcontrib.init"/>
-    <!--Version is set only if called from hadoop build.xml. Set a default-->
-    <condition property="version" value="0.15.0-dev">
-      <not>
-        <isset property="version" />
-      </not>
-    </condition>
-    <!--Copy webapps over to build dir. Exclude jsp and generated-src java
-      classes -->
-    <mkdir dir="${build.webapps}"/>
-    <copy todir="${build.webapps}">
-      <fileset dir="${src.webapps}">
-        <exclude name="**/*.jsp" />
-        <exclude name="**/.*" />
-        <exclude name="**/*~" />
-      </fileset>
-    </copy>
-    <!--Copy bin, lib, and conf. too-->
-    <mkdir dir="${build.lib}"/>
-    <copy todir="${build.lib}">
-      <fileset dir="${basedir}/lib" />
-    </copy>
-    <mkdir dir="${build.conf}"/>
-    <copy todir="${build.conf}">
-      <fileset dir="${basedir}/conf" />
-    </copy>
-    <mkdir dir="${build.bin}"/>
-    <copy todir="${build.bin}">
-      <fileset dir="${basedir}/bin" />
-    </copy>
-    <chmod perm="ugo+x" type="file">
-      <fileset dir="${build.bin}" />
-    </chmod>
-  </target>
-
-  <target name="javacc" if="javacc.home">
-  <echo message="javacc.home: ${javacc.home}"/>
-  <property name="hql.src.dir" 
-       value="${src.dir}/org/apache/hadoop/hbase/hql" /> 
-     <mkdir dir="${hql.src.dir}/generated" />
-     <javacc
-       target="${hql.src.dir}/HQLParser.jj"
-       outputdirectory="${hql.src.dir}/generated"
-       javacchome="${javacc.home}"
-     />
-  </target>
-
-  <target name="compile" depends="init,javacc">
-   <echo message="contrib: ${name}"/>
-   <!--Compile what's under src and the generated java classes made from jsp-->
-   <javac
-    encoding="${build.encoding}"
-    srcdir="${src.dir}"
-    includes="**/*.java"
-    destdir="${build.classes}"
-    debug="${javac.debug}"
-    deprecation="${javac.deprecation}">
-     <classpath refid="classpath"/>
-     <classpath path="path"/>
-   </javac>
-  </target>
-	
-  <!-- Override jar target to specify main class -->
-  <target name="jar" depends="compile">
-    <jar jarfile="${build.dir}/hadoop-${version}-${name}.jar"
-        basedir="${build.classes}" >
-      <fileset file="${root}/conf/hbase-default.xml"/>
-      <zipfileset dir="${build.webapps}" prefix="webapps"/>
-    </jar>
-  </target>
-
-  <!--Manage our own packaging... install our dependencies,
-  bin, etc.-->
-  <target name="package" depends="jar" unless="skip.contrib"> 
-    <condition property="dist.dir" value="distribution">
-      <not>
-        <isset property="dist.dir" />
-      </not>
-    </condition>
-    <property name="hbase.dist.dir" value="${dist.dir}/contrib/${name}"/>
-    <mkdir dir="${hbase.dist.dir}"/>
-    <copy todir="${hbase.dist.dir}" includeEmptyDirs="false" flatten="true">
-      <fileset dir="${build.dir}">
-        <include name="hadoop-${version}-${name}.jar" />
-      </fileset>
-    </copy>
-    <mkdir dir="${hbase.dist.dir}/webapps"/>
-    <copy todir="${hbase.dist.dir}/webapps">
-      <fileset dir="${build.webapps}" />
-    </copy>
-    <mkdir dir="${hbase.dist.dir}/lib"/>
-    <copy todir="${hbase.dist.dir}/lib">
-      <fileset dir="${build.lib}" />
-    </copy>
-    <mkdir dir="${hbase.dist.dir}/conf" />
-    <copy todir="${hbase.dist.dir}/conf">
-      <fileset dir="${build.conf}" />
-    </copy>
-    <mkdir dir="${hbase.dist.dir}/bin" />
-    <copy todir="${hbase.dist.dir}/bin">
-      <fileset dir="${build.bin}" />
-    </copy>
-    <chmod perm="ugo+x" type="file">
-      <fileset dir="${hbase.dist.dir}/bin" />
-    </chmod>
-  </target>
-
-
-  <!-- Override the compile-test target so we can generate an hbase 
-       test jar that has both test and hbase classes. 
-   --> 
-  <target name="compile-test" depends="compile" if="test.available"> 
-    <echo message="contrib: ${name}"/> 
-    <javac 
-     encoding="${build.encoding}" 
-     srcdir="${src.test}" 
-     includes="**/*.java" 
-     destdir="${build.test}" 
-     debug="${javac.debug}"> 
-      <classpath refid="test.classpath"/> 
-    </javac>
-    <jar jarfile="${build.dir}/hadoop-${version}-${name}-test.jar" >
-      <fileset dir="${build.test}" includes="org/**" />
-      <fileset dir="${build.classes}" />
-      <fileset dir="${src.test}" includes="**/*.properties" />
-      <manifest>
-        <attribute name="Main-Class"
-          value="org/apache/hadoop/hbase/PerformanceEvaluation"/>
-      </manifest>
-    </jar>
-  </target>
-
-  <!-- The unit test classpath.
-    Copied from ../build-contrib.xml so we can add to it.
-   -->
-  <path id="test.classpath">
-    <pathelement location="${build.test}" />
-    <pathelement location="${src.test}"/>
-    <pathelement location="${hadoop.root}/build/test/classes"/>
-    <pathelement location="${hadoop.root}/src/contrib/test"/>
-    <pathelement location="${conf.dir}"/>
-    <pathelement location="${hadoop.root}/build"/>
-    <pathelement location="${root}/conf"/>
-    <pathelement location="${build.dir}"/>
-    <path refid="classpath"/>
-  </path>
-</project>

+ 0 - 239
src/contrib/hbase/conf/hbase-default.xml

@@ -1,239 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.master</name>
-    <value>local</value>
-    <description>The host and port that the HBase master runs at.
-    A value of 'local' runs the master and a regionserver in
-    a single process.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>60010</value>
-    <description>The port for the hbase master web UI
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value>0.0.0.0</value>
-    <description>The address for the hbase master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver</name>
-    <value>0.0.0.0:60020</value>
-    <description>The host and port a HBase region server runs at.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>60030</value>
-    <description>The port for the hbase regionserver web UI
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.bindAddress</name>
-    <value>0.0.0.0</value>
-    <description>The address for the hbase regionserver web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.class</name>
-    <value>org.apache.hadoop.hbase.HRegionInterface</value>
-    <description>An interface that is assignable to HRegionInterface.  Used in HClient for
-    opening proxy to remote region server.
-    </description>
-  </property>
-  <property>
-    <name>hbase.rootdir</name>
-    <value>${hadoop.tmp.dir}/hbase</value>
-    <description>The directory shared by region servers.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.pause</name>
-    <value>10000</value>
-    <description>General client pause value.  Used mostly as value to wait
-    before running a retry of a failed get, region lookup, etc.</description>
-  </property>
-  <property>
-    <name>hbase.client.retries.number</name>
-    <value>5</value>
-    <description>Maximum retries.  Used as maximum for all retryable
-    operations such as fetching of the root region from root region
-    server, getting a cell's value, starting a row update, etc.
-    Default: 5.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.meta.thread.rescanfrequency</name>
-    <value>60000</value>
-    <description>How long the HMaster sleeps (in milliseconds) between scans of
-    the root and meta tables.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.period</name>
-    <value>30000</value>
-    <description>HMaster server lease period in milliseconds. Default is
-    30 seconds.  Region servers must report in within this period or else
-    they are considered dead.  On a loaded cluster, you may need to
-    increase this period.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.lease.period</name>
-    <value>30000</value>
-    <description>HRegion server lease period in milliseconds. Default is
-    30 seconds. Clients must report in within this period else they are
-    considered dead.</description>
-  </property>
-  <property>
-    <name>hbase.server.thread.wakefrequency</name>
-    <value>10000</value>
-    <description>Time to sleep in between searches for work (in milliseconds).
-    Used as sleep interval by service threads such as META scanner and log roller.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>10</value>
-    <description>Count of RPC Server instances spun up on RegionServers.
-    The same property is used by the HMaster for its count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.msginterval</name>
-    <value>3000</value>
-    <description>Interval between messages from the RegionServer to the HMaster
-    in milliseconds.  Default is 3000 (three seconds). Set this value low if
-    you want unit tests to be responsive.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.maxlogentries</name>
-    <value>30000</value>
-    <description>Rotate the HRegion HLogs when count of entries exceeds this
-    value.  Default: 30,000.  Value is checked by a thread that runs every
-    hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>1800000</value>
-    <description>
-    Amount of time to wait since the last time a region was flushed before
-    invoking an optional cache flush (an optional cache flush is a
-    flush even though the memcache is not at hbase.hregion.memcache.flush.size).
-    Default: 30 minutes (value is in milliseconds).
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memcache.flush.size</name>
-    <value>67108864</value>
-    <description>
-    An HRegion memcache will be flushed to disk if the size of the memcache
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.  
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memcache.block.multiplier</name>
-    <value>1</value>
-    <description>
-    Block updates if the memcache reaches hbase.hregion.memcache.block.multiplier
-    times hbase.hregion.memcache.flush.size bytes.  Useful for preventing
-    runaway memcache growth during spikes in update traffic.  Without an
-    upper-bound, the memcache fills such that when it flushes, the
-    resultant flush files take a long time to compact or split, or,
-    worse, we OOME.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>268435456</value>
-    <description>
-    Maximum HStoreFile size. If any one of a column family's HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 256M.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value>3</value>
-    <description>
-    If there are more than this number of HStoreFiles in any one HStore
-    (one HStoreFile is written per flush of the memcache), then a compaction
-    is run to rewrite all HStoreFiles as one.  Larger numbers
-    put off compaction, but when it runs it takes longer to complete.
-    During a compaction, updates cannot be flushed to disk.  Long
-    compactions require memory sufficient to carry the logging of
-    all updates across the duration of the compaction.
-    
-    If this value is too large, clients time out during compaction.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.thread.splitcompactcheckfrequency</name>
-    <value>20000</value>
-    <description>How often a region server runs the split/compaction check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.io.index.interval</name>
-    <value>32</value>
-    <description>The interval at which we record offsets in hbase
-    store files/mapfiles.  Default for stock mapfiles is 128.  Index
-    files are read into memory.  If there are many of them, they could prove
-    a burden.  If so, play with the hadoop io.map.index.skip property and
-    skip every nth index member when reading back the index into memory.
-    </description>
-  </property>
-  <property>
-    <name>hbase.io.seqfile.compression.type</name>
-    <value>NONE</value>
-    <description>The compression type for hbase sequencefile.Writers
-    such as hlog.
-    </description>
-  </property>
-
-  <!-- HbaseShell Configurations -->
-  <property>
- 	<name>hbaseshell.jline.bell.enabled</name>
-  	<value>true</value>
-  	<description>
-  		if true, enable audible keyboard bells if an alert is required.
-  	</description>
-  </property>  
-  <property>
- 	<name>hbaseshell.formatter</name>
-  	<value>org.apache.hadoop.hbase.hql.formatter.AsciiTableFormatter</value>
-  	<description>TableFormatter to use outputting HQL result sets.
-  	</description>
-  </property>  
-</configuration>

+ 0 - 34
src/contrib/hbase/conf/hbase-env.sh

@@ -1,34 +0,0 @@
-#
-#/**
-# * Copyright 2007 The Apache Software Foundation
-# *
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Set HBase-specific environment variables here.
-
-# Extra Java CLASSPATH elements.  Optional.
-# export HBASE_CLASSPATH=
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HBASE_HEAPSIZE=1000
-
-# Extra Java runtime options.  Empty by default.
-# export HBASE_OPTS=-server
-
-# File naming hosts on which HRegionServers will run.  $HBASE_HOME/conf/regionservers by default.
-# export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers

+ 0 - 25
src/contrib/hbase/conf/hbase-site.xml

@@ -1,25 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-</configuration>

+ 0 - 1
src/contrib/hbase/conf/regionservers

@@ -1 +0,0 @@
-localhost

BIN
src/contrib/hbase/lib/commons-math-1.1.jar


BIN
src/contrib/hbase/lib/jline-0.9.91.jar


BIN
src/contrib/hbase/lib/libthrift-r746.jar


BIN
src/contrib/hbase/lib/lucene-core-2.2.0.jar


+ 0 - 238
src/contrib/hbase/src/examples/thrift/DemoClient.cpp

@@ -1,238 +0,0 @@
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/time.h>
-#include <poll.h>
-
-#include <iostream>
-
-#include <protocol/TBinaryProtocol.h>
-#include <transport/TSocket.h>
-#include <transport/TTransportUtils.h>
-
-#include "Hbase.h"
-
-using namespace facebook::thrift;
-using namespace facebook::thrift::protocol;
-using namespace facebook::thrift::transport;
-
-using namespace apache::hadoop::hbase::thrift;
-
-typedef std::vector<std::string> StrVec;
-typedef std::map<std::string,std::string> StrMap;
-typedef std::vector<ColumnDescriptor> ColVec;
-typedef std::map<std::string,ColumnDescriptor> ColMap;
-
-
-static void
-printRow(const std::string &row, const StrMap &columns)
-{
-  std::cout << "row: " << row << ", cols: ";
-  for (StrMap::const_iterator it = columns.begin(); it != columns.end(); ++it) {
-    std::cout << it->first << " => " << it->second << "; ";
-  }
-  std::cout << std::endl;
-}
-
-static void 
-printEntry(const ScanEntry &entry)
-{
-  printRow(entry.row, entry.columns);
-}
-
-static void
-printVersions(const std::string &row, const StrVec &versions)
-{
-  std::cout << "row: " << row << ", values: ";
-  for (StrVec::const_iterator it = versions.begin(); it != versions.end(); ++it) {
-    std::cout << *it << "; ";
-  }
-  std::cout << std::endl;
-}
-
-int 
-main(int argc, char** argv) 
-{
-  boost::shared_ptr<TTransport> socket(new TSocket("localhost", 9090));
-  boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
-  boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
-  HbaseClient client(protocol);
-
-  try {
-    transport->open();
-
-    std::string t("demo_table");
-
-    //
-    // Scan all tables, look for the demo table and delete it.
-    //
-    std::cout << "scanning tables..." << std::endl;
-    StrVec tables;
-    client.getTableNames(tables);
-    for (StrVec::const_iterator it = tables.begin(); it != tables.end(); ++it) {
-      std::cout << "  found: " << *it << std::endl;
-      if (t == *it) {
-        std::cout << "    deleting table: " << *it << std::endl;
-        client.deleteTable(*it);
-      }
-    }
-
-    //
-    // Create the demo table with two column families, entry: and unused:
-    //
-    ColVec columns;
-    columns.push_back(ColumnDescriptor());
-    columns.back().name = "entry:";
-    columns.back().maxVersions = 10;
-    columns.push_back(ColumnDescriptor());
-    columns.back().name = "unused:";
-
-    std::cout << "creating table: " << t << std::endl;
-    try {
-      client.createTable(t, columns);
-    } catch (AlreadyExists &ae) {
-      std::cout << "WARN: " << ae.message << std::endl;
-    }
-
-    ColMap columnMap;
-    client.getColumnDescriptors(columnMap, t);
-    std::cout << "column families in " << t << ": " << std::endl;
-    for (ColMap::const_iterator it = columnMap.begin(); it != columnMap.end(); ++it) {
-      std::cout << "  column: " << it->second.name << ", maxVer: " << it->second.maxVersions << std::endl;
-    }
-
-    //
-    // Test UTF-8 handling
-    //
-    std::string invalid("foo-\xfc\xa1\xa1\xa1\xa1\xa1");
-    std::string valid("foo-\xE7\x94\x9F\xE3\x83\x93\xE3\x83\xBC\xE3\x83\xAB");
-
-    // non-utf8 is fine for data
-    client.put(t, "foo", "entry:foo", invalid);
-
-    // try empty strings
-    client.put(t, "", "entry:", "");
-
-    // this row name is valid utf8
-    client.put(t, valid, "entry:foo", valid);
-
-    // non-utf8 is not allowed in row names
-    try {
-      client.put(t, invalid, "entry:foo", invalid);
-      std::cout << "FATAL: shouldn't get here!" << std::endl;
-      exit(-1);
-    } catch (IOError e) {
-      std::cout << "expected error: " << e.message << std::endl;
-    }
-
-    // Run a scanner on the rows we just created
-    StrVec columnNames;
-    columnNames.push_back("entry:");
-
-    std::cout << "Starting scanner..." << std::endl;
-    int scanner = client.scannerOpen(t, "", columnNames);
-    try {
-      while (true) {
-        ScanEntry value;
-        client.scannerGet(value, scanner);
-        printEntry(value);
-      }
-    } catch (NotFound &nf) {
-      client.scannerClose(scanner);
-      std::cout << "Scanner finished" << std::endl;
-    }
-
-    //
-    // Run some operations on a bunch of rows.
-    //
-    for (int i = 100; i >= 0; --i) {
-      // format row keys as "00000" to "00100"
-      char buf[32];
-      sprintf(buf, "%0.5d", i);
-      std::string row(buf);
-      
-      StrMap values;
-
-      client.put(t, row, "unused:", "DELETE_ME");
-      client.getRow(values, t, row);
-      printRow(row, values);
-      client.deleteAllRow(t, row);
-
-      client.put(t, row, "entry:num", "0");
-      client.put(t, row, "entry:foo", "FOO");
-      client.getRow(values, t, row);
-      printRow(row, values);
-
-      // sleep to force later timestamp 
-      poll(0, 0, 50);
-
-      std::vector<Mutation> mutations;
-      mutations.push_back(Mutation());
-      mutations.back().column = "entry:foo";
-      mutations.back().isDelete = true;
-      mutations.push_back(Mutation());
-      mutations.back().column = "entry:num";
-      mutations.back().value = "-1";
-      client.mutateRow(t, row, mutations);
-      client.getRow(values, t, row);
-      printRow(row, values);
-      
-      client.put(t, row, "entry:num", boost::lexical_cast<std::string>(i));
-      client.put(t, row, "entry:sqr", boost::lexical_cast<std::string>(i*i));
-      client.getRow(values, t, row);
-      printRow(row, values);
-
-      mutations.clear();
-      mutations.push_back(Mutation());
-      mutations.back().column = "entry:num";
-      mutations.back().value = "-999";
-      mutations.push_back(Mutation());
-      mutations.back().column = "entry:sqr";
-      mutations.back().isDelete = true;
-      client.mutateRowTs(t, row, mutations, 1); // shouldn't override latest
-      client.getRow(values, t, row);
-      printRow(row, values);
-
-      StrVec versions;
-      client.getVer(versions, t, row, "entry:num", 10);
-      printVersions(row, versions);
-      assert(versions.size() == 4);
-      std::cout << std::endl;
-
-      try {
-        std::string value;
-        client.get(value, t, row, "entry:foo");
-        std::cout << "FATAL: shouldn't get here!" << std::endl;
-        exit(-1);
-      } catch (NotFound &nf) {
-        // blank
-      }
-    }
-
-    // scan all rows/columns
-
-    columnNames.clear();
-    client.getColumnDescriptors(columnMap, t);
-    for (ColMap::const_iterator it = columnMap.begin(); it != columnMap.end(); ++it) {
-      columnNames.push_back(it->first);
-    }
-
-    std::cout << "Starting scanner..." << std::endl;
-    scanner = client.scannerOpenWithStop(t, "00020", "00040", columnNames);
-    try {
-      while (true) {
-        ScanEntry value;
-        client.scannerGet(value, scanner);
-        printEntry(value);
-      }
-    } catch (NotFound &nf) {
-      client.scannerClose(scanner);
-      std::cout << "Scanner finished" << std::endl;
-    }
-
-    transport->close();
-  } 
-  catch (TException &tx) {
-    printf("ERROR: %s\n", tx.what());
-  }
-
-}

+ 0 - 276
src/contrib/hbase/src/examples/thrift/DemoClient.java

@@ -1,276 +0,0 @@
-package org.apache.hadoop.hbase.thrift;
-
-import java.io.UnsupportedEncodingException;
-import java.nio.ByteBuffer;
-import java.nio.charset.CharacterCodingException;
-import java.nio.charset.Charset;
-import java.nio.charset.CharsetDecoder;
-import java.text.NumberFormat;
-import java.util.AbstractMap;
-import java.util.ArrayList;
-import java.util.TreeMap;
-import java.util.SortedMap;
-
-import org.apache.hadoop.hbase.thrift.generated.AlreadyExists;
-import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
-import org.apache.hadoop.hbase.thrift.generated.Hbase;
-import org.apache.hadoop.hbase.thrift.generated.IOError;
-import org.apache.hadoop.hbase.thrift.generated.IllegalArgument;
-import org.apache.hadoop.hbase.thrift.generated.Mutation;
-import org.apache.hadoop.hbase.thrift.generated.NotFound;
-import org.apache.hadoop.hbase.thrift.generated.ScanEntry;
-import com.facebook.thrift.TException;
-import com.facebook.thrift.protocol.TBinaryProtocol;
-import com.facebook.thrift.protocol.TProtocol;
-import com.facebook.thrift.transport.TSocket;
-import com.facebook.thrift.transport.TTransport;
-
-public class DemoClient {
-  
-  protected int port = 9090;
-  CharsetDecoder decoder = null;
-
-  public static void main(String[] args) 
-    throws IOError, TException, NotFound, UnsupportedEncodingException, IllegalArgument, AlreadyExists 
-  {
-    DemoClient client = new DemoClient();
-    client.run();
-  }
-
-  DemoClient() {
-    decoder = Charset.forName("UTF-8").newDecoder();
-  }
-  
-  // Helper to translate byte[]'s to UTF8 strings
-  private String utf8(byte[] buf) {
-    try {
-      return decoder.decode(ByteBuffer.wrap(buf)).toString();
-    } catch (CharacterCodingException e) {
-      return "[INVALID UTF-8]";
-    }
-  }
-  
-  // Helper to translate strings to UTF8 bytes
-  private byte[] bytes(String s) {
-    try {
-      return s.getBytes("UTF-8");
-    } catch (UnsupportedEncodingException e) {
-      e.printStackTrace();
-      return null;
-    }
-  }
-  
-  private void run() throws IOError, TException, NotFound, IllegalArgument,
-      AlreadyExists {
-    TTransport transport = new TSocket("localhost", port);
-    TProtocol protocol = new TBinaryProtocol(transport, true, true);
-    Hbase.Client client = new Hbase.Client(protocol);
-
-    transport.open();
-
-    byte[] t = bytes("demo_table");
-    
-    //
-    // Scan all tables, look for the demo table and delete it.
-    //
-    System.out.println("scanning tables...");
-    for (byte[] name : client.getTableNames()) {
-      System.out.println("  found: " + utf8(name));
-      if (utf8(name).equals(utf8(t))) {
-        System.out.println("    deleting table: " + utf8(name));  
-        client.deleteTable(name);
-      }
-    }
-    
-    //
-    // Create the demo table with two column families, entry: and unused:
-    //
-    ArrayList<ColumnDescriptor> columns = new ArrayList<ColumnDescriptor>();
-    ColumnDescriptor col = null;
-    col = new ColumnDescriptor();
-    col.name = bytes("entry:");
-    col.maxVersions = 10;
-    columns.add(col);
-    col = new ColumnDescriptor();
-    col.name = bytes("unused:");
-    columns.add(col);
-
-    System.out.println("creating table: " + utf8(t));
-    try {
-      client.createTable(t, columns);
-    } catch (AlreadyExists ae) {
-      System.out.println("WARN: " + ae.message);
-    }
-    
-    System.out.println("column families in " + utf8(t) + ": ");
-    AbstractMap<byte[], ColumnDescriptor> columnMap = client.getColumnDescriptors(t);
-    for (ColumnDescriptor col2 : columnMap.values()) {
-      System.out.println("  column: " + utf8(col2.name) + ", maxVer: " + Integer.toString(col2.maxVersions));
-    }
-    
-    //
-    // Test UTF-8 handling
-    //
-    byte[] invalid = { (byte) 'f', (byte) 'o', (byte) 'o', (byte) '-', (byte) 0xfc, (byte) 0xa1, (byte) 0xa1, (byte) 0xa1, (byte) 0xa1 };
-    byte[] valid = { (byte) 'f', (byte) 'o', (byte) 'o', (byte) '-', (byte) 0xE7, (byte) 0x94, (byte) 0x9F, (byte) 0xE3, (byte) 0x83, (byte) 0x93, (byte) 0xE3, (byte) 0x83, (byte) 0xBC, (byte) 0xE3, (byte) 0x83, (byte) 0xAB};
-
-    // non-utf8 is fine for data
-    client.put(t, bytes("foo"), bytes("entry:foo"), invalid);
-
-    // try empty strings
-    client.put(t, bytes(""), bytes("entry:"), bytes(""));
-    
-    // this row name is valid utf8
-    client.put(t, valid, bytes("entry:foo"), valid);
-    
-    // non-utf8 is not allowed in row names
-    try {
-      client.put(t, invalid, bytes("entry:foo"), invalid);
-      System.out.println("FATAL: shouldn't get here");
-      System.exit(-1);
-    } catch (IOError e) {
-      System.out.println("expected error: " + e.message);
-    }
-    
-    // Run a scanner on the rows we just created
-    ArrayList<byte[]> columnNames = new ArrayList<byte[]>();
-    columnNames.add(bytes("entry:"));
-    
-    System.out.println("Starting scanner...");
-    int scanner = client
-        .scannerOpen(t, bytes(""), columnNames);
-    try {
-      while (true) {
-        ScanEntry value = client.scannerGet(scanner);
-        printEntry(value);
-      }
-    } catch (NotFound nf) {
-      client.scannerClose(scanner);
-      System.out.println("Scanner finished");
-    }
-    
-    //
-    // Run some operations on a bunch of rows
-    //
-    for (int i = 100; i >= 0; --i) {
-      // format row keys as "00000" to "00100"
-      NumberFormat nf = NumberFormat.getInstance();
-      nf.setMinimumIntegerDigits(5);
-      nf.setGroupingUsed(false);
-      byte[] row = bytes(nf.format(i));
-      
-      client.put(t, row, bytes("unused:"), bytes("DELETE_ME"));
-      printRow(row, client.getRow(t, row));
-      client.deleteAllRow(t, row);
-
-      client.put(t, row, bytes("entry:num"), bytes("0"));
-      client.put(t, row, bytes("entry:foo"), bytes("FOO"));
-      printRow(row, client.getRow(t, row));
-
-      Mutation m = null;      
-      ArrayList<Mutation> mutations = new ArrayList<Mutation>();
-      m = new Mutation();
-      m.column = bytes("entry:foo");
-      m.isDelete = true;
-      mutations.add(m);
-      m = new Mutation();
-      m.column = bytes("entry:num");
-      m.value = bytes("-1");
-      mutations.add(m);
-      client.mutateRow(t, row, mutations);
-      printRow(row, client.getRow(t, row));
-      
-      client.put(t, row, bytes("entry:num"), bytes(Integer.toString(i)));
-      client.put(t, row, bytes("entry:sqr"), bytes(Integer.toString(i * i)));
-      printRow(row, client.getRow(t, row));
-
-      // sleep to force later timestamp 
-      try {
-        Thread.sleep(50);
-      } catch (InterruptedException e) {
-        // no-op
-      }
-      
-      mutations.clear();
-      m = new Mutation();
-      m.column = bytes("entry:num");
-      m.value = bytes("-999");
-      mutations.add(m);
-      m = new Mutation();
-      m.column = bytes("entry:sqr");
-      m.isDelete = true;
-      client.mutateRowTs(t, row, mutations, 1); // shouldn't override latest
-      printRow(row, client.getRow(t, row));
-
-      ArrayList<byte[]> versions = client.getVer(t, row, bytes("entry:num"), 10);
-      printVersions(row, versions);
-      if (versions.size() != 4) {
-        System.out.println("FATAL: wrong # of versions");
-        System.exit(-1);
-      }
-      
-      try {
-        client.get(t, row, bytes("entry:foo"));
-        System.out.println("FATAL: shouldn't get here");
-        System.exit(-1);
-      } catch (NotFound nf2) {
-        // blank
-      }
-
-      System.out.println("");
-    }
-    
-    // scan all rows/columnNames
-    
-    columnNames.clear();
-    for (ColumnDescriptor col2 : client.getColumnDescriptors(t).values()) {
-      columnNames.add(col2.name);
-    }
-    
-    System.out.println("Starting scanner...");
-    scanner = client.scannerOpenWithStop(t, bytes("00020"), bytes("00040"),
-        columnNames);
-    try {
-      while (true) {
-        ScanEntry value = client.scannerGet(scanner);
-        printEntry(value);
-      }
-    } catch (NotFound nf) {
-      client.scannerClose(scanner);
-      System.out.println("Scanner finished");
-    }
-    
-    transport.close();
-  }
-  
-  private final void printVersions(byte[] row, ArrayList<byte[]> values) {
-    StringBuilder rowStr = new StringBuilder();
-    for (byte[] value : values) {
-      rowStr.append(utf8(value));
-      rowStr.append("; ");
-    }
-    System.out.println("row: " + utf8(row) + ", values: " + rowStr);
-  }
-  
-  private final void printEntry(ScanEntry entry) {
-    printRow(entry.row, entry.columns);
-  }
-  
-  private final void printRow(byte[] row, AbstractMap<byte[], byte[]> values) {
-    // copy values into a TreeMap to get them in sorted order
-    
-    TreeMap<String,byte[]> sorted = new TreeMap<String,byte[]>();
-    for (AbstractMap.Entry<byte[], byte[]> entry : values.entrySet()) {
-      sorted.put(utf8(entry.getKey()), entry.getValue());
-    }
-    
-    StringBuilder rowStr = new StringBuilder();
-    for (SortedMap.Entry<String, byte[]> entry : sorted.entrySet()) {
-      rowStr.append(entry.getKey());
-      rowStr.append(" => ");
-      rowStr.append(utf8(entry.getValue()));
-      rowStr.append("; ");
-    }
-    System.out.println("row: " + utf8(row) + ", cols: " + rowStr);
-  }
-}

+ 0 - 178
src/contrib/hbase/src/examples/thrift/DemoClient.rb

@@ -1,178 +0,0 @@
-#!/usr/bin/ruby
-
-$:.push('~/thrift/trunk/lib/rb/lib')
-$:.push('./gen-rb')
-
-require 'thrift/transport/tsocket'
-require 'thrift/protocol/tbinaryprotocol'
-
-require 'Hbase'
-
-def printRow(row, values)
-  print "row: #{row}, cols: "
-  values.sort.each do |k,v|
-    print "#{k} => #{v}; "
-  end
-  puts ""
-end
-
-def printEntry(entry)
-  printRow(entry.row, entry.columns)
-end
-
-transport = TBufferedTransport.new(TSocket.new("localhost", 9090))
-protocol = TBinaryProtocol.new(transport)
-client = Apache::Hadoop::Hbase::Thrift::Hbase::Client.new(protocol)
-
-transport.open()
-
-t = "demo_table"
-
-#
-# Scan all tables, look for the demo table and delete it.
-#
-puts "scanning tables..."
-client.getTableNames().sort.each do |name|
-  puts "  found: #{name}"
-  if (name == t) 
-    puts "    deleting table: #{name}" 
-    client.deleteTable(name)
-  end
-end
-
-#
-# Create the demo table with two column families, entry: and unused:
-#
-columns = []
-col = Apache::Hadoop::Hbase::Thrift::ColumnDescriptor.new
-col.name = "entry:"
-col.maxVersions = 10
-columns << col;
-col = Apache::Hadoop::Hbase::Thrift::ColumnDescriptor.new
-col.name = "unused:"
-columns << col;
-
-puts "creating table: #{t}"
-begin
-  client.createTable(t, columns)
-rescue Apache::Hadoop::Hbase::Thrift::AlreadyExists => ae
-  puts "WARN: #{ae.message}"
-end
-
-puts "column families in #{t}: "
-client.getColumnDescriptors(t).sort.each do |key, col|
-  puts "  column: #{col.name}, maxVer: #{col.maxVersions}"
-end
-
-#
-# Test UTF-8 handling
-#
-invalid = "foo-\xfc\xa1\xa1\xa1\xa1\xa1"
-valid = "foo-\xE7\x94\x9F\xE3\x83\x93\xE3\x83\xBC\xE3\x83\xAB";
-
-# non-utf8 is fine for data
-client.put(t, "foo", "entry:foo", invalid)
-
-# try empty strings
-client.put(t, "", "entry:", "");
-
-# this row name is valid utf8
-client.put(t, valid, "entry:foo", valid)
-
-# non-utf8 is not allowed in row names
-begin
-  client.put(t, invalid, "entry:foo", invalid)
-  raise "shouldn't get here!"
-rescue Apache::Hadoop::Hbase::Thrift::IOError => e
-  puts "expected error: #{e.message}"
-end
-
-# Run a scanner on the rows we just created
-puts "Starting scanner..."
-scanner = client.scannerOpen(t, "", ["entry:"])
-begin
-  while (true) 
-    printEntry(client.scannerGet(scanner))
-  end
-rescue Apache::Hadoop::Hbase::Thrift::NotFound => nf
-  client.scannerClose(scanner)
-  puts "Scanner finished"
-end
-
-#
-# Run some operations on a bunch of rows.
-#
-(0..100).to_a.reverse.each do |e|
-  # format row keys as "00000" to "00100"
-  row = format("%0.5d", e)
-
-  client.put(t, row, "unused:", "DELETE_ME");
-  printRow(row, client.getRow(t, row));
-  client.deleteAllRow(t, row)
-
-  client.put(t, row, "entry:num", "0")
-  client.put(t, row, "entry:foo", "FOO")
-  printRow(row, client.getRow(t, row));
-
-  mutations = []
-  m = Apache::Hadoop::Hbase::Thrift::Mutation.new
-  m.column = "entry:foo"
-  m.isDelete = 1
-  mutations << m
-  m = Apache::Hadoop::Hbase::Thrift::Mutation.new
-  m.column = "entry:num"
-  m.value = "-1"
-  mutations << m
-  client.mutateRow(t, row, mutations)
-  printRow(row, client.getRow(t, row));
-
-  client.put(t, row, "entry:num", e.to_s)
-  client.put(t, row, "entry:sqr", (e*e).to_s)
-  printRow(row, client.getRow(t, row));
-  
-  mutations = []
-  m = Apache::Hadoop::Hbase::Thrift::Mutation.new
-  m.column = "entry:num"
-  m.value = "-999"
-  mutations << m
-  m = Apache::Hadoop::Hbase::Thrift::Mutation.new
-  m.column = "entry:sqr"
-  m.isDelete = 1
-  mutations << m
-  client.mutateRowTs(t, row, mutations, 1) # shouldn't override latest
-  printRow(row, client.getRow(t, row));
-
-  versions = client.getVer(t, row, "entry:num", 10)
-  print "row: #{row}, values: "
-  versions.each do |v|
-    print "#{v}; "
-  end
-  puts ""    
-  
-  begin
-    client.get(t, row, "entry:foo")
-    raise "shouldn't get here!"
-  rescue Apache::Hadoop::Hbase::Thrift::NotFound => nf
-    # blank
-  end
-
-  puts ""
-end 
-
-columns = []
-client.getColumnDescriptors(t).each do |col, desc|
-  columns << col
-end
-
-puts "Starting scanner..."
-scanner = client.scannerOpenWithStop(t, "00020", "00040", columns)
-begin
-  while (true) 
-    printEntry(client.scannerGet(scanner))
-  end
-rescue Apache::Hadoop::Hbase::Thrift::NotFound => nf
-  client.scannerClose(scanner)
-  puts "Scanner finished"
-end
-  
-transport.close()

+ 0 - 18
src/contrib/hbase/src/examples/thrift/Makefile

@@ -1,18 +0,0 @@
-# Makefile for C++ Hbase Thrift DemoClient
-#
-# NOTE: run 'thrift -cpp Hbase.thrift' first
-
-THRIFT_DIR = /usr/local/include/thrift
-LIB_DIR = /usr/local/lib
-
-GEN_SRC = ./gen-cpp/Hbase.cpp \
-	  ./gen-cpp/Hbase_types.cpp \
-	  ./gen-cpp/Hbase_constants.cpp
-
-default: DemoClient
-
-DemoClient: DemoClient.cpp
-	g++ -o DemoClient -I${THRIFT_DIR}  -I./gen-cpp -L${LIB_DIR} -lthrift DemoClient.cpp ${GEN_SRC}
-
-clean:
-	rm -rf DemoClient

+ 0 - 15
src/contrib/hbase/src/examples/thrift/README.txt

@@ -1,15 +0,0 @@
-Hbase Thrift Client Examples
-============================
-
-Included in this directory are sample clients of the HBase ThriftServer.  They
-all perform the same actions but are implemented in C++, Java, and Ruby
-respectively.
-
-To compile and run these clients, you will first need to install the thrift package
-(from http://developers.facebook.com/thrift/) and then run thrift to generate
-the language files:
-
-thrift -cpp -java -rb \
-    ../../../src/java/org/apache/hadoop/hbase/thrift/Hbase.thrift
-
-

+ 0 - 215
src/contrib/hbase/src/java/org/apache/hadoop/hbase/BloomFilterDescriptor.java

@@ -1,215 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.WritableComparable;
-
-/** 
- * Supplied as a parameter to HColumnDescriptor to specify what kind of
- * bloom filter to use for a column, and its configuration parameters.
- * 
- * There is no way to automatically determine the vector size and the number of
- * hash functions to use. In particular, bloom filters are very sensitive to the
- * number of elements inserted into them. For HBase, the number of entries
- * depends on the size of the data stored in the column. Currently the default
- * region size is 64MB, so the number of entries is approximately 
- * 64MB / (average value size for column).
- * 
- * If m denotes the number of bits in the Bloom filter (vectorSize),
- * n denotes the number of elements inserted into the Bloom filter and
- * k represents the number of hash functions used (nbHash), then according to
- * Broder and Mitzenmacher,
- * 
- * ( http://www.eecs.harvard.edu/~michaelm/NEWWORK/postscripts/BloomFilterSurvey.pdf )
- * 
- * the probability of false positives is minimized when k is approximately
- * m/n ln(2).
- * 
- */
-public class BloomFilterDescriptor implements WritableComparable {
-  private static final double DEFAULT_NUMBER_OF_HASH_FUNCTIONS = 4.0;
-  
-  /*
-   * Specify the kind of bloom filter that will be instantiated
-   */
-
-  /** The type of bloom filter */
-  public static enum BloomFilterType {
-    /** <i>Bloom filter</i>, as defined by Bloom in 1970. */
-    BLOOMFILTER,
-    /**
-     * <i>Counting Bloom filter</i>, as defined by Fan et al. in a ToN 2000 paper.
-     */
-    COUNTING_BLOOMFILTER,
-    /**
-     * <i>Retouched Bloom filter</i>, as defined in the CoNEXT 2006 paper.
-     */
-    RETOUCHED_BLOOMFILTER
-  }
-
-  /** Default constructor - used in conjunction with Writable */
-  public BloomFilterDescriptor() {
-    super();
-  }
-  
-  /**
-   * Creates a BloomFilterDescriptor for the specified type of filter, fixes
-   * the number of hash functions to 4 and computes a vector size using:
-   * 
-   * vectorSize = ceil((4 * n) / ln(2))
-   * 
-   * @param type
-   * @param numberOfEntries
-   */
-  public BloomFilterDescriptor(final BloomFilterType type,
-      final int numberOfEntries) {
-    
-    switch(type) {
-    case BLOOMFILTER:
-    case COUNTING_BLOOMFILTER:
-    case RETOUCHED_BLOOMFILTER:
-      this.filterType = type;
-      break;
-
-    default:
-      throw new IllegalArgumentException("Invalid bloom filter type: " + type);
-    }
-    this.nbHash = (int) DEFAULT_NUMBER_OF_HASH_FUNCTIONS;
-    this.vectorSize = (int) Math.ceil(
-        (DEFAULT_NUMBER_OF_HASH_FUNCTIONS * (1.0 * numberOfEntries)) /
-        Math.log(2.0));
-  }
-  
-  /**
-   * @param type The kind of bloom filter to use.
-   * @param vectorSize The vector size of <i>this</i> filter.
-   * @param nbHash The number of hash functions to consider.
-   */
-  public BloomFilterDescriptor(final BloomFilterType type, final int vectorSize,
-      final int nbHash) {
-    
-    switch(type) {
-    case BLOOMFILTER:
-    case COUNTING_BLOOMFILTER:
-    case RETOUCHED_BLOOMFILTER:
-      this.filterType = type;
-      break;
-
-    default:
-      throw new IllegalArgumentException("Invalid bloom filter type: " + type);
-    }
-    this.vectorSize = vectorSize;
-    this.nbHash = nbHash;
-  }
-  
-  BloomFilterType filterType;
-  int vectorSize;
-  int nbHash;
-
-  /** {@inheritDoc} */
-  @Override
-  public String toString() {
-    StringBuilder value = new StringBuilder();
-
-    switch(filterType) {
-    case BLOOMFILTER:
-      value.append("standard");
-      break;
-    case COUNTING_BLOOMFILTER:
-      value.append("counting");
-      break;
-    case RETOUCHED_BLOOMFILTER:
-      value.append("retouched");
-    }
-    
-    value.append("(vector size=");
-    value.append(vectorSize);
-    value.append(", number hashes=");
-    value.append(nbHash);
-    value.append(")");
-    
-    return value.toString();
-  }
-
-  public BloomFilterType getType() {
-    return filterType;
-  }
-  
-  public int getVectorSize() {
-    return vectorSize;
-  }
-  
-  public int getNbHash() {
-    return nbHash;
-  }
-  
-  /** {@inheritDoc} */
-  @Override
-  public boolean equals(Object obj) {
-    return compareTo(obj) == 0;
-  }
-  
-  /** {@inheritDoc} */
-  @Override
-  public int hashCode() {
-    int result = this.filterType.hashCode();
-    result ^= Integer.valueOf(this.vectorSize).hashCode();
-    result ^= Integer.valueOf(this.nbHash).hashCode();
-    return result;
-  }
-
-  // Writable
-  
-  /** {@inheritDoc} */
-  public void readFields(DataInput in) throws IOException {
-    int ordinal = in.readInt();
-    this.filterType = BloomFilterType.values()[ordinal];
-    vectorSize = in.readInt();
-    nbHash = in.readInt();
-  }
-  
-  /** {@inheritDoc} */
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(filterType.ordinal());
-    out.writeInt(vectorSize);
-    out.writeInt(nbHash);
-  }
-  
-  // Comparable
-  
-  /** {@inheritDoc} */
-  public int compareTo(Object o) {
-    BloomFilterDescriptor other = (BloomFilterDescriptor)o;
-    int result = this.filterType.ordinal() - other.filterType.ordinal();
-
-    if(result == 0) {
-      result = this.vectorSize - other.vectorSize;
-    }
-    
-    if(result == 0) {
-      result = this.nbHash - other.nbHash;
-    }
-    return result;
-  }
-}
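
The class comment above cites the rule that the false-positive rate is minimized when k is roughly (m/n) ln 2. A minimal sketch of how the two removed constructors relate to that rule, assuming the BloomFilterDescriptor class above is still on the classpath; the entry count and the choice of COUNTING_BLOOMFILTER are illustrative only:

import org.apache.hadoop.hbase.BloomFilterDescriptor;
import org.apache.hadoop.hbase.BloomFilterDescriptor.BloomFilterType;

public class BloomSizingSketch {
  public static void main(String[] args) {
    int n = 500000; // hypothetical number of entries stored in the column

    // Convenience constructor: k is fixed at 4, so m = ceil(4 * n / ln 2).
    BloomFilterDescriptor auto =
        new BloomFilterDescriptor(BloomFilterType.BLOOMFILTER, n);
    System.out.println(auto); // standard(vector size=<ceil(4n/ln 2)>, number hashes=4)

    // Explicit sizing: choose m yourself, then k = round((m / n) * ln 2)
    // is the false-positive-minimizing choice per the class comment.
    int m = 4 * n;
    int k = (int) Math.round(((double) m / n) * Math.log(2.0)); // ~3 here
    BloomFilterDescriptor manual =
        new BloomFilterDescriptor(BloomFilterType.COUNTING_BLOOMFILTER, m, k);
    System.out.println(manual.getVectorSize() + " bits, " + manual.getNbHash() + " hashes");
  }
}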

+ 0 - 36
src/contrib/hbase/src/java/org/apache/hadoop/hbase/CacheFlushListener.java

@@ -1,36 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase;
-
-/**
- * Implementors of this interface want to be notified when an HRegion
- * determines that a cache flush is needed. A CacheFlushListener (or null)
- * must be passed to the HRegion constructor.
- */
-public interface CacheFlushListener {
-
-  /**
-   * Tell the listener the cache needs to be flushed.
-   * 
-   * @param region the HRegion requesting the cache flush
-   */
-  void flushRequested(HRegion region);
-}
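
The interface above has a single callback. A minimal sketch of an implementor, assuming CacheFlushListener and HRegion from the removed sources are on the classpath; the queue-based handoff is illustrative only, not how the removed region server actually wired its listener:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

import org.apache.hadoop.hbase.CacheFlushListener;
import org.apache.hadoop.hbase.HRegion;

public class QueueingFlushListener implements CacheFlushListener {
  private final BlockingQueue<HRegion> toFlush = new LinkedBlockingQueue<HRegion>();

  // Called by an HRegion when it decides its memcache should be flushed.
  public void flushRequested(HRegion region) {
    toFlush.offer(region);
  }

  // A separate flusher thread would drain the queue and flush each region.
  public HRegion takeNext() throws InterruptedException {
    return toFlush.take();
  }
}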

+ 0 - 98
src/contrib/hbase/src/java/org/apache/hadoop/hbase/Chore.java

@@ -1,98 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.util.Sleeper;
-
-/**
- * Chore is a task performed on a period in hbase.  The chore is run in its own
- * thread. This abstract base class provides the while loop and sleeping facility.
- * If an unhandled exception is thrown, the thread exits and the exit is logged.
- * Implementers just need to add a check for whether there is work to be done and,
- * if so, do it.  It's the base of most of the chore threads in hbase.
- */
-public abstract class Chore extends Thread {
-  private final Log LOG = LogFactory.getLog(this.getClass());
-  private final Sleeper sleeper;
-  protected volatile AtomicBoolean stop;
-  
-  /**
-   * @param p Period at which we should run.  Will be adjusted appropriately
-   * should we find work and it takes time to complete.
-   * @param s When this flag is set to true, this thread will clean up and exit
-   * cleanly.
-   */
-  public Chore(final int p, final AtomicBoolean s) {
-    super();
-    this.sleeper = new Sleeper(p, s);
-    this.stop = s;
-  }
-
-  /** {@inheritDoc} */
-  @Override
-  public void run() {
-    try {
-      while (!initialChore()) {
-        this.sleeper.sleep();
-      }
-      this.sleeper.sleep();
-      while(!this.stop.get()) {
-        try {
-          long startTime = System.currentTimeMillis();
-          chore();
-          this.sleeper.sleep(startTime);
-        } catch (Exception e) {
-          LOG.error("Caught exception", e);
-        }
-      }
-    } catch (Throwable t) {
-      LOG.fatal("Caught error. Starting shutdown.", t);
-      this.stop.set(true);
-      
-    } finally {
-      LOG.info(getName() + " exiting");
-    }
-  }
-  
-  /**
-   * Override to run a task before we start looping.
-   * @return true if initial chore was successful
-   */
-  protected boolean initialChore() {
-    // Default does nothing.
-    return true;
-  }
-  
-  /**
-   * Look for chores.  If any found, do them else just return.
-   */
-  protected abstract void chore();
-
-  /**
-   * Sleep for period.
-   */
-  protected void sleep() {
-    this.sleeper.sleep();
-  }
-}
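
The javadoc above notes that implementers only supply the work check; the base class owns the loop, the sleeping, and the error logging. A minimal sketch of a subclass, assuming the removed Chore class is on the classpath; the period and the "work" are illustrative only:

import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.hbase.Chore;

public class HeartbeatChore extends Chore {
  public HeartbeatChore(final AtomicBoolean stopFlag) {
    super(10 * 1000, stopFlag); // wake up roughly every ten seconds
    setName("HeartbeatChore");
  }

  @Override
  protected void chore() {
    // Look for work; here we just log a timestamp.  Exceptions thrown from
    // this method are caught and logged by the base class run() loop.
    System.out.println("heartbeat at " + System.currentTimeMillis());
  }
}

It would be started with new HeartbeatChore(stopFlag).start() and shut down by setting the shared AtomicBoolean to true.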

+ 0 - 32
src/contrib/hbase/src/java/org/apache/hadoop/hbase/DroppedSnapshotException.java

@@ -1,32 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-import java.io.IOException;
-
-
-/**
- * Thrown during a flush if there is a possibility that snapshot content was not
- * properly persisted into store files.  The response should include a replay of
- * the hlog content.
- */
-public class DroppedSnapshotException extends IOException {
-  public DroppedSnapshotException(String msg) {
-    super(msg);
-  }
-
-  public DroppedSnapshotException() {
-    super();
-  }
-}
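A small sketch of the recovery path the javadoc above describes. The flushRegion and replayLog calls are placeholders, not methods from this changeset:

    import java.io.IOException;

    import org.apache.hadoop.hbase.DroppedSnapshotException;

    class FlushExample {
      // flushRegion() and replayLog() are hypothetical helpers.
      void flushWithRecovery() throws IOException {
        try {
          flushRegion();          // hypothetical flush of in-memory edits to store files
        } catch (DroppedSnapshotException e) {
          replayLog();            // per the javadoc above, recover by replaying hlog content
        }
      }

      void flushRegion() throws IOException { /* ... */ }
      void replayLog() throws IOException { /* ... */ }
    }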

+ 0 - 280
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java

@@ -1,280 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.Vector;
-import java.util.Map.Entry;
-import java.util.regex.Pattern;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.Text;
-
-/**
- * Abstract base class that implements HInternalScannerInterface.
- * Used by the concrete HMemcacheScanner and HStoreScanner implementations.
- */
-public abstract class HAbstractScanner implements HInternalScannerInterface {
-  final Log LOG = LogFactory.getLog(this.getClass().getName());
-
-  // Pattern to determine if a column key is a regex
-  static Pattern isRegexPattern =
-    Pattern.compile("^.*[\\\\+|^&*$\\[\\]\\}{)(]+.*$");
-  
-  /** The kind of match we are doing on a column: */
-  private static enum MATCH_TYPE {
-    /** Just check the column family name */
-    FAMILY_ONLY,
-    /** Column family + matches regex */
-    REGEX,
-    /** Literal matching */
-    SIMPLE
-  }
-
-  /**
-   * This class provides column matching functions that are more sophisticated
-   * than a simple string compare. There are three types of matching:
-   * <ol>
-   * <li>Match on the column family name only</li>
-   * <li>Match on the column family + column key regex</li>
-   * <li>Simple match: compare column family + column key literally</li>
-   * </ol>
-   */
-  private static class ColumnMatcher {
-    private boolean wildCardmatch;
-    private MATCH_TYPE matchType;
-    private Text family;
-    private Pattern columnMatcher;
-    private Text col;
-  
-    ColumnMatcher(final Text col) throws IOException {
-      Text qualifier = HStoreKey.extractQualifier(col);
-      try {
-        if(qualifier == null || qualifier.getLength() == 0) {
-          this.matchType = MATCH_TYPE.FAMILY_ONLY;
-          this.family = HStoreKey.extractFamily(col).toText();
-          this.wildCardmatch = true;
-        } else if(isRegexPattern.matcher(qualifier.toString()).matches()) {
-          this.matchType = MATCH_TYPE.REGEX;
-          this.columnMatcher = Pattern.compile(col.toString());
-          this.wildCardmatch = true;
-        } else {
-          this.matchType = MATCH_TYPE.SIMPLE;
-          this.col = col;
-          this.wildCardmatch = false;
-        }
-      } catch(Exception e) {
-        throw new IOException("Column: " + col + ": " + e.getMessage());
-      }
-    }
-    
-    /** Matching method */
-    boolean matches(Text c) throws IOException {
-      if(this.matchType == MATCH_TYPE.SIMPLE) {
-        return c.equals(this.col);
-      } else if(this.matchType == MATCH_TYPE.FAMILY_ONLY) {
-        return HStoreKey.extractFamily(c).equals(this.family);
-      } else if(this.matchType == MATCH_TYPE.REGEX) {
-        return this.columnMatcher.matcher(c.toString()).matches();
-      } else {
-        throw new IOException("Invalid match type: " + this.matchType);
-      }
-    }
-    
-    boolean isWildCardMatch() {
-      return this.wildCardmatch;
-    }
-  }
-
-  protected TreeMap<Text, Vector<ColumnMatcher>> okCols;        // Holds matchers for each column family 
-  
-  protected boolean scannerClosed = false;                      // True when scanning is done
-  
-  // Keys retrieved from the sources
-  protected HStoreKey keys[];
-  // Values that correspond to those keys
-  protected byte [][] vals;
-  
-  protected long timestamp;                                     // The timestamp to match entries against
-  private boolean wildcardMatch;
-  private boolean multipleMatchers;
-
-  /** Constructor for abstract base class */
-  HAbstractScanner(long timestamp, Text[] targetCols) throws IOException {
-    this.timestamp = timestamp;
-    this.wildcardMatch = false;
-    this.multipleMatchers = false;
-    this.okCols = new TreeMap<Text, Vector<ColumnMatcher>>();
-    for(int i = 0; i < targetCols.length; i++) {
-      Text family = HStoreKey.extractFamily(targetCols[i]).toText();
-      Vector<ColumnMatcher> matchers = okCols.get(family);
-      if(matchers == null) {
-        matchers = new Vector<ColumnMatcher>();
-      }
-      ColumnMatcher matcher = new ColumnMatcher(targetCols[i]);
-      if (matcher.isWildCardMatch()) {
-        this.wildcardMatch = true;
-      }
-      matchers.add(matcher);
-      if (matchers.size() > 1) {
-        this.multipleMatchers = true;
-      }
-      okCols.put(family, matchers);
-    }
-  }
-
-  /**
-   * For a particular column i, find all the matchers defined for the column.
-   * Compare the column family and column key using the matchers. The first one
-   * that matches returns true. If no matchers are successful, return false.
-   * 
-   * @param i index into the keys array
-   * @return true  - if any of the matchers for the column match the column family
-   *                 and the column key.
-   *                 
-   * @throws IOException
-   */
-  boolean columnMatch(int i) throws IOException {
-    Text column = keys[i].getColumn();
-    Vector<ColumnMatcher> matchers =
-      okCols.get(HStoreKey.extractFamily(column));
-    if(matchers == null) {
-      return false;
-    }
-    for(int m = 0; m < matchers.size(); m++) {
-      if(matchers.get(m).matches(column)) {
-        return true;
-      }
-    }
-    return false;
-  }
-  
-  /**
-   * If the user didn't want to start scanning at the first row, this method
-   * seeks to the requested row.
-   */
-  abstract boolean findFirstRow(int i, Text firstRow) throws IOException;
-  
-  /** The concrete implementations provide a mechanism to find the next set of values */
-  abstract boolean getNext(int i) throws IOException;
-  
-  /** Mechanism used by concrete implementation to shut down a particular scanner */
-  abstract void closeSubScanner(int i);
-  
-  /** {@inheritDoc} */
-  public boolean isWildcardScanner() {
-    return this.wildcardMatch;
-  }
-  
-  /** {@inheritDoc} */
-  public boolean isMultipleMatchScanner() {
-    return this.multipleMatchers;
-  }
-  
-  /**
-   * Get the next set of values for this scanner.
-   * 
-   * @param key The key that matched
-   * @param results All the results for <code>key</code>
-   * @return true if a match was found
-   * @throws IOException
-   * 
-   * @see org.apache.hadoop.hbase.HScannerInterface#next(org.apache.hadoop.hbase.HStoreKey, java.util.SortedMap)
-   */
-  public boolean next(HStoreKey key, SortedMap<Text, byte []> results)
-  throws IOException {
-    if (scannerClosed) {
-      return false;
-    }
-    // Find the next row label (and timestamp)
-    Text chosenRow = null;
-    long chosenTimestamp = -1;
-    for(int i = 0; i < keys.length; i++) {
-      if((keys[i] != null)
-          && (columnMatch(i))
-          && (keys[i].getTimestamp() <= this.timestamp)
-          && ((chosenRow == null)
-              || (keys[i].getRow().compareTo(chosenRow) < 0)
-              || ((keys[i].getRow().compareTo(chosenRow) == 0)
-                  && (keys[i].getTimestamp() > chosenTimestamp)))) {
-        chosenRow = new Text(keys[i].getRow());
-        chosenTimestamp = keys[i].getTimestamp();
-      }
-    }
-
-    // Grab all the values that match this row/timestamp
-    boolean insertedItem = false;
-    if(chosenRow != null) {
-      key.setRow(chosenRow);
-      key.setVersion(chosenTimestamp);
-      key.setColumn(new Text(""));
-
-      for(int i = 0; i < keys.length; i++) {
-        // Fetch the data
-        while((keys[i] != null)
-            && (keys[i].getRow().compareTo(chosenRow) == 0)) {
-
-          // If we are doing a wild card match or there are multiple matchers
-          // per column, we need to scan all the older versions of this row
-          // to pick up the rest of the family members
-          
-          if(!wildcardMatch
-              && !multipleMatchers
-              && (keys[i].getTimestamp() != chosenTimestamp)) {
-            break;
-          }
-
-          if(columnMatch(i)) {              
-            // We only want the first result for any specific family member
-            if(!results.containsKey(keys[i].getColumn())) {
-              results.put(new Text(keys[i].getColumn()), vals[i]);
-              insertedItem = true;
-            }
-          }
-
-          if(!getNext(i)) {
-            closeSubScanner(i);
-          }
-        }
-
-        // Advance the current scanner beyond the chosen row, to
-        // a valid timestamp, so we're ready next time.
-        
-        while((keys[i] != null)
-            && ((keys[i].getRow().compareTo(chosenRow) <= 0)
-                || (keys[i].getTimestamp() > this.timestamp)
-                || (! columnMatch(i)))) {
-          getNext(i);
-        }
-      }
-    }
-    return insertedItem;
-  }
-  
-  /** {@inheritDoc} */
-  public Iterator<Entry<HStoreKey, SortedMap<Text, byte[]>>> iterator() {
-    throw new UnsupportedOperationException("Unimplemented serverside. " +
-      "next(HStoreKey, StortedMap(...) is more efficient");
-  }
-}
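To illustrate the three ColumnMatcher modes described above, here is a hypothetical set of target columns and how each one would be classified (family and qualifier names are made up):

    import org.apache.hadoop.io.Text;

    class ScanColumnsExample {
      // Hypothetical target columns illustrating the three ColumnMatcher modes above.
      static Text[] targetColumns() {
        return new Text[] {
          new Text("anchor:"),          // FAMILY_ONLY: empty qualifier, matches the whole family
          new Text("info:addr.*"),      // REGEX: the qualifier contains regex metacharacters
          new Text("info:timestamp")    // SIMPLE: literal family:qualifier comparison
        };
      }
    }
    // A scanner built over these columns would report isWildcardScanner() == true, and
    // isMultipleMatchScanner() == true because the two "info:" entries share a family.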

+ 0 - 546
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseAdmin.java

@@ -1,546 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NoSuchElementException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.io.HbaseMapWritable;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.ipc.RemoteException;
-
-/**
- * Provides administrative functions for HBase
- */
-public class HBaseAdmin implements HConstants {
-  protected final Log LOG = LogFactory.getLog(this.getClass().getName());
-
-  protected final HConnection connection;
-  protected final long pause;
-  protected final int numRetries;
-  protected volatile HMasterInterface master;
-  
-  /**
-   * Constructor
-   * 
-   * @param conf Configuration object
-   * @throws MasterNotRunningException
-   */
-  public HBaseAdmin(HBaseConfiguration conf) throws MasterNotRunningException {
-    this.connection = HConnectionManager.getConnection(conf);
-    this.pause = conf.getLong("hbase.client.pause", 30 * 1000);
-    this.numRetries = conf.getInt("hbase.client.retries.number", 5);
-    this.master = connection.getMaster();
-  }
-
-  /**
-   * @return proxy connection to master server for this instance
-   * @throws MasterNotRunningException
-   */
-  public HMasterInterface getMaster() throws MasterNotRunningException{
-    return this.connection.getMaster();
-  }
-  
-  /** @return - true if the master server is running */
-  public boolean isMasterRunning() {
-    return this.connection.isMasterRunning();
-  }
-
-  /**
-   * @param tableName Table to check.
-   * @return True if table exists already.
-   * @throws MasterNotRunningException
-   */
-  public boolean tableExists(final Text tableName) throws MasterNotRunningException {
-    if (this.master == null) {
-      throw new MasterNotRunningException("master has been shut down");
-    }
-    
-    return connection.tableExists(tableName);
-  }
-
-  /**
-   * List all the userspace tables.  In other words, scan the META table.
-   *
-   * If we wanted this to be really fast, we could implement a special
-   * catalog table that just contains table names and their descriptors.
-   * Right now, it only exists as part of the META table's region info.
-   *
-   * @return - returns an array of HTableDescriptors 
-   * @throws IOException
-   */
-  public HTableDescriptor[] listTables() throws IOException {
-    return this.connection.listTables();
-  }
-
-  /**
-   * Creates a new table
-   * 
-   * @param desc table descriptor for table
-   * 
-   * @throws IllegalArgumentException if the table name is reserved
-   * @throws MasterNotRunningException if master is not running
-   * @throws NoServerForRegionException if root region is not being served
-   * @throws TableExistsException if table already exists (with concurrent
-   * callers, the table may have been created between the test for existence
-   * and the attempt at creation).
-   * @throws IOException
-   */
-  public void createTable(HTableDescriptor desc)
-  throws IOException {
-    createTableAsync(desc);
-
-    for (int tries = 0; tries < numRetries; tries++) {
-      try {
-        // Wait for new table to come on-line
-        connection.locateRegion(desc.getName(), EMPTY_START_ROW);
-        break;
-        
-      } catch (TableNotFoundException e) {
-        if (tries == numRetries - 1) {
-          // Ran out of tries
-          throw e;
-        }
-      }
-      try {
-        Thread.sleep(pause);
-      } catch (InterruptedException e) {
-        // continue
-      }
-    }
-  }
-  
-  /**
-   * Creates a new table but does not block and wait for it to come online.
-   * 
-   * @param desc table descriptor for table
-   * 
-   * @throws IllegalArgumentException if the table name is reserved
-   * @throws MasterNotRunningException if master is not running
-   * @throws NoServerForRegionException if root region is not being served
-   * @throws TableExistsException if table already exists (with concurrent
-   * callers, the table may have been created between the test for existence
-   * and the attempt at creation).
-   * @throws IOException
-   */
-  public void createTableAsync(HTableDescriptor desc)
-  throws IOException {
-    if (this.master == null) {
-      throw new MasterNotRunningException("master has been shut down");
-    }
-    checkReservedTableName(desc.getName());
-    try {
-      this.master.createTable(desc);
-    } catch (RemoteException e) {
-      throw RemoteExceptionHandler.decodeRemoteException(e);
-    }
-  }
-
-  /**
-   * Deletes a table
-   * 
-   * @param tableName name of table to delete
-   * @throws IOException
-   */
-  public void deleteTable(Text tableName) throws IOException {
-    if (this.master == null) {
-      throw new MasterNotRunningException("master has been shut down");
-    }
-    
-    checkReservedTableName(tableName);
-    HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
-
-    try {
-      this.master.deleteTable(tableName);
-    } catch (RemoteException e) {
-      throw RemoteExceptionHandler.decodeRemoteException(e);
-    }
-
-    // Wait until first region is deleted
-    HRegionInterface server =
-      connection.getHRegionConnection(firstMetaServer.getServerAddress());
-    HRegionInfo info = new HRegionInfo();
-    for (int tries = 0; tries < numRetries; tries++) {
-      long scannerId = -1L;
-      try {
-        scannerId =
-          server.openScanner(firstMetaServer.getRegionInfo().getRegionName(),
-            COL_REGIONINFO_ARRAY, tableName, System.currentTimeMillis(), null);
-        HbaseMapWritable values = server.next(scannerId);
-        if (values == null || values.size() == 0) {
-          break;
-        }
-        boolean found = false;
-        for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-          HStoreKey key = (HStoreKey) e.getKey();
-          if (key.getColumn().equals(COL_REGIONINFO)) {
-            info = (HRegionInfo) Writables.getWritable(
-                  ((ImmutableBytesWritable) e.getValue()).get(), info);
-            
-            if (info.getTableDesc().getName().equals(tableName)) {
-              found = true;
-            }
-          }
-        }
-        if (!found) {
-          break;
-        }
-
-      } catch (IOException ex) {
-        if(tries == numRetries - 1) {           // no more tries left
-          if (ex instanceof RemoteException) {
-            ex = RemoteExceptionHandler.decodeRemoteException((RemoteException) ex);
-          }
-          throw ex;
-        }
-
-      } finally {
-        if (scannerId != -1L) {
-          try {
-            server.close(scannerId);
-          } catch (Exception ex) {
-            LOG.warn(ex);
-          }
-        }
-      }
-
-      try {
-        Thread.sleep(pause);
-      } catch (InterruptedException e) {
-        // continue
-      }
-    }
-    LOG.info("table " + tableName + " deleted");
-  }
-
-  /**
-   * Brings a table on-line (enables it)
-   * 
-   * @param tableName name of the table
-   * @throws IOException
-   */
-  public void enableTable(Text tableName) throws IOException {
-    if (this.master == null) {
-      throw new MasterNotRunningException("master has been shut down");
-    }
-    
-    checkReservedTableName(tableName);
-    HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
-    
-    try {
-      this.master.enableTable(tableName);
-      
-    } catch (RemoteException e) {
-      throw RemoteExceptionHandler.decodeRemoteException(e);
-    }
-
-    // Wait until first region is enabled
-    
-    HRegionInterface server =
-      connection.getHRegionConnection(firstMetaServer.getServerAddress());
-
-    HRegionInfo info = new HRegionInfo();
-    for (int tries = 0; tries < numRetries; tries++) {
-      int valuesfound = 0;
-      long scannerId = -1L;
-      try {
-        scannerId =
-          server.openScanner(firstMetaServer.getRegionInfo().getRegionName(),
-            COL_REGIONINFO_ARRAY, tableName, System.currentTimeMillis(), null);
-        boolean isenabled = false;
-        
-        while (true) {
-          HbaseMapWritable values = server.next(scannerId);
-          if (values == null || values.size() == 0) {
-            if (valuesfound == 0) {
-              throw new NoSuchElementException(
-                  "table " + tableName + " not found");
-            }
-            break;
-          }
-          valuesfound += 1;
-          for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-            HStoreKey key = (HStoreKey) e.getKey();
-            if (key.getColumn().equals(COL_REGIONINFO)) {
-              info = (HRegionInfo) Writables.getWritable(
-                    ((ImmutableBytesWritable) e.getValue()).get(), info);
-            
-              isenabled = !info.isOffline();
-              break;
-            }
-          }
-          if (isenabled) {
-            break;
-          }
-        }
-        if (isenabled) {
-          break;
-        }
-        
-      } catch (IOException e) {
-        if (tries == numRetries - 1) {                  // no more retries
-          if (e instanceof RemoteException) {
-            e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-          }
-          throw e;
-        }
-        
-      } finally {
-        if (scannerId != -1L) {
-          try {
-            server.close(scannerId);
-            
-          } catch (Exception e) {
-            LOG.warn(e);
-          }
-        }
-      }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Sleep. Waiting for first region to be enabled from " +
-            tableName);
-      }
-      try {
-        Thread.sleep(pause);
-        
-      } catch (InterruptedException e) {
-        // continue
-      }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Wake. Waiting for first region to be enabled from " +
-            tableName);
-      }
-    }
-    LOG.info("Enabled table " + tableName);
-  }
-
-  /**
-   * Disables a table (takes it off-line). If it is being served, the master
-   * will tell the servers to stop serving it.
-   * 
-   * @param tableName name of table
-   * @throws IOException
-   */
-  public void disableTable(Text tableName) throws IOException {
-    if (this.master == null) {
-      throw new MasterNotRunningException("master has been shut down");
-    }
-    
-    checkReservedTableName(tableName);
-    HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
-
-    try {
-      this.master.disableTable(tableName);
-      
-    } catch (RemoteException e) {
-      throw RemoteExceptionHandler.decodeRemoteException(e);
-    }
-
-    // Wait until first region is disabled
-    
-    HRegionInterface server =
-      connection.getHRegionConnection(firstMetaServer.getServerAddress());
-
-    HRegionInfo info = new HRegionInfo();
-    for(int tries = 0; tries < numRetries; tries++) {
-      int valuesfound = 0;
-      long scannerId = -1L;
-      try {
-        scannerId =
-          server.openScanner(firstMetaServer.getRegionInfo().getRegionName(),
-            COL_REGIONINFO_ARRAY, tableName, System.currentTimeMillis(), null);
-        
-        boolean disabled = false;
-        while (true) {
-          HbaseMapWritable values = server.next(scannerId);
-          if (values == null || values.size() == 0) {
-            if (valuesfound == 0) {
-              throw new NoSuchElementException("table " + tableName + " not found");
-            }
-            break;
-          }
-          valuesfound += 1;
-          for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-            HStoreKey key = (HStoreKey) e.getKey();
-            if (key.getColumn().equals(COL_REGIONINFO)) {
-              info = (HRegionInfo) Writables.getWritable(
-                    ((ImmutableBytesWritable) e.getValue()).get(), info);
-            
-              disabled = info.isOffline();
-              break;
-            }
-          }
-          if (disabled) {
-            break;
-          }
-        }
-        if (disabled) {
-          break;
-        }
-        
-      } catch (IOException e) {
-        if (tries == numRetries - 1) {                  // no more retries
-          if (e instanceof RemoteException) {
-            e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-          }
-          throw e;
-        }
-        
-      } finally {
-        if (scannerId != -1L) {
-          try {
-            server.close(scannerId);
-            
-          } catch (Exception e) {
-            LOG.warn(e);
-          }
-        }
-      }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Sleep. Waiting for first region to be disabled from " +
-            tableName);
-      }
-      try {
-        Thread.sleep(pause);
-      } catch (InterruptedException e) {
-        // continue
-      }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Wake. Waiting for first region to be disabled from " +
-            tableName);
-      }
-    }
-    LOG.info("Disabled table " + tableName);
-  }
-  
-  /**
-   * Add a column to an existing table
-   * 
-   * @param tableName name of the table to add column to
-   * @param column column descriptor of column to be added
-   * @throws IOException
-   */
-  public void addColumn(Text tableName, HColumnDescriptor column)
-  throws IOException {
-    if (this.master == null) {
-      throw new MasterNotRunningException("master has been shut down");
-    }
-    
-    checkReservedTableName(tableName);
-    try {
-      this.master.addColumn(tableName, column);
-      
-    } catch (RemoteException e) {
-      throw RemoteExceptionHandler.decodeRemoteException(e);
-    }
-  }
-
-  /**
-   * Delete a column from a table
-   * 
-   * @param tableName name of table
-   * @param columnName name of column to be deleted
-   * @throws IOException
-   */
-  public void deleteColumn(Text tableName, Text columnName)
-  throws IOException {
-    if (this.master == null) {
-      throw new MasterNotRunningException("master has been shut down");
-    }
-    
-    checkReservedTableName(tableName);
-    try {
-      this.master.deleteColumn(tableName, columnName);
-      
-    } catch (RemoteException e) {
-      throw RemoteExceptionHandler.decodeRemoteException(e);
-    }
-  }
-
-  /**
-   * Modify an existing column family on a table
-   * 
-   * @param tableName name of table
-   * @param columnName name of column to be modified
-   * @param descriptor new column descriptor to use
-   * @throws IOException
-   */
-  public void modifyColumn(Text tableName, Text columnName, 
-    HColumnDescriptor descriptor)
-  throws IOException {
-    if (this.master == null) {
-      throw new MasterNotRunningException("master has been shut down");
-    }
-    
-    checkReservedTableName(tableName);
-    try {
-      this.master.modifyColumn(tableName, columnName, descriptor);
-      
-    } catch (RemoteException e) {
-      throw RemoteExceptionHandler.decodeRemoteException(e);
-    }
-  }
-
-  
-  /** 
-   * Shuts down the HBase instance 
-   * @throws IOException
-   */
-  public synchronized void shutdown() throws IOException {
-    if (this.master == null) {
-      throw new MasterNotRunningException("master has been shut down");
-    }
-    
-    try {
-      this.master.shutdown();
-    } catch (RemoteException e) {
-      throw RemoteExceptionHandler.decodeRemoteException(e);
-    } finally {
-      this.master = null;
-    }
-  }
-
-  /*
-   * Verifies that the specified table name is not a reserved name
-   * @param tableName - the table name to be checked
-   * @throws IllegalArgumentException - if the table name is reserved
-   */
-  protected void checkReservedTableName(Text tableName) {
-    if (tableName == null || tableName.getLength() <= 0) {
-      throw new IllegalArgumentException("Null or empty table name");
-    }
-    if(tableName.charAt(0) == '-' ||
-        tableName.charAt(0) == '.' ||
-        tableName.find(",") != -1) {
-      throw new IllegalArgumentException(tableName + " is a reserved table name");
-    }
-  }
-  
-  private HRegionLocation getFirstMetaServerForTable(Text tableName)
-  throws IOException {
-    Text tableKey = new Text(tableName.toString() + ",,99999999999999");
-    return connection.locateRegion(META_TABLE_NAME, tableKey);
-  }
-}
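A minimal administrative sketch against the removed client API. The table and family names are made up, and the HTableDescriptor constructor and addFamily call are assumptions, since that class is not part of this hunk:

    import org.apache.hadoop.io.Text;

    import org.apache.hadoop.hbase.HBaseAdmin;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;

    class AdminExample {
      public static void main(String[] args) throws Exception {
        HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());

        // Assumed: HTableDescriptor(String) and addFamily(...) are not shown in this hunk.
        HTableDescriptor desc = new HTableDescriptor("webtable");
        desc.addFamily(new HColumnDescriptor("contents:"));        // family names end in ':'

        admin.createTable(desc);                                    // blocks until the first region is on-line
        admin.addColumn(new Text("webtable"), new HColumnDescriptor("anchor:"));

        admin.disableTable(new Text("webtable"));                   // take it off-line before dropping it
        admin.deleteTable(new Text("webtable"));
      }
    }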

+ 0 - 51
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java

@@ -1,51 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.util.Map.Entry;
-
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Adds HBase configuration files to a Configuration
- */
-public class HBaseConfiguration extends Configuration {
-  /** constructor */
-  public HBaseConfiguration() {
-    super();
-    addHbaseResources();
-  }
-  
-  /**
-   * Create a clone of passed configuration.
-   * @param c Configuration to clone.
-   */
-  public HBaseConfiguration(final Configuration c) {
-    this();
-    for (Entry<String, String>e: c) {
-      set(e.getKey(), e.getValue());
-    }
-  }
-  
-  private void addHbaseResources() {
-    addResource("hbase-default.xml");
-    addResource("hbase-site.xml");
-  }
-}
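A short sketch of the class above; the two client properties read here are the same ones consulted by HBaseAdmin and HConnectionManager elsewhere in this changeset:

    import org.apache.hadoop.hbase.HBaseConfiguration;

    class ConfExample {
      public static void main(String[] args) {
        // Layers hbase-default.xml and hbase-site.xml on top of the Hadoop defaults.
        HBaseConfiguration conf = new HBaseConfiguration();
        long pause   = conf.getLong("hbase.client.pause", 30 * 1000);
        int  retries = conf.getInt("hbase.client.retries.number", 5);
        System.out.println("client pause=" + pause + "ms, retries=" + retries);
      }
    }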

+ 0 - 352
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java

@@ -1,352 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableComparable;
-
-import org.apache.hadoop.hbase.io.TextSequence;
-
-/**
- * An HColumnDescriptor contains information about a column family such as the
- * number of versions, compression settings, etc.
- * 
- * It is used as input when creating a table or adding a column. Once set, the
- * parameters that specify a column cannot be changed without deleting the
- * column and recreating it. If there is data stored in the column, it will be
- * deleted when the column is deleted.
- */
-public class HColumnDescriptor implements WritableComparable {
-  
-  // For future backward compatibility
-  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte)1;
-  
-  /** Legal family names can only contain 'word characters' and end in a colon. */
-  public static final Pattern LEGAL_FAMILY_NAME = Pattern.compile("\\w+:");
-
-  /** 
-   * The type of compression.
-   * @see org.apache.hadoop.io.SequenceFile.Writer
-   */
-  public static enum CompressionType {
-    /** Do not compress records. */
-    NONE, 
-    /** Compress values only, each separately. */
-    RECORD,
-    /** Compress sequences of records together in blocks. */
-    BLOCK
-  }
-  
-  /**
-   * Default compression type.
-   */
-  public static final CompressionType DEFAULT_COMPRESSION_TYPE =
-    CompressionType.NONE;
-  
-  /**
-   * Default number of versions of a record to keep.
-   */
-  public static final int DEFAULT_N_VERSIONS = 3;
-  
-  /**
-   * Default setting for whether to serve from memory or not.
-   */
-  public static final boolean DEFAULT_IN_MEMORY = false;
-  
-  /**
-   * Default maximum length of cell contents.
-   */
-  public static final int DEFAULT_MAX_VALUE_LENGTH = Integer.MAX_VALUE;
-  
-  /**
-   * Default bloom filter description.
-   */
-  public static final BloomFilterDescriptor DEFAULT_BLOOM_FILTER_DESCRIPTOR =
-    null;
-  
-  // Column family name
-  private Text name;
-  // Number of versions to keep
-  private int maxVersions;
-  // Compression setting if any
-  private CompressionType compressionType;
-  // Serve reads from in-memory cache
-  private boolean inMemory;
-  // Maximum value size
-  private int maxValueLength;
-  // True if bloom filter was specified
-  private boolean bloomFilterSpecified;
-  // Descriptor of bloom filter
-  private BloomFilterDescriptor bloomFilter;
-  // Version number of this class
-  private byte versionNumber;
-  // Family name without the ':'
-  private transient Text familyName = null;
-  
-  /**
-   * Default constructor. Must be present for Writable.
-   */
-  public HColumnDescriptor() {
-    this(null);
-  }
-  
-  /**
-   * Construct a column descriptor specifying only the family name.
-   * The other attributes are defaulted.
-   * 
-   * @param columnName - column family name
-   */
-  public HColumnDescriptor(String columnName) {
-    this(columnName == null || columnName.length() <= 0?
-      new Text(): new Text(columnName),
-      DEFAULT_N_VERSIONS, DEFAULT_COMPRESSION_TYPE, DEFAULT_IN_MEMORY,
-      Integer.MAX_VALUE, DEFAULT_BLOOM_FILTER_DESCRIPTOR);
-  }
-  
-  /**
-   * Constructor
-   * Specify all parameters.
-   * @param name Column family name
-   * @param maxVersions Maximum number of versions to keep
-   * @param compression Compression type
-   * @param inMemory If true, column data should be kept in an HRegionServer's
-   * cache
-   * @param maxValueLength Restrict values to &lt;= this value
-   * @param bloomFilter Enable the specified bloom filter for this column
-   * 
-   * @throws IllegalArgumentException if passed a family name that is made of 
-   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> and does not
-   * end in a <code>:</code>
-   * @throws IllegalArgumentException if the number of versions is &lt;= 0
-   */
-  public HColumnDescriptor(final Text name, final int maxVersions,
-      final CompressionType compression, final boolean inMemory,
-      final int maxValueLength, final BloomFilterDescriptor bloomFilter) {
-    String familyStr = name.toString();
-    // Test name if not null (It can be null when deserializing after
-    // construction but before we've read in the fields);
-    if (familyStr.length() > 0) {
-      Matcher m = LEGAL_FAMILY_NAME.matcher(familyStr);
-      if(m == null || !m.matches()) {
-        throw new IllegalArgumentException("Illegal family name <" + name +
-          ">. Family names can only contain " +
-          "'word characters' and must end with a ':'");
-      }
-    }
-    this.name = name;
-
-    if(maxVersions <= 0) {
-      // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
-      // Until there is support, consider 0 or < 0 -- a configuration error.
-      throw new IllegalArgumentException("Maximum versions must be positive");
-    }
-    this.maxVersions = maxVersions;
-    this.inMemory = inMemory;
-    this.maxValueLength = maxValueLength;
-    this.bloomFilter = bloomFilter;
-    this.bloomFilterSpecified = this.bloomFilter == null ? false : true;
-    this.versionNumber = COLUMN_DESCRIPTOR_VERSION;
-    this.compressionType = compression;
-  }
-  
-  /** @return name of column family */
-  public Text getName() {
-    return name;
-  }
-
-  /** @return name of column family without trailing ':' */
-  public synchronized Text getFamilyName() {
-    if (name != null) {
-      if (familyName == null) {
-        familyName = new TextSequence(name, 0, name.getLength() - 1).toText();
-      }
-      return familyName;
-    }
-    return null;
-  }
-  
-  /** @return compression type being used for the column family */
-  public CompressionType getCompression() {
-    return this.compressionType;
-  }
-  
-  /** @return maximum number of versions */
-  public int getMaxVersions() {
-    return this.maxVersions;
-  }
-  
-  /**
-   * @return Compression type setting.
-   */
-  public CompressionType getCompressionType() {
-    return this.compressionType;
-  }
-
-  /**
-   * @return True if column data is to be kept in the HRegionServer's in-memory cache.
-   */
-  public boolean isInMemory() {
-    return this.inMemory;
-  }
-
-  /**
-   * @return Maximum value length.
-   */
-  public int getMaxValueLength() {
-    return this.maxValueLength;
-  }
-
-  /**
-   * @return Bloom filter descriptor or null if none set.
-   */
-  public BloomFilterDescriptor getBloomFilter() {
-    return this.bloomFilter;
-  }
-
-  /** {@inheritDoc} */
-  @Override
-  public String toString() {
-    // Output a name minus ':'.
-    String tmp = name.toString();
-    return "{name: " + tmp.substring(0, tmp.length() - 1) +
-      ", max versions: " + maxVersions +
-      ", compression: " + this.compressionType + ", in memory: " + inMemory +
-      ", max length: " + maxValueLength + ", bloom filter: " +
-      (bloomFilterSpecified ? bloomFilter.toString() : "none") + "}";
-  }
-  
-  /** {@inheritDoc} */
-  @Override
-  public boolean equals(Object obj) {
-    return compareTo(obj) == 0;
-  }
-  
-  /** {@inheritDoc} */
-  @Override
-  public int hashCode() {
-    int result = this.name.hashCode();
-    result ^= Integer.valueOf(this.maxVersions).hashCode();
-    result ^= this.compressionType.hashCode();
-    result ^= Boolean.valueOf(this.inMemory).hashCode();
-    result ^= Integer.valueOf(this.maxValueLength).hashCode();
-    result ^= Boolean.valueOf(this.bloomFilterSpecified).hashCode();
-    result ^= Byte.valueOf(this.versionNumber).hashCode();
-    if(this.bloomFilterSpecified) {
-      result ^= this.bloomFilter.hashCode();
-    }
-    return result;
-  }
-  
-  // Writable
-
-  /** {@inheritDoc} */
-  public void readFields(DataInput in) throws IOException {
-    this.versionNumber = in.readByte();
-    this.name.readFields(in);
-    this.maxVersions = in.readInt();
-    int ordinal = in.readInt();
-    this.compressionType = CompressionType.values()[ordinal];
-    this.inMemory = in.readBoolean();
-    this.maxValueLength = in.readInt();
-    this.bloomFilterSpecified = in.readBoolean();
-    
-    if(bloomFilterSpecified) {
-      bloomFilter = new BloomFilterDescriptor();
-      bloomFilter.readFields(in);
-    }
-  }
-
-  /** {@inheritDoc} */
-  public void write(DataOutput out) throws IOException {
-    out.writeByte(this.versionNumber);
-    this.name.write(out);
-    out.writeInt(this.maxVersions);
-    out.writeInt(this.compressionType.ordinal());
-    out.writeBoolean(this.inMemory);
-    out.writeInt(this.maxValueLength);
-    out.writeBoolean(this.bloomFilterSpecified);
-    
-    if(bloomFilterSpecified) {
-      bloomFilter.write(out);
-    }
-  }
-
-  // Comparable
-
-  /** {@inheritDoc} */
-  public int compareTo(Object o) {
-    // NOTE: we don't do anything with the version number yet.
-    // Version numbers will come into play when we introduce an incompatible
-    // change in the future such as the addition of access control lists.
-    
-    HColumnDescriptor other = (HColumnDescriptor)o;
-    
-    int result = this.name.compareTo(other.getName());
-    
-    if(result == 0) {
-      result = Integer.valueOf(this.maxVersions).compareTo(
-          Integer.valueOf(other.maxVersions));
-    }
-    
-    if(result == 0) {
-      result = this.compressionType.compareTo(other.compressionType);
-    }
-    
-    if(result == 0) {
-      if(this.inMemory == other.inMemory) {
-        result = 0;
-        
-      } else if(this.inMemory) {
-        result = -1;
-        
-      } else {
-        result = 1;
-      }
-    }
-    
-    if(result == 0) {
-      result = other.maxValueLength - this.maxValueLength;
-    }
-    
-    if(result == 0) {
-      if(this.bloomFilterSpecified == other.bloomFilterSpecified) {
-        result = 0;
-        
-      } else if(this.bloomFilterSpecified) {
-        result = -1;
-        
-      } else {
-        result = 1;
-      }
-    }
-    
-    if(result == 0 && this.bloomFilterSpecified) {
-      result = this.bloomFilter.compareTo(other.bloomFilter);
-    }
-    
-    return result;
-  }
-}
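A sketch of the full-argument constructor above, using only constants and types defined in this file; the family name is made up:

    import org.apache.hadoop.io.Text;

    import org.apache.hadoop.hbase.HColumnDescriptor;

    class ColumnDescriptorExample {
      // Same defaults as new HColumnDescriptor("anchor:"), except one version per cell
      // and block compression.
      static HColumnDescriptor anchorFamily() {
        return new HColumnDescriptor(
            new Text("anchor:"),                          // family name, must end in ':'
            1,                                            // maxVersions
            HColumnDescriptor.CompressionType.BLOCK,      // compression
            HColumnDescriptor.DEFAULT_IN_MEMORY,          // serve from in-memory cache?
            HColumnDescriptor.DEFAULT_MAX_VALUE_LENGTH,   // max value length
            null);                                        // no bloom filter
      }
    }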

+ 0 - 95
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnection.java

@@ -1,95 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.util.SortedMap;
-
-import org.apache.hadoop.io.Text;
-
-/**
- * A cluster connection.  Knows how to locate the master and regions, and hands
- * out connections to the region servers hosting them.
- */
-public interface HConnection {
-  /**
-   * @return proxy connection to master server for this instance
-   * @throws MasterNotRunningException
-   */
-  public HMasterInterface getMaster() throws MasterNotRunningException;
-
-  /** @return - true if the master server is running */
-  public boolean isMasterRunning();
-  
-  /**
-   * Checks if <code>tableName</code> exists.
-   * @param tableName Table to check.
-   * @return True if table exists already.
-   */
-  public boolean tableExists(final Text tableName);
-  
-  /**
-   * List all the userspace tables.  In other words, scan the META table.
-   *
-   * If we wanted this to be really fast, we could implement a special
-   * catalog table that just contains table names and their descriptors.
-   * Right now, it only exists as part of the META table's region info.
-   *
-   * @return - returns an array of HTableDescriptors 
-   * @throws IOException
-   */
-  public HTableDescriptor[] listTables() throws IOException;
-  
-  /**
-   * Find the location of the region of <i>tableName</i> that <i>row</i>
-   * lives in.
-   * @param tableName name of the table <i>row</i> is in
-   * @param row row key you're trying to find the region of
-   * @return HRegionLocation that describes where to find the region in
-   * question
-   */
-  public HRegionLocation locateRegion(Text tableName, Text row)
-  throws IOException;
-  
-  /**
-   * Find the location of the region of <i>tableName</i> that <i>row</i>
-   * lives in, ignoring any value that might be in the cache.
-   * @param tableName name of the table <i>row</i> is in
-   * @param row row key you're trying to find the region of
-   * @return HRegionLocation that describes where to find the region in
-   * question
-   */
-  public HRegionLocation relocateRegion(Text tableName, Text row)
-  throws IOException;  
-  
-  /** 
-   * Establishes a connection to the region server at the specified address.
-   * @param regionServer - the server to connect to
-   * @return proxy for HRegionServer
-   * @throws IOException
-   */
-  public HRegionInterface getHRegionConnection(HServerAddress regionServer)
-  throws IOException;
-  
-  /**
-   * Discard all the information about this table
-   * @param tableName the name of the table to close
-   */
-  public void close(Text tableName);
-}
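A sketch of obtaining a connection through HConnectionManager (removed below) and using the interface above; the table name and row key are made up:

    import org.apache.hadoop.io.Text;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConnection;
    import org.apache.hadoop.hbase.HConnectionManager;
    import org.apache.hadoop.hbase.HRegionInterface;
    import org.apache.hadoop.hbase.HRegionLocation;

    class ConnectionExample {
      public static void main(String[] args) throws Exception {
        HBaseConfiguration conf = new HBaseConfiguration();
        // Connections are cached per instance (keyed on the HBASE_DIR setting), so
        // repeated calls with an equivalent configuration return the same object.
        HConnection connection = HConnectionManager.getConnection(conf);

        if (connection.isMasterRunning() && connection.tableExists(new Text("webtable"))) {
          HRegionLocation loc = connection.locateRegion(new Text("webtable"), new Text("row1"));
          HRegionInterface server = connection.getHRegionConnection(loc.getServerAddress());
          // ... issue region server RPCs against 'server' here ...
        }

        HConnectionManager.deleteConnection(conf);   // drop the cached connection info
      }
    }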

+ 0 - 769
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java

@@ -1,769 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.ipc.HbaseRPC;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.hbase.io.HbaseMapWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.ipc.RemoteException;
-
-/**
- * A non-instantiable class that manages connections to multiple tables in
- * multiple HBase instances
- */
-public class HConnectionManager implements HConstants {
-  /*
-   * Private. Not instantiable.
-   */
-  private HConnectionManager() {
-    super();
-  }
-  
-  // A Map of master HServerAddress -> connection information for that instance
-  // Note that although the Map is synchronized, the objects it contains
-  // are mutable and hence require synchronized access to them
-  
-  private static final Map<String, TableServers> HBASE_INSTANCES =
-    Collections.synchronizedMap(new HashMap<String, TableServers>());
-
-  /**
-   * Get the connection object for the instance specified by the configuration.
-   * If no current connection exists, create a new connection for that instance.
-   * @param conf
-   * @return HConnection object for the instance specified by the configuration
-   */
-  public static HConnection getConnection(HBaseConfiguration conf) {
-    TableServers connection;
-    synchronized (HBASE_INSTANCES) {
-      String instanceName = conf.get(HBASE_DIR, DEFAULT_HBASE_DIR);
-
-      connection = HBASE_INSTANCES.get(instanceName);
-
-      if (connection == null) {
-        connection = new TableServers(conf);
-        HBASE_INSTANCES.put(instanceName, connection);
-      }
-    }
-    return connection;
-  }
-  
-  /**
-   * Delete connection information for the instance specified by the configuration
-   * @param conf
-   */
-  public static void deleteConnection(HBaseConfiguration conf) {
-    synchronized (HBASE_INSTANCES) {
-      TableServers instance =
-        HBASE_INSTANCES.remove(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
-      if (instance != null) {
-        instance.closeAll();
-      }
-    }    
-  }
-  
-  /* Encapsulates finding the servers for an HBase instance */
-  private static class TableServers implements HConnection, HConstants {
-    private static final Log LOG = LogFactory.getLog(TableServers.class);
-    private final Class<? extends HRegionInterface> serverInterfaceClass;
-    private final long pause;
-    private final int numRetries;
-
-    private final Integer masterLock = new Integer(0);
-    private volatile boolean closed;
-    private volatile HMasterInterface master;
-    private volatile boolean masterChecked;
-    
-    private final Integer rootRegionLock = new Integer(0);
-    private final Integer metaRegionLock = new Integer(0);
-    private final Integer userRegionLock = new Integer(0);
-        
-    private volatile HBaseConfiguration conf;
-
-    // Set of closed tables
-    private Set<Text> closedTables;
-    
-    // Known region HServerAddress.toString() -> HRegionInterface 
-    private Map<String, HRegionInterface> servers;
-
-    private HRegionLocation rootRegionLocation; 
-    
-    private Map<Text, SortedMap<Text, HRegionLocation>> cachedRegionLocations;
-    
-    /** 
-     * constructor
-     * @param conf Configuration object
-     */
-    @SuppressWarnings("unchecked")
-    public TableServers(HBaseConfiguration conf) {
-      this.conf = LocalHBaseCluster.doLocal(new HBaseConfiguration(conf));
-      
-      String serverClassName =
-        conf.get(REGION_SERVER_CLASS, DEFAULT_REGION_SERVER_CLASS);
-
-      this.closed = false;
-      
-      try {
-        this.serverInterfaceClass =
-          (Class<? extends HRegionInterface>) Class.forName(serverClassName);
-        
-      } catch (ClassNotFoundException e) {
-        throw new UnsupportedOperationException(
-            "Unable to find region server interface " + serverClassName, e);
-      }
-
-      this.pause = conf.getLong("hbase.client.pause", 30 * 1000);
-      this.numRetries = conf.getInt("hbase.client.retries.number", 5);
-      
-      this.master = null;
-      this.masterChecked = false;
-
-      this.cachedRegionLocations = 
-        new ConcurrentHashMap<Text, SortedMap<Text, HRegionLocation>>();
-      this.closedTables = Collections.synchronizedSet(new HashSet<Text>());
-      this.servers = new ConcurrentHashMap<String, HRegionInterface>();
-    }
-    
-    /** {@inheritDoc} */
-    public HMasterInterface getMaster() throws MasterNotRunningException {
-      synchronized (this.masterLock) {
-        for (int tries = 0;
-          !this.closed &&
-          !this.masterChecked && this.master == null &&
-          tries < numRetries;
-        tries++) {
-          
-          HServerAddress masterLocation = new HServerAddress(this.conf.get(
-              MASTER_ADDRESS, DEFAULT_MASTER_ADDRESS));
-
-          try {
-            HMasterInterface tryMaster = (HMasterInterface)HbaseRPC.getProxy(
-                HMasterInterface.class, HMasterInterface.versionID, 
-                masterLocation.getInetSocketAddress(), this.conf);
-            
-            if (tryMaster.isMasterRunning()) {
-              this.master = tryMaster;
-              break;
-            }
-            
-          } catch (IOException e) {
-            if(tries == numRetries - 1) {
-              // This was our last chance - don't bother sleeping
-              break;
-            }
-            LOG.info("Attempt " + tries + " of " + this.numRetries +
-                " failed with <" + e + ">. Retrying after sleep of " + this.pause);
-          }
-
-          // We either cannot connect to master or it is not running. Sleep & retry
-          
-          try {
-            Thread.sleep(this.pause);
-          } catch (InterruptedException e) {
-            // continue
-          }
-        }
-        this.masterChecked = true;
-      }
-      if (this.master == null) {
-        throw new MasterNotRunningException();
-      }
-      return this.master;
-    }
-
-    /** {@inheritDoc} */
-    public boolean isMasterRunning() {
-      if (this.master == null) {
-        try {
-          getMaster();
-          
-        } catch (MasterNotRunningException e) {
-          return false;
-        }
-      }
-      return true;
-    }
-
-    /** {@inheritDoc} */
-    public boolean tableExists(final Text tableName) {
-      if (tableName == null) {
-        throw new IllegalArgumentException("Table name cannot be null");
-      }
-      boolean exists = false;
-      try {
-        HTableDescriptor[] tables = listTables();
-        for (int i = 0; i < tables.length; i++) {
-          if (tables[i].getName().equals(tableName)) {
-            exists = true;
-          }
-        }
-      } catch (IOException e) {
-        LOG.warn("Testing for table existence threw exception", e);
-      }
-      return exists;
-    }
-
-    /** {@inheritDoc} */
-    public HTableDescriptor[] listTables() throws IOException {
-      HashSet<HTableDescriptor> uniqueTables = new HashSet<HTableDescriptor>();
-      long scannerId = -1L;
-      HRegionInterface server = null;
-      
-      Text startRow = EMPTY_START_ROW;
-      HRegionLocation metaLocation = null;
-
-      // scan over each of the meta regions
-      do {
-        try{
-          // turn the start row into a location
-          metaLocation = locateRegion(META_TABLE_NAME, startRow);
-
-          // connect to the server hosting the .META. region
-          server = getHRegionConnection(metaLocation.getServerAddress());
-
-          // open a scanner over the meta region
-          scannerId = server.openScanner(
-            metaLocation.getRegionInfo().getRegionName(),
-            COLUMN_FAMILY_ARRAY, EMPTY_START_ROW, LATEST_TIMESTAMP,
-            null);
-          
-          // iterate through the scanner, accumulating unique table names
-          while (true) {
-            HbaseMapWritable values = server.next(scannerId);
-            if (values == null || values.size() == 0) {
-              break;
-            }
-            for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-              HStoreKey key = (HStoreKey) e.getKey();
-              if (key.getColumn().equals(COL_REGIONINFO)) {
-                HRegionInfo info = new HRegionInfo();
-                info = (HRegionInfo) Writables.getWritable(
-                    ((ImmutableBytesWritable) e.getValue()).get(), info);
-
-                // Only examine the rows where the startKey is zero length   
-                if (info.getStartKey().getLength() == 0) {
-                  uniqueTables.add(info.getTableDesc());
-                }
-              }
-            }
-          }
-          
-          server.close(scannerId);
-          scannerId = -1L;
-          
-          // advance the startRow to the end key of the current region
-          startRow = metaLocation.getRegionInfo().getEndKey();          
-        } catch (IOException e) {
-          // Retry once.
-          metaLocation = relocateRegion(META_TABLE_NAME, startRow);
-          continue;
-        }
-        finally {
-          if (scannerId != -1L) {
-            server.close(scannerId);
-          }
-        }
-      } while (startRow.compareTo(EMPTY_START_ROW) != 0);
-      
-      return uniqueTables.toArray(new HTableDescriptor[uniqueTables.size()]);
-    }
-
-    public HRegionLocation locateRegion(Text tableName, Text row)
-    throws IOException{
-      return locateRegion(tableName, row, true);
-    }
-
-    public HRegionLocation relocateRegion(Text tableName, Text row)
-    throws IOException{
-      return locateRegion(tableName, row, false);
-    }
-
-    private HRegionLocation locateRegion(Text tableName, Text row, 
-      boolean useCache)
-    throws IOException{
-      if (tableName == null || tableName.getLength() == 0) {
-        throw new IllegalArgumentException(
-            "table name cannot be null or zero length");
-      }
-            
-      if (tableName.equals(ROOT_TABLE_NAME)) {
-        synchronized (rootRegionLock) {
-          // This block guards against two threads trying to find the root
-          // region at the same time. One will do the find while the second
-          // waits; the waiting thread will not repeat the lookup.
-          
-          if (!useCache || rootRegionLocation == null) {
-            return locateRootRegion();
-          }
-          return rootRegionLocation;
-        }        
-      } else if (tableName.equals(META_TABLE_NAME)) {
-        synchronized (metaRegionLock) {
-          // This block guards against two threads trying to load the meta 
-          // region at the same time. The first will load the meta region and
-          // the second will use the value that the first one found.
-
-          return locateRegionInMeta(ROOT_TABLE_NAME, tableName, row, useCache);
-        }
-      } else {
-        synchronized(userRegionLock){
-          return locateRegionInMeta(META_TABLE_NAME, tableName, row, useCache);
-        }
-      }
-    }
-
-    /**
-      * Convenience method for turning a MapWritable into the underlying
-      * SortedMap we all know and love.
-      */
-    private SortedMap<Text, byte[]> sortedMapFromMapWritable(
-      HbaseMapWritable writable) {
-      SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-      for (Map.Entry<Writable, Writable> e: writable.entrySet()) {
-        HStoreKey key = (HStoreKey) e.getKey();
-        results.put(key.getColumn(), 
-          ((ImmutableBytesWritable) e.getValue()).get());
-      }
-      
-      return results;
-    }
-
-    /**
-      * Search one of the meta tables (-ROOT- or .META.) for the HRegionLocation
-      * info that contains the table and row we're seeking.
-      */
-    private HRegionLocation locateRegionInMeta(Text parentTable,
-      Text tableName, Text row, boolean useCache)
-    throws IOException{
-      HRegionLocation location = null;
-      
-      // if we're supposed to be using the cache, then check it for a possible
-      // hit. otherwise, delete any existing cached location so it won't 
-      // interfere.
-      if (useCache) {
-        location = getCachedLocation(tableName, row);
-        if (location != null) {
-          return location;
-        }
-      } else{
-        deleteCachedLocation(tableName, row);
-      }
-
-      // build the key of the meta region we should be looking for.
-      // the extra 9's on the end are necessary to allow "exact" matches
-      // without knowing the precise region names.
-      Text metaKey = new Text(tableName.toString() + "," 
-        + row.toString() + ",999999999999999");
-
-      int tries = 0;
-      while (true) {
-        tries++;
-        
-        if (tries >= numRetries) {
-          throw new NoServerForRegionException("Unable to find region for " 
-            + row + " after " + numRetries + " tries.");
-        }
-
-        try{
-          // locate the root region
-          HRegionLocation metaLocation = locateRegion(parentTable, metaKey);
-          HRegionInterface server = 
-            getHRegionConnection(metaLocation.getServerAddress());
-
-          // query the root region for the location of the meta region
-          HbaseMapWritable regionInfoRow = server.getClosestRowBefore(
-            metaLocation.getRegionInfo().getRegionName(), 
-            metaKey, HConstants.LATEST_TIMESTAMP);
-
-          if (regionInfoRow == null) {
-            throw new TableNotFoundException("Table '" + tableName + 
-              "' does not exist.");
-          }
-
-          // convert the MapWritable into a Map we can use
-          SortedMap<Text, byte[]> results = 
-            sortedMapFromMapWritable(regionInfoRow);
-
-          byte[] bytes = results.get(COL_REGIONINFO);
-
-          if (bytes == null || bytes.length == 0) {
-            throw new IOException("HRegionInfo was null or empty in " + 
-              parentTable);
-          }
-
-          // convert the row result into the HRegionLocation we need!
-          HRegionInfo regionInfo = (HRegionInfo) Writables.getWritable(
-              results.get(COL_REGIONINFO), new HRegionInfo());
-
-          if (regionInfo.isOffline()) {
-            throw new IllegalStateException("region offline: " + 
-              regionInfo.getRegionName());
-          }
-
-          // possible we got a region of a different table...
-          if (!regionInfo.getTableDesc().getName().equals(tableName)) {
-            throw new TableNotFoundException(
-              "Table '" + tableName + "' was not found.");
-          }
-
-          String serverAddress = 
-            Writables.bytesToString(results.get(COL_SERVER));
-        
-          if (serverAddress.equals("")) { 
-            throw new NoServerForRegionException(
-              "No server address listed in " + parentTable + " for region "
-              + regionInfo.getRegionName());
-          }
-        
-          // instantiate the location
-          location = new HRegionLocation(regionInfo, 
-            new HServerAddress(serverAddress));
-      
-          cacheLocation(tableName, location);
-
-          return location;
-        } catch (IllegalStateException e) {
-          if (tries < numRetries - 1) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("reloading table servers because: " + e.getMessage());
-            }
-            relocateRegion(parentTable, metaKey);
-          } else {
-            throw e;
-          }
-        } catch (IOException e) {
-          if (e instanceof RemoteException) {
-            e = RemoteExceptionHandler.decodeRemoteException(
-                (RemoteException) e);
-          }
-          if (tries < numRetries - 1) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("reloading table servers because: " + e.getMessage());
-            }
-            relocateRegion(parentTable, metaKey);
-          } else {
-            throw e;
-          }
-        }
-      
-        try{
-          Thread.sleep(pause);              
-        } catch (InterruptedException e){
-          // continue
-        }
-      }
-    }
-
-    /** 
-      * Search the cache for a location that fits our table and row key.
-      * Return null if no suitable region is located. TODO: synchronization note
-      */
-    private HRegionLocation getCachedLocation(Text tableName, Text row) {
-      // find the map of cached locations for this table
-      SortedMap<Text, HRegionLocation> tableLocations = 
-        cachedRegionLocations.get(tableName);
-
-      // if tableLocations for this table isn't built yet, make one
-      if (tableLocations == null) {
-        tableLocations = new TreeMap<Text, HRegionLocation>();
-        cachedRegionLocations.put(tableName, tableLocations);
-      }
-
-      // start to examine the cache. we can only do cache actions
-      // if there's something in the cache for this table.
-      if (!tableLocations.isEmpty()) {
-        if (tableLocations.containsKey(row)) {
-          return tableLocations.get(row);
-        }
-        
-        // cut the cache so that we only get the part that could contain
-        // regions that match our key
-        SortedMap<Text, HRegionLocation> matchingRegions =
-          tableLocations.headMap(row);
-
-        // if that portion of the map is empty, then we're done. otherwise,
-        // we need to examine the cached location to verify that it is 
-        // a match by end key as well.
-        if (!matchingRegions.isEmpty()) {
-          HRegionLocation possibleRegion = 
-            matchingRegions.get(matchingRegions.lastKey());
-          
-          Text endKey = possibleRegion.getRegionInfo().getEndKey();
-          
-          // make sure that the end key is greater than the row we're looking 
-          // for, otherwise the row actually belongs in the next region, not 
-          // this one. the exception case is when the endkey is EMPTY_START_ROW,
-          // signifying that the region we're checking is actually the last 
-          // region in the table.
-          if (endKey.equals(EMPTY_TEXT) || endKey.compareTo(row) > 0) {
-            return possibleRegion;
-          }
-        }
-      }
-      
-      // passed all the way through, so we got nothing - complete cache miss
-      return null;
-    }
-
-
-    /**
-      * Delete a cached location, if it satisfies the table name and row
-      * requirements.
-      */
-    private void deleteCachedLocation(Text tableName, Text row){
-      // find the map of cached locations for this table
-      SortedMap<Text, HRegionLocation> tableLocations = 
-        cachedRegionLocations.get(tableName);
-
-      // if tableLocations for this table isn't built yet, make one
-      if (tableLocations == null) {
-        tableLocations = new TreeMap<Text, HRegionLocation>();
-        cachedRegionLocations.put(tableName, tableLocations);
-      }
-
-      // start to examine the cache. we can only do cache actions
-      // if there's something in the cache for this table.
-      if (!tableLocations.isEmpty()) {
-        // cut the cache so that we only get the part that could contain
-        // regions that match our key
-        SortedMap<Text, HRegionLocation> matchingRegions =
-          tableLocations.headMap(row);
-
-        // if that portion of the map is empty, then we're done. otherwise,
-        // we need to examine the cached location to verify that it is 
-        // a match by end key as well.
-        if (!matchingRegions.isEmpty()) {
-          HRegionLocation possibleRegion = 
-            matchingRegions.get(matchingRegions.lastKey());
-          
-          Text endKey = possibleRegion.getRegionInfo().getEndKey();
-          
-          // by the nature of the headMap, the start key has to be less than
-          // the row we're looking for; otherwise it wouldn't be in the headMap.
-          if (endKey.compareTo(row) <= 0) {
-            // delete any matching entry
-            tableLocations.remove(matchingRegions.lastKey());
-          }
-        }
-      }      
-    }
-
-
-    /**
-      * Put a newly discovered HRegionLocation into the cache.
-      */
-    private void cacheLocation(Text tableName, HRegionLocation location){
-      Text startKey = location.getRegionInfo().getStartKey();
-      
-      // find the map of cached locations for this table
-      SortedMap<Text, HRegionLocation> tableLocations = 
-        cachedRegionLocations.get(tableName);
-
-      // if tableLocations for this table isn't built yet, make one
-      if (tableLocations == null) {
-        tableLocations = new TreeMap<Text, HRegionLocation>();
-        cachedRegionLocations.put(tableName, tableLocations);
-      }
-      
-      // save the HRegionLocation under the startKey
-      tableLocations.put(startKey, location);
-    }
-    
-    /** {@inheritDoc} */
-    public HRegionInterface getHRegionConnection(
-      HServerAddress regionServer) 
-    throws IOException {
-
-      HRegionInterface server;
-      synchronized (this.servers) {
-        // See if we already have a connection
-        server = this.servers.get(regionServer.toString());
-
-        if (server == null) { // Get a connection
-          long versionId = 0;
-          try {
-            versionId =
-              serverInterfaceClass.getDeclaredField("versionID").getLong(server);
-          } catch (IllegalAccessException e) {
-            // Should never happen unless visibility of versionID changes
-            throw new UnsupportedOperationException(
-                "Unable to open a connection to a " +
-                serverInterfaceClass.getName() + " server.", e);
-          } catch (NoSuchFieldException e) {
-            // Should never happen unless versionID field name changes in HRegionInterface
-            throw new UnsupportedOperationException(
-                "Unable to open a connection to a " +
-                serverInterfaceClass.getName() + " server.", e);
-          }
-
-          try {
-            server = (HRegionInterface)HbaseRPC.waitForProxy(serverInterfaceClass,
-                versionId, regionServer.getInetSocketAddress(), this.conf);
-          } catch (RemoteException e) {
-            throw RemoteExceptionHandler.decodeRemoteException(e);
-          }
-          this.servers.put(regionServer.toString(), server);
-        }
-      }
-      return server;
-    }
-
-    /** {@inheritDoc} */
-    public void close(Text tableName) {
-      if (tableName == null || tableName.getLength() == 0) {
-        throw new IllegalArgumentException(
-            "table name cannot be null or zero length");
-      }
-            
-      if (closedTables.contains(tableName)) {
-        // Table already closed. Ignore it.
-        return;
-      }
-
-      closedTables.add(tableName);
-
-      if (cachedRegionLocations.containsKey(tableName)) {
-        SortedMap<Text, HRegionLocation> tableServers = 
-          cachedRegionLocations.remove(tableName);
-
-        // Shut down connections to the HRegionServers
-        synchronized (this.servers) {
-          for (HRegionLocation r: tableServers.values()) {
-            this.servers.remove(r.getServerAddress().toString());
-          }
-        }
-      }
-    }
-    
-    /** Convenience method for closing all open tables.*/
-    void closeAll() {
-      this.closed = true;
-      ArrayList<Text> tables = 
-        new ArrayList<Text>(cachedRegionLocations.keySet());
-      for (Text tableName: tables) {
-        close(tableName);
-      }
-    }
-    
-    /*
-     * Repeatedly try to find the root region by asking the master for where it is
-     * @return HRegionLocation for root region if found
-     * @throws NoServerForRegionException - if the root region can not be located
-     * after retrying
-     * @throws IOException 
-     */
-    private HRegionLocation locateRootRegion()
-    throws IOException {
-    
-      getMaster();
-      
-      HServerAddress rootRegionAddress = null;
-      
-      for (int tries = 0; tries < numRetries; tries++) {
-        int localTimeouts = 0;
-        
-        // ask the master which server has the root region
-        while (rootRegionAddress == null && localTimeouts < numRetries) {
-          rootRegionAddress = master.findRootRegion();
-          if (rootRegionAddress == null) {
-            try {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Sleeping. Waiting for root region.");
-              }
-              Thread.sleep(pause);
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Wake. Retry finding root region.");
-              }
-            } catch (InterruptedException iex) {
-              // continue
-            }
-            localTimeouts++;
-          }
-        }
-        
-        if (rootRegionAddress == null) {
-          throw new NoServerForRegionException(
-              "Timed out trying to locate root region");
-        }
-        
-        // get a connection to the region server
-        HRegionInterface server = getHRegionConnection(rootRegionAddress);
-
-        try {
-          // if this works, then we're good, and we have an acceptable address,
-          // so we can stop doing retries and return the result.
-          server.getRegionInfo(HRegionInfo.rootRegionInfo.getRegionName());
-          break;
-        } catch (IOException e) {
-          if (tries == numRetries - 1) {
-            // Don't bother sleeping. We've run out of retries.
-            if (e instanceof RemoteException) {
-              e = RemoteExceptionHandler.decodeRemoteException(
-                  (RemoteException) e);
-            }
-            throw e;
-          }
-          
-          // Sleep and retry finding root region.
-          try {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Root region location changed. Sleeping.");
-            }
-            Thread.sleep(pause);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Wake. Retry finding root region.");
-            }
-          } catch (InterruptedException iex) {
-            // continue
-          }
-        }
-        
-        rootRegionAddress = null;
-      }
-      
-      // if the address is null by this point, then the retries have failed,
-      // and we're out of options
-      if (rootRegionAddress == null) {
-        throw new NoServerForRegionException(
-          "unable to locate root region server");
-      }
-      
-      // return the region location
-      return new HRegionLocation(
-        HRegionInfo.rootRegionInfo, rootRegionAddress);
-    }
-  }
-}
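
The region-location cache in the code above keys each cached HRegionLocation by its region's start key in a per-table SortedMap and answers lookups with headMap(row): the last entry of the head map is the only candidate, and it is a hit only if its end key is empty (last region in the table) or still greater than the row. Below is a minimal, self-contained sketch of that lookup rule using plain String keys and a made-up Region class; it illustrates the technique and is not the removed HBase API.

    import java.util.SortedMap;
    import java.util.TreeMap;

    /** Sketch of the headMap-based region cache lookup. Region is a stand-in
     *  for HRegionLocation/HRegionInfo and is not part of HBase. */
    public class RegionCacheSketch {
      static final class Region {
        final String startKey;
        final String endKey;          // "" means last region in the table
        Region(String startKey, String endKey) {
          this.startKey = startKey;
          this.endKey = endKey;
        }
      }

      private final SortedMap<String, Region> cache = new TreeMap<String, Region>();

      /** Cache a region under its start key, as cacheLocation() does. */
      void cacheRegion(Region r) {
        cache.put(r.startKey, r);
      }

      /** Return the cached region covering row, or null on a cache miss. */
      Region lookup(String row) {
        if (cache.containsKey(row)) {
          return cache.get(row);      // row is exactly a region start key
        }
        SortedMap<String, Region> head = cache.headMap(row);
        if (head.isEmpty()) {
          return null;                // no cached region starts before this row
        }
        Region candidate = head.get(head.lastKey());
        // A hit only if the candidate's end key is past the row, or the
        // candidate is the last region in the table (empty end key).
        if (candidate.endKey.length() == 0 || candidate.endKey.compareTo(row) > 0) {
          return candidate;
        }
        return null;
      }

      public static void main(String[] args) {
        RegionCacheSketch cache = new RegionCacheSketch();
        cache.cacheRegion(new Region("", "m"));
        cache.cacheRegion(new Region("m", ""));
        System.out.println(cache.lookup("apple").startKey);  // prints "" (first region)
        System.out.println(cache.lookup("zebra").startKey);  // prints "m" (last region)
      }
    }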

+ 0 - 184
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConstants.java

@@ -1,184 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.hadoop.io.Text;
-
-/**
- * HConstants holds a bunch of HBase-related constants
- */
-public interface HConstants {
-  
-  // For migration
-
-  /** name of version file */
-  static final String VERSION_FILE_NAME = "hbase.version";
-  
-  /** version of file system */
-  static final String FILE_SYSTEM_VERSION = "0.1";
-  
-  // Configuration parameters
-  
-  // TODO: URL for hbase master like hdfs URLs with host and port.
-  // Like jdbc URLs?  URLs could be used to refer to table cells?
-  // jdbc:mysql://[host][,failoverhost...][:port]/[database]
-  // jdbc:mysql://[host][,failoverhost...][:port]/[database][?propertyName1][=propertyValue1][&propertyName2][=propertyValue2]...
-  
-  // Key into HBaseConfiguration for the hbase.master address.
-  // TODO: Support 'local': i.e. default of all running in single
-  // process.  Same for regionserver. TODO: Is having HBase homed
-  // on port 60k OK?
-  
-  /** Parameter name for master address */
-  static final String MASTER_ADDRESS = "hbase.master";
-
-  /** default host address */
-  static final String DEFAULT_HOST = "0.0.0.0";
-
-  /** default port that the master listens on */
-  static final int DEFAULT_MASTER_PORT = 60000;
-  
-  /** Default master address */
-  static final String DEFAULT_MASTER_ADDRESS = DEFAULT_HOST + ":" +
-    DEFAULT_MASTER_PORT;
-
-  /** default port for master web api */
-  static final int DEFAULT_MASTER_INFOPORT = 60010;
-
-  /** Parameter name for hbase.regionserver address. */
-  static final String REGIONSERVER_ADDRESS = "hbase.regionserver";
-  
-  /** Default region server address */
-  static final String DEFAULT_REGIONSERVER_ADDRESS = DEFAULT_HOST + ":60020";
-
-  /** default port for region server web api */
-  static final int DEFAULT_REGIONSERVER_INFOPORT = 60030;
-
-  /** Parameter name for what region server interface to use. */
-  static final String REGION_SERVER_CLASS = "hbase.regionserver.class";
-  
-  /** Default region server interface class name. */
-  static final String DEFAULT_REGION_SERVER_CLASS = HRegionInterface.class.getName();
-
-  /** Parameter name for how often threads should wake up */
-  static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency";
-
-  /** Parameter name for HBase instance root directory */
-  static final String HBASE_DIR = "hbase.rootdir";
-  
-  /** Default HBase instance root directory */
-  static final String DEFAULT_HBASE_DIR = "/hbase";
-  
-  /** Used to construct the name of the log directory for a region server */
-  static final String HREGION_LOGDIR_NAME = "log";
-
-  /** Name of old log file for reconstruction */
-  static final String HREGION_OLDLOGFILE_NAME = "oldlogfile.log";
-  
-  /** Default maximum file size */
-  static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024;
-
-  // Always store the location of the root table's HRegion.
-  // This HRegion is never split.
-  
-  // region name = table + startkey + regionid. This is the row key.
-  // each row in the root and meta tables describes exactly 1 region
-  // Do we ever need to know all the information that we are storing?
-
-  // Note that the name of the root table starts with "-" and the name of the
-  // meta table starts with "." Why? It's a trick. It turns out that when we
-  // store region names in memory, we use a SortedMap. Since "-" sorts before
-  // "." (and since no other table name can start with either of these
-  // characters), the root region will always be the first entry in such a Map,
-  // followed by all the meta regions (which will be ordered by their starting
-  // row key as well), followed by all user tables. So when the Master is 
-  // choosing regions to assign, it will always choose the root region first,
-  // followed by the meta regions, followed by user regions. Since the root
-  // and meta regions always need to be on-line, this ensures that they will
-  // be the first to be reassigned if the server(s) they are being served by
-  // should go down.
-
-  /** The root table's name.*/
-  static final Text ROOT_TABLE_NAME = new Text("-ROOT-");
-
-  /** The META table's name. */
-  static final Text META_TABLE_NAME = new Text(".META.");
-
-  // Defines for the column names used in both ROOT and META HBase 'meta' tables.
-  
-  /** The ROOT and META column family (string) */
-  static final String COLUMN_FAMILY_STR = "info:";
-
-  /** The ROOT and META column family (Text) */
-  static final Text COLUMN_FAMILY = new Text(COLUMN_FAMILY_STR);
-
-  /** Array of meta column names */
-  static final Text [] COLUMN_FAMILY_ARRAY = new Text [] {COLUMN_FAMILY};
-  
-  /** ROOT/META column family member - contains HRegionInfo */
-  static final Text COL_REGIONINFO = new Text(COLUMN_FAMILY + "regioninfo");
-
-  /** Array of column - contains HRegionInfo */
-  static final Text[] COL_REGIONINFO_ARRAY = new Text [] {COL_REGIONINFO};
-  
-  /** ROOT/META column family member - contains HServerAddress.toString() */
-  static final Text COL_SERVER = new Text(COLUMN_FAMILY + "server");
-  
-  /** ROOT/META column family member - contains server start code (a long) */
-  static final Text COL_STARTCODE = new Text(COLUMN_FAMILY + "serverstartcode");
-
-  /** the lower half of a split region */
-  static final Text COL_SPLITA = new Text(COLUMN_FAMILY_STR + "splitA");
-  
-  /** the upper half of a split region */
-  static final Text COL_SPLITB = new Text(COLUMN_FAMILY_STR + "splitB");
-  
-  /** All the columns in the catalog -ROOT- and .META. tables.
-   */
-  static final Text[] ALL_META_COLUMNS = {COL_REGIONINFO, COL_SERVER,
-    COL_STARTCODE, COL_SPLITA, COL_SPLITB};
-
-  // Other constants
-
-  /**
-   * An empty instance of Text.
-   */
-  static final Text EMPTY_TEXT = new Text();
-  
-  /**
-   * Used by scanners, etc when they want to start at the beginning of a region
-   */
-  static final Text EMPTY_START_ROW = EMPTY_TEXT;
-
-  /** When we encode strings, we always specify UTF8 encoding */
-  static final String UTF8_ENCODING = "UTF-8";
-
-  /**
-   * Timestamp to use when we want to refer to the latest cell.
-   * This is the timestamp sent by clients when no timestamp is specified on
-   * commit.
-   */
-  static final long LATEST_TIMESTAMP = Long.MAX_VALUE;
-
-  /**
-   * Define for 'return-all-versions'.
-   */
-  static final int ALL_VERSIONS = -1;
-}
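
The naming trick described in the comment above is easy to demonstrate: because '-' sorts before '.', which sorts before letters and digits, '-ROOT-' region names come first in any SortedMap of region names, followed by '.META.' regions, followed by user regions. A tiny stand-alone illustration follows; the region names are made up, following the table,startkey,regionid convention noted in the comment.

    import java.util.TreeMap;

    /** Demonstrates why -ROOT- and .META. region names sort ahead of user tables. */
    public class CatalogOrderingSketch {
      public static void main(String[] args) {
        TreeMap<String, String> regions = new TreeMap<String, String>();
        regions.put("users,,1203456789", "a user region");
        regions.put(".META.,,1", "the meta region");
        regions.put("-ROOT-,,0", "the root region");
        // Iteration order: -ROOT-,,0 then .META.,,1 then users,,1203456789
        for (String name : regions.keySet()) {
          System.out.println(name);
        }
      }
    }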

+ 0 - 35
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HInternalScannerInterface.java

@@ -1,35 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-/**
- * Internally, we need to be able to determine if the scanner is doing wildcard
- * column matches (when only a column family is specified or if a column regex
- * is specified) or if multiple members of the same column family were
- * specified. If so, we need to ignore the timestamp to ensure that we get all
- * the family members, as they may have been last updated at different times.
- */
-public interface HInternalScannerInterface extends HScannerInterface {
-  /** @return true if the scanner is matching a column family or regex */
-  public boolean isWildcardScanner();
-  
-  /** @return true if the scanner is matching multiple column family members */
-  public boolean isMultipleMatchScanner();
-}

+ 0 - 628
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java

@@ -1,628 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
-import org.apache.hadoop.io.SequenceFile.Reader;
-
-/**
- * HLog stores all the edits to the HStore.
- *
- * It performs logfile-rolling, so external callers are not aware that the
- * underlying file is being rolled.
- *
- * <p>
- * A single HLog is used by several HRegions simultaneously.
- *
- * <p>
- * Each HRegion is identified by a unique long integer. HRegions do
- * not need to declare themselves before using the HLog; they simply include
- * their HRegion-id in the <code>append</code> or
- * <code>completeCacheFlush</code> calls.
- *
- * <p>
- * An HLog consists of multiple on-disk files, which have a chronological order.
- * As data is flushed to other (better) on-disk structures, the log becomes
- * obsolete. We can destroy all the log messages for a given HRegion-id up to
- * the most-recent CACHEFLUSH message from that HRegion.
- *
- * <p>
- * It's only practical to delete entire files. Thus, we delete an entire on-disk
- * file F when all of the messages in F have a log-sequence-id that's older
- * (smaller) than the most-recent CACHEFLUSH message for every HRegion that has
- * a message in F.
- *
- * <p>
- * Synchronized methods can never execute in parallel. However, between the
- * start of a cache flush and the completion point, appends are allowed but log
- * rolling is not. To prevent log rolling taking place during this period, a
- * separate reentrant lock is used.
- *
- * <p>
- * TODO: Vuk Ercegovac also pointed out that keeping HBase HRegion edit logs in
- * HDFS is currently flawed. HBase writes edits to logs and to a memcache. The
- * 'atomic' write to the log is meant to serve as insurance against abnormal
- * RegionServer exit: on startup, the log is rerun to reconstruct an HRegion's
- * last wholesome state. But files in HDFS do not 'exist' until they are cleanly
- * closed -- something that will not happen if RegionServer exits without
- * running its 'close'.
- */
-public class HLog implements HConstants {
-  private static final Log LOG = LogFactory.getLog(HLog.class);
-  private static final String HLOG_DATFILE = "hlog.dat.";
-  static final Text METACOLUMN = new Text("METACOLUMN:");
-  static final Text METAROW = new Text("METAROW");
-  final FileSystem fs;
-  final Path dir;
-  final Configuration conf;
-  final LogRollListener listener;
-  final long threadWakeFrequency;
-  private final int maxlogentries;
-
-  /*
-   * Current log file.
-   */
-  SequenceFile.Writer writer;
-
-  /*
-   * Map of all log files but the current one. 
-   */
-  final SortedMap<Long, Path> outputfiles = 
-    Collections.synchronizedSortedMap(new TreeMap<Long, Path>());
-
-  /*
-   * Map of region to last sequence/edit id. 
-   */
-  final Map<Text, Long> lastSeqWritten = new ConcurrentHashMap<Text, Long>();
-
-  volatile boolean closed = false;
-
-  private final Integer sequenceLock = new Integer(0);
-  volatile long logSeqNum = 0;
-
-  volatile long filenum = 0;
-
-  volatile int numEntries = 0;
-
-  // This lock prevents starting a log roll during a cache flush.
-  // synchronized is insufficient because a cache flush spans two method calls.
-  private final Lock cacheFlushLock = new ReentrantLock();
-
-  // We synchronize on updateLock to prevent updates and to prevent a log roll
-  // during an update
-  private final Integer updateLock = new Integer(0);
-
-  /**
-   * Create an edit log at the given <code>dir</code> location.
-   *
-   * You should never have to load an existing log. If there is a log at
-   * startup, it should have already been processed and deleted by the time the
-   * HLog object is started up.
-   *
-   * @param fs
-   * @param dir
-   * @param conf
-   * @param listener
-   * @throws IOException
-   */
-  public HLog(final FileSystem fs, final Path dir, final Configuration conf,
-      final LogRollListener listener) throws IOException {
-    this.fs = fs;
-    this.dir = dir;
-    this.conf = conf;
-    this.listener = listener;
-    this.threadWakeFrequency = conf.getLong(THREAD_WAKE_FREQUENCY, 10 * 1000);
-    this.maxlogentries =
-      conf.getInt("hbase.regionserver.maxlogentries", 30 * 1000);
-    if (fs.exists(dir)) {
-      throw new IOException("Target HLog directory already exists: " + dir);
-    }
-    fs.mkdirs(dir);
-    rollWriter();
-  }
-  
-  /**
-   * Get the compression type for the hlog files.
-   * @param c Configuration to use.
-   * @return the kind of compression to use
-   */
-  private static CompressionType getCompressionType(final Configuration c) {
-    String name = c.get("hbase.io.seqfile.compression.type");
-    return name == null? CompressionType.NONE: CompressionType.valueOf(name);
-  }
-
-  /**
-   * Called by HRegionServer when it opens a new region to ensure that log
-   * sequence numbers are always greater than the latest sequence number of the
-   * region being brought on-line.
-   *
-   * @param newvalue We'll set log edit/sequence number to this value if it
-   * is greater than the current value.
-   */
-  void setSequenceNumber(long newvalue) {
-    synchronized (sequenceLock) {
-      if (newvalue > logSeqNum) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("changing sequence number from " + logSeqNum + " to " +
-              newvalue);
-        }
-        logSeqNum = newvalue;
-      }
-    }
-  }
-
-  /**
-   * Roll the log writer. That is, start writing log messages to a new file.
-   *
-   * Because a log cannot be rolled during a cache flush, and a cache flush
-   * spans two method calls, a special lock needs to be obtained so that a cache
-   * flush cannot start when the log is being rolled and the log cannot be
-   * rolled during a cache flush.
-   *
-   * <p>Note that this method cannot be synchronized: startCacheFlush could run
-   * first and obtain the cacheFlushLock; this method could then acquire the
-   * lock on this object but block waiting for the cacheFlushLock; and
-   * completeCacheFlush could be called, wait for the lock on this object, and
-   * consequently never allow the cacheFlushLock to be released.
-   *
-   * @throws IOException
-   */
-  void rollWriter() throws IOException {
-    this.cacheFlushLock.lock();
-    try {
-      if (closed) {
-        return;
-      }
-      synchronized (updateLock) {
-        if (this.writer != null) {
-          // Close the current writer, get a new one.
-          this.writer.close();
-          Path p = computeFilename(filenum - 1);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Closing current log writer " + p.toString() +
-            " to get a new one");
-          }
-          if (filenum > 0) {
-            synchronized (this.sequenceLock) {
-              this.outputfiles.put(Long.valueOf(this.logSeqNum - 1), p);
-            }
-          }
-        }
-        Path newPath = computeFilename(filenum++);
-        this.writer = SequenceFile.createWriter(this.fs, this.conf, newPath,
-            HLogKey.class, HLogEdit.class, getCompressionType(this.conf));
-        LOG.info("new log writer created at " + newPath);
-
-        // Can we delete any of the old log files?
-        if (this.outputfiles.size() > 0) {
-          if (this.lastSeqWritten.size() <= 0) {
-            LOG.debug("Last sequence written is empty. Deleting all old hlogs");
-            // If so, then no new writes have come in since all regions were
-            // flushed (and removed from the lastSeqWritten map). Means can
-            // remove all but currently open log file.
-            for (Map.Entry<Long, Path> e : this.outputfiles.entrySet()) {
-              deleteLogFile(e.getValue(), e.getKey());
-            }
-            this.outputfiles.clear();
-          } else {
-            // Get oldest edit/sequence id.  If logs are older than this id,
-            // then safe to remove.
-            Long oldestOutstandingSeqNum =
-              Collections.min(this.lastSeqWritten.values());
-            // Get the set of all log files whose final ID is older than or
-            // equal to the oldest pending region operation
-            TreeSet<Long> sequenceNumbers =
-              new TreeSet<Long>(this.outputfiles.headMap(
-                (Long.valueOf(oldestOutstandingSeqNum.longValue() + 1L))).keySet());
-            // Now remove old log files (if any)
-            if (LOG.isDebugEnabled()) {
-              // Find region associated with oldest key -- helps debugging.
-              Text oldestRegion = null;
-              for (Map.Entry<Text, Long> e: this.lastSeqWritten.entrySet()) {
-                if (e.getValue().longValue() == oldestOutstandingSeqNum.longValue()) {
-                  oldestRegion = e.getKey();
-                  break;
-                }
-              }
-              if (LOG.isDebugEnabled() && sequenceNumbers.size() > 0) {
-                LOG.debug("Found " + sequenceNumbers.size() +
-                  " logs to remove " +
-                  "using oldest outstanding seqnum of " +
-                  oldestOutstandingSeqNum + " from region " + oldestRegion);
-              }
-            }
-            if (sequenceNumbers.size() > 0) {
-              for (Long seq : sequenceNumbers) {
-                deleteLogFile(this.outputfiles.remove(seq), seq);
-              }
-            }
-          }
-        }
-        this.numEntries = 0;
-      }
-    } finally {
-      this.cacheFlushLock.unlock();
-    }
-  }
-  
-  private void deleteLogFile(final Path p, final Long seqno) throws IOException {
-    LOG.info("removing old log file " + p.toString() +
-      " whose highest sequence/edit id is " + seqno);
-    this.fs.delete(p);
-  }
-
-  /**
-   * This is a convenience method that computes a new filename with a given
-   * file-number.
-   */
-  Path computeFilename(final long fn) {
-    return new Path(dir,
-      HLOG_DATFILE + String.format("%1$03d", Long.valueOf(fn)));
-  }
-
-  /**
-   * Shut down the log and delete the log directory
-   *
-   * @throws IOException
-   */
-  public void closeAndDelete() throws IOException {
-    close();
-    fs.delete(dir);
-  }
-
-  /**
-   * Shut down the log.
-   *
-   * @throws IOException
-   */
-  void close() throws IOException {
-    cacheFlushLock.lock();
-    try {
-      synchronized (updateLock) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("closing log writer in " + this.dir.toString());
-        }
-        this.writer.close();
-        this.closed = true;
-      }
-    } finally {
-      cacheFlushLock.unlock();
-    }
-  }
-
-  /**
-   * Append a set of edits to the log. Log edits are keyed by region name,
-   * row name, and log sequence id.
-   *
-   * Later, if we sort by these keys, we obtain all the relevant edits for a
-   * given key-range of the HRegion (TODO). Any edits that do not have a
-   * matching {@link HLogEdit#completeCacheFlush} message can be discarded.
-   *
-   * <p>
-   * Logs cannot be restarted once closed, or once the HLog process dies. Each
-   * time the HLog starts, it must create a new log. This means that other
-   * systems should process the log appropriately upon each startup (and prior
-   * to initializing HLog).
-   *
-   * Synchronizing on updateLock prevents appends during the completion of a
-   * cache flush or for the duration of a log roll.
-   *
-   * @param regionName region the edits apply to
-   * @param tableName table the region belongs to
-   * @param edits edits to append, keyed by HStoreKey
-   * @throws IOException
-   */
-  void append(Text regionName, Text tableName,
-      TreeMap<HStoreKey, byte[]> edits) throws IOException {
-    
-    if (closed) {
-      throw new IOException("Cannot append; log is closed");
-    }
-    synchronized (updateLock) {
-      long seqNum[] = obtainSeqNum(edits.size());
-      // The 'lastSeqWritten' map holds the sequence number of the oldest
-      // write for each region. When the cache is flushed, the entry for the
-      // region being flushed is removed if the sequence number of the flush
-      // is greater than or equal to the value in lastSeqWritten.
-      if (!this.lastSeqWritten.containsKey(regionName)) {
-        this.lastSeqWritten.put(regionName, Long.valueOf(seqNum[0]));
-      }
-      int counter = 0;
-      for (Map.Entry<HStoreKey, byte[]> es : edits.entrySet()) {
-        HStoreKey key = es.getKey();
-        HLogKey logKey =
-          new HLogKey(regionName, tableName, key.getRow(), seqNum[counter++]);
-        HLogEdit logEdit =
-          new HLogEdit(key.getColumn(), es.getValue(), key.getTimestamp());
-        this.writer.append(logKey, logEdit);
-        this.numEntries++;
-      }
-    }
-    if (this.numEntries > this.maxlogentries) {
-      if (listener != null) {
-        listener.logRollRequested();
-      }
-    }
-  }
-
-  /** @return How many items have been added to the log */
-  int getNumEntries() {
-    return numEntries;
-  }
-
-  /**
-   * Obtain a log sequence number.
-   */
-  private long obtainSeqNum() {
-    long value;
-    synchronized (sequenceLock) {
-      value = logSeqNum++;
-    }
-    return value;
-  }
-
-  /** @return the number of log files in use */
-  int getNumLogFiles() {
-    return outputfiles.size();
-  }
-
-  /**
-   * Obtain a specified number of sequence numbers
-   *
-   * @param num number of sequence numbers to obtain
-   * @return array of sequence numbers
-   */
-  private long[] obtainSeqNum(int num) {
-    long[] results = new long[num];
-    synchronized (this.sequenceLock) {
-      for (int i = 0; i < num; i++) {
-        results[i] = this.logSeqNum++;
-      }
-    }
-    return results;
-  }
-
-  /**
-   * By acquiring a log sequence ID, we can allow log messages to continue while
-   * we flush the cache.
-   *
-   * Acquire a lock so that we do not roll the log between the start and
-   * completion of a cache-flush. Otherwise the log-seq-id for the flush will
-   * not appear in the correct logfile.
-   *
-   * @return sequence ID to pass to {@link #completeCacheFlush(Text, Text, long)}
-   * @see #completeCacheFlush(Text, Text, long)
-   * @see #abortCacheFlush()
-   */
-  long startCacheFlush() {
-    this.cacheFlushLock.lock();
-    return obtainSeqNum();
-  }
-
-  /**
-   * Complete the cache flush
-   *
-   * Protected by cacheFlushLock
-   *
-   * @param regionName
-   * @param tableName
-   * @param logSeqId
-   * @throws IOException
-   */
-  void completeCacheFlush(final Text regionName, final Text tableName,
-      final long logSeqId) throws IOException {
-
-    try {
-      if (this.closed) {
-        return;
-      }
-      synchronized (updateLock) {
-        this.writer.append(new HLogKey(regionName, tableName, HLog.METAROW, logSeqId),
-            new HLogEdit(HLog.METACOLUMN, HLogEdit.completeCacheFlush.get(),
-                System.currentTimeMillis()));
-        this.numEntries++;
-        Long seq = this.lastSeqWritten.get(regionName);
-        if (seq != null && logSeqId >= seq.longValue()) {
-          this.lastSeqWritten.remove(regionName);
-        }
-      }
-    } finally {
-      this.cacheFlushLock.unlock();
-    }
-  }
-
-  /**
-   * Abort a cache flush.
-   * Call if the flush fails. Note that the only recovery for an aborted flush
-   * currently is a restart of the regionserver so the snapshot content dropped
-   * by the failure gets restored to the memcache.
-   */
-  void abortCacheFlush() {
-    this.cacheFlushLock.unlock();
-  }
-
-  /**
-   * Split up a bunch of log files, that are no longer being written to, into
-   * new files, one per region. Delete the old log files when finished.
-   *
-   * @param rootDir qualified root directory of the HBase instance
-   * @param srcDir Directory of log files to split: e.g.
-   *                <code>${ROOTDIR}/log_HOST_PORT</code>
-   * @param fs FileSystem
-   * @param conf HBaseConfiguration
-   * @throws IOException
-   */
-  static void splitLog(Path rootDir, Path srcDir, FileSystem fs,
-    Configuration conf) throws IOException {
-    Path logfiles[] = fs.listPaths(new Path[] { srcDir });
-    LOG.info("splitting " + logfiles.length + " log(s) in " +
-      srcDir.toString());
-    Map<Text, SequenceFile.Writer> logWriters =
-      new HashMap<Text, SequenceFile.Writer>();
-    try {
-      for (int i = 0; i < logfiles.length; i++) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Splitting " + i + " of " + logfiles.length + ": " +
-            logfiles[i]);
-        }
-        // Check for empty file.
-        if (fs.getFileStatus(logfiles[i]).getLen() <= 0) {
-          LOG.info("Skipping " + logfiles[i].toString() +
-            " because zero length");
-          continue;
-        }
-        HLogKey key = new HLogKey();
-        HLogEdit val = new HLogEdit();
-        SequenceFile.Reader in = new SequenceFile.Reader(fs, logfiles[i], conf);
-        try {
-          int count = 0;
-          for (; in.next(key, val); count++) {
-            Text tableName = key.getTablename();
-            Text regionName = key.getRegionName();
-            SequenceFile.Writer w = logWriters.get(regionName);
-            if (w == null) {
-              Path logfile = new Path(
-                  HRegion.getRegionDir(
-                      HTableDescriptor.getTableDir(rootDir, tableName),
-                      HRegionInfo.encodeRegionName(regionName)
-                  ),
-                  HREGION_OLDLOGFILE_NAME
-              );
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Creating new log file writer for path " + logfile +
-                  "; map content " + logWriters.toString());
-              }
-              w = SequenceFile.createWriter(fs, conf, logfile, HLogKey.class,
-                HLogEdit.class, getCompressionType(conf));
-              // Use a copy of regionName; the regionName Text is reused inside
-              // HLogKey.getRegionName so its content changes as we iterate.
-              logWriters.put(new Text(regionName), w);
-            }
-            if (count % 10000 == 0 && count > 0 && LOG.isDebugEnabled()) {
-              LOG.debug("Applied " + count + " edits");
-            }
-            w.append(key, val);
-          }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Applied " + count + " total edits");
-          }
-        } finally {
-          in.close();
-        }
-      }
-    } finally {
-      for (SequenceFile.Writer w : logWriters.values()) {
-        w.close();
-      }
-    }
-
-    if (fs.exists(srcDir)) {
-      if (!fs.delete(srcDir)) {
-        LOG.error("Cannot delete: " + srcDir);
-        if (!FileUtil.fullyDelete(new File(srcDir.toString()))) {
-          throw new IOException("Cannot delete: " + srcDir);
-        }
-      }
-    }
-    LOG.info("log file splitting completed for " + srcDir.toString());
-  }
-
-  private static void usage() {
-    System.err.println("Usage: java org.apache.hbase.HLog" +
-        " {--dump <logfile>... | --split <logdir>...}");
-  }
-
-  /**
-   * Pass one or more log file names and it will either dump out a text version
-   * on <code>stdout</code> or split the specified log files.
-   *
-   * @param args
-   * @throws IOException
-   */
-  public static void main(String[] args) throws IOException {
-    if (args.length < 2) {
-      usage();
-      System.exit(-1);
-    }
-    boolean dump = true;
-    if (args[0].compareTo("--dump") != 0) {
-      if (args[0].compareTo("--split") == 0) {
-        dump = false;
-
-      } else {
-        usage();
-        System.exit(-1);
-      }
-    }
-    Configuration conf = new HBaseConfiguration();
-    FileSystem fs = FileSystem.get(conf);
-    Path baseDir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
-
-    for (int i = 1; i < args.length; i++) {
-      Path logPath = new Path(args[i]);
-      if (!fs.exists(logPath)) {
-        throw new FileNotFoundException(args[i] + " does not exist");
-      }
-      if (dump) {
-        if (!fs.isFile(logPath)) {
-          throw new IOException(args[i] + " is not a file");
-        }
-        Reader log = new SequenceFile.Reader(fs, logPath, conf);
-        try {
-          HLogKey key = new HLogKey();
-          HLogEdit val = new HLogEdit();
-          while (log.next(key, val)) {
-            System.out.println(key.toString() + " " + val.toString());
-          }
-        } finally {
-          log.close();
-        }
-      } else {
-        if (!fs.getFileStatus(logPath).isDir()) {
-          throw new IOException(args[i] + " is not a directory");
-        }
-        splitLog(baseDir, logPath, fs, conf);
-      }
-    }
-  }
-}
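
rollWriter() above decides which closed log files can be deleted by comparing each file's highest sequence id with the oldest unflushed sequence id recorded in lastSeqWritten; if no region has unflushed edits, every closed file is obsolete. The following is a simplified, self-contained sketch of that pruning rule using plain maps and made-up names rather than the removed HLog fields.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.SortedMap;
    import java.util.TreeMap;

    /** Sketch of the old-log pruning rule used by HLog.rollWriter(); the names
     *  here are illustrative, not the removed HLog API. */
    public class LogPruningSketch {
      // Closed log files, keyed by the highest sequence id each one contains.
      private final SortedMap<Long, String> outputFiles = new TreeMap<Long, String>();
      // Oldest unflushed sequence id per region.
      private final Map<String, Long> lastSeqWritten = new HashMap<String, Long>();

      /** Remove and return the closed log files that are safe to delete. */
      List<String> filesSafeToDelete() {
        if (lastSeqWritten.isEmpty()) {
          // Every region has been flushed, so all closed files are obsolete.
          List<String> all = new ArrayList<String>(outputFiles.values());
          outputFiles.clear();
          return all;
        }
        long oldestOutstanding = Collections.min(lastSeqWritten.values());
        // Files written entirely before the oldest outstanding edit are obsolete.
        SortedMap<Long, String> obsolete = outputFiles.headMap(oldestOutstanding);
        List<String> result = new ArrayList<String>(obsolete.values());
        obsolete.clear();               // headMap is a view; clearing prunes outputFiles
        return result;
      }

      public static void main(String[] args) {
        LogPruningSketch log = new LogPruningSketch();
        log.outputFiles.put(Long.valueOf(99L), "hlog.dat.000");
        log.outputFiles.put(Long.valueOf(199L), "hlog.dat.001");
        log.lastSeqWritten.put("regionA", Long.valueOf(150L)); // unflushed edits remain
        System.out.println(log.filesSafeToDelete());           // prints [hlog.dat.000]
      }
    }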

+ 0 - 138
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogEdit.java

@@ -1,138 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.io.*;
-
-import java.io.*;
-
-/**
- * A log value.
- *
- * These aren't sortable; you need to sort by the matching HLogKey.
- * The table and row are already identified in HLogKey.
- * This just indicates the column and value.
- */
-public class HLogEdit implements Writable, HConstants {
-
-  /** Value stored for a deleted item */
-  public static ImmutableBytesWritable deleteBytes = null;
-
-  /** Value written to HLog on a complete cache flush */
-  public static ImmutableBytesWritable completeCacheFlush = null;
-
-  static {
-    try {
-      deleteBytes =
-        new ImmutableBytesWritable("HBASE::DELETEVAL".getBytes(UTF8_ENCODING));
-    
-      completeCacheFlush =
-        new ImmutableBytesWritable("HBASE::CACHEFLUSH".getBytes(UTF8_ENCODING));
-      
-    } catch (UnsupportedEncodingException e) {
-      assert(false);
-    }
-  }
-  
-  /**
-   * @param value
-   * @return True if the value marks a deleted entry, i.e. it equals {@link #deleteBytes}.
-   */
-  public static boolean isDeleted(final byte [] value) {
-    return (value == null)? false: deleteBytes.compareTo(value) == 0;
-  }
-
-  private Text column = new Text();
-  private byte [] val;
-  private long timestamp;
-  private static final int MAX_VALUE_LEN = 128;
-
-  /**
-   * Default constructor used by Writable
-   */
-  public HLogEdit() {
-    super();
-  }
-
-  /**
-   * Construct a fully initialized HLogEdit
-   * @param column column name
-   * @param bval value
-   * @param timestamp timestamp for modification
-   */
-  public HLogEdit(Text column, byte [] bval, long timestamp) {
-    this.column.set(column);
-    this.val = bval;
-    this.timestamp = timestamp;
-  }
-
-  /** @return the column */
-  public Text getColumn() {
-    return this.column;
-  }
-
-  /** @return the value */
-  public byte [] getVal() {
-    return this.val;
-  }
-
-  /** @return the timestamp */
-  public long getTimestamp() {
-    return this.timestamp;
-  }
-
-  /**
-   * @return Column name, timestamp, and the first 128 bytes of the value
-   * as a String.
-   */
-  @Override
-  public String toString() {
-    String value = "";
-    try {
-      value = (this.val.length > MAX_VALUE_LEN)?
-        new String(this.val, 0, MAX_VALUE_LEN, HConstants.UTF8_ENCODING) +
-          "...":
-        new String(getVal(), HConstants.UTF8_ENCODING);
-    } catch (UnsupportedEncodingException e) {
-      throw new RuntimeException("UTF8 encoding not present?", e);
-    }
-    return "(" + getColumn().toString() + "/" + getTimestamp() + "/" +
-      value + ")";
-  }
-  
-  // Writable
-
-  /** {@inheritDoc} */
-  public void write(DataOutput out) throws IOException {
-    this.column.write(out);
-    out.writeInt(this.val.length);
-    out.write(this.val);
-    out.writeLong(timestamp);
-  }
-  
-  /** {@inheritDoc} */
-  public void readFields(DataInput in) throws IOException {
-    this.column.readFields(in);
-    this.val = new byte[in.readInt()];
-    in.readFully(this.val);
-    this.timestamp = in.readLong();
-  }
-}
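
The write()/readFields() pair above follows the usual Hadoop Writable pattern: fields are written in a fixed order (here a column, a length-prefixed byte array, and a timestamp) and read back in exactly the same order. Below is a small round-trip sketch using only java.io; the Edit class and its field values are made up for illustration and are not the removed HLogEdit.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutput;
    import java.io.DataOutputStream;
    import java.io.IOException;

    /** Round-trips a value the way HLogEdit's write()/readFields() do: fields
     *  are written in a fixed order and read back in that exact order. */
    public class WritableRoundTripSketch {
      static class Edit {
        String column;
        byte[] value;
        long timestamp;

        void write(DataOutput out) throws IOException {
          out.writeUTF(column);         // HLogEdit uses Text; writeUTF is a stand-in
          out.writeInt(value.length);   // length prefix, then the raw bytes
          out.write(value);
          out.writeLong(timestamp);
        }

        void readFields(DataInput in) throws IOException {
          column = in.readUTF();
          value = new byte[in.readInt()];
          in.readFully(value);
          timestamp = in.readLong();
        }
      }

      public static void main(String[] args) throws IOException {
        Edit original = new Edit();
        original.column = "info:server";
        original.value = "10.0.0.1:60020".getBytes("UTF-8");
        original.timestamp = 1205000000000L;

        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buffer));

        Edit copy = new Edit();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
        System.out.println(copy.column + " @ " + copy.timestamp); // info:server @ 1205000000000
      }
    }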

+ 0 - 159
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java

@@ -1,159 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.hadoop.io.*;
-
-import java.io.*;
-
-/**
- * A Key for an entry in the change log.
- * 
- * The log intermingles edits to many tables and rows, so each log entry 
- * identifies the appropriate table and row.  Within a table and row, they're 
- * also sorted.
- */
-public class HLogKey implements WritableComparable {
-  Text regionName = new Text();
-  Text tablename = new Text();
-  Text row = new Text();
-  long logSeqNum = 0L;
-
-  /** Create an empty key useful when deserializing */
-  public HLogKey() {
-    super();
-  }
-  
-  /**
-   * Create the log key!
-   * We maintain the tablename mainly for debugging purposes.
-   * A regionName is always a sub-table object.
-   *
-   * @param regionName  - name of region
-   * @param tablename   - name of table
-   * @param row         - row key
-   * @param logSeqNum   - log sequence number
-   */
-  public HLogKey(Text regionName, Text tablename, Text row, long logSeqNum) {
-    // TODO: Is this copy of the instances necessary? They are expensive.
-    this.regionName.set(regionName);
-    this.tablename.set(tablename);
-    this.row.set(row);
-    this.logSeqNum = logSeqNum;
-  }
-
-  //////////////////////////////////////////////////////////////////////////////
-  // A bunch of accessors
-  //////////////////////////////////////////////////////////////////////////////
-
-  Text getRegionName() {
-    return regionName;
-  }
-  
-  Text getTablename() {
-    return tablename;
-  }
-  
-  Text getRow() {
-    return row;
-  }
-  
-  long getLogSeqNum() {
-    return logSeqNum;
-  }
-  
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String toString() {
-    return tablename + "/" + regionName + "/" + row + "/" + logSeqNum;
-  }
-  
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean equals(Object obj) {
-    return compareTo(obj) == 0;
-  }
-  
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public int hashCode() {
-    int result = this.regionName.hashCode();
-    result ^= this.row.hashCode(); 
-    result ^= this.logSeqNum;
-    return result;
-  }
-
-  //
-  // Comparable
-  //
-
-  /**
-   * {@inheritDoc}
-   */
-  public int compareTo(Object o) {
-    HLogKey other = (HLogKey) o;
-    int result = this.regionName.compareTo(other.regionName);
-    
-    if(result == 0) {
-      result = this.row.compareTo(other.row);
-      
-      if(result == 0) {
-        
-        if (this.logSeqNum < other.logSeqNum) {
-          result = -1;
-          
-        } else if (this.logSeqNum > other.logSeqNum) {
-          result = 1;
-        }
-      }
-    }
-    return result;
-  }
-
-  //
-  // Writable
-  //
-
-  /**
-   * {@inheritDoc}
-   */
-  public void write(DataOutput out) throws IOException {
-    this.regionName.write(out);
-    this.tablename.write(out);
-    this.row.write(out);
-    out.writeLong(logSeqNum);
-  }
-  
-  /**
-   * {@inheritDoc}
-   */
-  public void readFields(DataInput in) throws IOException {
-    this.regionName.readFields(in);
-    this.tablename.readFields(in);
-    this.row.readFields(in);
-    this.logSeqNum = in.readLong();
-  }
-}
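
compareTo() above orders log entries by region name first, then row, then sequence number, so a sorted run of keys groups all edits for one region and row together in write order. A stand-alone illustration of that ordering with a plain comparator follows; the Key class and sample values are made up and are not the removed HLogKey.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    /** Illustrates the region -> row -> sequence-number ordering of HLogKey.compareTo(). */
    public class LogKeyOrderingSketch {
      static final class Key {
        final String region;
        final String row;
        final long seq;
        Key(String region, String row, long seq) {
          this.region = region;
          this.row = row;
          this.seq = seq;
        }
        @Override public String toString() {
          return region + "/" + row + "/" + seq;
        }
      }

      public static void main(String[] args) {
        List<Key> keys = new ArrayList<Key>();
        keys.add(new Key("regionB", "row1", 7));
        keys.add(new Key("regionA", "row2", 5));
        keys.add(new Key("regionA", "row1", 9));
        keys.add(new Key("regionA", "row1", 3));

        Collections.sort(keys, new Comparator<Key>() {
          public int compare(Key a, Key b) {
            int result = a.region.compareTo(b.region);   // region name first
            if (result == 0) {
              result = a.row.compareTo(b.row);           // then row
            }
            if (result == 0) {
              result = Long.compare(a.seq, b.seq);       // then sequence number
            }
            return result;
          }
        });

        // Prints: regionA/row1/3, regionA/row1/9, regionA/row2/5, regionB/row1/7
        for (Key k : keys) {
          System.out.println(k);
        }
      }
    }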

+ 0 - 3262
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java

@@ -1,3262 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.lang.reflect.Constructor;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.DelayQueue;
-import java.util.concurrent.Delayed;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.ipc.HbaseRPC;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.InfoServer;
-import org.apache.hadoop.hbase.util.Sleeper;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.hbase.io.HbaseMapWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.ipc.Server;
-
-
-/**
- * HMaster is the "master server" for HBase.
- * There is only one HMaster for a single HBase deployment.
- */
-public class HMaster extends Thread implements HConstants, HMasterInterface, 
-  HMasterRegionInterface {
-  
-  static final Log LOG = LogFactory.getLog(HMaster.class.getName());
-  static final Long ZERO_L = Long.valueOf(0L);
-
-  /** {@inheritDoc} */
-  public long getProtocolVersion(String protocol,
-      @SuppressWarnings("unused") long clientVersion)
-  throws IOException {
-    if (protocol.equals(HMasterInterface.class.getName())) {
-      return HMasterInterface.versionID; 
-    } else if (protocol.equals(HMasterRegionInterface.class.getName())) {
-      return HMasterRegionInterface.versionID;
-    } else {
-      throw new IOException("Unknown protocol to name node: " + protocol);
-    }
-  }
-
-  // We start out with closed flag on.  Using AtomicBoolean rather than
-  // plain boolean because we want to pass a reference to supporting threads
-  // started here in HMaster rather than have them have to know about the
-  // hosting class
-  volatile AtomicBoolean closed = new AtomicBoolean(true);
-  volatile boolean shutdownRequested = false;
-  volatile AtomicInteger quiescedMetaServers = new AtomicInteger(0);
-  volatile boolean fsOk = true;
-  final Path rootdir;
-  final HBaseConfiguration conf;
-  final FileSystem fs;
-  final Random rand;
-  final int threadWakeFrequency; 
-  final int numRetries;
-  final long maxRegionOpenTime;
-
-  volatile DelayQueue<RegionServerOperation> delayedToDoQueue =
-    new DelayQueue<RegionServerOperation>();
-  volatile BlockingQueue<RegionServerOperation> toDoQueue =
-    new LinkedBlockingQueue<RegionServerOperation>();
-
-  final int leaseTimeout;
-  private final Leases serverLeases;
-  private final Server server;
-  private final HServerAddress address;
-
-  final HConnection connection;
-
-  final int metaRescanInterval;
-
-  volatile AtomicReference<HServerAddress> rootRegionLocation =
-    new AtomicReference<HServerAddress>(null);
-  
-  final Lock splitLogLock = new ReentrantLock();
-  
-  // A Sleeper that sleeps for threadWakeFrequency
-  protected final Sleeper sleeper;
-  
-  // Default access so accessible from unit tests. MASTER is the name of the
-  // webapp and the attribute name used when stuffing this instance into the
-  // web context.
-  InfoServer infoServer;
-  
-  /** Name of master server */
-  public static final String MASTER = "master";
-
-  /**
-   * Base HRegion scanner class. Holds utility common to <code>ROOT</code> and
-   * <code>META</code> HRegion scanners.
-   * 
-   * <p>How do we know if all regions are assigned? After the initial scan of
-   * the <code>ROOT</code> and <code>META</code> regions, all regions known at
-   * that time will have been or are in the process of being assigned.</p>
-   * 
-   * <p>When a region is split the region server notifies the master of the
-   * split and the new regions are assigned. But suppose the master loses the
-   * split message? We need to periodically rescan the <code>ROOT</code> and
-   * <code>META</code> regions.
-   *    <ul>
-   *    <li>If we rescan, any regions that are new but not assigned will have
-   *    no server info. Any regions that are not being served by the same
-   *    server will get re-assigned.</li>
-   *      
-   *    <li>Thus a periodic rescan of the root region will find any new
-   *    <code>META</code> regions where we missed the <code>META</code> split
-   *    message or we failed to detect a server death and consequently need to
-   *    assign the region to a new server.</li>
-   *        
-   *    <li>if we keep track of all the known <code>META</code> regions, then
-   *    we can rescan them periodically. If we do this then we can detect any
-   *    regions for which we missed a region split message.</li>
-   *    </ul>
-   *    
-   * Thus just keeping track of all the <code>META</code> regions permits
-   * periodic rescanning which will detect unassigned regions (new or
-   * otherwise) without the need to keep track of every region.</p>
-   * 
-   * <p>So the <code>ROOT</code> region scanner needs to wake up:
-   * <ol>
-   * <li>when the master receives notification that the <code>ROOT</code>
-   * region has been opened.</li>
-   * <li>periodically after the first scan</li>
-   * </ol>
-   * 
-   * The <code>META</code>  scanner needs to wake up:
-   * <ol>
-   * <li>when a <code>META</code> region comes on line</li>
-   * <li>periodically to rescan the online <code>META</code> regions</li>
-   * </ol>
-   * 
-   * <p>A <code>META</code> region is not 'online' until it has been scanned
-   * once.
-   */
-  abstract class BaseScanner extends Chore {
-    protected boolean rootRegion;
-
-    protected abstract boolean initialScan();
-    protected abstract void maintenanceScan();
-
-    BaseScanner(final boolean rootRegion, final int period,
-        final AtomicBoolean stop) {
-      super(period, stop);
-      this.rootRegion = rootRegion;
-    }
-    
-    @Override
-    protected boolean initialChore() {
-      return initialScan();
-    }
-    
-    @Override
-    protected void chore() {
-      maintenanceScan();
-    }
-
-    /**
-     * @param region Region to scan
-     * @throws IOException
-     */
-    protected void scanRegion(final MetaRegion region) throws IOException {
-      HRegionInterface regionServer = null;
-      long scannerId = -1L;
-      LOG.info(Thread.currentThread().getName() + " scanning meta region " +
-        region.toString());
-
-      // Array to hold list of split parents found.  Scan adds to list.  After
-      // scan we go check if parents can be removed.
-      Map<HRegionInfo, SortedMap<Text, byte[]>> splitParents =
-        new HashMap<HRegionInfo, SortedMap<Text, byte[]>>();
-      try {
-        regionServer = connection.getHRegionConnection(region.getServer());
-        scannerId =
-          regionServer.openScanner(region.getRegionName(), COLUMN_FAMILY_ARRAY,
-              EMPTY_START_ROW, System.currentTimeMillis(), null);
-
-        int numberOfRegionsFound = 0;
-        while (true) {
-          HbaseMapWritable values = regionServer.next(scannerId);
-          if (values == null || values.size() == 0) {
-            break;
-          }
-
-          // TODO: Why does this have to be a sorted map?
-          SortedMap<Text, byte[]> results = toRowMap(values).getMap();
-          
-          HRegionInfo info = getHRegionInfo(results);
-          if (info == null) {
-            continue;
-          }
-
-          String serverName = Writables.bytesToString(results.get(COL_SERVER));
-          long startCode = Writables.bytesToLong(results.get(COL_STARTCODE));
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(Thread.currentThread().getName() + " regioninfo: {" +
-              info.toString() + "}, server: " + serverName + ", startCode: " +
-              startCode);
-          }
-
-          // Note Region has been assigned.
-          checkAssigned(info, serverName, startCode);
-          if (isSplitParent(info)) {
-            splitParents.put(info, results);
-          }
-          numberOfRegionsFound += 1;
-        }
-        if (this.rootRegion) {
-          numberOfMetaRegions.set(numberOfRegionsFound);
-        }
-      } catch (IOException e) {
-        if (e instanceof RemoteException) {
-          e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-          if (e instanceof UnknownScannerException) {
-            // Reset scannerId so we do not try closing a scanner the other side
-            // has lost account of: prevents duplicated stack trace out of the 
-            // below close in the finally.
-            scannerId = -1L;
-          }
-        }
-        throw e;
-      } finally {
-        try {
-          if (scannerId != -1L && regionServer != null) {
-            regionServer.close(scannerId);
-          }
-        } catch (IOException e) {
-          LOG.error("Closing scanner",
-            RemoteExceptionHandler.checkIOException(e));
-        }
-      }
-
-      // Scan is finished.  Take a look at split parents to see if any we can
-      // clean up.
-      if (splitParents.size() > 0) {
-        for (Map.Entry<HRegionInfo, SortedMap<Text, byte[]>> e:
-            splitParents.entrySet()) {
-          HRegionInfo hri = e.getKey();
-          cleanupSplits(region.getRegionName(), regionServer, hri, e.getValue());
-        }
-      }
-      LOG.info(Thread.currentThread().getName() + " scan of meta region " +
-        region.toString() + " complete");
-    }
-
-    /*
-     * @param info Region to check.
-     * @return True if this is a split parent.
-     */
-    private boolean isSplitParent(final HRegionInfo info) {
-      if (!info.isSplit()) {
-        return false;
-      }
-      if (!info.isOffline()) {
-        LOG.warn("Region is split but not offline: " + info.getRegionName());
-      }
-      return true;
-    }
-
-    /*
-     * If daughters no longer hold reference to the parents, delete the parent.
-     * @param metaRegionName Meta region name.
-     * @param server HRegionInterface of meta server to talk to 
-     * @param parent HRegionInfo of split parent
-     * @param rowContent Content of <code>parent</code> row in
-     * <code>metaRegionName</code>
-     * @return True if we removed <code>parent</code> from meta table and from
-     * the filesystem.
-     * @throws IOException
-     */
-    private boolean cleanupSplits(final Text metaRegionName, 
-        final HRegionInterface srvr, final HRegionInfo parent,
-        SortedMap<Text, byte[]> rowContent)
-    throws IOException {
-      boolean result = false;
-
-      boolean hasReferencesA = hasReferences(metaRegionName, srvr,
-          parent.getRegionName(), rowContent, COL_SPLITA);
-      boolean hasReferencesB = hasReferences(metaRegionName, srvr,
-          parent.getRegionName(), rowContent, COL_SPLITB);
-      
-      if (!hasReferencesA && !hasReferencesB) {
-        LOG.info("Deleting region " + parent.getRegionName() +
-          " because daughter splits no longer hold references");
-        if (!HRegion.deleteRegion(fs, rootdir, parent)) {
-          LOG.warn("Deletion of " + parent.getRegionName() + " failed");
-        }
-        
-        HRegion.removeRegionFromMETA(srvr, metaRegionName,
-          parent.getRegionName());
-        result = true;
-      } else if (LOG.isDebugEnabled()) {
-        // If debugging, log that we checked, and the current state of the daughters.
-        LOG.debug("Checked " + parent.getRegionName() +
-          " for references: splitA: " + hasReferencesA + ", splitB: "+
-          hasReferencesB);
-      }
-      return result;
-    }
-    
-    /* 
-     * Checks if a daughter region -- either splitA or splitB -- still holds
-     * references to parent.  If not, removes reference to the split from
-     * the parent meta region row.
-     * @param metaRegionName Name of meta region to look in.
-     * @param srvr Where region resides.
-     * @param parent Parent region name. 
-     * @param rowContent Keyed content of the parent row in meta region.
-     * @param splitColumn Column name of daughter split to examine
-     * @return True if still has references to parent.
-     * @throws IOException
-     */
-    protected boolean hasReferences(final Text metaRegionName, 
-      final HRegionInterface srvr, final Text parent,
-      SortedMap<Text, byte[]> rowContent, final Text splitColumn)
-    throws IOException {
-      boolean result = false;
-      HRegionInfo split =
-        Writables.getHRegionInfoOrNull(rowContent.get(splitColumn));
-      if (split == null) {
-        return result;
-      }
-      Path tabledir =
-        HTableDescriptor.getTableDir(rootdir, split.getTableDesc().getName());
-      for (HColumnDescriptor family: split.getTableDesc().families().values()) {
-        Path p = HStoreFile.getMapDir(tabledir, split.getEncodedName(),
-            family.getFamilyName());
-
-        // Look for reference files.  Call listPaths with an anonymous
-        // instance of PathFilter.
-
-        Path [] ps = fs.listPaths(p,
-            new PathFilter () {
-              public boolean accept(Path path) {
-                return HStore.isReference(path);
-              }
-            }
-        );
-
-        if (ps != null && ps.length > 0) {
-          result = true;
-          break;
-        }
-      }
-      
-      if (result) {
-        return result;
-      }
-      
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(split.getRegionName().toString()
-            +" no longer has references to " + parent.toString());
-      }
-      
-      BatchUpdate b = new BatchUpdate(rand.nextLong());
-      long lockid = b.startUpdate(parent);
-      b.delete(lockid, splitColumn);
-      srvr.batchUpdate(metaRegionName, System.currentTimeMillis(), b);
-        
-      return result;
-    }
-
-    protected void checkAssigned(final HRegionInfo info,
-      final String serverName, final long startCode) throws IOException {
-      
-      // Skip region - if ...
-      if(info.isOffline()                                 // offline
-          || killedRegions.contains(info.getRegionName()) // queued for offline
-          || regionsToDelete.contains(info.getRegionName())) { // queued for delete
-
-        unassignedRegions.remove(info);
-        return;
-      }
-      HServerInfo storedInfo = null;
-      boolean deadServer = false;
-      if (serverName.length() != 0) {
-        synchronized (killList) {
-          Map<Text, HRegionInfo> regionsToKill = killList.get(serverName);
-          if (regionsToKill != null &&
-              regionsToKill.containsKey(info.getRegionName())) {
-
-            // Skip if region is on kill list
-            if(LOG.isDebugEnabled()) {
-              LOG.debug("not assigning region (on kill list): " +
-                  info.getRegionName());
-            }
-            return;
-          }
-        }
-        storedInfo = serversToServerInfo.get(serverName);
-        deadServer = deadServers.contains(serverName);
-      }
-
-      /*
-       * If the server is not dead and either:
-       *   the stored info is not null and the start code does not match
-       * or:
-       *   the stored info is null and the region is neither unassigned nor pending
-       * then:
-       */ 
-      if (!deadServer &&
-          ((storedInfo != null && storedInfo.getStartCode() != startCode) ||
-              (storedInfo == null &&
-                  !unassignedRegions.containsKey(info) &&
-                  !pendingRegions.contains(info.getRegionName())
-              )
-          )
-        ) {
-
-        // The current assignment is invalid
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Current assignment of " + info.getRegionName() +
-            " is not valid: storedInfo: " + storedInfo + ", startCode: " +
-            startCode + ", storedInfo.startCode: " +
-            ((storedInfo != null)? storedInfo.getStartCode(): -1) +
-            ", unassignedRegions: " + unassignedRegions.containsKey(info) +
-            ", pendingRegions: " +
-            pendingRegions.contains(info.getRegionName()));
-        }
-        // Recover the region server's log if there is one.
-        // This is only done from here if we are restarting and there is stale
-        // data in the meta region. Once we are on-line, dead server log
-        // recovery is handled by lease expiration and ProcessServerShutdown
-        if (!initialMetaScanComplete && serverName.length() != 0) {
-          StringBuilder dirName = new StringBuilder("log_");
-          dirName.append(serverName.replace(":", "_"));
-          Path logDir = new Path(rootdir, dirName.toString());
-          try {
-            if (fs.exists(logDir)) {
-              splitLogLock.lock();
-              try {
-                HLog.splitLog(rootdir, logDir, fs, conf);
-              } finally {
-                splitLogLock.unlock();
-              }
-            }
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Split " + logDir.toString());
-            }
-          } catch (IOException e) {
-            LOG.warn("unable to split region server log because: ", e);
-            throw e;
-          }
-        }
-        // Now get the region assigned
-        unassignedRegions.put(info, ZERO_L);
-      }
-    }
-  }
-
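  // RootScanner and MetaScanner below both build on the Chore pattern used by
  // BaseScanner above: initialChore() runs once, then chore() repeats every
  // 'period' milliseconds until the shared 'stop' flag is set. A minimal sketch,
  // assuming only the (period, stop) constructor and the two hooks seen above;
  // the class name and method bodies are hypothetical.
  abstract class ExampleScannerSketch extends Chore {
    ExampleScannerSketch(final int period, final AtomicBoolean stop) {
      super(period, stop);
    }

    @Override
    protected boolean initialChore() {
      return doFullScan();      // whether the one-time first pass completed
    }

    @Override
    protected void chore() {
      doFullScan();             // periodic maintenance rescan
    }

    abstract boolean doFullScan();
  }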
-  volatile boolean rootScanned = false;
-
-  /** Scanner for the <code>ROOT</code> HRegion. */
-  class RootScanner extends BaseScanner {
-    /** Constructor */
-    public RootScanner() {
-      super(true, metaRescanInterval, closed);
-    }
-
-    private boolean scanRoot() {
-      // Don't retry if we get an error while scanning. Errors are most often
-      // caused by the server going away. Wait until next rescan interval when
-      // things should be back to normal
-      boolean scanSuccessful = false;
-      synchronized (rootRegionLocation) {
-        while(!closed.get() && rootRegionLocation.get() == null) {
-          // rootRegionLocation will be filled in when we get an 'open region'
-          // regionServerReport message from the HRegionServer that has been
-          // allocated the ROOT region below.
-          try {
-            rootRegionLocation.wait();
-          } catch (InterruptedException e) {
-            // continue
-          }
-        }
-      }
-      if (closed.get()) {
-        return scanSuccessful;
-      }
-
-      try {
-        // Don't interrupt us while we're working
-        synchronized(rootScannerLock) {
-          scanRegion(new MetaRegion(rootRegionLocation.get(),
-              HRegionInfo.rootRegionInfo.getRegionName(), null));
-        }
-        scanSuccessful = true;
-      } catch (IOException e) {
-        e = RemoteExceptionHandler.checkIOException(e);
-        LOG.warn("Scan ROOT region", e);
-        // Make sure the file system is still available
-        checkFileSystem();
-      } catch (Exception e) {
-        // If for some reason we get some other kind of exception, 
-        // at least log it rather than go out silently.
-        LOG.error("Unexpected exception", e);
-      }
-      return scanSuccessful;
-    }
-
-    @Override
-    protected boolean initialScan() {
-      rootScanned = scanRoot();
-      return rootScanned;
-    }
-
-    @Override
-    protected void maintenanceScan() {
-      scanRoot();
-    }
-  }
-
-  private final RootScanner rootScannerThread;
-  final Integer rootScannerLock = new Integer(0);
-
-  /** Describes a meta region and its server */
-  @SuppressWarnings("unchecked")
-  public static class MetaRegion implements Comparable {
-    private HServerAddress server;
-    private Text regionName;
-    private Text startKey;
-
-    MetaRegion(HServerAddress server, Text regionName, Text startKey) {
-      if (server == null) {
-        throw new IllegalArgumentException("server cannot be null");
-      }
-      this.server = server;
-      
-      if (regionName == null) {
-        throw new IllegalArgumentException("regionName cannot be null");
-      }
-      this.regionName = new Text(regionName);
-      
-      this.startKey = new Text();
-      if (startKey != null) {
-        this.startKey.set(startKey);
-      }
-    }
-    
-    /** {@inheritDoc} */
-    @Override
-    public String toString() {
-      return "{regionname: " + this.regionName.toString() + ", startKey: <" +
-        this.startKey.toString() + ">, server: " + this.server.toString() + "}";
-    }
-
-    /** @return the regionName */
-    public Text getRegionName() {
-      return regionName;
-    }
-
-    /** @return the server */
-    public HServerAddress getServer() {
-      return server;
-    }
-
-    /** @return the startKey */
-    public Text getStartKey() {
-      return startKey;
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public boolean equals(Object o) {
-      return this.compareTo(o) == 0;
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public int hashCode() {
-      int result = this.regionName.hashCode();
-      result ^= this.startKey.hashCode();
-      return result;
-    }
-
-    // Comparable
-
-    /** {@inheritDoc} */
-    public int compareTo(Object o) {
-      MetaRegion other = (MetaRegion)o;
-      int result = this.regionName.compareTo(other.getRegionName());
-      if(result == 0) {
-        result = this.startKey.compareTo(other.getStartKey());
-        if (result == 0) {
-          // Might be on different host?
-          result = this.server.compareTo(other.server);
-        }
-      }
-      return result;
-    }
-  }
-
-  /** Set by root scanner to indicate the number of meta regions */
-  volatile AtomicInteger numberOfMetaRegions = new AtomicInteger();
-
-  /** Work for the meta scanner is queued up here */
-  volatile BlockingQueue<MetaRegion> metaRegionsToScan =
-    new LinkedBlockingQueue<MetaRegion>();
-
-  /** These are the online meta regions */
-  volatile SortedMap<Text, MetaRegion> onlineMetaRegions =
-    Collections.synchronizedSortedMap(new TreeMap<Text, MetaRegion>());
-
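  // onlineMetaRegions is kept sorted by start key, presumably so that the META
  // region covering a given meta row is the entry with the greatest start key
  // less than or equal to that row. A minimal lookup sketch (a hypothetical
  // helper, not a method this class actually declares here):
  MetaRegion onlineMetaRegionFor(final Text metaRow) {
    if (onlineMetaRegions.containsKey(metaRow)) {
      return onlineMetaRegions.get(metaRow);
    }
    SortedMap<Text, MetaRegion> head = onlineMetaRegions.headMap(metaRow);
    return head.isEmpty() ? null : head.get(head.lastKey());
  }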
-  /** Set by meta scanner after initial scan */
-  volatile boolean initialMetaScanComplete = false;
-
-  /**
-   * MetaScanner scans the <code>META</code> table.
-   * 
-   * When a <code>META</code> server comes on line, a MetaRegion object is
-   * queued up by regionServerReport() and this thread wakes up.
-   *
-   * It's important to do this work in a separate thread, or else the blocking 
-   * action would prevent other work from getting done.
-   */
-  class MetaScanner extends BaseScanner {
-    private final List<MetaRegion> metaRegionsToRescan =
-      new ArrayList<MetaRegion>();
-    
-    /** Constructor */
-    public MetaScanner() {
-      super(false, metaRescanInterval, closed);
-    }
-
-    private boolean scanOneMetaRegion(MetaRegion region) {
-      // Don't retry if we get an error while scanning. Errors are most often
-      // caused by the server going away. Wait until next rescan interval when
-      // things should be back to normal
-      boolean scanSuccessful = false;
-      while (!closed.get() && !rootScanned &&
-          rootRegionLocation.get() == null) {
-        sleeper.sleep();
-      }
-      if (closed.get()) {
-        return scanSuccessful;
-      }
-
-      try {
-        // Don't interrupt us while we're working
-        synchronized (metaScannerLock) {
-          scanRegion(region);
-          onlineMetaRegions.put(region.getStartKey(), region);
-        }
-        scanSuccessful = true;
-      } catch (IOException e) {
-        e = RemoteExceptionHandler.checkIOException(e);
-        LOG.warn("Scan one META region: " + region.toString(), e);
-        // The region may have moved (TestRegionServerAbort, etc.).  If
-        // so, either it won't be in the onlineMetaRegions list or its host
-        // address has changed and the containsValue will fail. If not
-        // found, best thing to do here is probably return.
-        if (!onlineMetaRegions.containsValue(region.getStartKey())) {
-          LOG.debug("Scanned region is no longer in map of online " +
-          "regions or its value has changed");
-          return scanSuccessful;
-        }
-        // Make sure the file system is still available
-        checkFileSystem();
-      } catch (Exception e) {
-        // If for some reason we get some other kind of exception, 
-        // at least log it rather than go out silently.
-        LOG.error("Unexpected exception", e);
-      }
-      return scanSuccessful;
-    }
-
-    @Override
-    protected boolean initialScan() {
-      MetaRegion region = null;
-      while (!closed.get() && region == null && !metaRegionsScanned()) {
-        try {
-          region =
-            metaRegionsToScan.poll(threadWakeFrequency, TimeUnit.MILLISECONDS);
-        } catch (InterruptedException e) {
-          // continue
-        }
-        if (region == null && metaRegionsToRescan.size() != 0) {
-          region = metaRegionsToRescan.remove(0);
-        }
-        if (region != null) {
-          if (!scanOneMetaRegion(region)) {
-            metaRegionsToRescan.add(region);
-          }
-        }
-      }
-      initialMetaScanComplete = true;
-      return true;
-    }
-
-    @Override
-    protected void maintenanceScan() {
-      ArrayList<MetaRegion> regions = new ArrayList<MetaRegion>();
-      synchronized (onlineMetaRegions) {
-        regions.addAll(onlineMetaRegions.values());
-      }
-      for (MetaRegion r: regions) {
-        scanOneMetaRegion(r);
-      }
-      metaRegionsScanned();
-    }
-
-    /**
-     * Called by the meta scanner when it has completed scanning all meta 
-     * regions. This wakes up any threads that were waiting for this to happen.
-     */
-    private synchronized boolean metaRegionsScanned() {
-      if (!rootScanned ||
-          numberOfMetaRegions.get() != onlineMetaRegions.size()) {
-        return false;
-      }
-      LOG.info("all meta regions scanned");
-      notifyAll();
-      return true;
-    }
-
-    /**
-     * Other threads call this method to wait until all the meta regions have
-     * been scanned.
-     */
-    synchronized boolean waitForMetaRegionsOrClose() {
-      while (!closed.get()) {
-        if (rootScanned &&
-            numberOfMetaRegions.get() == onlineMetaRegions.size()) {
-          break;
-        }
-
-        try {
-          wait(threadWakeFrequency);
-        } catch (InterruptedException e) {
-          // continue
-        }
-      }
-      return closed.get();
-    }
-  }
-
-  final MetaScanner metaScannerThread;
-  final Integer metaScannerLock = new Integer(0);
-
-  /** The map of known server names to server info */
-  volatile Map<String, HServerInfo> serversToServerInfo =
-    new ConcurrentHashMap<String, HServerInfo>();
-  
-  /** Set of known dead servers */
-  volatile Set<String> deadServers =
-    Collections.synchronizedSet(new HashSet<String>());
-
-  /** SortedMap server load -> Set of server names */
-  volatile SortedMap<HServerLoad, Set<String>> loadToServers =
-    Collections.synchronizedSortedMap(new TreeMap<HServerLoad, Set<String>>());
-
-  /** Map of server names -> server load */
-  volatile Map<String, HServerLoad> serversToLoad =
-    new ConcurrentHashMap<String, HServerLoad>();
-
-  /**
-   * The 'unassignedRegions' table maps from an HRegionInfo to a timestamp that
-   * indicates the last time we *tried* to assign the region to a RegionServer.
-   * If the timestamp is out of date, then we can try to reassign it. 
-   * 
-   * We fill 'unassignedRegions' by scanning the ROOT and META tables, learning the
-   * set of all known valid regions.
-   * 
-   * <p>Items are removed from this list when a region server reports in that
-   * the region has been deployed.
-   */
-  volatile SortedMap<HRegionInfo, Long> unassignedRegions =
-    Collections.synchronizedSortedMap(new TreeMap<HRegionInfo, Long>());
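  // A minimal sketch of the retry test implied by the comment above, reading
  // the stored value as a "do not retry before" timestamp (ZERO_L meaning the
  // region has never been assigned); the exact policy is applied in
  // assignRegions below, and this helper is hypothetical.
  boolean isAssignmentStale(final HRegionInfo region, final long now) {
    Long ts = unassignedRegions.get(region);
    return ts != null && now > ts.longValue();
  }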
-
-  /**
-   * Regions that have been assigned, and the server has reported that it has
-   * started serving it, but that we have not yet recorded in the meta table.
-   */
-  volatile Set<Text> pendingRegions =
-    Collections.synchronizedSet(new HashSet<Text>());
-
-  /**
-   * The 'killList' is a list of regions that are going to be closed, but not
-   * reopened.
-   */
-  volatile Map<String, HashMap<Text, HRegionInfo>> killList =
-    new ConcurrentHashMap<String, HashMap<Text, HRegionInfo>>();
-
-  /** 'killedRegions' contains regions that are in the process of being closed */
-  volatile Set<Text> killedRegions =
-    Collections.synchronizedSet(new HashSet<Text>());
-
-  /**
-   * 'regionsToDelete' contains regions that need to be deleted, but cannot be
-   * until the region server closes it
-   */
-  volatile Set<Text> regionsToDelete =
-    Collections.synchronizedSet(new HashSet<Text>());
-
-  /** Set of tables currently in creation. */
-  private volatile Set<Text> tableInCreation = 
-    Collections.synchronizedSet(new HashSet<Text>());
-
-  /** Build the HMaster out of a raw configuration item.
-   * 
-   * @param conf - Configuration object
-   * @throws IOException
-   */
-  public HMaster(HBaseConfiguration conf) throws IOException {
-    this(new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR)),
-        new HServerAddress(conf.get(MASTER_ADDRESS, DEFAULT_MASTER_ADDRESS)),
-        conf);
-  }
-
-  /** 
-   * Build the HMaster
-   * @param rootdir base directory of this HBase instance
-   * @param address server address and port number
-   * @param conf configuration
-   * 
-   * @throws IOException
-   */
-  public HMaster(Path rootdir, HServerAddress address, HBaseConfiguration conf)
-    throws IOException {
-    
-    this.conf = conf;
-    this.fs = FileSystem.get(conf);
-    this.rootdir = fs.makeQualified(rootdir);
-    this.conf.set(HConstants.HBASE_DIR, this.rootdir.toString());
-    this.rand = new Random();
-    
-    Path rootRegionDir =
-      HRegion.getRegionDir(rootdir, HRegionInfo.rootRegionInfo);
-    LOG.info("Root region dir: " + rootRegionDir.toString());
-
-    try {
-      // Make sure the root directory exists!
-      if(! fs.exists(rootdir)) {
-        fs.mkdirs(rootdir);
-        FSUtils.setVersion(fs, rootdir);
-      } else if (!FSUtils.checkVersion(fs, rootdir)) {
-        throw new IOException("File system needs upgrade. Run " +
-          "the '${HBASE_HOME}/bin/hbase migrate' script");
-      }
-
-      if (!fs.exists(rootRegionDir)) {
-        LOG.info("bootstrap: creating ROOT and first META regions");
-        try {
-          HRegion root = HRegion.createHRegion(HRegionInfo.rootRegionInfo,
-              this.rootdir, this.conf);
-          HRegion meta = HRegion.createHRegion(HRegionInfo.firstMetaRegionInfo,
-            this.rootdir, this.conf);
-
-          // Add first region from the META table to the ROOT region.
-          HRegion.addRegionToMETA(root, meta);
-          root.close();
-          root.getLog().closeAndDelete();
-          meta.close();
-          meta.getLog().closeAndDelete();
-        } catch (IOException e) {
-          e = RemoteExceptionHandler.checkIOException(e);
-          LOG.error("bootstrap", e);
-          throw e;
-        }
-      }
-    } catch (IOException e) {
-      LOG.fatal("Not starting HMaster because:", e);
-      throw e;
-    }
-
-    this.threadWakeFrequency = conf.getInt(THREAD_WAKE_FREQUENCY, 10 * 1000);
-    this.numRetries =  conf.getInt("hbase.client.retries.number", 2);
-    this.maxRegionOpenTime =
-      conf.getLong("hbase.hbasemaster.maxregionopen", 30 * 1000);
-
-    this.leaseTimeout = conf.getInt("hbase.master.lease.period", 30 * 1000);
-    this.serverLeases = new Leases(this.leaseTimeout, 
-        conf.getInt("hbase.master.lease.thread.wakefrequency", 15 * 1000));
-    
-    this.server = HbaseRPC.getServer(this, address.getBindAddress(),
-        address.getPort(), conf.getInt("hbase.regionserver.handler.count", 10),
-        false, conf);
-
-    //  The rpc-server port can be ephemeral... ensure we have the correct info
-    this.address = new HServerAddress(server.getListenerAddress());
-    conf.set(MASTER_ADDRESS, address.toString());
-
-    this.connection = HConnectionManager.getConnection(conf);
-
-    this.metaRescanInterval =
-      conf.getInt("hbase.master.meta.thread.rescanfrequency", 60 * 1000);
-
-    // The root region
-    this.rootScannerThread = new RootScanner();
-
-    // Scans the meta table
-    this.metaScannerThread = new MetaScanner();
-    
-    unassignRootRegion();
-
-    this.sleeper = new Sleeper(this.threadWakeFrequency, this.closed);
-    
-    // We're almost open for business
-    this.closed.set(false);
-    LOG.info("HMaster initialized on " + this.address.toString());
-  }
-  
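  // A minimal usage sketch (hypothetical; not the real entry point), assuming
  // HBaseConfiguration's no-argument constructor and default HBASE_DIR and
  // MASTER_ADDRESS settings. HMaster extends Thread, so start() kicks off the
  // main processing loop in run().
  static void exampleStartup() throws IOException {
    HBaseConfiguration conf = new HBaseConfiguration();
    HMaster master = new HMaster(conf);   // one-argument constructor above
    master.start();
  }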
-  /*
-   * Unassign the root region.
-   * This method would be used in the case where the root region server has died
-   * without reporting in.  Currently, we just flounder and never recover.  We
-   * could 'notice' a dead region server in the root scanner -- if we failed to
-   * access it multiple times -- but reassigning root is catastrophic.
-   * 
-   */
-  void unassignRootRegion() {
-    this.rootRegionLocation.set(null);
-    if (!this.shutdownRequested) {
-      this.unassignedRegions.put(HRegionInfo.rootRegionInfo, ZERO_L);
-    }
-  }
-
-  /**
-   * Checks to see if the file system is still accessible.
-   * If not, sets closed
-   * @return false if file system is not available
-   */
-  protected boolean checkFileSystem() {
-    if (fsOk) {
-      if (!FSUtils.isFileSystemAvailable(fs)) {
-        LOG.fatal("Shutting down HBase cluster: file system not available");
-        closed.set(true);
-        fsOk = false;
-      }
-    }
-    return fsOk;
-  }
-
-  /** @return HServerAddress of the master server */
-  public HServerAddress getMasterAddress() {
-    return address;
-  }
-  
-  /**
-   * @return Hbase root dir.
-   */
-  public Path getRootDir() {
-    return this.rootdir;
-  }
-
-  /**
-   * @return Read-only map of servers to serverinfo.
-   */
-  public Map<String, HServerInfo> getServersToServerInfo() {
-    return Collections.unmodifiableMap(this.serversToServerInfo);
-  }
-
-  /**
-   * @return Read-only map of servers to load.
-   */
-  public Map<String, HServerLoad> getServersToLoad() {
-    return Collections.unmodifiableMap(this.serversToLoad);
-  }
-
-  /**
-   * @return Location of the <code>-ROOT-</code> region.
-   */
-  public HServerAddress getRootRegionLocation() {
-    HServerAddress rootServer = null;
-    if (!shutdownRequested && !closed.get()) {
-      rootServer = this.rootRegionLocation.get();
-    }
-    return rootServer;
-  }
-  
-  /**
-   * @return Read-only map of online regions.
-   */
-  public Map<Text, MetaRegion> getOnlineMetaRegions() {
-    return Collections.unmodifiableSortedMap(this.onlineMetaRegions);
-  }
-
-  /** Main processing loop */
-  @Override
-  public void run() {
-    final String threadName = "HMaster";
-    Thread.currentThread().setName(threadName);
-    startServiceThreads();
-    /* Main processing loop */
-    try {
-      while (!closed.get()) {
-        RegionServerOperation op = null;
-        if (shutdownRequested && serversToServerInfo.size() == 0) {
-          startShutdown();
-          break;
-        }
-        if (rootRegionLocation.get() != null) {
-          // We can't process server shutdowns unless the root region is online 
-          op = this.delayedToDoQueue.poll();
-        }
-        if (op == null ) {
-          try {
-            op = toDoQueue.poll(threadWakeFrequency, TimeUnit.MILLISECONDS);
-          } catch (InterruptedException e) {
-            // continue
-          }
-        }
-        if (op == null || closed.get()) {
-          continue;
-        }
-        try {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Main processing loop: " + op.toString());
-          }
-
-          if (!op.process()) {
-            // Operation would have blocked because not all meta regions are
-            // online. This could cause a deadlock, because this thread is waiting
-            // for the missing meta region(s) to come back online, but since it
-            // is waiting, it cannot process the meta region online operation it
-            // is waiting for. So put this operation back on the queue for now.
-            if (toDoQueue.size() == 0) {
-              // The queue is currently empty so wait for a while to see if what
-              // we need comes in first
-              sleeper.sleep();
-            }
-            try {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Put " + op.toString() + " back on queue");
-              }
-              toDoQueue.put(op);
-            } catch (InterruptedException e) {
-              throw new RuntimeException(
-                  "Putting into toDoQueue was interrupted.", e);
-            }
-          }
-        } catch (Exception ex) {
-          if (ex instanceof RemoteException) {
-            try {
-              ex = RemoteExceptionHandler.decodeRemoteException(
-                  (RemoteException)ex);
-            } catch (IOException e) {
-              ex = e;
-              LOG.warn("main processing loop: " + op.toString(), e);
-            }
-          }
-          if (!checkFileSystem()) {
-            break;
-          }
-          LOG.warn("Processing pending operations: " + op.toString(), ex);
-          try {
-            toDoQueue.put(op);
-          } catch (InterruptedException e) {
-            throw new RuntimeException(
-                "Putting into toDoQueue was interrupted.", e);
-          } catch (Exception e) {
-            LOG.error("main processing loop: " + op.toString(), e);
-          }
-        }
-      }
-    } catch (Throwable t) {
-      LOG.fatal("Unhandled exception. Starting shutdown.", t);
-      this.closed.set(true);
-    }
-    // The region servers won't all exit until we stop scanning the meta regions
-    stopScanners();
-    
-    // Wait for all the remaining region servers to report in.
-    letRegionServersShutdown();
-
-    /*
-     * Clean up and close up shop
-     */
-    if (this.infoServer != null) {
-      LOG.info("Stopping infoServer");
-      try {
-        this.infoServer.stop();
-      } catch (InterruptedException ex) {
-        ex.printStackTrace();
-      }
-    }
-    server.stop();                      // Stop server
-    serverLeases.close();               // Turn off the lease monitor
-
-    // Join up with all threads
-    try {
-      if (rootScannerThread.isAlive()) {
-        rootScannerThread.join();       // Wait for the root scanner to finish.
-      }
-    } catch (Exception iex) {
-      LOG.warn("root scanner", iex);
-    }
-    try {
-      if (metaScannerThread.isAlive()) {
-        metaScannerThread.join();       // Wait for meta scanner to finish.
-      }
-    } catch(Exception iex) {
-      LOG.warn("meta scanner", iex);
-    }
-    LOG.info("HMaster main thread exiting");
-  }
-  
-  /*
-   * Start up all services. If any of these threads gets an unhandled exception
-   * then they just die with a logged message.  This should be fine because
-   * in general, we do not expect the master to get such unhandled exceptions
-   * as OOMEs; it should be lightly loaded. See what HRegionServer does if we
-   * need to install an unexpected exception handler.
-   */
-  private void startServiceThreads() {
-    String threadName = Thread.currentThread().getName();
-    try {
-      Threads.setDaemonThreadRunning(this.rootScannerThread,
-        threadName + ".rootScanner");
-      Threads.setDaemonThreadRunning(this.metaScannerThread,
-        threadName + ".metaScanner");
-      // Leases are not the same as Chore threads. Set name differently.
-      this.serverLeases.setName(threadName + ".leaseChecker");
-      this.serverLeases.start();
-      // Put up info server.
-      int port = this.conf.getInt("hbase.master.info.port", 60010);
-      if (port >= 0) {
-        String a = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0");
-        this.infoServer = new InfoServer(MASTER, a, port, false);
-        this.infoServer.setAttribute(MASTER, this);
-        this.infoServer.start();
-      }
-      // Start the server so everything else is running before we start
-      // receiving requests.
-      this.server.start();
-    } catch (IOException e) {
-      if (e instanceof RemoteException) {
-        try {
-          e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-        } catch (IOException ex) {
-          LOG.warn("thread start", ex);
-        }
-      }
-      // Something happened during startup. Shut things down.
-      this.closed.set(true);
-      LOG.error("Failed startup", e);
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Started service threads");
-    }
-  }
-
-  /*
-   * Start shutting down the master
-   */
-  private void startShutdown() {
-    closed.set(true);
-    stopScanners();
-    synchronized(toDoQueue) {
-      toDoQueue.clear();                         // Empty the queue
-      delayedToDoQueue.clear();                  // Empty shut down queue
-      toDoQueue.notifyAll();                     // Wake main thread
-    }
-    synchronized (serversToServerInfo) {
-      serversToServerInfo.notifyAll();
-    }
-  }
-
-  /*
-   * Stop the root and meta scanners so that the region servers serving meta
-   * regions can shut down.
-   */
-  private void stopScanners() {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("telling root scanner to stop");
-    }
-    synchronized(rootScannerLock) {
-      if (rootScannerThread.isAlive()) {
-        rootScannerThread.interrupt();  // Wake root scanner
-      }
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("telling meta scanner to stop");
-    }
-    synchronized(metaScannerLock) {
-      if (metaScannerThread.isAlive()) {
-        metaScannerThread.interrupt();  // Wake meta scanner
-      }
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("meta and root scanners notified");
-    }
-  }
-
-  /*
-   * Wait on regionservers to report in
-   * with {@link #regionServerReport(HServerInfo, HMsg[])} so they get notice that
-   * the master is going down.  Waits until all region servers come back with
-   * a MSG_REGIONSERVER_STOP which will cancel their lease or until leases held
-   * by remote region servers have expired.
-   */
-  private void letRegionServersShutdown() {
-    if (!fsOk) {
-      // Forget waiting for the region servers if the file system has gone
-      // away. Just exit as quickly as possible.
-      return;
-    }
-    synchronized (serversToServerInfo) {
-      while (this.serversToServerInfo.size() > 0) {
-        LOG.info("Waiting on following regionserver(s) to go down (or " +
-            "region server lease expiration, whichever happens first): " +
-            this.serversToServerInfo.values());
-        try {
-          serversToServerInfo.wait(threadWakeFrequency);
-        } catch (InterruptedException e) {
-          // continue
-        }
-      }
-    }
-  }
-
-  /*
-   * HMasterRegionInterface
-   */
-
-  /** {@inheritDoc} */
-  @SuppressWarnings("unused")
-  public HbaseMapWritable regionServerStartup(HServerInfo serverInfo)
-    throws IOException {
-
-    String s = serverInfo.getServerAddress().toString().trim();
-    LOG.info("received start message from: " + s);
-
-    HServerLoad load = serversToLoad.remove(s);
-    if (load != null) {
-      // The startup message was from a known server.
-      // Remove stale information about the server's load.
-      Set<String> servers = loadToServers.get(load);
-      if (servers != null) {
-        servers.remove(s);
-        loadToServers.put(load, servers);
-      }
-    }
-
-    HServerInfo storedInfo = serversToServerInfo.remove(s);
-    if (storedInfo != null && !closed.get()) {
-      // The startup message was from a known server with the same name.
-      // Timeout the old one right away.
-      HServerAddress root = rootRegionLocation.get();
-      if (root != null && root.equals(storedInfo.getServerAddress())) {
-        unassignRootRegion();
-      }
-      delayedToDoQueue.put(new ProcessServerShutdown(storedInfo));
-    }
-
-    // record new server
-
-    load = new HServerLoad();
-    serverInfo.setLoad(load);
-    serversToServerInfo.put(s, serverInfo);
-    serversToLoad.put(s, load);
-    Set<String> servers = loadToServers.get(load);
-    if (servers == null) {
-      servers = new HashSet<String>();
-    }
-    servers.add(s);
-    loadToServers.put(load, servers);
-
-    if (!closed.get()) {
-      long serverLabel = getServerLabel(s);
-      serverLeases.createLease(serverLabel, serverLabel, new ServerExpirer(s));
-    }
-    
-    return createConfigurationSubset();
-  }
-  
-  /**
-   * @return Subset of configuration to pass to initializing regionservers: e.g.
-   * the filesystem to use and root directory to use.
-   */
-  protected HbaseMapWritable createConfigurationSubset() {
-    HbaseMapWritable mw = addConfig(new HbaseMapWritable(), HConstants.HBASE_DIR);
-    return addConfig(mw, "fs.default.name");
-  }
-
-  private HbaseMapWritable addConfig(final HbaseMapWritable mw, final String key) {
-    mw.put(new Text(key), new Text(this.conf.get(key)));
-    return mw;
-  }
-
-  private long getServerLabel(final String s) {
-    return s.hashCode();
-  }
-
-  /** {@inheritDoc} */
-  public HMsg[] regionServerReport(HServerInfo serverInfo, HMsg msgs[])
-  throws IOException {
-    String serverName = serverInfo.getServerAddress().toString().trim();
-    long serverLabel = getServerLabel(serverName);
-    if (msgs.length > 0) {
-      if (msgs[0].getMsg() == HMsg.MSG_REPORT_EXITING) {
-        synchronized (serversToServerInfo) {
-          try {
-            // HRegionServer is shutting down. Cancel the server's lease.
-            // Note that canceling the server's lease takes care of updating
-            // serversToServerInfo, etc.
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Region server " + serverName +
-              ": MSG_REPORT_EXITING -- cancelling lease");
-            }
-
-            if (cancelLease(serverName, serverLabel)) {
-              // Only process the exit message if the server still has a lease.
-              // Otherwise we could end up processing the server exit twice.
-              LOG.info("Region server " + serverName +
-              ": MSG_REPORT_EXITING -- lease cancelled");
-              // Get all the regions the server was serving reassigned
-              // (if we are not shutting down).
-              if (!closed.get()) {
-                for (int i = 1; i < msgs.length; i++) {
-                  HRegionInfo info = msgs[i].getRegionInfo();
-                  if (info.isRootRegion()) {
-                    rootRegionLocation.set(null);
-                  } else if (info.isMetaTable()) {
-                    onlineMetaRegions.remove(info.getStartKey());
-                  }
-
-                  this.unassignedRegions.put(info, ZERO_L);
-                }
-              }
-            }
-
-            // We don't need to return anything to the server because it isn't
-            // going to do any more work.
-            return new HMsg[0];
-          } finally {
-            serversToServerInfo.notifyAll();
-          }
-        }
-      } else if (msgs[0].getMsg() == HMsg.MSG_REPORT_QUIESCED) {
-        LOG.info("Region server " + serverName + " quiesced");
-        quiescedMetaServers.incrementAndGet();
-      }
-    }
-
-    if(quiescedMetaServers.get() >= serversToServerInfo.size()) {
-      // If the only servers we know about are meta servers, then we can
-      // proceed with shutdown
-      LOG.info("All user tables quiesced. Proceeding with shutdown");
-      startShutdown();
-    }
-
-    if (shutdownRequested && !closed.get()) {
-      // Tell the server to stop serving any user regions
-      return new HMsg[]{new HMsg(HMsg.MSG_REGIONSERVER_QUIESCE)};
-    }
-
-    if (closed.get()) {
-      // Tell server to shut down if we are shutting down.  This should
-      // happen after check of MSG_REPORT_EXITING above, since region server
-      // will send us one of these messages after it gets MSG_REGIONSERVER_STOP
-      return new HMsg[]{new HMsg(HMsg.MSG_REGIONSERVER_STOP)};
-    }
-
-    HServerInfo storedInfo = serversToServerInfo.get(serverName);
-    if (storedInfo == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("received server report from unknown server: " + serverName);
-      }
-
-      // The HBaseMaster may have been restarted.
-      // Tell the RegionServer to start over and call regionServerStartup()
-
-      return new HMsg[]{new HMsg(HMsg.MSG_CALL_SERVER_STARTUP)};
-
-    } else if (storedInfo.getStartCode() != serverInfo.getStartCode()) {
-
-      // This state is reachable if:
-      //
-      // 1) RegionServer A started
-      // 2) RegionServer B started on the same machine, then 
-      //    clobbered A in regionServerStartup.
-      // 3) RegionServer A returns, expecting to work as usual.
-      //
-      // The answer is to ask A to shut down for good.
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("region server race condition detected: " + serverName);
-      }
-
-      synchronized (serversToServerInfo) {
-        cancelLease(serverName, serverLabel);
-        serversToServerInfo.notifyAll();
-      }
-      return new HMsg[]{new HMsg(HMsg.MSG_REGIONSERVER_STOP)};
-
-    } else {
-
-      // All's well.  Renew the server's lease.
-      // This will always succeed; otherwise, the fetch of serversToServerInfo
-      // would have failed above.
-
-      serverLeases.renewLease(serverLabel, serverLabel);
-
-      // Refresh the info object and the load information
-
-      serversToServerInfo.put(serverName, serverInfo);
-
-      HServerLoad load = serversToLoad.get(serverName);
-      if (load != null && !load.equals(serverInfo.getLoad())) {
-        // We have previous information about the load on this server
-        // and the load on this server has changed
-
-        Set<String> servers = loadToServers.get(load);
-
-        // Note that servers should never be null because loadToServers
-        // and serversToLoad are manipulated in pairs
-
-        servers.remove(serverName);
-        loadToServers.put(load, servers);
-      }
-
-      // Set the current load information
-
-      load = serverInfo.getLoad();
-      serversToLoad.put(serverName, load);
-      Set<String> servers = loadToServers.get(load);
-      if (servers == null) {
-        servers = new HashSet<String>();
-      }
-      servers.add(serverName);
-      loadToServers.put(load, servers);
-
-      // Next, process messages for this server
-      return processMsgs(serverInfo, msgs);
-    }
-  }
-
-  /** Cancel a server's lease and update its load information */
-  private boolean cancelLease(final String serverName, final long serverLabel) {
-    boolean leaseCancelled = false;
-    HServerInfo info = serversToServerInfo.remove(serverName);
-    if (info != null) {
-      // Only cancel lease and update load information once.
-      // This method can be called a couple of times during shutdown.
-      if (rootRegionLocation.get() != null &&
-          info.getServerAddress().equals(rootRegionLocation.get())) {
-        unassignRootRegion();
-      }
-      LOG.info("Cancelling lease for " + serverName);
-      serverLeases.cancelLease(serverLabel, serverLabel);
-      leaseCancelled = true;
-
-      // update load information
-      HServerLoad load = serversToLoad.remove(serverName);
-      if (load != null) {
-        Set<String> servers = loadToServers.get(load);
-        if (servers != null) {
-          servers.remove(serverName);
-          loadToServers.put(load, servers);
-        }
-      }
-    }
-    return leaseCancelled;
-  }
-
-  /** 
-   * Process all the incoming messages from a server that's contacted us.
-   * 
-   * Note that we never need to update the server's load information because
-   * that has already been done in regionServerReport.
-   */
-  private HMsg[] processMsgs(HServerInfo info, HMsg incomingMsgs[])
-  throws IOException {
-    
-    ArrayList<HMsg> returnMsgs = new ArrayList<HMsg>();
-    String serverName = info.getServerAddress().toString();
-    HashMap<Text, HRegionInfo> regionsToKill = null;
-    regionsToKill = killList.remove(serverName);
-
-    // Get reports on what the RegionServer did.
-
-    for (int i = 0; i < incomingMsgs.length; i++) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Received " + incomingMsgs[i].toString() + " from " +
-            serverName);
-      }
-      HRegionInfo region = incomingMsgs[i].getRegionInfo();
-
-      switch (incomingMsgs[i].getMsg()) {
-
-      case HMsg.MSG_REPORT_PROCESS_OPEN:
-        synchronized (unassignedRegions) {
-          // Region server has acknowledged request to open region.
-          // Extend region open time by max region open time.
-          unassignedRegions.put(region,
-              System.currentTimeMillis() + this.maxRegionOpenTime);
-        }
-        break;
-        
-      case HMsg.MSG_REPORT_OPEN:
-        boolean duplicateAssignment = false;
-        synchronized (unassignedRegions) {
-          if (unassignedRegions.remove(region) == null) {
-            if (region.getRegionName().compareTo(
-                HRegionInfo.rootRegionInfo.getRegionName()) == 0) {
-              // Root region
-              HServerAddress rootServer = rootRegionLocation.get();
-              if (rootServer != null) {
-                if (rootServer.toString().compareTo(serverName) == 0) {
-                  // A duplicate open report from the correct server
-                  break;
-                }
-                // We received an open report on the root region, but it is
-                // assigned to a different server
-                duplicateAssignment = true;
-              }
-            } else {
-              // Not root region. If it is not a pending region, then we are
-              // going to treat it as a duplicate assignment
-              if (pendingRegions.contains(region.getRegionName())) {
-                // A duplicate report from the correct server
-                break;
-              }
-              // Although we can't tell for certain if this is a duplicate
-              // report from the correct server, we are going to treat it
-              // as such
-              duplicateAssignment = true;
-            }
-          }
-          if (duplicateAssignment) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("region server " + info.getServerAddress().toString()
-                  + " should not have opened region " + region.getRegionName());
-            }
-
-            // This Region should not have been opened.
-            // Ask the server to shut it down, but don't report it as closed.  
-            // Otherwise the HMaster will think the Region was closed on purpose, 
-            // and then try to reopen it elsewhere; that's not what we want.
-
-            returnMsgs.add(
-                new HMsg(HMsg.MSG_REGION_CLOSE_WITHOUT_REPORT, region)); 
-
-          } else {
-            LOG.info(info.getServerAddress().toString() + " serving " +
-                region.getRegionName());
-
-            if (region.getRegionName().compareTo(
-                HRegionInfo.rootRegionInfo.getRegionName()) == 0) {
-              // Store the Root Region location (in memory)
-              synchronized (rootRegionLocation) {
-                this.rootRegionLocation.set(
-                    new HServerAddress(info.getServerAddress()));
-                this.rootRegionLocation.notifyAll();
-              }
-            } else {
-              // Note that the table has been assigned and is waiting for the
-              // meta table to be updated.
-
-              pendingRegions.add(region.getRegionName());
-
-              // Queue up an update to note the region location.
-
-              try {
-                toDoQueue.put(new ProcessRegionOpen(info, region));
-              } catch (InterruptedException e) {
-                throw new RuntimeException(
-                    "Putting into toDoQueue was interrupted.", e);
-              }
-            } 
-          }
-        }
-        break;
-
-      case HMsg.MSG_REPORT_CLOSE:
-        LOG.info(info.getServerAddress().toString() + " no longer serving " +
-            region.getRegionName());
-
-        if (region.getRegionName().compareTo(
-            HRegionInfo.rootRegionInfo.getRegionName()) == 0) {
-
-          // Root region
-
-          if (region.isOffline()) {
-            // Can't proceed without root region. Shutdown.
-            LOG.fatal("root region is marked offline");
-            shutdown();
-          }
-          unassignRootRegion();
-
-        } else {
-          boolean reassignRegion = !region.isOffline();
-          boolean deleteRegion = false;
-
-          if (killedRegions.remove(region.getRegionName())) {
-            reassignRegion = false;
-          }
-
-          if (regionsToDelete.remove(region.getRegionName())) {
-            reassignRegion = false;
-            deleteRegion = true;
-          }
-
-          if (region.isMetaTable()) {
-            // Region is part of the meta table. Remove it from onlineMetaRegions
-            onlineMetaRegions.remove(region.getStartKey());
-          }
-
-          // NOTE: we cannot put the region into unassignedRegions as that
-          //       could create a race with the pending close if it gets 
-          //       reassigned before the close is processed.
-
-          unassignedRegions.remove(region);
-
-          try {
-            toDoQueue.put(new ProcessRegionClose(region, reassignRegion,
-                deleteRegion));
-
-          } catch (InterruptedException e) {
-            throw new RuntimeException(
-                "Putting into toDoQueue was interrupted.", e);
-          }
-        }
-        break;
-
-      case HMsg.MSG_REPORT_SPLIT:
-        // A region has split.
-
-        HRegionInfo newRegionA = incomingMsgs[++i].getRegionInfo();
-        unassignedRegions.put(newRegionA, ZERO_L);
-
-        HRegionInfo newRegionB = incomingMsgs[++i].getRegionInfo();
-        unassignedRegions.put(newRegionB, ZERO_L);
-
-        LOG.info("region " + region.getRegionName() +
-            " split. New regions are: " + newRegionA.getRegionName() + ", " +
-            newRegionB.getRegionName());
-
-        if (region.isMetaTable()) {
-          // A meta region has split.
-
-          onlineMetaRegions.remove(region.getStartKey());
-          numberOfMetaRegions.incrementAndGet();
-        }
-        break;
-
-      default:
-        throw new IOException(
-            "Impossible state during msg processing.  Instruction: " +
-            incomingMsgs[i].getMsg());
-      }
-    }
-
-    // Process the kill list
-
-    if (regionsToKill != null) {
-      for (HRegionInfo i: regionsToKill.values()) {
-        returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE, i));
-        killedRegions.add(i.getRegionName());
-      }
-    }
-
-    // Figure out what the RegionServer ought to do, and write back.
-    assignRegions(info, serverName, returnMsgs);
-    return returnMsgs.toArray(new HMsg[returnMsgs.size()]);
-  }
-  
-  /*
-   * Assigns regions to region servers attempting to balance the load across
-   * all region servers
-   * 
-   * @param info
-   * @param serverName
-   * @param returnMsgs
-   */
-  private void assignRegions(HServerInfo info, String serverName,
-      ArrayList<HMsg> returnMsgs) {
-    
-    synchronized (this.unassignedRegions) {
-      
-      // We need to hold a lock on assign attempts while we figure out what to
-      // do so that multiple threads do not execute this method in parallel
-      // resulting in assigning the same region to multiple servers.
-      
-      long now = System.currentTimeMillis();
-      Set<HRegionInfo> regionsToAssign = new HashSet<HRegionInfo>();
-      for (Map.Entry<HRegionInfo, Long> e: this.unassignedRegions.entrySet()) {
-        HRegionInfo i = e.getKey();
-        if (numberOfMetaRegions.get() != onlineMetaRegions.size() &&
-            !i.isMetaRegion()) {
-          // Can't assign user regions until all meta regions have been assigned
-          // and are on-line
-          continue;
-        }
-        long diff = now - e.getValue().longValue();
-        if (diff > this.maxRegionOpenTime) {
-          regionsToAssign.add(e.getKey());
-        }
-      }
-      int nRegionsToAssign = regionsToAssign.size();
-      if (nRegionsToAssign <= 0) {
-        // No regions to assign.  Return.
-        return;
-      }
-
-      if (this.serversToServerInfo.size() == 1) {
-        assignRegionsToOneServer(regionsToAssign, serverName, returnMsgs);
-        // Finished.  Return.
-        return;
-      }
-
-      // Multiple servers in play.
-      // We need to allocate regions only to the most lightly loaded servers.
-      HServerLoad thisServersLoad = info.getLoad();
-      int nregions = regionsPerServer(nRegionsToAssign, thisServersLoad);
-      nRegionsToAssign -= nregions;
-      if (nRegionsToAssign > 0) {
-        // We still have more regions to assign. See how many we can assign
-        // before this server becomes more heavily loaded than the next
-        // most heavily loaded server.
-        SortedMap<HServerLoad, Set<String>> heavyServers =
-          new TreeMap<HServerLoad, Set<String>>();
-        synchronized (this.loadToServers) {
-          heavyServers.putAll(this.loadToServers.tailMap(thisServersLoad));
-        }
-        int nservers = 0;
-        HServerLoad heavierLoad = null;
-        for (Map.Entry<HServerLoad, Set<String>> e : heavyServers.entrySet()) {
-          Set<String> servers = e.getValue();
-          nservers += servers.size();
-          if (e.getKey().compareTo(thisServersLoad) == 0) {
-            // This is the load factor of the server we are considering
-            nservers -= 1;
-            continue;
-          }
-
-          // If we get here, we are at the first load entry that is a
-          // heavier load than the server we are considering
-          heavierLoad = e.getKey();
-          break;
-        }
-
-        nregions = 0;
-        if (heavierLoad != null) {
-          // There is a more heavily loaded server.  Count how many regions
-          // this server can take before its load reaches that heavier load.
-          HServerLoad load =
-            new HServerLoad(thisServersLoad.getNumberOfRequests(),
-                thisServersLoad.getNumberOfRegions());
-          while (load.compareTo(heavierLoad) <= 0 &&
-              nregions < nRegionsToAssign) {
-            load.setNumberOfRegions(load.getNumberOfRegions() + 1);
-            nregions++;
-          }
-        }
-
-        if (nregions < nRegionsToAssign) {
-          // There are some more heavily loaded servers
-          // but we can't assign all the regions to this server.
-          if (nservers > 0) {
-            // There are other servers that can share the load.
-            // Split regions that need assignment across the servers.
-            nregions = (int) Math.ceil((1.0 * nRegionsToAssign)
-                / (1.0 * nservers));
-          } else {
-            // No other servers with same load.
-            // Split regions over all available servers
-            nregions = (int) Math.ceil((1.0 * nRegionsToAssign)
-                / (1.0 * serversToServerInfo.size()));
-          }
-        } else {
-          // Assign all regions to this server
-          nregions = nRegionsToAssign;
-        }
-
-        now = System.currentTimeMillis();
-        for (HRegionInfo regionInfo: regionsToAssign) {
-          LOG.info("assigning region " + regionInfo.getRegionName() +
-              " to server " + serverName);
-          this.unassignedRegions.put(regionInfo, Long.valueOf(now));
-          returnMsgs.add(new HMsg(HMsg.MSG_REGION_OPEN, regionInfo));
-          if (--nregions <= 0) {
-            break;
-          }
-        }
-      }
-    }
-  }
-  
-  /*
-   * @param nRegionsToAssign
-   * @param thisServersLoad
-   * @return How many regions we can assign to more lightly loaded servers
-   */
-  private int regionsPerServer(final int nRegionsToAssign,
-      final HServerLoad thisServersLoad) {
-    
-    SortedMap<HServerLoad, Set<String>> lightServers =
-      new TreeMap<HServerLoad, Set<String>>();
-    
-    synchronized (this.loadToServers) {
-      lightServers.putAll(this.loadToServers.headMap(thisServersLoad));
-    }
-
-    int nRegions = 0;
-    for (Map.Entry<HServerLoad, Set<String>> e : lightServers.entrySet()) {
-      HServerLoad lightLoad = new HServerLoad(e.getKey().getNumberOfRequests(),
-          e.getKey().getNumberOfRegions());
-      do {
-        lightLoad.setNumberOfRegions(lightLoad.getNumberOfRegions() + 1);
-        nRegions += 1;
-      } while (lightLoad.compareTo(thisServersLoad) <= 0
-          && nRegions < nRegionsToAssign);
-
-      nRegions *= e.getValue().size();
-      if (nRegions >= nRegionsToAssign) {
-        break;
-      }
-    }
-    return nRegions;
-  }
-  
-  /*
-   * Assign all to the only server. An unlikely case but still possible.
-   * @param regionsToAssign
-   * @param serverName
-   * @param returnMsgs
-   */
-  private void assignRegionsToOneServer(final Set<HRegionInfo> regionsToAssign,
-      final String serverName, final ArrayList<HMsg> returnMsgs) {
-    long now = System.currentTimeMillis();
-    for (HRegionInfo regionInfo: regionsToAssign) {
-      LOG.info("assigning region " + regionInfo.getRegionName() +
-          " to the only server " + serverName);
-      this.unassignedRegions.put(regionInfo, Long.valueOf(now));
-      returnMsgs.add(new HMsg(HMsg.MSG_REGION_OPEN, regionInfo));
-    }
-  }
-  
-  /*
-   * Some internal classes to manage msg-passing and region server operations
-   */
-
-  private abstract class RegionServerOperation implements Delayed {
-    private long expire;
-
-    protected RegionServerOperation() {
-      // Set the future time at which we expect to be released from the
-      // DelayQueue we're inserted in on lease expiration.
-      this.expire = System.currentTimeMillis() + leaseTimeout / 2;
-    }
-    
-    /** {@inheritDoc} */
-    public long getDelay(TimeUnit unit) {
-      return unit.convert(this.expire - System.currentTimeMillis(),
-        TimeUnit.MILLISECONDS);
-    }
-    
-    /** {@inheritDoc} */
-    public int compareTo(Delayed o) {
-      long diff = getDelay(TimeUnit.MILLISECONDS) -
-        o.getDelay(TimeUnit.MILLISECONDS);
-      return diff < 0 ? -1 : diff > 0 ? 1 : 0;
-    }
-    
-    protected void requeue() {
-      this.expire = System.currentTimeMillis() + leaseTimeout / 2;
-      delayedToDoQueue.put(this);
-    }
-    
-    protected boolean rootAvailable() {
-      boolean available = true;
-      if (rootRegionLocation.get() == null) {
-        available = false;
-        requeue();
-      }
-      return available;
-    }
-
-    protected boolean metaTableAvailable() {
-      boolean available = true;
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("numberOfMetaRegions: " + numberOfMetaRegions.get() +
-            ", onlineMetaRegions.size(): " + onlineMetaRegions.size());
-      }
-      if (numberOfMetaRegions.get() != onlineMetaRegions.size()) {
-        // We can't proceed because not all of the meta regions are online.
-        // We can't block either because that would prevent the meta region
-        // online message from being processed. In order to prevent spinning
-        // in the run queue, put this request on the delay queue to give
-        // other threads the opportunity to get the meta regions on-line.
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Requeuing because not all meta regions are online");
-        }
-        available = false;
-        requeue();
-      }
-      return available;
-    }
-    
-    protected abstract boolean process() throws IOException;
-  }
-
-  /** 
-   * Instantiated when a server's lease has expired, meaning it has crashed.
-   * The region server's log file needs to be split up for each region it was
-   * serving, and the regions need to get reassigned.
-   */
-  private class ProcessServerShutdown extends RegionServerOperation {
-    private HServerAddress deadServer;
-    private String deadServerName;
-    private Path oldLogDir;
-    private boolean logSplit;
-    private boolean rootRescanned;
-
-    private class ToDoEntry {
-      boolean deleteRegion;
-      boolean regionOffline;
-      Text row;
-      HRegionInfo info;
-
-      ToDoEntry(Text row, HRegionInfo info) {
-        this.deleteRegion = false;
-        this.regionOffline = false;
-        this.row = row;
-        this.info = info;
-      }
-    }
-
-    /**
-     * @param serverInfo
-     */
-    public ProcessServerShutdown(HServerInfo serverInfo) {
-      super();
-      this.deadServer = serverInfo.getServerAddress();
-      this.deadServerName = this.deadServer.toString();
-      this.logSplit = false;
-      this.rootRescanned = false;
-      StringBuilder dirName = new StringBuilder("log_");
-      dirName.append(deadServer.getBindAddress());
-      dirName.append("_");
-      dirName.append(serverInfo.getStartCode());
-      dirName.append("_");
-      dirName.append(deadServer.getPort());
-      this.oldLogDir = new Path(rootdir, dirName.toString());
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public String toString() {
-      return "ProcessServerShutdown of " + this.deadServer.toString();
-    }
-
-    /** Finds regions that the dead region server was serving */
-    private void scanMetaRegion(HRegionInterface server, long scannerId,
-        Text regionName) throws IOException {
-
-      ArrayList<ToDoEntry> toDoList = new ArrayList<ToDoEntry>();
-      HashSet<HRegionInfo> regions = new HashSet<HRegionInfo>();
-
-      try {
-        while (true) {
-          HbaseMapWritable values = null;
-          try {
-            values = server.next(scannerId);
-          } catch (IOException e) {
-            LOG.error("Shutdown scanning of meta region",
-              RemoteExceptionHandler.checkIOException(e));
-            break;
-          }
-          if (values == null || values.size() == 0) {
-            break;
-          }
-          // TODO: Why does this have to be a sorted map?
-          RowMap rm = toRowMap(values);
-          Text row = rm.getRow();
-          SortedMap<Text, byte[]> map = rm.getMap();
-          if (LOG.isDebugEnabled() && row != null) {
-            LOG.debug("shutdown scanner looking at " + row.toString());
-          }
-
-          // Check the server name.  If it is null, be conservative and treat
-          // the region as though it had been on the shutdown server (it can
-          // be null because we may have missed edits in the hlog, since hdfs
-          // does not support write-append).
-          String serverName;
-          try {
-            serverName = Writables.bytesToString(map.get(COL_SERVER));
-          } catch (UnsupportedEncodingException e) {
-            LOG.error("Server name", e);
-            break;
-          }
-          if (serverName.length() > 0 &&
-              deadServerName.compareTo(serverName) != 0) {
-            // This isn't the server you're looking for - move along
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Server name " + serverName + " is not same as " +
-                  deadServerName + ": Passing");
-            }
-            continue;
-          }
-
-          // Bingo! Found it.
-          HRegionInfo info = getHRegionInfo(map);
-          if (info == null) {
-            continue;
-          }
-          LOG.info(info.getRegionName() + " was on shutdown server <" +
-              serverName + "> (or server is null). Marking unassigned in " +
-          "meta and clearing pendingRegions");
-
-          if (info.isMetaTable()) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("removing meta region " + info.getRegionName() +
-                  " from online meta regions");
-            }
-            onlineMetaRegions.remove(info.getStartKey());
-          }
-
-          ToDoEntry todo = new ToDoEntry(row, info);
-          toDoList.add(todo);
-
-          if (killList.containsKey(deadServerName)) {
-            HashMap<Text, HRegionInfo> regionsToKill =
-              new HashMap<Text, HRegionInfo>();
-            synchronized (killList) {
-              regionsToKill.putAll(killList.get(deadServerName));
-            }
-
-            if (regionsToKill.containsKey(info.getRegionName())) {
-              regionsToKill.remove(info.getRegionName());
-              killList.put(deadServerName, regionsToKill);
-              unassignedRegions.remove(info);
-              synchronized (regionsToDelete) {
-                if (regionsToDelete.contains(info.getRegionName())) {
-                  // Delete this region
-                  regionsToDelete.remove(info.getRegionName());
-                  todo.deleteRegion = true;
-                } else {
-                  // Mark region offline
-                  todo.regionOffline = true;
-                }
-              }
-            }
-
-          } else {
-            // Get region reassigned
-            regions.add(info);
-
-            // If it was pending, remove it; otherwise the stale entry will
-            // block the region from being reassigned.
-            pendingRegions.remove(info.getRegionName());
-          }
-        }
-      } finally {
-        if(scannerId != -1L) {
-          try {
-            server.close(scannerId);
-          } catch (IOException e) {
-            LOG.error("Closing scanner",
-              RemoteExceptionHandler.checkIOException(e));
-          }
-        }
-      }
-
-      // Update server in root/meta entries
-      for (ToDoEntry e: toDoList) {
-        if (e.deleteRegion) {
-          HRegion.removeRegionFromMETA(server, regionName, e.row);
-        } else if (e.regionOffline) {
-          HRegion.offlineRegionInMETA(server, regionName, e.info);
-        }
-      }
-
-      // Get regions reassigned
-      for (HRegionInfo info: regions) {
-        unassignedRegions.put(info, ZERO_L);
-      }
-    }
-
-    @Override
-    protected boolean process() throws IOException {
-      LOG.info("process shutdown of server " + deadServer + ": logSplit: " +
-          this.logSplit + ", rootRescanned: " + this.rootRescanned +
-          ", numberOfMetaRegions: " + numberOfMetaRegions.get() +
-          ", onlineMetaRegions.size(): " + onlineMetaRegions.size());
-
-      if (!logSplit) {
-        // Process the old log file
-        if (fs.exists(oldLogDir)) {
-          if (!splitLogLock.tryLock()) {
-            return false;
-          }
-          try {
-            HLog.splitLog(rootdir, oldLogDir, fs, conf);
-          } finally {
-            splitLogLock.unlock();
-          }
-        }
-        logSplit = true;
-      }
-
-      if (!rootAvailable()) {
-        // Return true so that worker does not put this request back on the
-        // toDoQueue.
-        // rootAvailable() has already put it on the delayedToDoQueue
-        return true;
-      }
-
-      if (!rootRescanned) {
-        // Scan the ROOT region
-
-        HRegionInterface server = null;
-        long scannerId = -1L;
-        for (int tries = 0; tries < numRetries; tries ++) {
-          if (closed.get()) {
-            return true;
-          }
-          server = connection.getHRegionConnection(rootRegionLocation.get());
-          scannerId = -1L;
-
-          try {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("process server shutdown scanning root region on " +
-                  rootRegionLocation.get().getBindAddress());
-            }
-            scannerId =
-              server.openScanner(HRegionInfo.rootRegionInfo.getRegionName(),
-                  COLUMN_FAMILY_ARRAY, EMPTY_START_ROW,
-                  System.currentTimeMillis(), null);
-            
-            scanMetaRegion(server, scannerId,
-                HRegionInfo.rootRegionInfo.getRegionName());
-            break;
-
-          } catch (IOException e) {
-            if (tries == numRetries - 1) {
-              throw RemoteExceptionHandler.checkIOException(e);
-            }
-          }
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("process server shutdown scanning root region on " +
-              rootRegionLocation.get().getBindAddress() + " finished " +
-              Thread.currentThread().getName());
-        }
-        rootRescanned = true;
-      }
-      
-      if (!metaTableAvailable()) {
-        // We can't proceed because not all meta regions are online.
-        // metaAvailable() has put this request on the delayedToDoQueue
-        // Return true so that worker does not put this on the toDoQueue
-        return true;
-      }
-
-      for (int tries = 0; tries < numRetries; tries++) {
-        try {
-          if (closed.get()) {
-            return true;
-          }
-          List<MetaRegion> regions = new ArrayList<MetaRegion>();
-          synchronized (onlineMetaRegions) {
-            regions.addAll(onlineMetaRegions.values());
-          }
-          for (MetaRegion r: regions) {
-            HRegionInterface server = null;
-            long scannerId = -1L;
-
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("process server shutdown scanning " +
-                  r.getRegionName() + " on " + r.getServer() + " " +
-                  Thread.currentThread().getName());
-            }
-            server = connection.getHRegionConnection(r.getServer());
-
-            scannerId =
-              server.openScanner(r.getRegionName(), COLUMN_FAMILY_ARRAY,
-                  EMPTY_START_ROW, System.currentTimeMillis(), null);
-
-            scanMetaRegion(server, scannerId, r.getRegionName());
-
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("process server shutdown finished scanning " +
-                  r.getRegionName() + " on " + r.getServer() + " " +
-                  Thread.currentThread().getName());
-            }
-          }
-          deadServers.remove(deadServerName);
-          break;
-
-        } catch (IOException e) {
-          if (tries == numRetries - 1) {
-            throw RemoteExceptionHandler.checkIOException(e);
-          }
-        }
-      }
-      return true;
-    }
-  }
-
-  /**
-   * Abstract base class for operations common to ProcessRegionClose and
-   * ProcessRegionOpen.
-   */
-  private abstract class ProcessRegionStatusChange
-    extends RegionServerOperation {
-
-    protected final boolean isMetaTable;
-    protected final HRegionInfo regionInfo;
-    private MetaRegion metaRegion;
-    protected Text metaRegionName;
-    
-    /**
-     * @param regionInfo
-     */
-    public ProcessRegionStatusChange(HRegionInfo regionInfo) {
-      super();
-      this.regionInfo = regionInfo;
-      this.isMetaTable = regionInfo.isMetaTable();
-      this.metaRegion = null;
-      this.metaRegionName = null;
-    }
-    
-    protected boolean metaRegionAvailable() {
-      boolean available = true;
-      if (isMetaTable) {
-        // This operation is for the meta table
-        if (!rootAvailable()) {
-          // But we can't proceed unless the root region is available
-          available = false;
-        }
-      } else {
-        if (!rootScanned || !metaTableAvailable()) {
-          // The root region has not been scanned or the meta table is not
-          // available so we can't proceed.
-          // Put the operation on the delayedToDoQueue
-          requeue();
-          available = false;
-        }
-      }
-      return available;
-    }
-    
-    protected HRegionInterface getMetaServer() throws IOException {
-      if (this.isMetaTable) {
-        this.metaRegionName = HRegionInfo.rootRegionInfo.getRegionName();
-      } else {
-        if (this.metaRegion == null) {
-          synchronized (onlineMetaRegions) {
-            metaRegion = onlineMetaRegions.size() == 1 ? 
-                onlineMetaRegions.get(onlineMetaRegions.firstKey()) :
-                  onlineMetaRegions.containsKey(regionInfo.getRegionName()) ?
-                      onlineMetaRegions.get(regionInfo.getRegionName()) :
-                        onlineMetaRegions.get(onlineMetaRegions.headMap(
-                            regionInfo.getRegionName()).lastKey());
-          }
-          this.metaRegionName = metaRegion.getRegionName();
-        }
-      }
-
-      HServerAddress server = null;
-      if (isMetaTable) {
-        server = rootRegionLocation.get();
-        
-      } else {
-        server = metaRegion.getServer();
-      }
-      return connection.getHRegionConnection(server);
-    }
-    
-  }
-  /**
-   * ProcessRegionClose is instantiated when a region server reports that it
-   * has closed a region.
-   */
-  private class ProcessRegionClose extends ProcessRegionStatusChange {
-    private boolean reassignRegion;
-    private boolean deleteRegion;
-
-    /**
-     * @param regionInfo
-     * @param reassignRegion
-     * @param deleteRegion
-     */
-    public ProcessRegionClose(HRegionInfo regionInfo, boolean reassignRegion,
-        boolean deleteRegion) {
-
-      super(regionInfo);
-      this.reassignRegion = reassignRegion;
-      this.deleteRegion = deleteRegion;
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public String toString() {
-      return "ProcessRegionClose of " + this.regionInfo.getRegionName();
-    }
-
-    @Override
-    protected boolean process() throws IOException {
-      for (int tries = 0; tries < numRetries; tries++) {
-        if (closed.get()) {
-          return true;
-        }
-        LOG.info("region closed: " + regionInfo.getRegionName());
-
-        // Mark the Region as unavailable in the appropriate meta table
-
-        if (!metaRegionAvailable()) {
-          // We can't proceed unless the meta region we are going to update
-          // is online. metaRegionAvailable() has put this operation on the
-          // delayedToDoQueue, so return true so the operation is not put 
-          // back on the toDoQueue
-          return true;
-        }
-
-        try {
-          if (deleteRegion) {
-            HRegion.removeRegionFromMETA(getMetaServer(), metaRegionName,
-              regionInfo.getRegionName());
-          } else {
-            HRegion.offlineRegionInMETA(getMetaServer(), metaRegionName,
-              regionInfo);
-          }
-          break;
-
-        } catch (IOException e) {
-          if (tries == numRetries - 1) {
-            throw RemoteExceptionHandler.checkIOException(e);
-          }
-          continue;
-        }
-      }
-
-      if (reassignRegion) {
-        LOG.info("reassign region: " + regionInfo.getRegionName());
-
-        unassignedRegions.put(regionInfo, ZERO_L);
-
-      } else if (deleteRegion) {
-        try {
-          HRegion.deleteRegion(fs, rootdir, regionInfo);
-        } catch (IOException e) {
-          e = RemoteExceptionHandler.checkIOException(e);
-          LOG.error("failed delete region " + regionInfo.getRegionName(), e);
-          throw e;
-        }
-      }
-      return true;
-    }
-  }
-
-  /** 
-   * ProcessRegionOpen is instantiated when a region server reports that it is
-   * serving a region. This applies to all meta and user regions except the 
-   * root region, which is handled specially.
-   */
-  private class ProcessRegionOpen extends ProcessRegionStatusChange {
-    private final HServerAddress serverAddress;
-    private final byte [] startCode;
-
-    /**
-     * @param info
-     * @param regionInfo
-     * @throws IOException
-     */
-    public ProcessRegionOpen(HServerInfo info, HRegionInfo regionInfo)
-    throws IOException {
-      super(regionInfo);
-      this.serverAddress = info.getServerAddress();
-      this.startCode = Writables.longToBytes(info.getStartCode());
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public String toString() {
-      return "PendingOpenOperation from " + serverAddress.toString();
-    }
-
-    @Override
-    protected boolean process() throws IOException {
-      for (int tries = 0; tries < numRetries; tries++) {
-        if (closed.get()) {
-          return true;
-        }
-        LOG.info(regionInfo.toString() + " open on " + 
-            this.serverAddress.toString());
-
-        if (!metaRegionAvailable()) {
-          // We can't proceed unless the meta region we are going to update
-          // is online. metaRegionAvailable() has put this operation on the
-          // delayedToDoQueue, so return true so the operation is not put 
-          // back on the toDoQueue
-          return true;
-        }
-
-        // Register the newly-available Region's location.
-        
-        HRegionInterface server = getMetaServer();
-        LOG.info("updating row " + regionInfo.getRegionName() + " in table " +
-          metaRegionName + " with startcode " +
-          Writables.bytesToLong(this.startCode) + " and server "+
-          serverAddress.toString());
-        try {
-          BatchUpdate b = new BatchUpdate(rand.nextLong());
-          long lockid = b.startUpdate(regionInfo.getRegionName());
-          b.put(lockid, COL_SERVER,
-            Writables.stringToBytes(serverAddress.toString()));
-          b.put(lockid, COL_STARTCODE, startCode);
-          server.batchUpdate(metaRegionName, System.currentTimeMillis(), b);
-          if (isMetaTable) {
-            // It's a meta region.
-            MetaRegion m = new MetaRegion(this.serverAddress,
-              this.regionInfo.getRegionName(), this.regionInfo.getStartKey());
-            if (!initialMetaScanComplete) {
-              // Put it on the queue to be scanned for the first time.
-              try {
-                LOG.debug("Adding " + m.toString() + " to regions to scan");
-                metaRegionsToScan.put(m);
-              } catch (InterruptedException e) {
-                throw new RuntimeException(
-                    "Putting into metaRegionsToScan was interrupted.", e);
-              }
-            } else {
-              // Add it to the online meta regions
-              LOG.debug("Adding to onlineMetaRegions: " + m.toString());
-              onlineMetaRegions.put(this.regionInfo.getStartKey(), m);
-            }
-          }
-          // If updated successfully, remove from pending list.
-          pendingRegions.remove(regionInfo.getRegionName());
-          break;
-        } catch (IOException e) {
-          if (tries == numRetries - 1) {
-            throw RemoteExceptionHandler.checkIOException(e);
-          }
-        }
-      }
-      return true;
-    }
-  }
-
-  /*
-   * HMasterInterface
-   */
-
-  /** {@inheritDoc} */
-  public boolean isMasterRunning() {
-    return !closed.get();
-  }
-
-  /** {@inheritDoc} */
-  public void shutdown() {
-    LOG.info("Cluster shutdown requested. Starting to quiesce servers");
-    this.shutdownRequested = true;
-  }
-
-  /** {@inheritDoc} */
-  public void createTable(HTableDescriptor desc)
-  throws IOException {
-    
-    if (!isMasterRunning()) {
-      throw new MasterNotRunningException();
-    }
-    HRegionInfo newRegion = new HRegionInfo(desc, null, null);
-
-    for (int tries = 0; tries < numRetries; tries++) {
-      try {
-        // We cannot access the meta regions until they have been assigned
-        // and scanned.  If we time out waiting, just shut down.
-        if (this.metaScannerThread.waitForMetaRegionsOrClose()) {
-          break;
-        }
-        createTable(newRegion);
-        LOG.info("created table " + desc.getName());
-        break;
-      
-      } catch (IOException e) {
-        if (tries == numRetries - 1) {
-          throw RemoteExceptionHandler.checkIOException(e);
-        }
-      }
-    }
-  }
-
-  private void createTable(final HRegionInfo newRegion) throws IOException {
-    Text tableName = newRegion.getTableDesc().getName();
-    if (tableInCreation.contains(tableName)) {
-      throw new TableExistsException("Table " + tableName +
-          " is already in the process of being created");
-    }
-    tableInCreation.add(tableName);
-    try {
-      // 1. Check to see if table already exists. Get meta region where
-      // table would sit should it exist. Open scanner on it. If a region
-      // for the table we want to create already exists, then table already
-      // created. Throw already-exists exception.
-      
-      MetaRegion m = null;
-      synchronized (onlineMetaRegions) {
-        m = (onlineMetaRegions.size() == 1 ?
-            onlineMetaRegions.get(onlineMetaRegions.firstKey()) : 
-              (onlineMetaRegions.containsKey(newRegion.getRegionName()) ?
-                  onlineMetaRegions.get(newRegion.getRegionName()) :
-                    onlineMetaRegions.get(onlineMetaRegions.headMap(
-                        newRegion.getTableDesc().getName()).lastKey())));
-      }
-          
-      Text metaRegionName = m.getRegionName();
-      HRegionInterface server = connection.getHRegionConnection(m.getServer());
-      long scannerid = server.openScanner(metaRegionName, COL_REGIONINFO_ARRAY,
-          tableName, System.currentTimeMillis(), null);
-      try {
-        HbaseMapWritable data = server.next(scannerid);
-            
-        // Test data and that the row for the data is for our table. If table
-        // does not exist, scanner will return row after where our table would
-        // be inserted if it exists so look for exact match on table name.
-            
-        if (data != null && data.size() > 0) {
-          for (Writable k: data.keySet()) {
-            if (HRegionInfo.getTableNameFromRegionName(
-                ((HStoreKey) k).getRow()).equals(tableName)) {
-          
-              // Then a region for this table already exists. Ergo table exists.
-                  
-              throw new TableExistsException(tableName.toString());
-            }
-          }
-        }
-            
-      } finally {
-        server.close(scannerid);
-      }
-
-      // 2. Create the HRegion
-          
-      HRegion region =
-        HRegion.createHRegion(newRegion, this.rootdir, this.conf);
-
-      // 3. Insert into meta
-          
-      HRegionInfo info = region.getRegionInfo();
-      Text regionName = region.getRegionName();
-      BatchUpdate b = new BatchUpdate(rand.nextLong());
-      long lockid = b.startUpdate(regionName);
-      b.put(lockid, COL_REGIONINFO, Writables.getBytes(info));
-      server.batchUpdate(metaRegionName, System.currentTimeMillis(), b);
-
-      // 4. Close the new region to flush it to disk.  Close its log file too.
-      
-      region.close();
-      region.getLog().closeAndDelete();
-
-      // 5. Get it assigned to a server
-
-      this.unassignedRegions.put(info, ZERO_L);
-
-    } finally {
-      tableInCreation.remove(newRegion.getTableDesc().getName());
-    }
-  }
-
-  /** {@inheritDoc} */
-  public void deleteTable(Text tableName) throws IOException {
-    new TableDelete(tableName).process();
-    LOG.info("deleted table: " + tableName);
-  }
-
-  /** {@inheritDoc} */
-  public void addColumn(Text tableName, HColumnDescriptor column)
-  throws IOException {
-    
-    new AddColumn(tableName, column).process();
-  }
-
-  /** {@inheritDoc} */
-  public void modifyColumn(Text tableName, Text columnName, 
-    HColumnDescriptor descriptor)
-  throws IOException {
-    new ModifyColumn(tableName, columnName, descriptor).process();
-  }
-
-  /** {@inheritDoc} */
-  public void deleteColumn(Text tableName, Text columnName) throws IOException {
-    new DeleteColumn(tableName, HStoreKey.extractFamily(columnName)).process();
-  }
-
-  /** {@inheritDoc} */
-  public void enableTable(Text tableName) throws IOException {
-    new ChangeTableState(tableName, true).process();
-  }
-
-  /** {@inheritDoc} */
-  public void disableTable(Text tableName) throws IOException {
-    new ChangeTableState(tableName, false).process();
-  }
-
-  /** {@inheritDoc} */
-  public HServerAddress findRootRegion() {
-    return rootRegionLocation.get();
-  }
-
-  /*
-   * Helper classes for HMasterInterface
-   */
-
-  private abstract class TableOperation {
-    private Set<MetaRegion> metaRegions;
-    protected Text tableName;
-    protected Set<HRegionInfo> unservedRegions;
-
-    protected TableOperation(Text tableName) throws IOException {
-      if (!isMasterRunning()) {
-        throw new MasterNotRunningException();
-      }
-
-      this.metaRegions = new HashSet<MetaRegion>();
-      this.tableName = tableName;
-      this.unservedRegions = new HashSet<HRegionInfo>();
-
-      // We cannot access the meta regions until they have been assigned
-      // and scanned.
-
-      if (metaScannerThread.waitForMetaRegionsOrClose()) {
-        throw new MasterNotRunningException(); // We're shutting down. Forget it.
-      }
-
-      Text firstMetaRegion = null;
-      synchronized (onlineMetaRegions) {
-        if (onlineMetaRegions.size() == 1) {
-          firstMetaRegion = onlineMetaRegions.firstKey();
-
-        } else if (onlineMetaRegions.containsKey(tableName)) {
-          firstMetaRegion = tableName;
-
-        } else {
-          firstMetaRegion = onlineMetaRegions.headMap(tableName).lastKey();
-        }
-        this.metaRegions.addAll(onlineMetaRegions.tailMap(
-            firstMetaRegion).values());
-      }
-    }
-
-    void process() throws IOException {
-      for (int tries = 0; tries < numRetries; tries++) {
-        boolean tableExists = false;
-        try {
-          synchronized(metaScannerLock) {     // Prevent meta scanner from running
-            for (MetaRegion m: metaRegions) {
-
-              // Get a connection to a meta server
-
-              HRegionInterface server =
-                connection.getHRegionConnection(m.getServer());
-
-              // Open a scanner on the meta region
-
-              long scannerId =
-                server.openScanner(m.getRegionName(), COLUMN_FAMILY_ARRAY,
-                    tableName, System.currentTimeMillis(), null);
-
-              try {
-                while (true) {
-                  HbaseMapWritable values = server.next(scannerId);
-                  if(values == null || values.size() == 0) {
-                    break;
-                  }
-                  RowMap rm = toRowMap(values);
-                  SortedMap<Text, byte[]> map = rm.getMap();
-                  HRegionInfo info = getHRegionInfo(map);
-                  if (info == null) {
-                    throw new IOException(COL_REGIONINFO + " not found on " +
-                      rm.getRow());
-                  }
-                  String serverName = Writables.bytesToString(map.get(COL_SERVER));
-                  long startCode = Writables.bytesToLong(map.get(COL_STARTCODE));
-                  if (info.getTableDesc().getName().compareTo(tableName) > 0) {
-                    break; // Beyond any more entries for this table
-                  }
-
-                  tableExists = true;
-                  if (!isBeingServed(serverName, startCode)) {
-                    unservedRegions.add(info);
-                  }
-                  processScanItem(serverName, startCode, info);
-                } // while(true)
-              } finally {
-                if (scannerId != -1L) {
-                  try {
-                    server.close(scannerId);
-                  } catch (IOException e) {
-                    e = RemoteExceptionHandler.checkIOException(e);
-                    LOG.error("closing scanner", e);
-                  }
-                }
-                scannerId = -1L;
-              }
-
-              if (!tableExists) {
-                throw new IOException(tableName + " does not exist");
-              }
-
-              postProcessMeta(m, server);
-              unservedRegions.clear();
-
-            } // for(MetaRegion m:)
-          } // synchronized(metaScannerLock)
-
-        } catch (IOException e) {
-          if (tries == numRetries - 1) {
-            // No retries left
-            checkFileSystem();
-            throw RemoteExceptionHandler.checkIOException(e);
-          }
-          continue;
-        }
-        break;
-      } // for(tries...)
-    }
-
-    protected boolean isBeingServed(String serverName, long startCode) {
-      boolean result = false;
-      if (serverName != null && serverName.length() > 0 && startCode != -1L) {
-        HServerInfo s = serversToServerInfo.get(serverName);
-        result = s != null && s.getStartCode() == startCode;
-      }
-      return result;
-    }
-
-    protected boolean isEnabled(HRegionInfo info) {
-      return !info.isOffline();
-    }
-
-    protected abstract void processScanItem(String serverName, long startCode,
-        HRegionInfo info) throws IOException;
-
-    protected abstract void postProcessMeta(MetaRegion m,
-        HRegionInterface server) throws IOException;
-  }
-
-  /** Instantiated to enable or disable a table */
-  private class ChangeTableState extends TableOperation {
-    private boolean online;
-
-    protected Map<String, HashSet<HRegionInfo>> servedRegions =
-      new HashMap<String, HashSet<HRegionInfo>>();
-    
-    protected long lockid;
-
-    ChangeTableState(Text tableName, boolean onLine) throws IOException {
-      super(tableName);
-      this.online = onLine;
-    }
-
-    @Override
-    protected void processScanItem(String serverName, long startCode,
-        HRegionInfo info) {
-    
-      if (isBeingServed(serverName, startCode)) {
-        HashSet<HRegionInfo> regions = servedRegions.get(serverName);
-        if (regions == null) {
-          regions = new HashSet<HRegionInfo>();
-        }
-        regions.add(info);
-        servedRegions.put(serverName, regions);
-      }
-    }
-
-    @Override
-    protected void postProcessMeta(MetaRegion m, HRegionInterface server)
-      throws IOException {
-      
-      // Process regions not being served
-      
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("processing unserved regions");
-      }
-      for (HRegionInfo i: unservedRegions) {
-        if (i.isOffline() && i.isSplit()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Skipping region " + i.toString() + " because it is " +
-                "offline because it has been split");
-          }
-          continue;
-        }
-        
-        // Update meta table
-        
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("updating columns in row: " + i.getRegionName());
-        }
-
-        BatchUpdate b = new BatchUpdate(rand.nextLong());
-        lockid = b.startUpdate(i.getRegionName());
-        updateRegionInfo(b, i);
-        b.delete(lockid, COL_SERVER);
-        b.delete(lockid, COL_STARTCODE);
-        server.batchUpdate(m.getRegionName(), System.currentTimeMillis(), b);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("updated columns in row: " + i.getRegionName());
-        }
-
-        if (online) {                         // Bring offline regions on-line
-          if (!unassignedRegions.containsKey(i)) {
-            unassignedRegions.put(i, ZERO_L);
-          }
-
-        } else {                              // Prevent region from getting assigned.
-          unassignedRegions.remove(i);
-        }
-      }
-
-      // Process regions currently being served
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("processing regions currently being served");
-      }
-      for (Map.Entry<String, HashSet<HRegionInfo>> e: servedRegions.entrySet()) {
-        String serverName = e.getKey();
-        if (online) {
-          LOG.debug("Already online");
-          continue;                             // Already being served
-        }
-
-        // Cause regions being served to be taken off-line and disabled
-
-        HashMap<Text, HRegionInfo> localKillList =
-          new HashMap<Text, HRegionInfo>();
-        
-        synchronized (killList) {
-          HashMap<Text, HRegionInfo> killedRegions = killList.get(serverName);
-          if (killedRegions != null) {
-            localKillList.putAll(killedRegions);
-          }
-        }
-        for (HRegionInfo i: e.getValue()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("adding region " + i.getRegionName() +
-                " to local kill list");
-          }
-          localKillList.put(i.getRegionName(), i);
-        }
-        if (localKillList.size() > 0) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("inserted local kill list into kill list for server " +
-                serverName);
-          }
-          killList.put(serverName, localKillList);
-        }
-      }
-      servedRegions.clear();
-    }
-
-    protected void updateRegionInfo(final BatchUpdate b, final HRegionInfo i)
-      throws IOException {
-      
-      i.setOffline(!online);
-      b.put(lockid, COL_REGIONINFO, Writables.getBytes(i));
-    }
-  }
-
-  /** 
-   * Instantiated to delete a table
-   * Note that it extends ChangeTableState, which takes care of disabling
-   * the table.
-   */
-  private class TableDelete extends ChangeTableState {
-
-    TableDelete(Text tableName) throws IOException {
-      super(tableName, false);
-    }
-
-    @Override
-    protected void postProcessMeta(MetaRegion m, HRegionInterface server)
-      throws IOException {
-
-      // For regions that are being served, mark them for deletion      
-      
-      for (HashSet<HRegionInfo> s: servedRegions.values()) {
-        for (HRegionInfo i: s) {
-          regionsToDelete.add(i.getRegionName());
-        }
-      }
-
-      // Unserved regions we can delete now
-      
-      for (HRegionInfo i: unservedRegions) {
-        // Delete the region
-      
-        try {
-          HRegion.deleteRegion(fs, rootdir, i);
-        
-        } catch (IOException e) {
-          LOG.error("failed to delete region " + i.getRegionName(),
-            RemoteExceptionHandler.checkIOException(e));
-        }
-      }
-      super.postProcessMeta(m, server);
-    }
-
-    @Override
-    protected void updateRegionInfo(BatchUpdate b,
-        @SuppressWarnings("unused") HRegionInfo info) {
-      for (int i = 0; i < ALL_META_COLUMNS.length; i++) {
-        // Be sure to clean all cells
-        b.delete(lockid, ALL_META_COLUMNS[i]);
-      }
-    }
-  }
-
-  private abstract class ColumnOperation extends TableOperation {
-    
-    protected ColumnOperation(Text tableName) throws IOException {
-      super(tableName);
-    }
-
-    @Override
-    protected void processScanItem(
-        @SuppressWarnings("unused") String serverName,
-        @SuppressWarnings("unused") long startCode,
-        final HRegionInfo info) throws IOException {
-      
-      if (isEnabled(info)) {
-        throw new TableNotDisabledException(tableName.toString());
-      }
-    }
-
-    protected void updateRegionInfo(HRegionInterface server, Text regionName,
-        HRegionInfo i) throws IOException {
-
-      BatchUpdate b = new BatchUpdate(rand.nextLong());
-      long lockid = b.startUpdate(i.getRegionName());
-      b.put(lockid, COL_REGIONINFO, Writables.getBytes(i));
-      server.batchUpdate(regionName, System.currentTimeMillis(), b);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("updated columns in row: " + i.getRegionName());
-      }
-    }
-  }
-
-  /** Instantiated to remove a column family from a table */
-  private class DeleteColumn extends ColumnOperation {
-    private Text columnName;
-
-    DeleteColumn(Text tableName, Text columnName) throws IOException {
-      super(tableName);
-      this.columnName = columnName;
-    }
-
-    @Override
-    protected void postProcessMeta(MetaRegion m, HRegionInterface server)
-      throws IOException {
-
-      Path tabledir = new Path(rootdir, tableName.toString());
-      for (HRegionInfo i: unservedRegions) {
-        i.getTableDesc().families().remove(columnName);
-        updateRegionInfo(server, m.getRegionName(), i);
-
-        // Delete the directories used by the column
-
-        String encodedName = i.getEncodedName();
-        fs.delete(HStoreFile.getMapDir(tabledir, encodedName, columnName));
-        fs.delete(HStoreFile.getInfoDir(tabledir, encodedName, columnName));
-      }
-    }
-  }
-
-  /** Instantiated to add a column family to a table */
-  private class AddColumn extends ColumnOperation {
-    private HColumnDescriptor newColumn;
-
-    AddColumn(Text tableName, HColumnDescriptor newColumn) throws IOException {
-      super(tableName);
-      this.newColumn = newColumn;
-    }
-
-    @Override
-    protected void postProcessMeta(MetaRegion m, HRegionInterface server)
-      throws IOException {
-
-      for (HRegionInfo i: unservedRegions) {
-
-        // All we need to do to add a column is add it to the table descriptor.
-        // When the region is brought on-line, it will find the column missing
-        // and create it.
-
-        i.getTableDesc().addFamily(newColumn);
-        updateRegionInfo(server, m.getRegionName(), i);
-      }
-    }
-  }
-
-  /** Instantiated to modify an existing column family on a table */
-  private class ModifyColumn extends ColumnOperation {
-    private HColumnDescriptor descriptor;
-    private Text columnName;
-    
-    ModifyColumn(Text tableName, Text columnName, HColumnDescriptor _descriptor) 
-      throws IOException {
-      super(tableName);
-      this.descriptor = _descriptor;
-      this.columnName = columnName;
-    }
-
-    @Override
-    protected void postProcessMeta(MetaRegion m, HRegionInterface server)
-      throws IOException {
-
-      for (HRegionInfo i: unservedRegions) {
-        // get the column families map from the table descriptor
-        Map<Text, HColumnDescriptor> families = i.getTableDesc().families();
-        
-        // If the table already has this column, store the new descriptor
-        // version; otherwise the request is an error.
-        if (families.get(columnName) != null) {
-          families.put(columnName, descriptor);
-          updateRegionInfo(server, m.getRegionName(), i);
-        } else {
-          throw new IOException("Column family '" + columnName +
-            "' doesn't exist, so cannot be modified.");
-        }
-      }
-    }
-  }
-
-
-  /*
-   * Managing leases
-   */
-
-  /** Instantiated to monitor the health of a region server */
-  private class ServerExpirer implements LeaseListener {
-    @SuppressWarnings("hiding")
-    private String server;
-
-    ServerExpirer(String server) {
-      this.server = server;
-    }
-
-    /** {@inheritDoc} */
-    public void leaseExpired() {
-      LOG.info(server + " lease expired");
-      // Remove the server from the known servers list and update load info
-      HServerInfo info = serversToServerInfo.remove(server);
-      if (info != null) {
-        HServerAddress root = rootRegionLocation.get();
-        if (root != null && root.equals(info.getServerAddress())) {
-          unassignRootRegion();
-        }
-        String serverName = info.getServerAddress().toString();
-        HServerLoad load = serversToLoad.remove(serverName);
-        if (load != null) {
-          Set<String> servers = loadToServers.get(load);
-          if (servers != null) {
-            servers.remove(serverName);
-            loadToServers.put(load, servers);
-          }
-        }
-        deadServers.add(server);
-      }
-      synchronized (serversToServerInfo) {
-        serversToServerInfo.notifyAll();
-      }
-
-      // NOTE: If the server was serving the root region, we cannot reassign it
-      // here because the new server will start serving the root region before
-      // the ProcessServerShutdown operation has a chance to split the log file.
-      if (info != null) {
-        delayedToDoQueue.put(new ProcessServerShutdown(info));
-      }
-    }
-  }
-
-  /**
-   * @return Return configuration being used by this server.
-   */
-  public HBaseConfiguration getConfiguration() {
-    return this.conf;
-  }
-  
-  /*
-   * Data structure used to return results out of the toRowMap method.
-   */
-  private class RowMap {
-    final Text row;
-    final SortedMap<Text, byte[]> map;
-    
-    private RowMap(final Text r, final SortedMap<Text, byte[]> m) {
-      this.row = r;
-      this.map = m;
-    }
-
-    private Text getRow() {
-      return this.row;
-    }
-
-    private SortedMap<Text, byte[]> getMap() {
-      return this.map;
-    }
-  }
-  
-  /*
-   * Convert an HbaseMapWritable to a Map keyed by column.
-   * Utility method used when scanning meta regions.
-   * @param mw The MapWritable to convert.  Cannot be null.
-   * @return A RowMap wrapping the row and a SortedMap of its columns.
-   * TODO: The map could probably be a plain Map.
-   */
-  protected RowMap toRowMap(final HbaseMapWritable mw) {
-    if (mw == null) {
-      throw new IllegalArgumentException("Passed MapWritable cannot be null");
-    }
-    SortedMap<Text, byte[]> m = new TreeMap<Text, byte[]>();
-    Text row = null;
-    for (Map.Entry<Writable, Writable> e: mw.entrySet()) {
-      HStoreKey key = (HStoreKey) e.getKey();
-      Text thisRow = key.getRow();
-      if (row == null) {
-        row = thisRow;
-      } else {
-        if (!row.equals(thisRow)) {
-          LOG.error("Multiple rows in same scanner result set. firstRow=" +
-            row + ", currentRow=" + thisRow);
-        }
-      }
-      m.put(key.getColumn(), ((ImmutableBytesWritable) e.getValue()).get());
-    }
-    return new RowMap(row, m);
-  }
-  
-  /*
-   * Get HRegionInfo from passed META map of row values.
-   * Returns null if none found (and logs fact that expected COL_REGIONINFO
-   * was missing).  Utility method used by scanners of META tables.
-   * @param map Map to do lookup in.
-   * @return Null or found HRegionInfo.
-   * @throws IOException
-   */
-  protected HRegionInfo getHRegionInfo(final Map<Text, byte[]> map)
-  throws IOException {
-    byte [] bytes = map.get(COL_REGIONINFO);
-    if (bytes == null) {
-      LOG.warn(COL_REGIONINFO.toString() + " is empty; has keys: " +
-        map.keySet().toString());
-      return null;
-    }
-    return (HRegionInfo)Writables.getWritable(bytes, new HRegionInfo());
-  }
-
-  /*
-   * Main program
-   */
-
-  private static void printUsageAndExit() {
-    System.err.println("Usage: java org.apache.hbase.HMaster " +
-    "[--bind=hostname:port] start|stop");
-    System.exit(0);
-  }
-
-  protected static void doMain(String [] args,
-      Class<? extends HMaster> masterClass) {
-
-    if (args.length < 1) {
-      printUsageAndExit();
-    }
-
-    HBaseConfiguration conf = new HBaseConfiguration();
-
-    // Process command-line args. TODO: Better cmd-line processing
-    // (but hopefully something not as painful as cli options).
-
-    final String addressArgKey = "--bind=";
-    for (String cmd: args) {
-      if (cmd.startsWith(addressArgKey)) {
-        conf.set(MASTER_ADDRESS, cmd.substring(addressArgKey.length()));
-        continue;
-      }
-
-      if (cmd.equals("start")) {
-        try {
-          // If 'local', defer to LocalHBaseCluster instance.
-          if (LocalHBaseCluster.isLocal(conf)) {
-            (new LocalHBaseCluster(conf)).startup();
-          } else {
-            Constructor<? extends HMaster> c =
-              masterClass.getConstructor(HBaseConfiguration.class);
-            HMaster master = c.newInstance(conf);
-            master.start();
-          }
-        } catch (Throwable t) {
-          LOG.error( "Can not start master", t);
-          System.exit(-1);
-        }
-        break;
-      }
-
-      if (cmd.equals("stop")) {
-        try {
-          if (LocalHBaseCluster.isLocal(conf)) {
-            LocalHBaseCluster.doLocal(conf);
-          }
-          HBaseAdmin adm = new HBaseAdmin(conf);
-          adm.shutdown();
-        } catch (Throwable t) {
-          LOG.error( "Can not stop master", t);
-          System.exit(-1);
-        }
-        break;
-      }
-
-      // Print out usage if we get to here.
-      printUsageAndExit();
-    }
-  }
-  
-  /**
-   * Main program
-   * @param args
-   */
-  public static void main(String [] args) {
-    doMain(args, HMaster.class);
-  }
-}
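
The balancing arithmetic in assignRegions and regionsPerServer above is easier to follow in isolation. The sketch below is illustrative only: ServerLoad is a simplified stand-in for HServerLoad, AssignmentSketch is a hypothetical class, and a closed-form subtraction replaces the removed code's bump-one-region-at-a-time loop. The idea is the same, though: servers more lightly loaded than the reporting server absorb regions until their load catches up, and only the remainder is assigned to the reporting server.

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;
    import java.util.SortedMap;
    import java.util.TreeMap;

    /** Illustrative sketch only; ServerLoad is a simplified stand-in for HServerLoad. */
    public final class AssignmentSketch {

      static final class ServerLoad implements Comparable<ServerLoad> {
        final int regions;
        ServerLoad(int regions) { this.regions = regions; }
        public int compareTo(ServerLoad o) {
          return this.regions < o.regions ? -1 : this.regions > o.regions ? 1 : 0;
        }
      }

      /**
       * How many of nRegionsToAssign can go to servers that are more lightly
       * loaded than the reporting server?  Each lighter server can absorb
       * regions until its load reaches the reporting server's load.
       */
      static int regionsForLighterServers(int nRegionsToAssign, ServerLoad thisLoad,
          SortedMap<ServerLoad, Set<String>> loadToServers) {
        int nRegions = 0;
        for (Map.Entry<ServerLoad, Set<String>> e :
            loadToServers.headMap(thisLoad).entrySet()) {
          // Capacity of one server at this load level before it matches thisLoad.
          int perServer = thisLoad.regions - e.getKey().regions;
          nRegions += perServer * e.getValue().size();
          if (nRegions >= nRegionsToAssign) {
            return nRegionsToAssign;   // The lighter servers can take everything.
          }
        }
        return nRegions;               // The rest stays with the reporting server.
      }

      public static void main(String[] args) {
        SortedMap<ServerLoad, Set<String>> loadToServers =
          new TreeMap<ServerLoad, Set<String>>();
        loadToServers.put(new ServerLoad(2),
          new HashSet<String>(Arrays.asList("rs1", "rs2")));   // lightly loaded
        loadToServers.put(new ServerLoad(5),
          new HashSet<String>(Arrays.asList("rs3")));          // the reporting server
        // Two servers can each take 3 regions before matching a load of 5,
        // so 6 of the 10 regions go elsewhere and 4 stay on rs3.
        System.out.println(regionsForLighterServers(10, new ServerLoad(5), loadToServers));
      }
    }

In the removed regionsPerServer, the same count was produced by copying each lighter server's HServerLoad and incrementing its region count, one region at a time, until it passed the reporting server's load.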

+ 0 - 114
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMasterInterface.java

@@ -1,114 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.VersionedProtocol;
-
-import java.io.IOException;
-
-/**
- * Clients interact with the HMasterInterface to gain access to meta-level
- * HBase functionality, like finding an HRegionServer and creating/destroying
- * tables.
- */
-public interface HMasterInterface extends VersionedProtocol {
-  /**
-   * Interface version.
-   * Version was incremented to 2 when we brought the hadoop RPC local to hbase
-   * -- HADOOP-2495 and then to 3 when we changed the RPC to send codes instead
-   * of actual class names (HADOOP-2519).
-   */
-  public static final long versionID = 3L;
-
-  /** @return true if master is available */
-  public boolean isMasterRunning();
-  
-  // Admin tools would use these cmds
-
-  /**
-   * Creates a new table
-   * @param desc table descriptor
-   * @throws IOException
-   */
-  public void createTable(HTableDescriptor desc) throws IOException;
-
-  /**
-   * Deletes a table
-   * @param tableName
-   * @throws IOException
-   */
-  public void deleteTable(Text tableName) throws IOException;
-  
-  /**
-   * Adds a column to the specified table
-   * @param tableName
-   * @param column column descriptor
-   * @throws IOException
-   */
-  public void addColumn(Text tableName, HColumnDescriptor column) throws IOException;
-
-  /**
-   * Modifies an existing column on the specified table
-   * @param tableName
-   * @param columnName name of the column to edit
-   * @param descriptor new column descriptor
-   * @throws IOException
-   */
-  public void modifyColumn(Text tableName, Text columnName, 
-    HColumnDescriptor descriptor) 
-  throws IOException;
-
-
-  /**
-   * Deletes a column from the specified table
-   * @param tableName
-   * @param columnName
-   * @throws IOException
-   */
-  public void deleteColumn(Text tableName, Text columnName) throws IOException;
-  
-  /**
-   * Puts the table on-line (only needed if table has been previously taken offline)
-   * @param tableName
-   * @throws IOException
-   */
-  public void enableTable(Text tableName) throws IOException;
-  
-  /**
-   * Take table offline
-   * 
-   * @param tableName
-   * @throws IOException
-   */
-  public void disableTable(Text tableName) throws IOException;
-  
-  /**
-   * Shutdown an HBase cluster.
-   * @throws IOException
-   */
-  public void shutdown() throws IOException;
-
-  /**
-   * Get the location of the root region
-   * @return address of server that serves the root region
-   */
-  public HServerAddress findRootRegion();
-}
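
HMasterInterface is the whole administrative surface a client sees. The sketch below strings together only the methods declared above to add a column family. How the proxy is obtained is outside this interface, so master, tableName and newFamily are assumed to come from elsewhere, and the requirement that the table be disabled before a schema change is itself an assumption here.

    // Hedged sketch using only methods declared in HMasterInterface; 'master',
    // 'tableName' and 'newFamily' are assumed to be obtained elsewhere
    // (imports: org.apache.hadoop.io.Text, java.io.IOException).
    void addFamily(HMasterInterface master, Text tableName,
        HColumnDescriptor newFamily) throws IOException {
      if (!master.isMasterRunning()) {
        throw new IOException("HMaster is not running");
      }
      master.disableTable(tableName);   // assumed: schema edits need the table offline
      master.addColumn(tableName, newFamily);
      master.enableTable(tableName);    // bring the table back on-line
    }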

+ 0 - 55
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMasterRegionInterface.java

@@ -1,55 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.io.HbaseMapWritable;
-import org.apache.hadoop.ipc.VersionedProtocol;
-
-/**
- * HRegionServers interact with the HMasterRegionInterface to report on local 
- * goings-on and to obtain data-handling instructions from the HMaster.
- */
-public interface HMasterRegionInterface extends VersionedProtocol {
-  /** Interface version number */
-  public static final long versionID = 1L;
-  
-  /**
-   * Called when a region server first starts
-   * @param info
-   * @throws IOException
-   * @return Configuration for the regionserver to use: e.g. filesystem,
-   * hbase rootdir, etc.
-   */
-  public HbaseMapWritable regionServerStartup(HServerInfo info) throws IOException;
-  
-  /**
-   * Called to renew lease, tell master what the region server is doing and to
-   * receive new instructions from the master
-   * 
-   * @param info server's address and start code
-   * @param msgs things the region server wants to tell the master
-   * @return instructions from the master to the region server
-   * @throws IOException
-   */
-  public HMsg[] regionServerReport(HServerInfo info, HMsg msgs[])
-  throws IOException;
-}
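
Per the javadoc above, a region server calls regionServerStartup() once and then reports in periodically. A rough sketch of that heartbeat loop follows; the HServerInfo setup, the handling of returned HMsg instructions, and the 3-second interval are all illustrative assumptions.

    // Rough sketch of the heartbeat loop implied by HMasterRegionInterface.
    // 'master' and 'serverInfo' are assumed to be set up elsewhere; the
    // 3-second sleep stands in for the configured report interval.
    void serveMaster(HMasterRegionInterface master, HServerInfo serverInfo)
        throws IOException, InterruptedException {
      HbaseMapWritable startupConf = master.regionServerStartup(serverInfo);
      // ... apply startupConf (filesystem, hbase rootdir, etc.) ...
      HMsg[] outbound = new HMsg[0];
      while (true) {
        HMsg[] instructions = master.regionServerReport(serverInfo, outbound);
        // ... act on instructions (open/close regions) and refill outbound ...
        outbound = new HMsg[0];
        Thread.sleep(3000);
      }
    }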

+ 0 - 401
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java

@@ -1,401 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.NoSuchElementException;
-import java.util.Random;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.Text;
-
-import org.apache.hadoop.hbase.io.BatchUpdate;
-
-/** 
- * A non-instantiable class that has a static method capable of compacting
- * a table by merging adjacent regions that have grown too small.
- */
-class HMerge implements HConstants {
-  static final Log LOG = LogFactory.getLog(HMerge.class);
-  static final Random rand = new Random();
-  
-  private HMerge() {
-    // Not instantiable
-  }
-  
-  /**
-   * Scans the table and merges two adjacent regions if they are small. This
-   * only happens when a lot of rows are deleted.
-   * 
-   * When merging the META region, the HBase instance must be offline.
-   * When merging a normal table, the HBase instance must be online, but the
-   * table must be disabled. 
-   * 
-   * @param conf        - configuration object for HBase
-   * @param fs          - FileSystem where regions reside
-   * @param tableName   - Table to be compacted
-   * @throws IOException
-   */
-  public static void merge(HBaseConfiguration conf, FileSystem fs,
-      Text tableName) throws IOException {
-    
-    HConnection connection = HConnectionManager.getConnection(conf);
-    boolean masterIsRunning = connection.isMasterRunning();
-    HConnectionManager.deleteConnection(conf);
-    if(tableName.equals(META_TABLE_NAME)) {
-      if(masterIsRunning) {
-        throw new IllegalStateException(
-            "Can not compact META table if instance is on-line");
-      }
-      new OfflineMerger(conf, fs).process();
-
-    } else {
-      if(!masterIsRunning) {
-        throw new IllegalStateException(
-            "HBase instance must be running to merge a normal table");
-      }
-      new OnlineMerger(conf, fs, tableName).process();
-    }
-  }
-
-  private static abstract class Merger {
-    protected final HBaseConfiguration conf;
-    protected final FileSystem fs;
-    protected final Path tabledir;
-    protected final HLog hlog;
-    private final long maxFilesize;
-
-    
-    protected Merger(HBaseConfiguration conf, FileSystem fs, Text tableName)
-        throws IOException {
-      
-      this.conf = conf;
-      this.fs = fs;
-      this.maxFilesize =
-        conf.getLong("hbase.hregion.max.filesize", DEFAULT_MAX_FILE_SIZE);
-
-      this.tabledir = new Path(
-          fs.makeQualified(new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR))),
-          tableName.toString()
-      );
-      Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
-          HREGION_LOGDIR_NAME);
-      this.hlog =
-        new HLog(fs, logdir, conf, null);
-    }
-    
-    void process() throws IOException {
-      try {
-        for(HRegionInfo[] regionsToMerge = next();
-        regionsToMerge != null;
-        regionsToMerge = next()) {
-
-          if (!merge(regionsToMerge)) {
-            return;
-          }
-        }
-      } finally {
-        try {
-          hlog.closeAndDelete();
-          
-        } catch(IOException e) {
-          LOG.error(e);
-        }
-      }
-    }
-    
-    private boolean merge(HRegionInfo[] info) throws IOException {
-      if(info.length < 2) {
-        LOG.info("only one region - nothing to merge");
-        return false;
-      }
-      
-      HRegion currentRegion = null;
-      long currentSize = 0;
-      HRegion nextRegion = null;
-      long nextSize = 0;
-      Text midKey = new Text();
-      for (int i = 0; i < info.length - 1; i++) {
-        if (currentRegion == null) {
-          currentRegion =
-            new HRegion(tabledir, hlog, fs, conf, info[i], null, null);
-          currentSize = currentRegion.largestHStore(midKey).getAggregate();
-        }
-        nextRegion =
-          new HRegion(tabledir, hlog, fs, conf, info[i + 1], null, null);
-
-        nextSize = nextRegion.largestHStore(midKey).getAggregate();
-
-        if ((currentSize + nextSize) <= (maxFilesize / 2)) {
-          // We merge two adjacent regions if their total size is less than
-          // one half of the desired maximum size
-
-          LOG.info("merging regions " + currentRegion.getRegionName()
-              + " and " + nextRegion.getRegionName());
-
-          HRegion mergedRegion = HRegion.closeAndMerge(currentRegion, nextRegion);
-
-          updateMeta(currentRegion.getRegionName(), nextRegion.getRegionName(),
-              mergedRegion);
-
-          break;
-        }
-        LOG.info("not merging regions " + currentRegion.getRegionName()
-            + " and " + nextRegion.getRegionName());
-
-        currentRegion.close();
-        currentRegion = nextRegion;
-        currentSize = nextSize;
-      }
-      if(currentRegion != null) {
-        currentRegion.close();
-      }
-      return true;
-    }
-    
-    protected abstract HRegionInfo[] next() throws IOException;
-    
-    protected abstract void updateMeta(Text oldRegion1, Text oldRegion2,
-        HRegion newRegion) throws IOException;
-    
-  }
-
-  /** Instantiated to compact a normal user table */
-  private static class OnlineMerger extends Merger {
-    private final Text tableName;
-    private final HTable table;
-    private final HScannerInterface metaScanner;
-    private HRegionInfo latestRegion;
-    
-    OnlineMerger(HBaseConfiguration conf, FileSystem fs, Text tableName)
-    throws IOException {
-      
-      super(conf, fs, tableName);
-      this.tableName = tableName;
-      this.table = new HTable(conf, META_TABLE_NAME);
-      this.metaScanner = table.obtainScanner(COL_REGIONINFO_ARRAY, tableName);
-      this.latestRegion = null;
-    }
-    
-    private HRegionInfo nextRegion() throws IOException {
-      try {
-        HStoreKey key = new HStoreKey();
-        TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-        if (! metaScanner.next(key, results)) {
-          return null;
-        }
-        byte[] bytes = results.get(COL_REGIONINFO);
-        if (bytes == null || bytes.length == 0) {
-          throw new NoSuchElementException("meta region entry missing "
-              + COL_REGIONINFO);
-        }
-        HRegionInfo region =
-          (HRegionInfo) Writables.getWritable(bytes, new HRegionInfo());
-
-        if (!region.getTableDesc().getName().equals(tableName)) {
-          return null;
-        }
-        
-        if (!region.isOffline()) {
-          throw new TableNotDisabledException("region " + region.getRegionName()
-              + " is not disabled");
-        }
-        return region;
-        
-      } catch (IOException e) {
-        e = RemoteExceptionHandler.checkIOException(e);
-        LOG.error("meta scanner error", e);
-        try {
-          metaScanner.close();
-          
-        } catch (IOException ex) {
-          ex = RemoteExceptionHandler.checkIOException(ex);
-          LOG.error("error closing scanner", ex);
-        }
-        throw e;
-      }
-    }
-
-    @Override
-    protected HRegionInfo[] next() throws IOException {
-      List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
-      if(latestRegion == null) {
-        latestRegion = nextRegion();
-      }
-      if(latestRegion != null) {
-        regions.add(latestRegion);
-      }
-      latestRegion = nextRegion();
-      if(latestRegion != null) {
-        regions.add(latestRegion);
-      }
-      return regions.toArray(new HRegionInfo[regions.size()]);
-    }
-
-    @Override
-    protected void updateMeta(Text oldRegion1, Text oldRegion2,
-        HRegion newRegion) throws IOException {
-      Text[] regionsToDelete = {
-          oldRegion1,
-          oldRegion2
-      };
-      for(int r = 0; r < regionsToDelete.length; r++) {
-        if(regionsToDelete[r].equals(latestRegion.getRegionName())) {
-          latestRegion = null;
-        }
-        long lockid = -1L;
-        try {
-          lockid = table.startUpdate(regionsToDelete[r]);
-          table.delete(lockid, COL_REGIONINFO);
-          table.delete(lockid, COL_SERVER);
-          table.delete(lockid, COL_STARTCODE);
-          table.delete(lockid, COL_SPLITA);
-          table.delete(lockid, COL_SPLITB);
-          table.commit(lockid);
-          lockid = -1L;
-
-          if(LOG.isDebugEnabled()) {
-            LOG.debug("updated columns in row: " + regionsToDelete[r]);
-          }
-        } finally {
-          if(lockid != -1L) {
-            table.abort(lockid);
-          }
-        }
-      }
-      newRegion.getRegionInfo().setOffline(true);
-      long lockid = -1L;
-      try {
-        lockid = table.startUpdate(newRegion.getRegionName());
-        table.put(lockid, COL_REGIONINFO,
-            Writables.getBytes(newRegion.getRegionInfo()));
-        table.commit(lockid);
-        lockid = -1L;
-
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("updated columns in row: "
-              + newRegion.getRegionName());
-        }
-      } finally {
-        if(lockid != -1L) {
-          table.abort(lockid);
-        }
-      }
-    }
-  }
-
-  /** Instantiated to compact the meta region */
-  private static class OfflineMerger extends Merger {
-    private final List<HRegionInfo> metaRegions = new ArrayList<HRegionInfo>();
-    private final HRegion root;
-    
-    OfflineMerger(HBaseConfiguration conf, FileSystem fs)
-        throws IOException {
-      
-      super(conf, fs, META_TABLE_NAME);
-
-      Path rootTableDir = HTableDescriptor.getTableDir(
-          fs.makeQualified(new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR))),
-          ROOT_TABLE_NAME);
-
-      // Scan root region to find all the meta regions
-      
-      root = new HRegion(rootTableDir, hlog, fs, conf,
-          HRegionInfo.rootRegionInfo, null, null);
-
-      HScannerInterface rootScanner = root.getScanner(COL_REGIONINFO_ARRAY,
-          new Text(), System.currentTimeMillis(), null);
-      
-      try {
-        HStoreKey key = new HStoreKey();
-        TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-        while(rootScanner.next(key, results)) {
-          for(byte [] b: results.values()) {
-            HRegionInfo info = Writables.getHRegionInfoOrNull(b);
-            if (info != null) {
-              metaRegions.add(info);
-            }
-          }
-        }
-      } finally {
-        rootScanner.close();
-        try {
-          root.close();
-          
-        } catch(IOException e) {
-          LOG.error(e);
-        }
-      }
-    }
-
-    @Override
-    protected HRegionInfo[] next() {
-      HRegionInfo[] results = null;
-      if (metaRegions.size() > 0) {
-        results = metaRegions.toArray(new HRegionInfo[metaRegions.size()]);
-        metaRegions.clear();
-      }
-      return results;
-    }
-
-    @Override
-    protected void updateMeta(Text oldRegion1, Text oldRegion2,
-        HRegion newRegion) throws IOException {
-      
-      Text[] regionsToDelete = {
-          oldRegion1,
-          oldRegion2
-      };
-      for(int r = 0; r < regionsToDelete.length; r++) {
-        long lockid = Math.abs(rand.nextLong());
-        BatchUpdate b = new BatchUpdate(lockid);
-        lockid = b.startUpdate(regionsToDelete[r]);
-        b.delete(lockid, COL_REGIONINFO);
-        b.delete(lockid, COL_SERVER);
-        b.delete(lockid, COL_STARTCODE);
-        b.delete(lockid, COL_SPLITA);
-        b.delete(lockid, COL_SPLITB);
-        root.batchUpdate(System.currentTimeMillis(), b);
-        lockid = -1L;
-
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("updated columns in row: " + regionsToDelete[r]);
-        }
-      }
-      HRegionInfo newInfo = newRegion.getRegionInfo();
-      newInfo.setOffline(true);
-      long lockid = Math.abs(rand.nextLong());
-      BatchUpdate b = new BatchUpdate(lockid);
-      lockid = b.startUpdate(newRegion.getRegionName());
-      b.put(lockid, COL_REGIONINFO, Writables.getBytes(newInfo));
-      root.batchUpdate(System.currentTimeMillis(), b);
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("updated columns in row: " + newRegion.getRegionName());
-      }
-    }
-  }
-}
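
HMerge.merge() is the class's only entry point; per its javadoc the table must be disabled first (or the whole instance taken offline when compacting META). A minimal sketch of invoking it, assuming the default HBaseConfiguration picks up the cluster settings; note the class is package-private, so a real caller would need to live in org.apache.hadoop.hbase.

    // Minimal sketch, assuming hbase-site.xml supplies the cluster settings
    // and that the table has already been disabled.
    HBaseConfiguration conf = new HBaseConfiguration();
    FileSystem fs = FileSystem.get(conf);
    HMerge.merge(conf, fs, new Text("mytable"));   // "mytable" is a placeholder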

+ 0 - 214
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java

@@ -1,214 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-
-/*******************************************************************************
- * HMsg is for communicating instructions between the HMaster and the 
- * HRegionServers.
- ******************************************************************************/
-public class HMsg implements Writable {
-  
-  // Messages sent from master to region server
-  
-  /** Start serving the specified region */
-  public static final byte MSG_REGION_OPEN = 1;
-  
-  /** Stop serving the specified region */
-  public static final byte MSG_REGION_CLOSE = 2;
-
-  /** Region server is unknown to master. Restart */
-  public static final byte MSG_CALL_SERVER_STARTUP = 4;
-  
-  /** Master tells region server to stop */
-  public static final byte MSG_REGIONSERVER_STOP = 5;
-
-  /** Stop serving the specified region and don't report back that it's closed */
-  public static final byte MSG_REGION_CLOSE_WITHOUT_REPORT = 6;
-  
-  /** Stop serving user regions */
-  public static final byte MSG_REGIONSERVER_QUIESCE = 7;
-
-  // Messages sent from the region server to the master
-  
-  /** region server is now serving the specified region */
-  public static final byte MSG_REPORT_OPEN = 100;
-  
-  /** region server is no longer serving the specified region */
-  public static final byte MSG_REPORT_CLOSE = 101;
-  
-  /** region server is processing open request */
-  public static final byte MSG_REPORT_PROCESS_OPEN = 102;
-
-  /**
-   * region server split the region associated with this message.
-   * 
-   * note that this message is immediately followed by two MSG_REPORT_OPEN
-   * messages, one for each of the new regions resulting from the split
-   */
-  public static final byte MSG_REPORT_SPLIT = 103;
-  
-  /**
-   * region server is shutting down
-   * 
-   * note that this message is followed by MSG_REPORT_CLOSE messages for each
-   * region the region server was serving, unless it was told to quiesce.
-   */
-  public static final byte MSG_REPORT_EXITING = 104;
-  
-  /** region server has closed all user regions but is still serving meta regions */
-  public static final byte MSG_REPORT_QUIESCED = 105;
-
-  byte msg;
-  HRegionInfo info;
-
-  /** Default constructor. Used during deserialization */
-  public HMsg() {
-    this.info = new HRegionInfo();
-  }
-
-  /**
-   * Construct a message with an empty HRegionInfo
-   * 
-   * @param msg - message code
-   */
-  public HMsg(byte msg) {
-    this.msg = msg;
-    this.info = new HRegionInfo();
-  }
-  
-  /**
-   * Construct a message with the specified message code and HRegionInfo
-   * 
-   * @param msg - message code
-   * @param info - HRegionInfo
-   */
-  public HMsg(byte msg, HRegionInfo info) {
-    this.msg = msg;
-    this.info = info;
-  }
-
-  /**
-   * Accessor
-   * @return message code
-   */
-  public byte getMsg() {
-    return msg;
-  }
-
-  /**
-   * Accessor
-   * @return HRegionInfo
-   */
-  public HRegionInfo getRegionInfo() {
-    return info;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String toString() {
-    StringBuilder message = new StringBuilder();
-    switch(msg) {
-    case MSG_REGION_OPEN:
-      message.append("MSG_REGION_OPEN : ");
-      break;
-      
-    case MSG_REGION_CLOSE:
-      message.append("MSG_REGION_CLOSE : ");
-      break;
-      
-    case MSG_CALL_SERVER_STARTUP:
-      message.append("MSG_CALL_SERVER_STARTUP : ");
-      break;
-      
-    case MSG_REGIONSERVER_STOP:
-      message.append("MSG_REGIONSERVER_STOP : ");
-      break;
-      
-    case MSG_REGION_CLOSE_WITHOUT_REPORT:
-      message.append("MSG_REGION_CLOSE_WITHOUT_REPORT : ");
-      break;
-      
-    case MSG_REGIONSERVER_QUIESCE:
-      message.append("MSG_REGIONSERVER_QUIESCE : ");
-      break;
-      
-    case MSG_REPORT_PROCESS_OPEN:
-      message.append("MSG_REPORT_PROCESS_OPEN : ");
-      break;
-      
-    case MSG_REPORT_OPEN:
-      message.append("MSG_REPORT_OPEN : ");
-      break;
-      
-    case MSG_REPORT_CLOSE:
-      message.append("MSG_REPORT_CLOSE : ");
-      break;
-      
-    case MSG_REPORT_SPLIT:
-      message.append("MSG_REGION_SPLIT : ");
-      break;
-      
-    case MSG_REPORT_EXITING:
-      message.append("MSG_REPORT_EXITING : ");
-      break;
-      
-    case MSG_REPORT_QUIESCED:
-      message.append("MSG_REPORT_QUIESCED : ");
-      break;
-      
-    default:
-      message.append("unknown message code (");
-      message.append(msg);
-      message.append(") : ");
-      break;
-    }
-    message.append(info == null ? "null" : info.getRegionName());
-    return message.toString();
-  }
-  
-  //////////////////////////////////////////////////////////////////////////////
-  // Writable
-  //////////////////////////////////////////////////////////////////////////////
-
-  /**
-   * {@inheritDoc}
-   */
-  public void write(DataOutput out) throws IOException {
-     out.writeByte(msg);
-     info.write(out);
-   }
-
-  /**
-   * {@inheritDoc}
-   */
-  public void readFields(DataInput in) throws IOException {
-     this.msg = in.readByte();
-     this.info.readFields(in);
-   }
-}
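
HMsg travels over Hadoop RPC as a Writable: just the message byte followed by an HRegionInfo, as write()/readFields() show. A small round-trip sketch follows (regionInfo is assumed to exist already; IOException handling is omitted).

    // Sketch of the Writable round trip defined by write()/readFields() above
    // (java.io streams; 'regionInfo' is assumed to exist already).
    HMsg open = new HMsg(HMsg.MSG_REGION_OPEN, regionInfo);
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    open.write(new DataOutputStream(buf));

    HMsg copy = new HMsg();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
    // copy.getMsg() is MSG_REGION_OPEN and copy.getRegionInfo() matches the original.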

+ 0 - 1920
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java

@@ -1,1920 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.Map.Entry;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.filter.RowFilterInterface;
-import org.apache.hadoop.hbase.io.BatchOperation;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- * HRegion stores data for a certain region of a table.  It stores all columns
- * for each row. A given table consists of one or more HRegions.
- *
- * <p>We maintain multiple HStores for a single HRegion.
- * 
- * <p>An HStore is a set of rows with some column data; together,
- * they make up all the data for the rows.  
- *
- * <p>Each HRegion has a 'startKey' and 'endKey'.
- *   
- * <p>The first is inclusive, the second is exclusive (except for
- * the final region).  The endKey of region 0 is the same as
- * startKey for region 1 (if it exists).  The startKey for the
- * first region is null. The endKey for the final region is null.
- *
- * <p>Locking at the HRegion level serves only one purpose: preventing the
- * region from being closed (and consequently split) while other operations
- * are ongoing. Each row level operation obtains both a row lock and a region
- * read lock for the duration of the operation. While a scanner is being
- * constructed, getScanner holds a read lock. If the scanner is successfully
- * constructed, it holds a read lock until it is closed. A close takes out a
- * write lock and consequently will block for ongoing operations and will block
- * new operations from starting while the close is in progress.
- * 
- * <p>An HRegion is defined by its table and its key extent.
- * 
- * <p>It consists of at least one HStore.  The number of HStores should be 
- * configurable, so that data which is accessed together is stored in the same
- * HStore.  Right now, we approximate that by building a single HStore for 
- * each column family.  (This config info will be communicated via the 
- * tabledesc.)
- * 
- * <p>The HTableDescriptor contains metainfo about the HRegion's table.
- * regionName is a unique identifier for this HRegion. [startKey, endKey)
- * defines the keyspace for this HRegion.
- */
-public class HRegion implements HConstants {
-  static final String SPLITDIR = "splits";
-  static final String MERGEDIR = "merges";
-  static final Random rand = new Random();
-  static final Log LOG = LogFactory.getLog(HRegion.class);
-  final AtomicBoolean closed = new AtomicBoolean(false);
-
-  /**
-   * Merge two HRegions.  They must be available on the current
-   * HRegionServer. Returns a brand-new active HRegion, also
-   * running on the current HRegionServer.
-   */
-  static HRegion closeAndMerge(final HRegion srcA, final HRegion srcB)
-  throws IOException {
-
-    HRegion a = srcA;
-    HRegion b = srcB;
-
-    // Make sure that srcA comes first; important for key-ordering during
-    // write of the merged file.
-    FileSystem fs = srcA.getFilesystem();
-    if (srcA.getStartKey() == null) {
-      if (srcB.getStartKey() == null) {
-        throw new IOException("Cannot merge two regions with null start key");
-      }
-      // A's start key is null but B's isn't. Assume A comes before B
-    } else if ((srcB.getStartKey() == null)         // A is not null but B is
-        || (srcA.getStartKey().compareTo(srcB.getStartKey()) > 0)) { // A > B
-      a = srcB;
-      b = srcA;
-    }
-
-    if (! a.getEndKey().equals(b.getStartKey())) {
-      throw new IOException("Cannot merge non-adjacent regions");
-    }
-
-    HBaseConfiguration conf = a.getConf();
-    HTableDescriptor tabledesc = a.getTableDesc();
-    HLog log = a.getLog();
-    Path basedir = a.getBaseDir();
-    Text startKey = a.getStartKey();
-    Text endKey = b.getEndKey();
-    Path merges = new Path(a.getRegionDir(), MERGEDIR);
-    if(! fs.exists(merges)) {
-      fs.mkdirs(merges);
-    }
-
-    HRegionInfo newRegionInfo = new HRegionInfo(tabledesc, startKey, endKey);
-    Path newRegionDir =
-      HRegion.getRegionDir(merges, newRegionInfo.getEncodedName());
-    if(fs.exists(newRegionDir)) {
-      throw new IOException("Cannot merge; target file collision at " +
-          newRegionDir);
-    }
-
-    LOG.info("starting merge of regions: " + a.getRegionName() + " and " +
-        b.getRegionName() + " into new region " + newRegionInfo.toString());
-
-    Map<Text, List<HStoreFile>> byFamily =
-      new TreeMap<Text, List<HStoreFile>>();
-    byFamily = filesByFamily(byFamily, a.close());
-    byFamily = filesByFamily(byFamily, b.close());
-    for (Map.Entry<Text, List<HStoreFile>> es : byFamily.entrySet()) {
-      Text colFamily = es.getKey();
-      List<HStoreFile> srcFiles = es.getValue();
-      HStoreFile dst = new HStoreFile(conf, fs, merges,
-          newRegionInfo.getEncodedName(), colFamily, -1, null);
-      dst.mergeStoreFiles(srcFiles, fs, conf);
-    }
-
-    // Done
-    // Construction moves the merge files into place under region.
-    HRegion dstRegion = new HRegion(basedir, log, fs, conf, newRegionInfo,
-        newRegionDir, null);
-
-    // Get rid of merges directory
-
-    fs.delete(merges);
-
-    LOG.info("merge completed. New region is " + dstRegion.getRegionName());
-
-    return dstRegion;
-  }
-
-  /*
-   * Fills a map with a vector of store files keyed by column family. 
-   * @param byFamily Map to fill.
-   * @param storeFiles Store files to process.
-   * @return Returns <code>byFamily</code>
-   */
-  private static Map<Text, List<HStoreFile>> filesByFamily(
-      Map<Text, List<HStoreFile>> byFamily, List<HStoreFile> storeFiles) {
-    for(HStoreFile src: storeFiles) {
-      List<HStoreFile> v = byFamily.get(src.getColFamily());
-      if(v == null) {
-        v = new ArrayList<HStoreFile>();
-        byFamily.put(src.getColFamily(), v);
-      }
-      v.add(src);
-    }
-    return byFamily;
-  }
-
-  //////////////////////////////////////////////////////////////////////////////
-  // Members
-  //////////////////////////////////////////////////////////////////////////////
-
-  volatile Map<Text, Long> rowsToLocks = new ConcurrentHashMap<Text, Long>();
-  volatile Map<Long, Text> locksToRows = new ConcurrentHashMap<Long, Text>();
-  volatile Map<Text, HStore> stores = new ConcurrentHashMap<Text, HStore>();
-  volatile Map<Long, TreeMap<HStoreKey, byte []>> targetColumns =
-    new ConcurrentHashMap<Long, TreeMap<HStoreKey, byte []>>();
-
-  final AtomicLong memcacheSize = new AtomicLong(0);
-
-  final Path basedir;
-  final HLog log;
-  final FileSystem fs;
-  final HBaseConfiguration conf;
-  final HRegionInfo regionInfo;
-  final Path regiondir;
-  private final Path regionCompactionDir;
-
-  /*
-   * Data structure of write state flags used coordinating flushes,
-   * compactions and closes.
-   */
-  static class WriteState {
-    // Set while a memcache flush is happening.
-    volatile boolean flushing = false;
-    // Set while a compaction is running.
-    volatile boolean compacting = false;
-    // Gets set in close. If set, cannot compact or flush again.
-    volatile boolean writesEnabled = true;
-  }
-
-  volatile WriteState writestate = new WriteState();
-
-  final int memcacheFlushSize;
-  private volatile long lastFlushTime;
-  final CacheFlushListener flushListener;
-  final int blockingMemcacheSize;
-  protected final long threadWakeFrequency;
-  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
-  private final Integer updateLock = new Integer(0);
-  private final Integer splitLock = new Integer(0);
-  private final long desiredMaxFileSize;
-  private final long minSequenceId;
-  final AtomicInteger activeScannerCount = new AtomicInteger(0);
-
-  //////////////////////////////////////////////////////////////////////////////
-  // Constructor
-  //////////////////////////////////////////////////////////////////////////////
-
-  /**
-   * HRegion constructor.
-   *
-   * @param log The HLog is the outbound log for any updates to the HRegion
-   * (There's a single HLog for all the HRegions on a single HRegionServer.)
-   * The log file is a logfile from the previous execution that's
-   * custom-computed for this HRegion. The HRegionServer computes and sorts the
-   * appropriate log info for this HRegion. If there is a previous log file
-   * (implying that the HRegion has been written-to before), then read it from
-   * the supplied path.
-   * @param basedir qualified path of directory where region should be located,
-   * usually the table directory.
-   * @param fs is the filesystem.  
-   * @param conf is global configuration settings.
-   * @param regionInfo - HRegionInfo that describes the region
-   * @param initialFiles If there are initial files (implying that the HRegion
-   * is new), then read them from the supplied path.
-   * @param listener an object that implements CacheFlushListener or null
-   * 
-   * @throws IOException
-   */
-  public HRegion(Path basedir, HLog log, FileSystem fs, HBaseConfiguration conf, 
-      HRegionInfo regionInfo, Path initialFiles, CacheFlushListener listener)
-    throws IOException {
-    
-    this.basedir = basedir;
-    this.log = log;
-    this.fs = fs;
-    this.conf = conf;
-    this.regionInfo = regionInfo;
-    this.threadWakeFrequency = conf.getLong(THREAD_WAKE_FREQUENCY, 10 * 1000);
-    this.regiondir = new Path(basedir, this.regionInfo.getEncodedName());
-    Path oldLogFile = new Path(regiondir, HREGION_OLDLOGFILE_NAME);
-
-    this.regionCompactionDir =
-      new Path(getCompactionDir(basedir), this.regionInfo.getEncodedName());
-
-    // Move prefab HStore files into place (if any).  This picks up split files
-    // and any merges from splits and merges dirs.
-    if(initialFiles != null && fs.exists(initialFiles)) {
-      fs.rename(initialFiles, this.regiondir);
-    }
-
-    // Load in all the HStores.
-    long maxSeqId = -1;
-    for(HColumnDescriptor c :
-      this.regionInfo.getTableDesc().families().values()) {
-
-      HStore store = new HStore(this.basedir, this.regionInfo, c, this.fs,
-          oldLogFile, this.conf);
-
-      stores.put(c.getFamilyName(), store);
-
-      long storeSeqId = store.getMaxSequenceId();
-      if (storeSeqId > maxSeqId) {
-        maxSeqId = storeSeqId;
-      }
-    }
-    this.minSequenceId = maxSeqId;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Next sequence id for region " + regionInfo.getRegionName() +
-          " is " + this.minSequenceId);
-    }
-
-    // Get rid of any splits or merges that were lost in-progress
-    Path splits = new Path(regiondir, SPLITDIR);
-    if (fs.exists(splits)) {
-      fs.delete(splits);
-    }
-    Path merges = new Path(regiondir, MERGEDIR);
-    if (fs.exists(merges)) {
-      fs.delete(merges);
-    }
-
-    // By default, we flush the cache when 64M.
-    this.memcacheFlushSize = conf.getInt("hbase.hregion.memcache.flush.size",
-      1024*1024*64);
-    this.flushListener = listener;
-    this.blockingMemcacheSize = this.memcacheFlushSize *
-      conf.getInt("hbase.hregion.memcache.block.multiplier", 1);
-
-    // By default we split region if a file > DEFAULT_MAX_FILE_SIZE.
-    this.desiredMaxFileSize =
-      conf.getLong("hbase.hregion.max.filesize", DEFAULT_MAX_FILE_SIZE);
-
-    // HRegion is ready to go!
-    this.writestate.compacting = false;
-    this.lastFlushTime = System.currentTimeMillis();
-    LOG.info("region " + this.regionInfo.getRegionName() + " available");
-  }
-  
-  /**
-   * @return Updates to this region need to have a sequence id that is
-   * greater than or equal to this number.
-   */
-  long getMinSequenceId() {
-    return this.minSequenceId;
-  }
-
-  /** @return a HRegionInfo object for this region */
-  public HRegionInfo getRegionInfo() {
-    return this.regionInfo;
-  }
-
-  /** returns true if region is closed */
-  boolean isClosed() {
-    return this.closed.get();
-  }
-  
-  /**
-   * Close down this HRegion.  Flush the cache, shut down each HStore, don't 
-   * service any more calls.
-   *
-   * <p>This method could take some time to execute, so don't call it from a 
-   * time-sensitive thread.
-   * 
-   * @return Vector of all the storage files that the HRegion's component 
-   * HStores make use of.  It's a list of all HStoreFile objects. Returns null
-   * if the region is already closed or if it is judged that it should not close.
-   * 
-   * @throws IOException
-   */
-  public List<HStoreFile> close() throws IOException {
-    return close(false, null);
-  }
-  
-  /**
-   * Close down this HRegion.  Flush the cache unless abort parameter is true,
-   * Shut down each HStore, don't service any more calls.
-   *
-   * This method could take some time to execute, so don't call it from a 
-   * time-sensitive thread.
-   * 
-   * @param abort true if server is aborting (only during testing)
-   * @param listener call back to alert caller on close status
-   * @return Vector of all the storage files that the HRegion's component 
-   * HStores make use of.  It's a list of HStoreFile objects.  Can be null if
-   * we are not to close at this time or we are already closed.
-   * 
-   * @throws IOException
-   */
-  List<HStoreFile> close(boolean abort,
-      final RegionUnavailableListener listener) throws IOException {
-    Text regionName = this.regionInfo.getRegionName(); 
-    if (isClosed()) {
-      LOG.info("region " + regionName + " already closed");
-      return null;
-    }
-    synchronized (splitLock) {
-      synchronized (writestate) {
-        // Disable compacting and flushing by background threads for this
-        // region.
-        writestate.writesEnabled = false;
-        LOG.debug("compactions and cache flushes disabled for region " +
-          regionName);
-        while (writestate.compacting || writestate.flushing) {
-          LOG.debug("waiting for" +
-              (writestate.compacting ? " compaction" : "") +
-              (writestate.flushing ?
-                  (writestate.compacting ? "," : "") + " cache flush" :
-                    ""
-              ) + " to complete for region " + regionName
-          );
-          try {
-            writestate.wait();
-          } catch (InterruptedException iex) {
-            // continue
-          }
-        }
-      }
-      lock.writeLock().lock();
-      LOG.debug("new updates and scanners for region " + regionName +
-          " disabled");
-      
-      try {
-        // Wait for active scanners to finish. The write lock we hold will prevent
-        // new scanners from being created.
-        synchronized (activeScannerCount) {
-          while (activeScannerCount.get() != 0) {
-            LOG.debug("waiting for " + activeScannerCount.get() +
-                " scanners to finish");
-            try {
-              activeScannerCount.wait();
-
-            } catch (InterruptedException e) {
-              // continue
-            }
-          }
-        }
-        LOG.debug("no more active scanners for region " + regionName);
-
-        // Write lock means no more row locks can be given out.  Wait on
-        // outstanding row locks to come in before we close so we do not drop
-        // outstanding updates.
-        waitOnRowLocks();
-        LOG.debug("no more row locks outstanding on region " + regionName);
-        
-        if (listener != null) {
-          // If there is a listener, let them know that we have now
-          // acquired all the necessary locks and are starting to
-          // do the close
-          listener.closing(getRegionName());
-        }
-        
-        // Don't flush the cache if we are aborting
-        if (!abort) {
-          internalFlushcache(snapshotMemcaches());
-        }
-
-        List<HStoreFile> result = new ArrayList<HStoreFile>();
-        for (HStore store: stores.values()) {
-          result.addAll(store.close());
-        }
-        this.closed.set(true);
-        
-        if (listener != null) {
-          // If there is a listener, tell them that the region is now 
-          // closed.
-          listener.closed(getRegionName());
-        }
-        
-        LOG.info("closed " + this.regionInfo.getRegionName());
-        return result;
-      } finally {
-        lock.writeLock().unlock();
-      }
-    }
-  }
-  
-  //////////////////////////////////////////////////////////////////////////////
-  // HRegion accessors
-  //////////////////////////////////////////////////////////////////////////////
-
-  /** @return start key for region */
-  public Text getStartKey() {
-    return this.regionInfo.getStartKey();
-  }
-
-  /** @return end key for region */
-  public Text getEndKey() {
-    return this.regionInfo.getEndKey();
-  }
-
-  /** @return region id */
-  long getRegionId() {
-    return this.regionInfo.getRegionId();
-  }
-
-  /** @return region name */
-  public Text getRegionName() {
-    return this.regionInfo.getRegionName();
-  }
-
-  /** @return HTableDescriptor for this region */
-  HTableDescriptor getTableDesc() {
-    return this.regionInfo.getTableDesc();
-  }
-
-  /** @return HLog in use for this region */
-  public HLog getLog() {
-    return this.log;
-  }
-
-  /** @return Configuration object */
-  HBaseConfiguration getConf() {
-    return this.conf;
-  }
-
-  /** @return region directory Path */
-  Path getRegionDir() {
-    return this.regiondir;
-  }
-
-  /** @return FileSystem being used by this region */
-  FileSystem getFilesystem() {
-    return this.fs;
-  }
-
-  /** @return the last time the region was flushed */
-  public long getLastFlushTime() {
-    return this.lastFlushTime;
-  }
-  
-  //////////////////////////////////////////////////////////////////////////////
-  // HRegion maintenance.  
-  //
-  // These methods are meant to be called periodically by the HRegionServer for 
-  // upkeep.
-  //////////////////////////////////////////////////////////////////////////////
-
-  /**
-   * @return the size of the largest HStore.  Also returns whether the store is
-   * splitable or not (it is not splitable if the region has a store that
-   * contains a reference store file).
-   */
-  HStore.HStoreSize largestHStore(Text midkey) {
-    HStore.HStoreSize biggest = null;
-    boolean splitable = true;
-    for(HStore h: stores.values()) {
-      HStore.HStoreSize size = h.size(midkey);
-      // If we came across a reference down in the store, then propagate
-      // fact that region is not splitable.
-      if (splitable) {
-        splitable = size.splitable;
-      }
-      if (biggest == null) {
-        biggest = size;
-        continue;
-      }
-      if(size.getAggregate() > biggest.getAggregate()) { // Largest so far
-        biggest = size;
-      }
-    }
-    if (biggest != null) {
-      biggest.setSplitable(splitable);
-    }
-    return biggest;
-  }
-  
-  /*
-   * Split the HRegion to create two brand-new ones.  This also closes
-   * current HRegion.  Split should be fast since we don't rewrite store files
-   * but instead create new 'reference' store files that read off the top and
-   * bottom ranges of parent store files.
-   * @param listener May be null.
-   * @return two brand-new (and open) HRegions or null if a split is not needed
-   * @throws IOException
-   */
-  HRegion[] splitRegion(final RegionUnavailableListener listener)
-    throws IOException {
-    synchronized (splitLock) {
-      Text midKey = new Text();
-      if (closed.get() || !needsSplit(midKey)) {
-        return null;
-      }
-      Path splits = new Path(this.regiondir, SPLITDIR);
-      if(!this.fs.exists(splits)) {
-        this.fs.mkdirs(splits);
-      }
-      HRegionInfo regionAInfo = new HRegionInfo(this.regionInfo.getTableDesc(),
-          this.regionInfo.getStartKey(), midKey);
-      Path dirA = new Path(splits, regionAInfo.getEncodedName());
-      if(fs.exists(dirA)) {
-        throw new IOException("Cannot split; target file collision at " + dirA);
-      }
-      HRegionInfo regionBInfo = new HRegionInfo(this.regionInfo.getTableDesc(),
-          midKey, this.regionInfo.getEndKey());
-      Path dirB = new Path(splits, regionBInfo.getEncodedName());
-      if(this.fs.exists(dirB)) {
-        throw new IOException("Cannot split; target file collision at " + dirB);
-      }
-
-      // Now close the HRegion.  Close returns all store files or null if not
-      // supposed to close (? What to do in this case? Implement abort of close?)
-      // Close also does wait on outstanding rows and calls a flush just-in-case.
-      List<HStoreFile> hstoreFilesToSplit = close(false, listener);
-      if (hstoreFilesToSplit == null) {
-        LOG.warn("Close came back null (Implement abort of close?)");
-        throw new RuntimeException("close returned empty vector of HStoreFiles");
-      }
-
-      // Tell listener that region is now closed and that they can therefore
-      // clean up any outstanding references.
-      if (listener != null) {
-        listener.closed(this.getRegionName());
-      }
-
-      // Split each store file.
-      for(HStoreFile h: hstoreFilesToSplit) {
-        // A reference to the bottom half of the hsf store file.
-        HStoreFile.Reference aReference = new HStoreFile.Reference(
-            this.regionInfo.getEncodedName(), h.getFileId(),
-            new HStoreKey(midKey), HStoreFile.Range.bottom);
-        HStoreFile a = new HStoreFile(this.conf, fs, splits,
-            regionAInfo.getEncodedName(), h.getColFamily(), -1, aReference);
-        // Reference to top half of the hsf store file.
-        HStoreFile.Reference bReference = new HStoreFile.Reference(
-            this.regionInfo.getEncodedName(), h.getFileId(),
-            new HStoreKey(midKey), HStoreFile.Range.top);
-        HStoreFile b = new HStoreFile(this.conf, fs, splits,
-            regionBInfo.getEncodedName(), h.getColFamily(), -1, bReference);
-        h.splitStoreFile(a, b, this.fs);
-      }
-
-      // Done!
-      // Opening the region copies the splits files from the splits directory
-      // under each region.
-      HRegion regionA =
-        new HRegion(basedir, log, fs, conf, regionAInfo, dirA, null);
-      regionA.close();
-      HRegion regionB =
-        new HRegion(basedir, log, fs, conf, regionBInfo, dirB, null);
-      regionB.close();
-
-      // Cleanup
-      boolean deleted = fs.delete(splits);    // Get rid of splits directory
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Cleaned up " + splits.toString() + " " + deleted);
-      }
-      HRegion regions[] = new HRegion [] {regionA, regionB};
-      return regions;
-    }
-  }
-  
-  /*
-   * Iterates through all the HStores and finds the one with the largest
-   * MapFile size. If the size is greater than the (currently hard-coded)
-   * threshold, returns true indicating that the region should be split. The
-   * midKey for the largest MapFile is returned through the midKey parameter.
-   * It is possible for us to rule the region non-splitable even when it
-   * exceeds the configured size.  This happens if the region contains a
-   * reference file; a region with a reference file cannot be split.
-   * 
-   * Note that there is no need to do locking in this method because it calls
-   * largestHStore which does the necessary locking.
-   * 
-   * @param midKey midKey of the largest MapFile
-   * @return true if the region should be split. midKey is set by this method.
-   * Check it for a midKey value on return.
-   */
-  boolean needsSplit(Text midKey) {
-    HStore.HStoreSize biggest = largestHStore(midKey);
-    if (biggest == null || midKey.getLength() == 0 || 
-      (midKey.equals(getStartKey()) && midKey.equals(getEndKey())) ) {
-      return false;
-    }
-    boolean split = (biggest.getAggregate() >= this.desiredMaxFileSize);
-    if (split) {
-      if (!biggest.isSplitable()) {
-        LOG.warn("Region " + getRegionName().toString() +
-            " is NOT splitable though its aggregate size is " +
-            StringUtils.humanReadableInt(biggest.getAggregate()) +
-            " and desired size is " +
-            StringUtils.humanReadableInt(this.desiredMaxFileSize));
-        split = false;
-      } else {
-        LOG.info("Splitting " + getRegionName().toString() +
-            " because largest aggregate size is " +
-            StringUtils.humanReadableInt(biggest.getAggregate()) +
-            " and desired size is " +
-            StringUtils.humanReadableInt(this.desiredMaxFileSize));
-      }
-    }
-    return split;
-  }
-  
-  /**
-   * Only do a compaction if it is necessary.
-   * 
-   * @return true if a compaction was needed and completed, false otherwise
-   * @throws IOException
-   */
-  boolean compactIfNeeded() throws IOException {
-    boolean needsCompaction = false;
-    for (HStore store: stores.values()) {
-      if (store.needsCompaction()) {
-        needsCompaction = true;
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(store.toString() + " needs compaction");
-        }
-        break;
-      }
-    }
-    if (!needsCompaction) {
-      return false;
-    }
-    return compactStores();
-  }
-  
-  /*
-   * @param dir
-   * @return compaction directory for the passed in <code>dir</code>
-   */
-  static Path getCompactionDir(final Path dir) {
-   return new Path(dir, "compaction.dir");
-  }
-
-  /*
-   * Do preparation for pending compaction.
-   * Clean out any vestiges of previous failed compactions.
-   * @throws IOException
-   */
-  private void doRegionCompactionPrep() throws IOException {
-    doRegionCompactionCleanup();
-  }
-  
-  /*
-   * Removes the compaction directory for this Store.
-   * @throws IOException
-   */
-  private void doRegionCompactionCleanup() throws IOException {
-    if (this.fs.exists(this.regionCompactionDir)) {
-      this.fs.delete(this.regionCompactionDir);
-    }
-  }
-  
-  /**
-   * Compact all the stores.  This should be called periodically to make sure 
-   * the stores are kept manageable.  
-   *
-   * <p>This operation could block for a long time, so don't call it from a 
-   * time-sensitive thread.
-   *
-   * @return Returns TRUE if the compaction has completed.  FALSE, if the
-   * compaction was not carried out, because the HRegion is busy doing
-   * something else storage-intensive (like flushing the cache). The caller
-   * should check back later.
-   * 
-   * Note that no locking is necessary at this level because compaction only
-   * conflicts with a region split, and that cannot happen because the region
-   * server does them sequentially and not in parallel.
-   */
-  boolean compactStores() throws IOException {
-    if (this.closed.get()) {
-      return false;
-    }
-    try {
-      synchronized (writestate) {
-        if (!writestate.compacting && writestate.writesEnabled) {
-          writestate.compacting = true;
-        } else {
-          LOG.info("NOT compacting region " +
-              this.regionInfo.getRegionName().toString() + ": compacting=" +
-              writestate.compacting + ", writesEnabled=" +
-              writestate.writesEnabled);
-            return false;
-        }
-      }
-      long startTime = System.currentTimeMillis();
-      LOG.info("starting compaction on region " +
-        this.regionInfo.getRegionName().toString());
-      boolean status = true;
-      doRegionCompactionPrep();
-      for (HStore store : stores.values()) {
-        if(!store.compact()) {
-          status = false;
-        }
-      }
-      doRegionCompactionCleanup();
-      LOG.info("compaction completed on region " +
-        this.regionInfo.getRegionName().toString() + ". Took " +
-        StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));
-      return status;
-      
-    } finally {
-      synchronized (writestate) {
-        writestate.compacting = false;
-        writestate.notifyAll();
-      }
-    }
-  }
-
-  /**
-   * Flush the cache.
-   * 
-   * When this method is called the cache will be flushed unless:
-   * <ol>
-   *   <li>the cache is empty</li>
-   *   <li>the region is closed.</li>
-   *   <li>a flush is already in progress</li>
-   *   <li>writes are disabled</li>
-   * </ol>
-   *
-   * <p>This method may block for some time, so it should not be called from a 
-   * time-sensitive thread.
-   * 
-   * @return true if cache was flushed
-   * 
-   * @throws IOException
-   * @throws DroppedSnapshotException Thrown when replay of hlog is required
-   * because a Snapshot was not properly persisted.
-   */
-  boolean flushcache() throws IOException {
-    if (this.closed.get()) {
-      return false;
-    }
-    synchronized (writestate) {
-      if (!writestate.flushing && writestate.writesEnabled) {
-        writestate.flushing = true;
-      } else {
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("NOT flushing memcache for region " +
-              this.regionInfo.getRegionName() + ", flushing=" +
-              writestate.flushing + ", writesEnabled=" +
-              writestate.writesEnabled);
-        }
-        return false;  
-      }
-    }
-    try {
-      lock.readLock().lock();                      // Prevent splits and closes
-      try {
-        long startTime = -1;
-        synchronized (updateLock) {// Stop updates while we snapshot the memcaches
-          startTime = snapshotMemcaches();
-        }
-        return internalFlushcache(startTime);
-      } finally {
-        lock.readLock().unlock();
-      }
-    } finally {
-      synchronized (writestate) {
-        writestate.flushing = false;
-        writestate.notifyAll();
-      }
-    }
-  }
-
-  /*
-   * It is assumed that updates are blocked for the duration of this method
-   */
-  private long snapshotMemcaches() {
-    if (this.memcacheSize.get() == 0) {
-      return -1;
-    }
-    long startTime = System.currentTimeMillis();
-    
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Started memcache flush for region " +
-        this.regionInfo.getRegionName() + ". Size " +
-        StringUtils.humanReadableInt(this.memcacheSize.get()));
-    }
-
-    // We reset the aggregate memcache size here so that subsequent updates
-    // will add to the unflushed size
-    
-    this.memcacheSize.set(0L);
-    
-    for (HStore hstore: stores.values()) {
-      hstore.snapshotMemcache();
-    }
-    return startTime;
-  }
-
-  /**
-   * Flushing the cache is a little tricky. We have a lot of updates in the
-   * HMemcache, all of which have also been written to the log. We need to
-   * write those updates in the HMemcache out to disk, while being able to
-   * process reads/writes as much as possible during the flush operation. Also,
-   * the log has to state clearly the point in time at which the HMemcache was
-   * flushed. (That way, during recovery, we know when we can rely on the
-   * on-disk flushed structures and when we have to recover the HMemcache from
-   * the log.)
-   * 
-   * <p>So, we have a three-step process:
-   * 
-   * <ul><li>A. Flush the memcache to the on-disk stores, noting the current
-   * sequence ID for the log.</li>
-   * 
-   * <li>B. Write a FLUSHCACHE-COMPLETE message to the log, using the sequence
-   * ID that was current at the time of memcache-flush.</li>
-   * 
-   * <li>C. Get rid of the memcache structures that are now redundant, as
-   * they've been flushed to the on-disk HStores.</li>
-   * </ul>
-   * <p>This method is protected, but can be accessed via several public
-   * routes.
-   * 
-   * <p> This method may block for some time.
-   * 
-   * @param startTime the time the cache was snapshotted or -1 if a flush is
-   * not needed
-   * 
-   * @return true if the cache was flushed
-   * 
-   * @throws IOException
-   * @throws DroppedSnapshotException Thrown when replay of hlog is required
-   * because a Snapshot was not properly persisted.
-   */
-  private boolean internalFlushcache(long startTime) throws IOException {
-    if (startTime == -1) {
-      return false;
-    }
-
-    // We pass the log to the HMemcache, so we can lock down both
-    // simultaneously.  We only have to do this for a moment: we need the
-    // HMemcache state at the time of a known log sequence number. Since
-    // multiple HRegions may write to a single HLog, the sequence numbers may
-    // zoom past unless we lock it.
-    //
-    // When execution returns from snapshotMemcacheForLog() with a non-NULL
-    // value, the HMemcache will have a snapshot object stored that must be
-    // explicitly cleaned up using a call to deleteSnapshot() or by calling
-    // abort.
-    //
-    long sequenceId = log.startCacheFlush();
-
-    // Any failure from here on out will be catastrophic requiring server
-    // restart so hlog content can be replayed and put back into the memcache.
-    // Otherwise, the snapshot content, while backed up in the hlog, will not
-    // be part of the current running server's state.
-
-    try {
-      // A.  Flush memcache to all the HStores.
-      // Keep running vector of all store files that includes both old and the
-      // just-made new flush store file.
-      
-      for (HStore hstore: stores.values()) {
-        hstore.flushCache(sequenceId);
-      }
-    } catch (IOException e) {
-      // An exception here means that the snapshot was not persisted.
-      // The hlog needs to be replayed so its content is restored to memcache.
-      // Currently, only a server restart will do this.
-      this.log.abortCacheFlush();
-      throw new DroppedSnapshotException(e.getMessage());
-    }
-
-    // If we get to here, the HStores have been written. If we get an
-    // error in completeCacheFlush it will release the lock it is holding
-
-    // B.  Write a FLUSHCACHE-COMPLETE message to the log.
-    //     This tells future readers that the HStores were emitted correctly,
-    //     and that all updates to the log for this regionName that have lower 
-    //     log-sequence-ids can be safely ignored.
-    this.log.completeCacheFlush(this.regionInfo.getRegionName(),
-        regionInfo.getTableDesc().getName(), sequenceId);
-
-    // D. Finally notify anyone waiting on memcache to clear:
-    // e.g. checkResources().
-    synchronized (this) {
-      notifyAll();
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Finished memcache flush for region " +
-          this.regionInfo.getRegionName() + " in " +
-          (System.currentTimeMillis() - startTime) + "ms, sequenceid=" +
-          sequenceId);
-    }
-    return true;
-  }
-  
-  //////////////////////////////////////////////////////////////////////////////
-  // get() methods for client use.
-  //////////////////////////////////////////////////////////////////////////////
-
-  /**
-   * Fetch a single data item.
-   * @param row
-   * @param column
-   * @return column value
-   * @throws IOException
-   */
-  public byte [] get(Text row, Text column) throws IOException {
-    byte [][] results = get(row, column, Long.MAX_VALUE, 1);
-    return (results == null || results.length == 0)? null: results[0];
-  }
-  
-  /**
-   * Fetch multiple versions of a single data item
-   * 
-   * @param row
-   * @param column
-   * @param numVersions
-   * @return array of values one element per version
-   * @throws IOException
-   */
-  public byte [][] get(Text row, Text column, int numVersions) throws IOException {
-    return get(row, column, Long.MAX_VALUE, numVersions);
-  }
-
-  /**
-   * Fetch multiple versions of a single data item, with timestamp.
-   *
-   * @param row
-   * @param column
-   * @param timestamp
-   * @param numVersions
-   * @return array of values one element per version that matches the timestamp
-   * @throws IOException
-   */
-  public byte [][] get(Text row, Text column, long timestamp, int numVersions) 
-    throws IOException {
-    
-    if (this.closed.get()) {
-      throw new IOException("Region " + this.getRegionName().toString() +
-        " closed");
-    }
-
-    // Make sure this is a valid row and valid column
-    checkRow(row);
-    checkColumn(column);
-
-    // Don't need a row lock for a simple get
-    
-    HStoreKey key = new HStoreKey(row, column, timestamp);
-    HStore targetStore = stores.get(HStoreKey.extractFamily(column));
-    return targetStore.get(key, numVersions);
-  }
-
-  /**
-   * Fetch all the columns for the indicated row.
-   * Returns a TreeMap that maps column names to values.
-   *
-   * We should eventually use Bloom filters here, to reduce running time.  If 
-   * the database has many column families and is very sparse, then we could be 
-   * checking many files needlessly.  A small Bloom for each row would help us 
-   * determine which column groups are useful for that row.  That would let us 
-   * avoid a bunch of disk activity.
-   *
-   * @param row
-   * @return Map<columnName, byte[]> values
-   * @throws IOException
-   */
-  public Map<Text, byte []> getFull(Text row) throws IOException {
-    return getFull(row, HConstants.LATEST_TIMESTAMP);
-  }
-
-  /**
-   * Fetch all the columns for the indicated row at a specified timestamp.
-   * Returns a TreeMap that maps column names to values.
-   *
-   * We should eventually use Bloom filters here, to reduce running time.  If 
-   * the database has many column families and is very sparse, then we could be 
-   * checking many files needlessly.  A small Bloom for each row would help us 
-   * determine which column groups are useful for that row.  That would let us 
-   * avoid a bunch of disk activity.
-   *
-   * @param row
-   * @param ts
-   * @return Map<columnName, byte[]> values
-   * @throws IOException
-   */
-  public Map<Text, byte []> getFull(Text row, long ts) throws IOException {
-    HStoreKey key = new HStoreKey(row, ts);
-    obtainRowLock(row);
-    try {
-      TreeMap<Text, byte []> result = new TreeMap<Text, byte[]>();
-      for (Text colFamily: stores.keySet()) {
-        HStore targetStore = stores.get(colFamily);
-        targetStore.getFull(key, result);
-      }
-      return result;
-    } finally {
-      releaseRowLock(row);
-    }
-  }
-
-  /**
-   * Return all the data for the row that matches <i>row</i> exactly, 
-   * or the one that immediately precedes it, at or immediately before 
-   * <i>ts</i>.
-   * 
-   * @param row row key
-   * @param ts
-   * @return map of values
-   * @throws IOException
-   */
-  public Map<Text, byte[]> getClosestRowBefore(final Text row, final long ts)
-  throws IOException{
-    // look across all the HStores for this region and determine what the
-    // closest key is across all column families, since the data may be sparse
-    
-    HStoreKey key = null;
-    checkRow(row);
-    lock.readLock().lock();
-    try {
-      // examine each column family for the preceding or matching key
-      for(Text colFamily : stores.keySet()){
-        HStore store = stores.get(colFamily);
-
-        // get the closest key
-        Text closestKey = store.getRowKeyAtOrBefore(row, ts);
-        
-        // if it happens to be an exact match, we can stop looping
-        if (row.equals(closestKey)) {
-          key = new HStoreKey(closestKey, ts);
-          break;
-        }
-
-        // otherwise, we need to check if it's the max and move to the next
-        if (closestKey != null 
-          && (key == null || closestKey.compareTo(key.getRow()) > 0) ) {
-          key = new HStoreKey(closestKey, ts);
-        }
-      }
-
-      if (key == null) {
-        return null;
-      }
-          
-      // now that we've found our key, get the values
-      TreeMap<Text, byte []> result = new TreeMap<Text, byte[]>();
-      for (Text colFamily: stores.keySet()) {
-        HStore targetStore = stores.get(colFamily);
-        targetStore.getFull(key, result);
-      }
-      
-      return result;
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
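  // A minimal read sketch for the two row-fetch methods above, assuming an
  // already-open HRegion named "region" (hypothetical): getFull returns every
  // column of the exact row, while getClosestRowBefore falls back to the
  // nearest preceding row, the lookup pattern needed when searching META for
  // the region that covers an arbitrary key.
  static void rowReadSketch(HRegion region) throws IOException {
    Map<Text, byte []> exact = region.getFull(new Text("row-17"));
    Map<Text, byte []> atOrBefore =
      region.getClosestRowBefore(new Text("row-17"), System.currentTimeMillis());
    // Both maps are keyed by full column name, e.g. new Text("info:name").
  }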
-  /**
-   * Get <code>versions</code> keys matching the origin key's
-   * row/column/timestamp and those of an older vintage.
-   * @param origin Where to start searching.
-   * @param versions How many versions to return. Pass
-   * {@link HConstants#ALL_VERSIONS} to retrieve all.
-   * @return Ordered list of <code>versions</code> keys going from newest back.
-   * @throws IOException
-   */
-  private List<HStoreKey> getKeys(final HStoreKey origin, final int versions)
-    throws IOException {
-
-    List<HStoreKey> keys = null;
-    Text colFamily = HStoreKey.extractFamily(origin.getColumn());
-    HStore targetStore = stores.get(colFamily);
-    if (targetStore != null) {
-      // Pass versions without modification: the store's getKeys already
-      // accounts for the size of the passed <code>keys</code> list when counting.
-      keys = targetStore.getKeys(origin, versions);
-    }
-    return keys;
-  }
-  
-  /**
-   * Return an iterator that scans over the HRegion, returning the indicated 
-   * columns for only the rows that match the data filter.  This Iterator must
-   * be closed by the caller.
-   *
-   * @param cols columns to scan. If column name is a column family, all
-   * columns of the specified column family are returned.  It's also possible
-   * to pass a regex in the column qualifier. A column qualifier is judged to
-   * be a regex if it contains at least one of the following characters:
-   * <code>\+|^&*$[]]}{)(</code>.
-   * @param firstRow row which is the starting point of the scan
-   * @param timestamp only return rows whose timestamp is <= this value
-   * @param filter row filter
-   * @return HScannerInterface
-   * @throws IOException
-   */
-  public HScannerInterface getScanner(Text[] cols, Text firstRow,
-      long timestamp, RowFilterInterface filter) throws IOException {
-    lock.readLock().lock();
-    try {
-      if (this.closed.get()) {
-        throw new IOException("Region " + this.getRegionName().toString() +
-          " closed");
-      }
-      TreeSet<Text> families = new TreeSet<Text>();
-      for(int i = 0; i < cols.length; i++) {
-        families.add(HStoreKey.extractFamily(cols[i]));
-      }
-      List<HStore> storelist = new ArrayList<HStore>();
-      for (Text family: families) {
-        HStore s = stores.get(family);
-        if (s == null) {
-          continue;
-        }
-        storelist.add(stores.get(family));
-        
-      }
-      return new HScanner(cols, firstRow, timestamp,
-        storelist.toArray(new HStore [storelist.size()]), filter);
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
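  // A usage sketch for getScanner above, assuming an already-open HRegion
  // named "region" with a column family "info:" (both hypothetical). The
  // returned scanner must be closed by the caller.
  static void scanSketch(HRegion region) throws IOException {
    HScannerInterface scanner = region.getScanner(
        new Text[] { new Text("info:") },   // whole family; a regex qualifier would also work
        new Text(""),                       // start at the first row
        System.currentTimeMillis(),         // only cells at or before "now"
        null);                              // no RowFilterInterface
    try {
      HStoreKey key = new HStoreKey();
      SortedMap<Text, byte []> results = new TreeMap<Text, byte []>();
      while (scanner.next(key, results)) {
        // key.getRow() is the current row; results maps column name -> value
        results.clear();
      }
    } finally {
      scanner.close();
    }
  }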
-  //////////////////////////////////////////////////////////////////////////////
-  // set() methods for client use.
-  //////////////////////////////////////////////////////////////////////////////
-  
-  /**
-   * @param timestamp
-   * @param b
-   * @throws IOException
-   */
-  public void batchUpdate(long timestamp, BatchUpdate b)
-    throws IOException {
-    // Do a rough check that we have resources to accept a write.  The check is
-    // 'rough' in that between the resource check and the call to obtain a 
-    // read lock, resources may run out.  For now, the thought is that this
-    // will be extremely rare; we'll deal with it when it happens.
-    checkResources();
-
-    // We obtain a per-row lock, so other clients will block while one client
-    // performs an update. The lock is released by the client calling
-    // #commit or #abort, or if the HRegionServer lease on the lock expires.
-    // See HRegionServer#RegionListener for how lease expiration on the
-    // HRegionServer invokes HRegion#abort.
-    Text row = b.getRow();
-    long lockid = obtainRowLock(row);
-
-    long commitTime =
-      (timestamp == LATEST_TIMESTAMP) ? System.currentTimeMillis() : timestamp;
-      
-    try {
-      List<Text> deletes = null;
-      for (BatchOperation op: b) {
-        HStoreKey key = new HStoreKey(row, op.getColumn(), commitTime);
-        byte[] val = null;
-        if (op.isPut()) {
-          val = op.getValue();
-          if (HLogEdit.isDeleted(val)) {
-            throw new IOException("Cannot insert value: " + val);
-          }
-        } else {
-          if (timestamp == LATEST_TIMESTAMP) {
-            // Save off these deletes
-            if (deletes == null) {
-              deletes = new ArrayList<Text>();
-            }
-            deletes.add(op.getColumn());
-          } else {
-            val = HLogEdit.deleteBytes.get();
-          }
-        }
-        if (val != null) {
-          localput(lockid, key, val);
-        }
-      }
-      TreeMap<HStoreKey, byte[]> edits =
-        this.targetColumns.remove(Long.valueOf(lockid));
-      if (edits != null && edits.size() > 0) {
-        update(edits);
-      }
-      
-      if (deletes != null && deletes.size() > 0) {
-        // We have some LATEST_TIMESTAMP deletes to run.
-        for (Text column: deletes) {
-          deleteMultiple(row, column, LATEST_TIMESTAMP, 1);
-        }
-      }
-
-    } catch (IOException e) {
-      this.targetColumns.remove(Long.valueOf(lockid));
-      throw e;
-      
-    } finally {
-      releaseRowLock(row);
-    }
-  }
-  
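  // A minimal write-path sketch for batchUpdate above, assuming an already-open
  // HRegion named "region" whose table has a column family "info:".
  static void putSketch(HRegion region) throws IOException {
    BatchUpdate b = new BatchUpdate(new java.util.Random().nextLong());
    long lockid = b.startUpdate(new Text("row1"));
    b.put(lockid, new Text("info:name"), "value".getBytes());
    b.delete(lockid, new Text("info:stale"));   // deletes ride in the same batch
    region.batchUpdate(LATEST_TIMESTAMP, b);    // stamp edits with server time
    // Read the cell back with the single-value get defined earlier.
    byte [] value = region.get(new Text("row1"), new Text("info:name"));
  }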
-  /*
-   * Check whether there are resources to support an update.
-   * 
-   * For now, just checks memcache saturation.
-   * 
-   * Here we synchronize on HRegion, a broad-scoped lock.  That is appropriate
-   * given that we are deciding here whether this region is able to take on
-   * writes.  At the time of writing this is the only synchronized method,
-   * apart from the synchronize on 'this' inside internalFlushcache that sends
-   * the notify.
-   */
-  private synchronized void checkResources() {
-    boolean blocked = false;
-    
-    while (this.memcacheSize.get() >= this.blockingMemcacheSize) {
-      if (!blocked) {
-        LOG.info("Blocking updates for '" + Thread.currentThread().getName() +
-            "': Memcache size " +
-            StringUtils.humanReadableInt(this.memcacheSize.get()) +
-            " is >= than blocking " +
-            StringUtils.humanReadableInt(this.blockingMemcacheSize) + " size");
-      }
-
-      blocked = true;
-      try {
-        wait(threadWakeFrequency);
-      } catch (InterruptedException e) {
-        // continue;
-      }
-    }
-    if (blocked) {
-      LOG.info("Unblocking updates for region " + getRegionName() + " '" + 
-        Thread.currentThread().getName() + "'");
-    }
-  }
-  
-  /**
-   * Delete all cells of the same age as the passed timestamp or older.
-   * @param row
-   * @param column
-   * @param ts Delete all entries that have this timestamp or older
-   * @throws IOException
-   */
-  public void deleteAll(final Text row, final Text column, final long ts)
-    throws IOException {
-    
-    checkColumn(column);
-    obtainRowLock(row);
-    try {
-      deleteMultiple(row, column, ts, ALL_VERSIONS);
-    } finally {
-      releaseRowLock(row);
-    }
-  }
-
-  /**
-   * Delete all cells of the same age as the passed timestamp or older.
-   * @param row
-   * @param ts Delete all entries that have this timestamp or older
-   * @throws IOException
-   */
-  public void deleteAll(final Text row, final long ts)
-    throws IOException {
-    
-    obtainRowLock(row);    
-    
-    try {
-      for(Map.Entry<Text, HStore> store : stores.entrySet()){
-        List<HStoreKey> keys = store.getValue().getKeys(new HStoreKey(row, ts), ALL_VERSIONS);
-
-        TreeMap<HStoreKey, byte []> edits = new TreeMap<HStoreKey, byte []>();
-        for (HStoreKey key: keys) {
-          edits.put(key, HLogEdit.deleteBytes.get());
-        }
-        update(edits);
-      }
-    } finally {
-      releaseRowLock(row);
-    }
-  }
-
-  /**
-   * Delete all cells for a row with matching column family with timestamps
-   * less than or equal to <i>timestamp</i>.
-   *
-   * @param row The row to operate on
-   * @param family The column family to match
-   * @param timestamp Timestamp to match
-   * @throws IOException
-   */
-  public void deleteFamily(Text row, Text family, long timestamp)
-  throws IOException{
-    obtainRowLock(row);    
-    
-    try {
-      // find the HStore for the column family
-      HStore store = stores.get(HStoreKey.extractFamily(family));
-      // find all the keys that match our criteria
-      List<HStoreKey> keys = store.getKeys(new HStoreKey(row, timestamp), ALL_VERSIONS);
-      
-      // delete all the cells
-      TreeMap<HStoreKey, byte []> edits = new TreeMap<HStoreKey, byte []>();
-      for (HStoreKey key: keys) {
-        edits.put(key, HLogEdit.deleteBytes.get());
-      }
-      update(edits);
-    } finally {
-      releaseRowLock(row);
-    }
-  }
-  
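  // A short sketch of the delete flavors above, again assuming an open HRegion
  // named "region" with a column family "info:".
  static void deleteSketch(HRegion region) throws IOException {
    long now = System.currentTimeMillis();
    region.deleteAll(new Text("row1"), new Text("info:name"), now); // one column, versions <= now
    region.deleteAll(new Text("row1"), now);                        // every column of the row
    region.deleteFamily(new Text("row1"), new Text("info:"), now);  // every cell in the family
  }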
-  /**
-   * Delete one or many cells.
-   * Used to support {@link #deleteAll(Text, Text, long)} and deletion of
-   * latest cell.
-   * 
-   * @param row
-   * @param column
-   * @param ts Timestamp to start search on.
-   * @param versions How many versions to delete. Pass
-   * {@link HConstants#ALL_VERSIONS} to delete all.
-   * @throws IOException
-   */
-  private void deleteMultiple(final Text row, final Text column, final long ts,
-      final int versions) throws IOException {
-    
-    HStoreKey origin = new HStoreKey(row, column, ts);
-    List<HStoreKey> keys = getKeys(origin, versions);
-    if (keys.size() > 0) {
-      TreeMap<HStoreKey, byte []> edits = new TreeMap<HStoreKey, byte []>();
-      for (HStoreKey key: keys) {
-        edits.put(key, HLogEdit.deleteBytes.get());
-      }
-      update(edits);
-    }
-  }
-  
-  /**
-   * Private implementation.
-   * 
-   * localput() is used for both puts and deletes. We just place the values
-   * into a per-row pending area, until a commit() or abort() call is received.
-   * (Or until the user's write-lock expires.)
-   * 
-   * @param lockid
-   * @param key 
-   * @param val Value to enter into cell
-   * @throws IOException
-   */
-  private void localput(final long lockid, final HStoreKey key, 
-      final byte [] val) throws IOException {
-    
-    checkColumn(key.getColumn());
-    Long lid = Long.valueOf(lockid);
-    TreeMap<HStoreKey, byte []> targets = this.targetColumns.get(lid);
-    if (targets == null) {
-      targets = new TreeMap<HStoreKey, byte []>();
-      this.targetColumns.put(lid, targets);
-    }
-    targets.put(key, val);
-  }
-
-  /* 
-   * Add updates first to the hlog and then add values to memcache.
-   * Warning: the caller is assumed to hold the row lock for the row being
-   * updated.
-   * @param updatesByColumn Cell updates by column
-   * @throws IOException
-   */
-  private void update(final TreeMap<HStoreKey, byte []> updatesByColumn)
-    throws IOException {
-    
-    if (updatesByColumn == null || updatesByColumn.size() <= 0) {
-      return;
-    }
-    synchronized (updateLock) {                         // prevent a cache flush
-      this.log.append(regionInfo.getRegionName(),
-          regionInfo.getTableDesc().getName(), updatesByColumn);
-
-      long size = 0;
-      for (Map.Entry<HStoreKey, byte[]> e: updatesByColumn.entrySet()) {
-        HStoreKey key = e.getKey();
-        byte[] val = e.getValue();
-        size = this.memcacheSize.addAndGet(key.getSize() +
-            (val == null ? 0 : val.length));
-        stores.get(HStoreKey.extractFamily(key.getColumn())).add(key, val);
-      }
-      if (this.flushListener != null && size > this.memcacheFlushSize) {
-        // Request a cache flush
-        this.flushListener.flushRequested(this);
-      }
-    }
-  }
-
-  //////////////////////////////////////////////////////////////////////////////
-  // Support code
-  //////////////////////////////////////////////////////////////////////////////
-
-  /** Make sure this is a valid row for the HRegion */
-  private void checkRow(Text row) throws IOException {
-    if(((regionInfo.getStartKey().getLength() == 0)
-        || (regionInfo.getStartKey().compareTo(row) <= 0))
-        && ((regionInfo.getEndKey().getLength() == 0)
-            || (regionInfo.getEndKey().compareTo(row) > 0))) {
-      // all's well
-      
-    } else {
-      throw new WrongRegionException("Requested row out of range for " +
-        "HRegion " + regionInfo.getRegionName() + ", startKey='" +
-        regionInfo.getStartKey() + "', endKey='" + regionInfo.getEndKey() +
-        "', row='" + row + "'");
-    }
-  }
-  
-  /**
-   * Make sure this is a valid column for the current table
-   * @param columnName
-   * @throws IOException
-   */
-  private void checkColumn(Text columnName) throws IOException {
-    Text family = HStoreKey.extractFamily(columnName, true);
-    if (!regionInfo.getTableDesc().hasFamily(family)) {
-      throw new IOException("Requested column family " + family 
-          + " does not exist in HRegion " + regionInfo.getRegionName()
-          + " for table " + regionInfo.getTableDesc().getName());
-    }
-  }
-
-  /**
-   * Obtain a lock on the given row.  Blocks until success.
-   *
-   * I know it's strange to have two mappings:
-   * <pre>
-   *   ROWS  ==> LOCKS
-   * </pre>
-   * as well as
-   * <pre>
-   *   LOCKS ==> ROWS
-   * </pre>
-   *
-   * But it acts as a guard on the client; a miswritten client just can't
-   * submit the name of a row and start writing to it; it must know the correct
-   * lockid, which matches the lock list in memory.
-   * 
-   * <p>It would be more memory-efficient to assume a correctly-written client, 
-   * which maybe we'll do in the future.
-   * 
-   * @param row Name of row to lock.
-   * @throws IOException
-   * @return The id of the held lock.
-   */
-  long obtainRowLock(Text row) throws IOException {
-    checkRow(row);
-    lock.readLock().lock();
-    try {
-      if (this.closed.get()) {
-        throw new IOException("Region " + this.getRegionName().toString() +
-          " closed");
-      }
-      synchronized (rowsToLocks) {
-        while (rowsToLocks.get(row) != null) {
-          try {
-            rowsToLocks.wait();
-          } catch (InterruptedException ie) {
-            // Empty
-          }
-        }
-        Long lid = Long.valueOf(Math.abs(rand.nextLong()));
-        rowsToLocks.put(row, lid);
-        locksToRows.put(lid, row);
-        rowsToLocks.notifyAll();
-        return lid.longValue();
-      }
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-  
-  Text getRowFromLock(long lockid) {
-    // Pattern is that all access to rowsToLocks and/or to
-    // locksToRows is via a lock on rowsToLocks.
-    synchronized (rowsToLocks) {
-      return locksToRows.get(Long.valueOf(lockid));
-    }
-  }
-  
-  /** 
-   * Release the row lock!
-   * @param row Name of row whose lock we are to release
-   */
-  void releaseRowLock(Text row) {
-    synchronized (rowsToLocks) {
-      long lockid = rowsToLocks.remove(row).longValue();
-      locksToRows.remove(Long.valueOf(lockid));
-      rowsToLocks.notifyAll();
-    }
-  }
-  
-  private void waitOnRowLocks() {
-    synchronized (rowsToLocks) {
-      while (this.rowsToLocks.size() > 0) {
-        LOG.debug("waiting for " + this.rowsToLocks.size() + " row locks");
-        try {
-          this.rowsToLocks.wait();
-        } catch (InterruptedException e) {
-          // Catch. Let while test determine loop-end.
-        }
-      }
-    }
-  }
-  
-  /** {@inheritDoc} */
-  @Override
-  public String toString() {
-    return regionInfo.getRegionName().toString();
-  }
-  
-  private Path getBaseDir() {
-    return this.basedir;
-  }
-
-  /**
-   * HScanner is an iterator through a bunch of rows in an HRegion.
-   */
-  private class HScanner implements HScannerInterface {
-    private HInternalScannerInterface[] scanners;
-    private TreeMap<Text, byte []>[] resultSets;
-    private HStoreKey[] keys;
-
-    /** Create an HScanner with a handle on many HStores. */
-    @SuppressWarnings("unchecked")
-    HScanner(Text[] cols, Text firstRow, long timestamp, HStore[] stores,
-        RowFilterInterface filter)
-    throws IOException {
-      this.scanners = new HInternalScannerInterface[stores.length];
-      try {
-        for (int i = 0; i < stores.length; i++) {
-          // TODO: The cols passed in here can include columns from other
-          // stores; add filter so only pertinent columns are passed.
-          //
-          // Also, if more than one store involved, need to replicate filters.
-          // At least WhileMatchRowFilter will mess up the scan if only
-          // one shared across many rows. See HADOOP-2467.
-          scanners[i] = stores[i].getScanner(timestamp, cols, firstRow,
-            (i > 0 && filter != null)?
-              (RowFilterInterface)WritableUtils.clone(filter, conf): filter);
-        }
-      } catch(IOException e) {
-        for (int i = 0; i < this.scanners.length; i++) {
-          if(scanners[i] != null) {
-            closeScanner(i);
-          }
-        }
-        throw e;
-      }
-
-      // Advance to the first key in each store.
-      // All results will match the required column-set and scanTime.
-      this.resultSets = new TreeMap[scanners.length];
-      this.keys = new HStoreKey[scanners.length];
-      for (int i = 0; i < scanners.length; i++) {
-        keys[i] = new HStoreKey();
-        resultSets[i] = new TreeMap<Text, byte []>();
-        if(scanners[i] != null && !scanners[i].next(keys[i], resultSets[i])) {
-          closeScanner(i);
-        }
-      }
-
-      // As we have now successfully completed initialization, increment the
-      // activeScanner count.
-      activeScannerCount.incrementAndGet();
-    }
-
-    /** {@inheritDoc} */
-    public boolean next(HStoreKey key, SortedMap<Text, byte[]> results)
-    throws IOException {
-      boolean moreToFollow = false;
-
-      // Find the lowest-possible key.
-
-      Text chosenRow = null;
-      long chosenTimestamp = -1;
-      for (int i = 0; i < this.keys.length; i++) {
-        if (scanners[i] != null &&
-            (chosenRow == null ||
-                (keys[i].getRow().compareTo(chosenRow) < 0) ||
-                ((keys[i].getRow().compareTo(chosenRow) == 0) &&
-                    (keys[i].getTimestamp() > chosenTimestamp)))) {
-          chosenRow = new Text(keys[i].getRow());
-          chosenTimestamp = keys[i].getTimestamp();
-        }
-      }
-
-      // Store the key and results for each sub-scanner. Merge them as
-      // appropriate.
-      if (chosenTimestamp >= 0) {
-        // Here we are setting the passed in key with current row+timestamp
-        key.setRow(chosenRow);
-        key.setVersion(chosenTimestamp);
-        key.setColumn(HConstants.EMPTY_TEXT);
-
-        for (int i = 0; i < scanners.length; i++) {
-          if (scanners[i] != null && keys[i].getRow().compareTo(chosenRow) == 0) {
-            // NOTE: We used to do results.putAll(resultSets[i]);
-            // but this had the effect of overwriting newer
-            // values with older ones. So now we only insert
-            // a result if the map does not contain the key.
-            for (Map.Entry<Text, byte[]> e : resultSets[i].entrySet()) {
-              if (!results.containsKey(e.getKey())) {
-                results.put(e.getKey(), e.getValue());
-              }
-            }
-            resultSets[i].clear();
-            if (!scanners[i].next(keys[i], resultSets[i])) {
-              closeScanner(i);
-            }
-          }
-        }
-      }
-
-      for (int i = 0; i < scanners.length; i++) {
-        // If the current scanner is non-null AND has a lower-or-equal
-        // row label, then its timestamp is bad. We need to advance it.
-        while ((scanners[i] != null) &&
-            (keys[i].getRow().compareTo(chosenRow) <= 0)) {
-          resultSets[i].clear();
-          if (!scanners[i].next(keys[i], resultSets[i])) {
-            closeScanner(i);
-          }
-        }
-      }
-
-      moreToFollow = chosenTimestamp >= 0;
-      if (results == null || results.size() <= 0) {
-        // If we got no results, then there is no more to follow.
-        moreToFollow = false;
-      }
-
-      // Make sure scanners closed if no more results
-      if (!moreToFollow) {
-        for (int i = 0; i < scanners.length; i++) {
-          if (null != scanners[i]) {
-            closeScanner(i);
-          }
-        }
-      }
-      return moreToFollow;
-    }
-
-    
-    /** Shut down a single scanner */
-    void closeScanner(int i) {
-      try {
-        try {
-          scanners[i].close();
-        } catch (IOException e) {
-          LOG.warn("Failed closeing scanner " + i, e);
-        }
-      } finally {
-        scanners[i] = null;
-        resultSets[i] = null;
-        keys[i] = null;
-      }
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    public void close() {
-      try {
-        for(int i = 0; i < scanners.length; i++) {
-          if(scanners[i] != null) {
-            closeScanner(i);
-          }
-        }
-      } finally {
-        synchronized (activeScannerCount) {
-          int count = activeScannerCount.decrementAndGet();
-          if (count < 0) {
-            LOG.error("active scanner count less than zero: " + count +
-                " resetting to zero");
-            activeScannerCount.set(0);
-            count = 0;
-          }
-          if (count == 0) {
-            activeScannerCount.notifyAll();
-          }
-        }
-      }
-    }
-
-    /** {@inheritDoc} */
-    public Iterator<Entry<HStoreKey, SortedMap<Text, byte[]>>> iterator() {
-      throw new UnsupportedOperationException("Unimplemented serverside. " +
-        "next(HStoreKey, StortedMap(...) is more efficient");
-    }
-  }
-  
-  // Utility methods
-
-  /**
-   * Convenience method creating new HRegions. Used by createTable and by the
-   * bootstrap code in the HMaster constructor.
-   * Note, this method creates an {@link HLog} for the created region. It
-   * needs to be closed explicitly.  Use {@link HRegion#getLog()} to get
-   * access.
-   * @param info Info for region to create.
-   * @param rootDir Root directory for HBase instance
-   * @param conf
-   * @return new HRegion
-   * 
-   * @throws IOException
-   */
-  static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
-      final HBaseConfiguration conf) throws IOException {
-    Path tableDir =
-      HTableDescriptor.getTableDir(rootDir, info.getTableDesc().getName());
-    Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName());
-    FileSystem fs = FileSystem.get(conf);
-    fs.mkdirs(regionDir);
-    return new HRegion(tableDir,
-      new HLog(fs, new Path(regionDir, HREGION_LOGDIR_NAME), conf, null),
-      fs, conf, info, null, null);
-  }
-  
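  // A bootstrap sketch for createHRegion above. The HTableDescriptor and
  // HColumnDescriptor constructors used here are assumed from the surrounding
  // codebase rather than shown in this file.
  static HRegion bootstrapSketch(final Path rootDir, final HBaseConfiguration conf)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor("example");
    desc.addFamily(new HColumnDescriptor("info:"));
    HRegionInfo info = new HRegionInfo(desc, null, null);   // spans the whole key space
    HRegion r = createHRegion(info, rootDir, conf);
    // Per the javadoc above, the HLog created for this region must be closed
    // explicitly by the caller; it is reachable via r.getLog().
    return r;
  }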
-  /**
-   * Inserts a new region's meta information into the passed
-   * <code>meta</code> region. Used by the HMaster bootstrap code when adding
-   * a new table to the ROOT table.
-   * 
-   * @param meta META HRegion to be updated
-   * @param r HRegion to add to <code>meta</code>
-   *
-   * @throws IOException
-   * @see {@link #removeRegionFromMETA(HRegionInterface, Text, Text)}
-   */
-  static void addRegionToMETA(HRegion meta, HRegion r) throws IOException {
-    meta.checkResources();
-    // The row key is the region name
-    Text row = r.getRegionName();
-    meta.obtainRowLock(row);
-    try {
-      HStoreKey key =
-        new HStoreKey(row, COL_REGIONINFO, System.currentTimeMillis());
-      TreeMap<HStoreKey, byte[]> edits = new TreeMap<HStoreKey, byte[]>();
-      edits.put(key, Writables.getBytes(r.getRegionInfo()));
-      meta.update(edits);
-    
-    } finally {
-      meta.releaseRowLock(row);
-    }
-  }
-
-  /**
-   * Delete a region's meta information from the passed
-   * <code>meta</code> region.
-   * 
-   * @param srvr META server to be updated
-   * @param metaRegionName Meta region name
-   * @param regionName name of the HRegion to remove from <code>meta</code>
-   *
-   * @throws IOException
-   * @see {@link #addRegionToMETA(HRegion, HRegion)}
-   */
-  static void removeRegionFromMETA(final HRegionInterface srvr,
-      final Text metaRegionName, final Text regionName)
-  throws IOException {
-    BatchUpdate b = new BatchUpdate(rand.nextLong());
-    long lockid = b.startUpdate(regionName);
-    for (int i = 0; i < ALL_META_COLUMNS.length; i++) {
-      b.delete(lockid, ALL_META_COLUMNS[i]);
-    }
-    srvr.batchUpdate(metaRegionName, System.currentTimeMillis(), b);
-  }
-
-  /**
-   * Utility method used by HMaster to mark regions offline.
-   * @param srvr META server to be updated
-   * @param metaRegionName Meta region name
-   * @param info HRegion to update in <code>meta</code>
-   *
-   * @throws IOException
-   * @see {@link #addRegionToMETA(HRegion, HRegion)}
-   */
-  static void offlineRegionInMETA(final HRegionInterface srvr,
-      final Text metaRegionName, final HRegionInfo info)
-  throws IOException {
-    BatchUpdate b = new BatchUpdate(rand.nextLong());
-    long lockid = b.startUpdate(info.getRegionName());
-    info.setOffline(true);
-    b.put(lockid, COL_REGIONINFO, Writables.getBytes(info));
-    b.delete(lockid, COL_SERVER);
-    b.delete(lockid, COL_STARTCODE);
-    // If carrying splits, they'll be in place when we show up on new
-    // server.
-    srvr.batchUpdate(metaRegionName, System.currentTimeMillis(), b);
-  }
-
-  /**
-   * Deletes all the files for an HRegion
-   * 
-   * @param fs the file system object
-   * @param rootdir qualified path of HBase root directory
-   * @param info HRegionInfo for region to be deleted
-   * @throws IOException
-   * @return True if deleted.
-   */
-  static boolean deleteRegion(FileSystem fs, Path rootdir, HRegionInfo info)
-  throws IOException {
-    Path p = HRegion.getRegionDir(rootdir, info);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("DELETING region " + p.toString());
-    }
-    return fs.delete(p);
-  }
-
-  /**
-   * Computes the Path of the HRegion
-   * 
-   * @param tabledir qualified path for table
-   * @param name region file name ENCODED!
-   * @return Path of HRegion directory
-   * @see HRegionInfo#encodeRegionName(Text)
-   */
-  static Path getRegionDir(final Path tabledir, final String name) {
-    return new Path(tabledir, name);
-  }
-  
-  /**
-   * Computes the Path of the HRegion
-   * 
-   * @param rootdir qualified path of HBase root directory
-   * @param info HRegionInfo for the region
-   * @return qualified path of region directory
-   */
-  public static Path getRegionDir(final Path rootdir, final HRegionInfo info) {
-    return new Path(
-        HTableDescriptor.getTableDir(rootdir, info.getTableDesc().getName()),
-        info.getEncodedName()
-    );
-  }
-}

+ 0 - 333
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java

@@ -1,333 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableComparable;
-
-import org.apache.hadoop.hbase.util.JenkinsHash;
-
-/**
- * HRegion information.
- * Contains HRegion id, start and end keys, a reference to this
- * HRegions' table descriptor, etc.
- */
-public class HRegionInfo implements WritableComparable {
-  /**
-   * @param regionName
-   * @return the encodedName
-   */
-  public static String encodeRegionName(final Text regionName) {
-    return String.valueOf(Math.abs(
-        JenkinsHash.hash(regionName.getBytes(), regionName.getLength(), 0)));
-  }
-
-  /** delimiter used between portions of a region name */
-  private static final String DELIMITER = ",";
-
-  /** HRegionInfo for root region */
-  public static final HRegionInfo rootRegionInfo =
-    new HRegionInfo(0L, HTableDescriptor.rootTableDesc);
-
-  /** HRegionInfo for first meta region */
-  public static final HRegionInfo firstMetaRegionInfo =
-    new HRegionInfo(1L, HTableDescriptor.metaTableDesc);
-  
-  /**
-   * Extracts table name prefix from a region name.
-   * Presumes region names are ASCII characters only.
-   * @param regionName A region name.
-   * @return The table prefix of a region name.
-   */
-  public static Text getTableNameFromRegionName(final Text regionName) {
-    int offset = regionName.find(DELIMITER);
-    if (offset == -1) {
-      throw new IllegalArgumentException(regionName.toString() + " does not " +
-        "contain '" + DELIMITER + "' character");
-    }
-    byte [] tableName = new byte[offset];
-    System.arraycopy(regionName.getBytes(), 0, tableName, 0, offset);
-    return new Text(tableName);
-  }
-
-  private Text endKey;
-  private boolean offLine;
-  private long regionId;
-  private Text regionName;
-  private boolean split;
-  private Text startKey;
-  private HTableDescriptor tableDesc;
-  private int hashCode;
-  private transient String encodedName = null;
-  
-  private void setHashCode() {
-    int result = this.regionName.hashCode();
-    result ^= this.regionId;
-    result ^= this.startKey.hashCode();
-    result ^= this.endKey.hashCode();
-    result ^= Boolean.valueOf(this.offLine).hashCode();
-    result ^= this.tableDesc.hashCode();
-    this.hashCode = result;
-  }
-  
-  /** Used to construct the HRegionInfo for the root and first meta regions */
-  private HRegionInfo(long regionId, HTableDescriptor tableDesc) {
-    this.regionId = regionId;
-    this.tableDesc = tableDesc;
-    this.endKey = new Text();
-    this.offLine = false;
-    this.regionName = new Text(tableDesc.getName().toString() + DELIMITER +
-        DELIMITER + regionId);
-    this.split = false;
-    this.startKey = new Text();
-    setHashCode();
-  }
-
-  /** Default constructor - creates empty object */
-  public HRegionInfo() {
-    this.endKey = new Text();
-    this.offLine = false;
-    this.regionId = 0;
-    this.regionName = new Text();
-    this.split = false;
-    this.startKey = new Text();
-    this.tableDesc = new HTableDescriptor();
-    this.hashCode = 0;
-  }
-  
-  /**
-   * Construct HRegionInfo with explicit parameters
-   * 
-   * @param tableDesc the table descriptor
-   * @param startKey first key in region
-   * @param endKey end of key range
-   * @throws IllegalArgumentException
-   */
-  public HRegionInfo(HTableDescriptor tableDesc, Text startKey, Text endKey)
-    throws IllegalArgumentException {
-    this(tableDesc, startKey, endKey, false);
-  }
-
-  /**
-   * Construct HRegionInfo with explicit parameters
-   * 
-   * @param tableDesc the table descriptor
-   * @param startKey first key in region
-   * @param endKey end of key range
-   * @param split true if this region has split and we have daughter regions
-   * that may or may not hold references to this region.
-   * @throws IllegalArgumentException
-   */
-  public HRegionInfo(HTableDescriptor tableDesc, Text startKey, Text endKey,
-      final boolean split) throws IllegalArgumentException {
-
-    if(tableDesc == null) {
-      throw new IllegalArgumentException("tableDesc cannot be null");
-    }
-
-    this.endKey = new Text();
-    if(endKey != null) {
-      this.endKey.set(endKey);
-    }
-    
-    this.offLine = false;
-    this.regionId = System.currentTimeMillis();
-    
-    this.regionName = new Text(tableDesc.getName().toString() + DELIMITER +
-        (startKey == null ? "" : startKey.toString()) + DELIMITER +
-        regionId);
-      
-    this.split = split;
-
-    this.startKey = new Text();
-    if(startKey != null) {
-      this.startKey.set(startKey);
-    }
-    
-    this.tableDesc = tableDesc;
-    setHashCode();
-  }
-  
-  /** @return the endKey */
-  public Text getEndKey(){
-    return endKey;
-  }
-
-  /** @return the regionId */
-  public long getRegionId(){
-    return regionId;
-  }
-
-  /** @return the regionName */
-  public Text getRegionName(){
-    return regionName;
-  }
-  
-  /** @return the encoded region name */
-  public synchronized String getEncodedName() {
-    if (encodedName == null) {
-      encodedName = encodeRegionName(regionName);
-    }
-    return encodedName;
-  }
-
-  /** @return the startKey */
-  public Text getStartKey(){
-    return startKey;
-  }
-
-  /** @return the tableDesc */
-  public HTableDescriptor getTableDesc(){
-    return tableDesc;
-  }
-  
-  /** @return true if this is the root region */
-  public boolean isRootRegion() {
-    return this.tableDesc.isRootRegion();
-  }
-  
-  /** @return true if this is the meta table */
-  public boolean isMetaTable() {
-    return this.tableDesc.isMetaTable();
-  }
-
-  /** @return true if this region is a meta region */
-  public boolean isMetaRegion() {
-    return this.tableDesc.isMetaRegion();
-  }
-  
-  /**
-   * @return True if this region has been split and has daughters.
-   */
-  public boolean isSplit() {
-    return this.split;
-  }
-  
-  /**
-   * @param split set split status
-   */
-  public void setSplit(boolean split) {
-    this.split = split;
-  }
-
-  /**
-   * @return True if this region is offline.
-   */
-  public boolean isOffline() {
-    return this.offLine;
-  }
-
-  /**
-   * @param offLine set the online/offline status
-   */
-  public void setOffline(boolean offLine) {
-    this.offLine = offLine;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String toString() {
-    return "regionname: " + this.regionName.toString() + ", startKey: <" +
-      this.startKey.toString() + ">, endKey: <" + this.endKey.toString() + 
-      ">, encodedName: " + getEncodedName() + "," +
-      (isOffline()? " offline: true,": "") + (isSplit()? " split: true,": "") +
-      " tableDesc: {" + this.tableDesc.toString() + "}";
-  }
-    
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean equals(Object o) {
-    return this.compareTo(o) == 0;
-  }
-  
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public int hashCode() {
-    return this.hashCode;
-  }
-
-  //
-  // Writable
-  //
-
-  /**
-   * {@inheritDoc}
-   */
-  public void write(DataOutput out) throws IOException {
-    endKey.write(out);
-    out.writeBoolean(offLine);
-    out.writeLong(regionId);
-    regionName.write(out);
-    out.writeBoolean(split);
-    startKey.write(out);
-    tableDesc.write(out);
-    out.writeInt(hashCode);
-  }
-  
-  /**
-   * {@inheritDoc}
-   */
-  public void readFields(DataInput in) throws IOException {
-    this.endKey.readFields(in);
-    this.offLine = in.readBoolean();
-    this.regionId = in.readLong();
-    this.regionName.readFields(in);
-    this.split = in.readBoolean();
-    this.startKey.readFields(in);
-    this.tableDesc.readFields(in);
-    this.hashCode = in.readInt();
-  }
-  
-  //
-  // Comparable
-  //
-  
-  /**
-   * {@inheritDoc}
-   */
-  public int compareTo(Object o) {
-    HRegionInfo other = (HRegionInfo) o;
-    
-    // Are regions of same table?
-    int result = this.tableDesc.compareTo(other.tableDesc);
-    if (result != 0) {
-      return result;
-    }
-
-    // Compare start keys.
-    result = this.startKey.compareTo(other.startKey);
-    if (result != 0) {
-      return result;
-    }
-    
-    // Compare end keys.
-    return this.endKey.compareTo(other.endKey);
-  }
-}
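A small sketch of the naming scheme implemented above; the HTableDescriptor constructor taking a table name is an assumption, not shown in this file:

    HTableDescriptor desc = new HTableDescriptor("webtable");
    HRegionInfo info = new HRegionInfo(desc, new Text("a"), new Text("m"));
    // getRegionName() yields "webtable,a,<regionId>": table name, start key and
    // region id joined by the "," delimiter.  getEncodedName() is the Jenkins
    // hash of that name and is what names the region's directory on disk.
    String encoded = info.getEncodedName();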

+ 0 - 229
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInterface.java

@@ -1,229 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.filter.RowFilterInterface;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-
-import org.apache.hadoop.hbase.io.HbaseMapWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.VersionedProtocol;
-
-/**
- * Clients interact with HRegionServers using a handle to the HRegionInterface.
- */
-public interface HRegionInterface extends VersionedProtocol {
-  /** initial version */
-  public static final long versionID = 1L;
-
-  /** 
-   * Get metainfo about an HRegion
-   * 
-   * @param regionName name of the region
-   * @return HRegionInfo object for region
-   * @throws NotServingRegionException
-   */
-  public HRegionInfo getRegionInfo(final Text regionName)
-  throws NotServingRegionException;
-
-  /**
-   * Retrieve a single value from the specified region for the specified row
-   * and column keys
-   * 
-   * @param regionName name of region
-   * @param row row key
-   * @param column column key
-   * @return value for that region/row/column
-   * @throws IOException
-   */
-  public byte [] get(final Text regionName, final Text row, final Text column)
-  throws IOException;
-
-  /**
-   * Get the specified number of versions of the specified row and column
-   * 
-   * @param regionName region name
-   * @param row row key
-   * @param column column key
-   * @param numVersions number of versions to return
-   * @return array of values
-   * @throws IOException
-   */
-  public byte [][] get(final Text regionName, final Text row,
-    final Text column, final int numVersions)
-  throws IOException;
-  
-  /**
-   * Get the specified number of versions of the specified row and column with
-   * the specified timestamp.
-   *
-   * @param regionName region name
-   * @param row row key
-   * @param column column key
-   * @param timestamp timestamp
-   * @param numVersions number of versions to return
-   * @return array of values
-   * @throws IOException
-   */
-  public byte [][] get(final Text regionName, final Text row,
-      final Text column, final long timestamp, final int numVersions)
-  throws IOException;
-  
-  /**
-   * Get all the data for the specified row
-   * 
-   * @param regionName region name
-   * @param row row key
-   * @return map of values
-   * @throws IOException
-   */
-  public HbaseMapWritable getRow(final Text regionName, final Text row)
-  throws IOException;
-
-  /**
-   * Get all the data for the specified row at a given timestamp
-   * 
-   * @param regionName region name
-   * @param row row key
-   * @param ts timestamp
-   * @return map of values
-   * @throws IOException
-   */
-  public HbaseMapWritable getRow(final Text regionName, final Text row, final long ts)
-  throws IOException;
-
-  /**
-   * Return all the data for the row that matches <i>row</i> exactly, 
-   * or the one that immediately precedes it.
-   * 
-   * @param regionName region name
-   * @param row row key
-   * @return map of values
-   * @throws IOException
-   */
-  public HbaseMapWritable getClosestRowBefore(final Text regionName, final Text row)
-  throws IOException;
-
-  /**
-   * Return all the data for the row that matches <i>row</i> exactly, 
-   * or the one that immediately precedes it, at or immediately before 
-   * <i>ts</i>.
-   * 
-   * @param regionName region name
-   * @param row row key
-   * @param ts timestamp to search at or before
-   * @return map of values
-   * @throws IOException
-   */
-  public HbaseMapWritable getClosestRowBefore(final Text regionName, 
-    final Text row, final long ts)
-  throws IOException;
-
-  /**
-   * Applies a batch of updates via one RPC
-   * 
-   * @param regionName name of the region to update
-   * @param timestamp the time to be associated with the changes
-   * @param b BatchUpdate
-   * @throws IOException
-   */
-  public void batchUpdate(Text regionName, long timestamp, BatchUpdate b)
-  throws IOException;
-  
-  /**
-   * Delete all cells that match the passed row and column and whose
-   * timestamp is equal-to or older than the passed timestamp.
-   *
-   * @param regionName region name
-   * @param row row key
-   * @param column column key
-   * @param timestamp Delete all entries that have this timestamp or older
-   * @throws IOException
-   */
-  public void deleteAll(Text regionName, Text row, Text column, long timestamp)
-  throws IOException;
-
-  /**
-   * Delete all cells that match the passed row and whose
-   * timestamp is equal-to or older than the passed timestamp.
-   *
-   * @param regionName region name
-   * @param row row key
-   * @param timestamp Delete all entries that have this timestamp or older
-   * @throws IOException
-   */
-  public void deleteAll(Text regionName, Text row, long timestamp)
-  throws IOException;
-
-  /**
-   * Delete all cells for a row with matching column family with timestamps
-   * less than or equal to <i>timestamp</i>.
-   *
-   * @param regionName The name of the region to operate on
-   * @param row The row to operate on
-   * @param family The column family to match
-   * @param timestamp Timestamp to match
-   * @throws IOException
-   */
-  public void deleteFamily(Text regionName, Text row, Text family, 
-    long timestamp)
-  throws IOException;
-
-  
-  //
-  // remote scanner interface
-  //
-
-  /**
-   * Opens a remote scanner with a RowFilter.
-   * 
-   * @param regionName name of region to scan
-   * @param columns columns to scan. If column name is a column family, all
-   * columns of the specified column family are returned.  It's also possible
-   * to pass a regex for the column family name. A column name is judged to be
-   * a regex if it contains at least one of the following characters:
-   * <code>\+|^&*$[]]}{)(</code>.
-   * @param startRow starting row to scan
-   * @param timestamp only return values whose timestamp is <= this value
-   * @param filter RowFilter for filtering results at the row-level.
-   *
-   * @return scannerId scanner identifier used in other calls
-   * @throws IOException
-   */
-  public long openScanner(Text regionName, Text[] columns, Text startRow,
-      long timestamp, RowFilterInterface filter)
-  throws IOException;
-
-  /**
-   * Get the next set of values
-   * 
-   * @param scannerId clientId passed to openScanner
-   * @return map of values
-   * @throws IOException
-   */
-  public HbaseMapWritable next(long scannerId) throws IOException;
-  
-  /**
-   * Close a scanner
-   * 
-   * @param scannerId the scanner id returned by openScanner
-   * @throws IOException
-   */
-  public void close(long scannerId) throws IOException;
-}
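Taken together, openScanner, next and close form the remote scanning protocol. A hedged usage sketch, where srvr is an assumed HRegionInterface proxy, regionName an assumed region name, and the end-of-scan convention (an empty result map) is an assumption rather than something stated above:

    long scannerId = srvr.openScanner(regionName,
        new Text[] { new Text("info:") },   // whole "info:" family
        new Text(""),                       // start at the first row
        System.currentTimeMillis(), null);  // no row filter
    try {
      HbaseMapWritable values;
      while ((values = srvr.next(scannerId)) != null && values.size() > 0) {
        // one row's worth of cells per call
      }
    } finally {
      srvr.close(scannerId);
    }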

+ 0 - 94
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionLocation.java

@@ -1,94 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-/**
- * Contains the HRegionInfo for the region and the HServerAddress for the
- * HRegionServer serving the region
- */
-@SuppressWarnings("unchecked")
-public class HRegionLocation implements Comparable {
-  private HRegionInfo regionInfo;
-  private HServerAddress serverAddress;
-
-  /**
-   * Constructor
-   * 
-   * @param regionInfo the HRegionInfo for the region
-   * @param serverAddress the HServerAddress for the region server
-   */
-  public HRegionLocation(HRegionInfo regionInfo, HServerAddress serverAddress) {
-    this.regionInfo = regionInfo;
-    this.serverAddress = serverAddress;
-  }
-  
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String toString() {
-    return "address: " + this.serverAddress.toString() + ", regioninfo: " +
-      this.regionInfo;
-  }
-  
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean equals(Object o) {
-    return this.compareTo(o) == 0;
-  }
-  
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public int hashCode() {
-    int result = this.regionInfo.hashCode();
-    result ^= this.serverAddress.hashCode();
-    return result;
-  }
-  
-  /** @return HRegionInfo */
-  public HRegionInfo getRegionInfo(){
-    return regionInfo;
-  }
-
-  /** @return HServerAddress */
-  public HServerAddress getServerAddress(){
-    return serverAddress;
-  }
-
-  //
-  // Comparable
-  //
-  
-  /**
-   * {@inheritDoc}
-   */
-  public int compareTo(Object o) {
-    HRegionLocation other = (HRegionLocation) o;
-    int result = this.regionInfo.compareTo(other.regionInfo);
-    if(result == 0) {
-      result = this.serverAddress.compareTo(other.serverAddress);
-    }
-    return result;
-  }
-}

+ 0 - 1770
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java

@@ -1,1770 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.lang.Thread.UncaughtExceptionHandler;
-import java.lang.reflect.Constructor;
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.ConcurrentModificationException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Delayed;
-import java.util.concurrent.DelayQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.filter.RowFilterInterface;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.ipc.HbaseRPC;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.InfoServer;
-import org.apache.hadoop.hbase.util.Sleeper;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.hbase.io.HbaseMapWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.net.DNS;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- * HRegionServer makes a set of HRegions available to clients.  It checks in with
- * the HMaster. There are many HRegionServers in a single HBase deployment.
- */
-public class HRegionServer implements HConstants, HRegionInterface, Runnable {
-  static final Log LOG = LogFactory.getLog(HRegionServer.class);
-  
-  // Set when a report to the master comes back with a message asking us to
-  // shutdown.  Also set by call to stop when debugging or running unit tests
-  // of HRegionServer in isolation. We use AtomicBoolean rather than
-  // plain boolean so we can pass a reference to Chore threads.  Otherwise,
-  // Chore threads need to know about the hosting class.
-  protected volatile AtomicBoolean stopRequested = new AtomicBoolean(false);
-  
-  protected volatile AtomicBoolean quiesced = new AtomicBoolean(false);
-  
-  // Go down hard.  Used if file system becomes unavailable and also in
-  // debugging and unit tests.
-  protected volatile boolean abortRequested;
-  
-  // If false, the file system has become unavailable
-  protected volatile boolean fsOk;
-  
-  protected final HServerInfo serverInfo;
-  protected final HBaseConfiguration conf;
-  private FileSystem fs;
-  private Path rootDir;
-  private final Random rand = new Random();
-  
-  // region name -> HRegion
-  protected volatile SortedMap<Text, HRegion> onlineRegions =
-    Collections.synchronizedSortedMap(new TreeMap<Text, HRegion>());
-  protected volatile Map<Text, HRegion> retiringRegions =
-    new ConcurrentHashMap<Text, HRegion>();
- 
-  protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
-  private volatile List<HMsg> outboundMsgs =
-    Collections.synchronizedList(new ArrayList<HMsg>());
-
-  final int numRetries;
-  protected final int threadWakeFrequency;
-  private final int msgInterval;
-  private final int serverLeaseTimeout;
-
-  // Remote HMaster
-  private HMasterRegionInterface hbaseMaster;
-
-  // Server to handle client requests.  Default access so can be accessed by
-  // unit tests.
-  final Server server;
-  
-  // Leases
-  private final Leases leases;
-  
-  // Request counter
-  private volatile AtomicInteger requestCount = new AtomicInteger();
-  
-  // A sleeper that sleeps for msgInterval.
-  private final Sleeper sleeper;
-
-  // Info server.  Default access so can be used by unit tests.  REGIONSERVER
-  // is the name of the webapp and the attribute name used when stuffing this
-  // instance into the web context.
-  InfoServer infoServer;
-  
-  /** region server process name */
-  public static final String REGIONSERVER = "regionserver";
-  
-  /**
-   * Thread to shutdown the region server in an orderly manner.  This thread
-   * is registered as a shutdown hook in the HRegionServer constructor and is
-   * only called when the HRegionServer receives a kill signal.
-   */
-  class ShutdownThread extends Thread {
-    private final HRegionServer instance;
-    
-    /**
-     * @param instance
-     */
-    public ShutdownThread(HRegionServer instance) {
-      this.instance = instance;
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public void run() {
-      LOG.info("Starting shutdown thread.");
-      
-      // tell the region server to stop and wait for it to complete
-      instance.stop();
-      instance.join();
-      LOG.info("Shutdown thread complete");
-    }    
-    
-  }
-
-  /** Queue entry passed to flusher, compactor and splitter threads */
-  class QueueEntry implements Delayed {
-    private final HRegion region;
-    private long expirationTime;
-
-    QueueEntry(HRegion region, long expirationTime) {
-      this.region = region;
-      this.expirationTime = expirationTime;
-    }
-    
-    /** {@inheritDoc} */
-    @Override
-    public boolean equals(Object o) {
-      QueueEntry other = (QueueEntry) o;
-      return this.hashCode() == other.hashCode();
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public int hashCode() {
-      return this.region.getRegionInfo().hashCode();
-    }
-
-    /** {@inheritDoc} */
-    public long getDelay(TimeUnit unit) {
-      return unit.convert(this.expirationTime - System.currentTimeMillis(),
-          TimeUnit.MILLISECONDS);
-    }
-
-    /** {@inheritDoc} */
-    public int compareTo(Delayed o) {
-      long delta = this.getDelay(TimeUnit.MILLISECONDS) -
-        o.getDelay(TimeUnit.MILLISECONDS);
-
-      int value = 0;
-      if (delta > 0) {
-        value = 1;
-        
-      } else if (delta < 0) {
-        value = -1;
-      }
-      return value;
-    }
-
-    /** @return the region */
-    public HRegion getRegion() {
-      return region;
-    }
-
-    /** @param expirationTime the expirationTime to set */
-    public void setExpirationTime(long expirationTime) {
-      this.expirationTime = expirationTime;
-    }
-  }
-
-  // Compactions
-  final CompactSplitThread compactSplitThread;
-  // Needed during shutdown so we send an interrupt after completion of a
-  // compaction, not in the midst.
-  final Integer compactSplitLock = new Integer(0);
-
-  /** Compact region on request and then run split if appropriate
-   */
-  private class CompactSplitThread extends Thread
-  implements RegionUnavailableListener {
-    private HTable root = null;
-    private HTable meta = null;
-    private long startTime;
-    private final long frequency;
-    
-    private final BlockingQueue<QueueEntry> compactionQueue =
-      new LinkedBlockingQueue<QueueEntry>();
-
-    /** constructor */
-    public CompactSplitThread() {
-      super();
-      this.frequency =
-        conf.getLong("hbase.regionserver.thread.splitcompactcheckfrequency",
-        20 * 1000);
-    }
-    
-    /** {@inheritDoc} */
-    @Override
-    public void run() {
-      while (!stopRequested.get()) {
-        QueueEntry e = null;
-        try {
-          e = compactionQueue.poll(this.frequency, TimeUnit.MILLISECONDS);
-          if (e == null) {
-            continue;
-          }
-          e.getRegion().compactIfNeeded();
-          split(e.getRegion());
-        } catch (InterruptedException ex) {
-          continue;
-        } catch (IOException ex) {
-          LOG.error("Compaction failed" +
-              (e != null ? (" for region " + e.getRegion().getRegionName()) : ""),
-              RemoteExceptionHandler.checkIOException(ex));
-          if (!checkFileSystem()) {
-            break;
-          }
-
-        } catch (Exception ex) {
-          LOG.error("Compaction failed" +
-              (e != null ? (" for region " + e.getRegion().getRegionName()) : ""),
-              ex);
-          if (!checkFileSystem()) {
-            break;
-          }
-        }
-      }
-      LOG.info(getName() + " exiting");
-    }
-    
-    /**
-     * @param e QueueEntry for region to be compacted
-     */
-    public void compactionRequested(QueueEntry e) {
-      compactionQueue.add(e);
-    }
-    
-    void compactionRequested(final HRegion r) {
-      compactionRequested(new QueueEntry(r, System.currentTimeMillis()));
-    }
-    
-    private void split(final HRegion region) throws IOException {
-      final HRegionInfo oldRegionInfo = region.getRegionInfo();
-      final HRegion[] newRegions = region.splitRegion(this);
-      if (newRegions == null) {
-        // Didn't need to be split
-        return;
-      }
-      
-      // When a region is split, the META table needs to be updated if we're
-      // splitting a 'normal' region, and the ROOT table needs to be
-      // updated if we are splitting a META region.
-      HTable t = null;
-      if (region.getRegionInfo().isMetaTable()) {
-        // We need to update the root region
-        if (this.root == null) {
-          this.root = new HTable(conf, ROOT_TABLE_NAME);
-        }
-        t = root;
-      } else {
-        // For normal regions we need to update the meta region
-        if (meta == null) {
-          meta = new HTable(conf, META_TABLE_NAME);
-        }
-        t = meta;
-      }
-      LOG.info("Updating " + t.getTableName() + " with region split info");
-
-      // Mark old region as offline and split in META.
-      // NOTE: there is no need for retry logic here. HTable does it for us.
-      long lockid = t.startUpdate(oldRegionInfo.getRegionName());
-      oldRegionInfo.setOffline(true);
-      oldRegionInfo.setSplit(true);
-      t.put(lockid, COL_REGIONINFO, Writables.getBytes(oldRegionInfo));
-      t.put(lockid, COL_SPLITA, Writables.getBytes(
-        newRegions[0].getRegionInfo()));
-      t.put(lockid, COL_SPLITB, Writables.getBytes(
-        newRegions[1].getRegionInfo()));
-      t.commit(lockid);
-      
-      // Add new regions to META
-      for (int i = 0; i < newRegions.length; i++) {
-        lockid = t.startUpdate(newRegions[i].getRegionName());
-        t.put(lockid, COL_REGIONINFO, Writables.getBytes(
-          newRegions[i].getRegionInfo()));
-        t.commit(lockid);
-      }
-          
-      // Now tell the master about the new regions
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Reporting region split to master");
-      }
-      reportSplit(oldRegionInfo, newRegions[0].getRegionInfo(),
-        newRegions[1].getRegionInfo());
-      LOG.info("region split, META updated, and report to master all" +
-        " successful. Old region=" + oldRegionInfo.getRegionName() +
-        ", new regions: " + newRegions[0].getRegionName() + ", " +
-        newRegions[1].getRegionName() + ". Split took " +
-        StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));
-      
-      // Do not serve the new regions. Let the Master assign them.
-    }
-    
-    /** {@inheritDoc} */
-    public void closing(final Text regionName) {
-      startTime = System.currentTimeMillis();
-      lock.writeLock().lock();
-      try {
-        // Remove region from regions Map and add it to the Map of retiring
-        // regions.
-        retiringRegions.put(regionName, onlineRegions.remove(regionName));
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(regionName.toString() + " closing (" +
-            "Adding to retiringRegions)");
-        }
-      } finally {
-        lock.writeLock().unlock();
-      }
-    }
-    
-    /** {@inheritDoc} */
-    public void closed(final Text regionName) {
-      lock.writeLock().lock();
-      try {
-        retiringRegions.remove(regionName);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(regionName.toString() + " closed");
-        }
-      } finally {
-        lock.writeLock().unlock();
-      }
-    }
-  }
-  
-  // Cache flushing  
-  final Flusher cacheFlusher;
-  // Needed during shutdown so we send an interrupt after completion of a
-  // flush, not in the midst.
-  final Integer cacheFlusherLock = new Integer(0);
-  
-  /** Flush cache upon request */
-  class Flusher extends Thread implements CacheFlushListener {
-    private final DelayQueue<QueueEntry> flushQueue =
-      new DelayQueue<QueueEntry>();
-
-    private final long optionalFlushPeriod;
-    
-    /** constructor */
-    public Flusher() {
-      super();
-      this.optionalFlushPeriod = conf.getLong(
-        "hbase.regionserver.optionalcacheflushinterval", 30 * 60 * 1000L);
-
-    }
-    
-    /** {@inheritDoc} */
-    @Override
-    public void run() {
-      while (!stopRequested.get()) {
-        QueueEntry e = null;
-        try {
-          e = flushQueue.poll(threadWakeFrequency, TimeUnit.MILLISECONDS);
-          if (e == null) {
-            continue;
-          }
-          synchronized(cacheFlusherLock) { // Don't interrupt while we're working
-            if (e.getRegion().flushcache()) {
-              compactSplitThread.compactionRequested(e);
-            }
-              
-            e.setExpirationTime(System.currentTimeMillis() +
-                optionalFlushPeriod);
-            flushQueue.add(e);
-          }
-          
-          // Now ensure that all the active regions are in the queue
-          
-          Set<HRegion> regions = getRegionsToCheck();
-          for (HRegion r: regions) {
-            e = new QueueEntry(r, r.getLastFlushTime() + optionalFlushPeriod);
-            synchronized (flushQueue) {
-              if (!flushQueue.contains(e)) {
-                flushQueue.add(e);
-              }
-            }
-          }
-
-          // Now make sure that the queue only contains active regions
-
-          synchronized (flushQueue) {
-            for (Iterator<QueueEntry> i = flushQueue.iterator(); i.hasNext();  ) {
-              e = i.next();
-              if (!regions.contains(e.getRegion())) {
-                i.remove();
-              }
-            }
-          }
-        } catch (InterruptedException ex) {
-          continue;
-
-        } catch (ConcurrentModificationException ex) {
-          continue;
-
-        } catch (DroppedSnapshotException ex) {
-          // Cache flush can fail in a few places.  If it fails in a critical
-          // section, we get a DroppedSnapshotException and a replay of hlog
-          // is required. Currently the only way to do this is a restart of
-          // the server.
-          LOG.fatal("Replay of hlog required. Forcing server restart", ex);
-          if (!checkFileSystem()) {
-            break;
-          }
-          HRegionServer.this.stop();
-
-        } catch (IOException ex) {
-          LOG.error("Cache flush failed" +
-              (e != null ? (" for region " + e.getRegion().getRegionName()) : ""),
-              RemoteExceptionHandler.checkIOException(ex));
-          if (!checkFileSystem()) {
-            break;
-          }
-
-        } catch (Exception ex) {
-          LOG.error("Cache flush failed" +
-              (e != null ? (" for region " + e.getRegion().getRegionName()) : ""),
-              ex);
-          if (!checkFileSystem()) {
-            break;
-          }
-        }
-      }
-      flushQueue.clear();
-      LOG.info(getName() + " exiting");
-    }
-    
-    /** {@inheritDoc} */
-    public void flushRequested(HRegion region) {
-      QueueEntry e = new QueueEntry(region, System.currentTimeMillis());
-      synchronized (flushQueue) {
-        if (flushQueue.contains(e)) {
-          flushQueue.remove(e);
-        }
-        flushQueue.add(e);
-      }
-    }
-  }
-
-  // HLog and HLog roller.  log is protected rather than private to avoid
-  // eclipse warning when accessed by inner classes
-  protected HLog log;
-  final LogRoller logRoller;
-  final Integer logRollerLock = new Integer(0);
-  
-  /** Runs periodically to determine if the HLog should be rolled */
-  class LogRoller extends Thread implements LogRollListener {
-    private final Integer rollLock = new Integer(0);
-    private volatile boolean rollLog;
-    
-    /** constructor */
-    public LogRoller() {
-      super();
-      this.rollLog = false;
-    }
- 
-    /** {@inheritDoc} */
-    @Override
-    public void run() {
-      while (!stopRequested.get()) {
-        while (!rollLog && !stopRequested.get()) {
-          synchronized (rollLock) {
-            try {
-              rollLock.wait(threadWakeFrequency);
-
-            } catch (InterruptedException e) {
-              continue;
-            }
-          }
-        }
-        if (!rollLog) {
-          // There are only two reasons to break out of the inner while loop:
-          // 1. Log roll requested
-          // 2. Stop requested
-          // If a log roll was not requested, go back to the top of the loop.
-          continue;
-        }
-        synchronized (logRollerLock) {
-          try {
-            LOG.info("Rolling hlog. Number of entries: " + log.getNumEntries());
-            log.rollWriter();
-            
-          } catch (IOException ex) {
-            LOG.error("Log rolling failed",
-              RemoteExceptionHandler.checkIOException(ex));
-            checkFileSystem();
-            
-          } catch (Exception ex) {
-            LOG.error("Log rolling failed", ex);
-            checkFileSystem();
-            
-          } finally {
-            rollLog = false;
-          }
-        }
-      }
-    }
-
-    /** {@inheritDoc} */
-    public void logRollRequested() {
-      synchronized (rollLock) {
-        rollLog = true;
-        rollLock.notifyAll();
-      }
-    }
-  }
-
-  /**
-   * Starts a HRegionServer at the default location
-   * @param conf
-   * @throws IOException
-   */
-  public HRegionServer(HBaseConfiguration conf) throws IOException {
-    this(new HServerAddress(conf.get(REGIONSERVER_ADDRESS,
-        DEFAULT_REGIONSERVER_ADDRESS)), conf);
-  }
-  
-  /**
-   * Starts a HRegionServer at the specified location
-   * @param address
-   * @param conf
-   * @throws IOException
-   */
-  public HRegionServer(HServerAddress address, HBaseConfiguration conf)
-  throws IOException {  
-    this.abortRequested = false;
-    this.fsOk = true;
-    this.conf = conf;
-
-    // Config'ed params
-    this.numRetries =  conf.getInt("hbase.client.retries.number", 2);
-    this.threadWakeFrequency = conf.getInt(THREAD_WAKE_FREQUENCY, 10 * 1000);
-    this.msgInterval = conf.getInt("hbase.regionserver.msginterval", 3 * 1000);
-    this.serverLeaseTimeout =
-      conf.getInt("hbase.master.lease.period", 30 * 1000);
-
-    // Cache flushing thread.
-    this.cacheFlusher = new Flusher();
-    
-    // Compaction thread
-    this.compactSplitThread = new CompactSplitThread();
-    
-    // Log rolling thread
-    this.logRoller = new LogRoller();
-
-    // Task thread to process requests from Master
-    this.worker = new Worker();
-    this.workerThread = new Thread(worker);
-    this.sleeper = new Sleeper(this.msgInterval, this.stopRequested);
-    // Server to handle client requests
-    this.server = HbaseRPC.getServer(this, address.getBindAddress(), 
-      address.getPort(), conf.getInt("hbase.regionserver.handler.count", 10),
-      false, conf);
-    this.serverInfo = new HServerInfo(new HServerAddress(
-      new InetSocketAddress(getThisIP(),
-      this.server.getListenerAddress().getPort())), System.currentTimeMillis(),
-      this.conf.getInt("hbase.regionserver.info.port", 60030));
-     this.leases = new Leases(
-       conf.getInt("hbase.regionserver.lease.period", 3 * 60 * 1000),
-       this.threadWakeFrequency);
-     
-     // Register shutdown hook for HRegionServer, runs an orderly shutdown
-     // when a kill signal is received
-     Runtime.getRuntime().addShutdownHook(new ShutdownThread(this));
-  }
-
-  /**
-   * The HRegionServer sticks in this loop until closed. It repeatedly checks
-   * in with the HMaster, sending heartbeats & reports, and receiving HRegion 
-   * load/unload instructions.
-   */
-  public void run() {
-    boolean quiesceRequested = false;
-    try {
-      init(reportForDuty());
-      long lastMsg = 0;
-      while(!stopRequested.get()) {
-        // Now ask master what it wants us to do and tell it what we have done
-        for (int tries = 0; !stopRequested.get();) {
-          long now = System.currentTimeMillis();
-          if (lastMsg != 0 && (now - lastMsg) >= serverLeaseTimeout) {
-            // It has been way too long since we last reported to the master.
-            // Commit suicide.
-            LOG.fatal("unable to report to master for " + (now - lastMsg) +
-                " milliseconds - aborting server");
-            abort();
-            break;
-          }
-          if ((now - lastMsg) >= msgInterval) {
-            HMsg outboundArray[] = null;
-            synchronized(outboundMsgs) {
-              outboundArray =
-                this.outboundMsgs.toArray(new HMsg[outboundMsgs.size()]);
-            }
-            this.outboundMsgs.clear();
-
-            try {
-              this.serverInfo.setLoad(new HServerLoad(requestCount.get(),
-                  onlineRegions.size()));
-              this.requestCount.set(0);
-              HMsg msgs[] =
-                this.hbaseMaster.regionServerReport(serverInfo, outboundArray);
-              lastMsg = System.currentTimeMillis();
-              
-              if (this.quiesced.get() && onlineRegions.size() == 0) {
-                // We've just told the master we're exiting because we aren't
-                // serving any regions. So set the stop bit and exit.
-                LOG.info("Server quiesced and not serving any regions. " +
-                    "Starting shutdown");
-                stopRequested.set(true);
-                continue;
-              }
-              
-              // Queue up the HMaster's instruction stream for processing
-              boolean restart = false;
-              for(int i = 0; i < msgs.length && !stopRequested.get() &&
-                  !restart; i++) {
-                switch(msgs[i].getMsg()) {
-                
-                case HMsg.MSG_CALL_SERVER_STARTUP:
-                  LOG.info("Got call server startup message");
-                  // We get MSG_CALL_SERVER_STARTUP on startup, but we can also
-                  // get it when the master is panicking because, for instance,
-                  // the HDFS has been yanked out from under it.  Be wary of
-                  // this message.
-                  if (checkFileSystem()) {
-                    closeAllRegions();
-                    synchronized (logRollerLock) {
-                      try {
-                        log.closeAndDelete();
-
-                      } catch (Exception e) {
-                        LOG.error("error closing and deleting HLog", e);
-                      }
-                      try {
-                        serverInfo.setStartCode(System.currentTimeMillis());
-                        log = setupHLog();
-                      } catch (IOException e) {
-                        this.abortRequested = true;
-                        this.stopRequested.set(true);
-                        e = RemoteExceptionHandler.checkIOException(e); 
-                        LOG.fatal("error restarting server", e);
-                        break;
-                      }
-                    }
-                    reportForDuty();
-                    restart = true;
-                  } else {
-                    LOG.fatal("file system available check failed. " +
-                        "Shutting down server.");
-                  }
-                  break;
-
-                case HMsg.MSG_REGIONSERVER_STOP:
-                  LOG.info("Got regionserver stop message");
-                  stopRequested.set(true);
-                  break;
-                  
-                case HMsg.MSG_REGIONSERVER_QUIESCE:
-                  if (!quiesceRequested) {
-                    LOG.info("Got quiesce server message");
-                    try {
-                      toDo.put(new ToDoEntry(msgs[i]));
-                    } catch (InterruptedException e) {
-                      throw new RuntimeException("Putting into msgQueue was " +
-                        "interrupted.", e);
-                    }
-                    quiesceRequested = true;
-                  }
-                  break;
-
-                default:
-                  if (fsOk) {
-                    try {
-                      toDo.put(new ToDoEntry(msgs[i]));
-                    } catch (InterruptedException e) {
-                      throw new RuntimeException("Putting into msgQueue was " +
-                        "interrupted.", e);
-                    }
-                    if (msgs[i].getMsg() == HMsg.MSG_REGION_OPEN) {
-                      outboundMsgs.add(new HMsg(HMsg.MSG_REPORT_PROCESS_OPEN,
-                          msgs[i].getRegionInfo()));
-                    }
-                  }
-                }
-              }
-              if (restart || this.stopRequested.get()) {
-                toDo.clear();
-                break;
-              }
-              // Reset tries count if we had a successful transaction.
-              tries = 0;
-            } catch (Exception e) {
-              if (e instanceof IOException) {
-                e = RemoteExceptionHandler.checkIOException((IOException) e);
-              }
-              if(tries < this.numRetries) {
-                LOG.warn("Processing message (Retry: " + tries + ")", e);
-                tries++;
-              } else {
-                LOG.fatal("Exceeded max retries: " + this.numRetries, e);
-                if (!checkFileSystem()) {
-                  continue;
-                }
-                // Something seriously wrong. Shutdown.
-                stop();
-              }
-            }
-          }
-          this.sleeper.sleep(lastMsg);
-        } // for
-      } // while (!stopRequested.get())
-    } catch (Throwable t) {
-      LOG.fatal("Unhandled exception. Aborting...", t);
-      abort();
-    }
-    this.leases.closeAfterLeasesExpire();
-    this.worker.stop();
-    this.server.stop();
-    if (this.infoServer != null) {
-      LOG.info("Stopping infoServer");
-      try {
-        this.infoServer.stop();
-      } catch (InterruptedException ex) {
-        ex.printStackTrace();
-      }
-    }
-
-    // Send interrupts to wake up threads if sleeping so they notice shutdown.
-    // TODO: Should we check they are alive?  If an OOME occurred they could have exited already
-    synchronized(cacheFlusherLock) {
-      this.cacheFlusher.interrupt();
-    }
-    synchronized (compactSplitLock) {
-      this.compactSplitThread.interrupt();
-    }
-    synchronized (logRollerLock) {
-      this.logRoller.interrupt();
-    }
-
-    if (abortRequested) {
-      if (this.fsOk) {
-        // Only try to clean up if the file system is available
-        try {
-          this.log.close();
-          LOG.info("On abort, closed hlog");
-        } catch (IOException e) {
-          LOG.error("Unable to close log in abort",
-              RemoteExceptionHandler.checkIOException(e));
-        }
-        closeAllRegions(); // Don't leave any open file handles
-      }
-      LOG.info("aborting server at: " +
-        serverInfo.getServerAddress().toString());
-    } else {
-      ArrayList<HRegion> closedRegions = closeAllRegions();
-      try {
-        log.closeAndDelete();
-      } catch (IOException e) {
-        LOG.error("Close and delete failed",
-            RemoteExceptionHandler.checkIOException(e));
-      }
-      try {
-        HMsg[] exitMsg = new HMsg[closedRegions.size() + 1];
-        exitMsg[0] = new HMsg(HMsg.MSG_REPORT_EXITING);
-        // Tell the master what regions we are/were serving
-        int i = 1;
-        for(HRegion region: closedRegions) {
-          exitMsg[i++] = new HMsg(HMsg.MSG_REPORT_CLOSE,
-              region.getRegionInfo());
-        }
-
-        LOG.info("telling master that region server is shutting down at: " +
-            serverInfo.getServerAddress().toString());
-        hbaseMaster.regionServerReport(serverInfo, exitMsg);
-      } catch (IOException e) {
-        LOG.warn("Failed to send exiting message to master: ",
-            RemoteExceptionHandler.checkIOException(e));
-      }
-      LOG.info("stopping server at: " +
-        serverInfo.getServerAddress().toString());
-    }
-
-    join();
-    LOG.info(Thread.currentThread().getName() + " exiting");
-  }
-  
-  /*
-   * Run init. Sets up hlog and starts up all server threads.
-   * @param c Extra configuration.
-   */
-  private void init(final HbaseMapWritable c) throws IOException {
-    try {
-      for (Map.Entry<Writable, Writable> e: c.entrySet()) {
-        String key = e.getKey().toString();
-        String value = e.getValue().toString();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Config from master: " + key + "=" + value);
-        }
-        this.conf.set(key, value);
-      }
-      this.fs = FileSystem.get(this.conf);
-      this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
-      this.log = setupHLog();
-      startServiceThreads();
-    } catch (IOException e) {
-      this.stopRequested.set(true);
-      e = RemoteExceptionHandler.checkIOException(e); 
-      LOG.fatal("Failed init", e);
-      IOException ex = new IOException("region server startup failed");
-      ex.initCause(e);
-      throw ex;
-    }
-  }
-  
-  private HLog setupHLog() throws RegionServerRunningException,
-    IOException {
-    
-    Path logdir = new Path(rootDir, "log" + "_" + getThisIP() + "_" +
-        this.serverInfo.getStartCode() + "_" + 
-        this.serverInfo.getServerAddress().getPort());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Log dir " + logdir);
-    }
-    if (fs.exists(logdir)) {
-      throw new RegionServerRunningException("region server already " +
-        "running at " + this.serverInfo.getServerAddress().toString() +
-        " because logdir " + logdir.toString() + " exists");
-    }
-    return new HLog(fs, logdir, conf, logRoller);
-  }
-  
-  /*
-   * Start Chore Threads, Server, Worker and lease checker threads. Install an
-   * UncaughtExceptionHandler that calls abort of RegionServer if we get
-   * an unhandled exception.  We cannot set the handler on all threads.
-   * Server's internal Listener thread is off limits.  For Server, if an OOME,
-   * it waits a while then retries.  Meantime, a flush or a compaction that
-   * tries to run should trigger same critical condition and the shutdown will
-   * run.  On its way out, this server will shut down Server.  Leases are sort
-   * of in between: it has an internal thread that, while it inherits from
-   * Chore, keeps its own internal stop mechanism and so needs to be stopped
-   * by this hosting server.  Worker logs the exception and exits.
-   */
-  private void startServiceThreads() throws IOException {
-    String n = Thread.currentThread().getName();
-    UncaughtExceptionHandler handler = new UncaughtExceptionHandler() {
-      public void uncaughtException(Thread t, Throwable e) {
-        abort();
-        LOG.fatal("Set stop flag in " + t.getName(), e);
-      }
-    };
-    Threads.setDaemonThreadRunning(this.logRoller, n + ".logRoller",
-        handler);
-    Threads.setDaemonThreadRunning(this.cacheFlusher, n + ".cacheFlusher",
-      handler);
-    Threads.setDaemonThreadRunning(this.compactSplitThread, n + ".compactor",
-        handler);
-    Threads.setDaemonThreadRunning(this.workerThread, n + ".worker", handler);
-    // Leases is not a Thread. Internally it runs a daemon thread.  If it gets
-    // an unhandled exception, it will just exit.
-    this.leases.setName(n + ".leaseChecker");
-    this.leases.start();
-    // Put up info server.
-    int port = this.conf.getInt("hbase.regionserver.info.port", 60030);
-    if (port >= 0) {
-      String a = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0");
-      this.infoServer = new InfoServer("regionserver", a, port, false);
-      this.infoServer.setAttribute("regionserver", this);
-      this.infoServer.start();
-    }
-    // Start Server.  This service is like leases in that it internally runs
-    // a thread.
-    this.server.start();
-    LOG.info("HRegionServer started at: " +
-        serverInfo.getServerAddress().toString());
-  }
-
-  /** @return the HLog */
-  HLog getLog() {
-    return this.log;
-  }
-
-  /*
-   * Use interface to get the 'real' IP for this host. 'serverInfo' is sent to
-   * master.  Should have the real IP of this host rather than 'localhost' or
-   * 0.0.0.0 or 127.0.0.1 in it.
-   * @return This server's IP.
-   */
-  private String getThisIP() throws UnknownHostException {
-    return DNS.getDefaultIP(conf.get("dfs.datanode.dns.interface","default"));
-  }
-
-  /**
-   * Sets a flag that will cause all the HRegionServer threads to shut down
-   * in an orderly fashion.  Used by unit tests and called by {@link Flusher}
-   * if it judges server needs to be restarted.
-   */
-  synchronized void stop() {
-    this.stopRequested.set(true);
-    notifyAll();                        // Wakes run() if it is sleeping
-  }
-  
-  /**
-   * Cause the server to exit without closing the regions it is serving or the
-   * log it is using, and without notifying the master.
-   * Used in unit testing and on catastrophic events such as HDFS being yanked
-   * out from under hbase or an OOME.
-   */
-  synchronized void abort() {
-    this.abortRequested = true;
-    stop();
-  }
-
-  /** 
-   * Wait on all threads to finish.
-   * Presumption is that all closes and stops have already been called.
-   */
-  void join() {
-    join(this.workerThread);
-    join(this.cacheFlusher);
-    join(this.compactSplitThread);
-    join(this.logRoller);
-  }
-
-  private void join(final Thread t) {
-    while (t.isAlive()) {
-      try {
-        t.join();
-      } catch (InterruptedException e) {
-        // continue
-      }
-    }
-  }
-  
-  /*
-   * Let the master know we're here
-   * Run initialization using parameters passed us by the master.
-   */
-  private HbaseMapWritable reportForDuty() throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Telling master at " +
-        conf.get(MASTER_ADDRESS) + " that we are up");
-    }
-    // Do initial RPC setup.
-    this.hbaseMaster = (HMasterRegionInterface)HbaseRPC.waitForProxy(
-      HMasterRegionInterface.class, HMasterRegionInterface.versionID,
-      new HServerAddress(conf.get(MASTER_ADDRESS)).getInetSocketAddress(),
-      this.conf);
-    HbaseMapWritable result = null;
-    long lastMsg = 0;
-    while(!stopRequested.get()) {
-      try {
-        this.requestCount.set(0);
-        this.serverInfo.setLoad(new HServerLoad(0, onlineRegions.size()));
-        result = this.hbaseMaster.regionServerStartup(serverInfo);
-        lastMsg = System.currentTimeMillis();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Done telling master we are up");
-        }
-        break;
-      } catch(IOException e) {
-        LOG.warn("error telling master we are up", e);
-        this.sleeper.sleep(lastMsg);
-        continue;
-      }
-    }
-    return result;
-  }
-
-  /** Add to the outbound message buffer */
-  private void reportOpen(HRegionInfo region) {
-    outboundMsgs.add(new HMsg(HMsg.MSG_REPORT_OPEN, region));
-  }
-
-  /** Add to the outbound message buffer */
-  private void reportClose(HRegionInfo region) {
-    outboundMsgs.add(new HMsg(HMsg.MSG_REPORT_CLOSE, region));
-  }
-  
-  /**
-   * Add to the outbound message buffer
-   * 
-   * When a region splits, we need to tell the master that there are two new 
-   * regions that need to be assigned.
-   * 
-   * We do not need to inform the master about the old region, because we've
-   * updated the meta or root regions, and the master will pick that up on its
-   * next rescan of the root or meta tables.
-   */
-  void reportSplit(HRegionInfo oldRegion, HRegionInfo newRegionA,
-      HRegionInfo newRegionB) {
-
-    outboundMsgs.add(new HMsg(HMsg.MSG_REPORT_SPLIT, oldRegion));
-    outboundMsgs.add(new HMsg(HMsg.MSG_REPORT_OPEN, newRegionA));
-    outboundMsgs.add(new HMsg(HMsg.MSG_REPORT_OPEN, newRegionB));
-  }
-
-  //////////////////////////////////////////////////////////////////////////////
-  // HMaster-given operations
-  //////////////////////////////////////////////////////////////////////////////
-
-  private static class ToDoEntry {
-    int tries;
-    HMsg msg;
-    ToDoEntry(HMsg msg) {
-      this.tries = 0;
-      this.msg = msg;
-    }
-  }
-  BlockingQueue<ToDoEntry> toDo = new LinkedBlockingQueue<ToDoEntry>();
-  private Worker worker;
-  private Thread workerThread;
-  
-  /** Thread that performs long running requests from the master */
-  class Worker implements Runnable {
-    void stop() {
-      synchronized(toDo) {
-        toDo.notifyAll();
-      }
-    }
-    
-    /** {@inheritDoc} */
-    public void run() {
-      try {
-        while(!stopRequested.get()) {
-          ToDoEntry e = null;
-          try {
-            e = toDo.poll(threadWakeFrequency, TimeUnit.MILLISECONDS);
-            if(e == null || stopRequested.get()) {
-              continue;
-            }
-            LOG.info(e.msg.toString());
-            switch(e.msg.getMsg()) {
-
-            case HMsg.MSG_REGIONSERVER_QUIESCE:
-              closeUserRegions();
-              break;
-
-            case HMsg.MSG_REGION_OPEN:
-              // Open a region
-              openRegion(e.msg.getRegionInfo());
-              break;
-
-            case HMsg.MSG_REGION_CLOSE:
-              // Close a region
-              closeRegion(e.msg.getRegionInfo(), true);
-              break;
-
-            case HMsg.MSG_REGION_CLOSE_WITHOUT_REPORT:
-              // Close a region, don't reply
-              closeRegion(e.msg.getRegionInfo(), false);
-              break;
-
-            default:
-              throw new AssertionError(
-                  "Impossible state during msg processing.  Instruction: "
-                  + e.msg.toString());
-            }
-          } catch (InterruptedException ex) {
-            // continue
-          } catch (Exception ex) {
-            if (ex instanceof IOException) {
-              ex = RemoteExceptionHandler.checkIOException((IOException) ex);
-            }
-            if(e != null && e.tries < numRetries) {
-              LOG.warn(ex);
-              e.tries++;
-              try {
-                toDo.put(e);
-              } catch (InterruptedException ie) {
-                throw new RuntimeException("Putting into msgQueue was " +
-                    "interrupted.", ex);
-              }
-            } else {
-              LOG.error("unable to process message" +
-                  (e != null ? (": " + e.msg.toString()) : ""), ex);
-              if (!checkFileSystem()) {
-                break;
-              }
-            }
-          }
-        }
-      } catch(Throwable t) {
-        LOG.fatal("Unhandled exception", t);
-      } finally {
-        LOG.info("worker thread exiting");
-      }
-    }
-  }
-  
-  void openRegion(final HRegionInfo regionInfo) {
-    HRegion region = onlineRegions.get(regionInfo.getRegionName());
-    if(region == null) {
-      try {
-        region = new HRegion(
-            HTableDescriptor.getTableDir(rootDir,
-                regionInfo.getTableDesc().getName()
-            ),
-            this.log, this.fs, conf, regionInfo, null, this.cacheFlusher
-        );
-        // Startup a compaction early if one is needed.
-        this.compactSplitThread.compactionRequested(region);
-      } catch (IOException e) {
-        LOG.error("error opening region " + regionInfo.getRegionName(), e);
-        
-        // Mark the region offline.
-        // TODO: add an extra field in HRegionInfo to indicate that there is
-        // an error. We can't do that now because that would be an incompatible
-        // change that would require a migration
-        
-        regionInfo.setOffline(true);
-        reportClose(regionInfo);
-        return;
-      }
-      this.lock.writeLock().lock();
-      try {
-        this.log.setSequenceNumber(region.getMinSequenceId());
-        this.onlineRegions.put(region.getRegionName(), region);
-      } finally {
-        this.lock.writeLock().unlock();
-      }
-      reportOpen(regionInfo); 
-    }
-  }
-
-  void closeRegion(final HRegionInfo hri, final boolean reportWhenCompleted)
-  throws IOException {  
-    this.lock.writeLock().lock();
-    HRegion region = null;
-    try {
-      region = onlineRegions.remove(hri.getRegionName());
-    } finally {
-      this.lock.writeLock().unlock();
-    }
-      
-    if(region != null) {
-      region.close();
-      if(reportWhenCompleted) {
-        reportClose(hri);
-      }
-    }
-  }
-
-  /** Called either when the master tells us to restart or from stop() */
-  ArrayList<HRegion> closeAllRegions() {
-    ArrayList<HRegion> regionsToClose = new ArrayList<HRegion>();
-    this.lock.writeLock().lock();
-    try {
-      regionsToClose.addAll(onlineRegions.values());
-      onlineRegions.clear();
-    } finally {
-      this.lock.writeLock().unlock();
-    }
-    for(HRegion region: regionsToClose) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("closing region " + region.getRegionName());
-      }
-      try {
-        region.close(abortRequested, null);
-      } catch (IOException e) {
-        LOG.error("error closing region " + region.getRegionName(),
-          RemoteExceptionHandler.checkIOException(e));
-      }
-    }
-    return regionsToClose;
-  }
-
-  /** Called as the first stage of cluster shutdown. */
-  void closeUserRegions() {
-    ArrayList<HRegion> regionsToClose = new ArrayList<HRegion>();
-    this.lock.writeLock().lock();
-    try {
-      synchronized (onlineRegions) {
-        for (Iterator<Map.Entry<Text, HRegion>> i =
-          onlineRegions.entrySet().iterator();
-        i.hasNext();) {
-          Map.Entry<Text, HRegion> e = i.next();
-          HRegion r = e.getValue();
-          if (!r.getRegionInfo().isMetaRegion()) {
-            regionsToClose.add(r);
-            i.remove();
-          }
-        }
-      }
-    } finally {
-      this.lock.writeLock().unlock();
-    }
-    for(HRegion region: regionsToClose) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("closing region " + region.getRegionName());
-      }
-      try {
-        region.close();
-      } catch (IOException e) {
-        LOG.error("error closing region " + region.getRegionName(),
-          RemoteExceptionHandler.checkIOException(e));
-      }
-    }
-    this.quiesced.set(true);
-    if (onlineRegions.size() == 0) {
-      outboundMsgs.add(new HMsg(HMsg.MSG_REPORT_EXITING));
-    } else {
-      outboundMsgs.add(new HMsg(HMsg.MSG_REPORT_QUIESCED));
-    }
-  }
-
-  //
-  // HRegionInterface
-  //
-
-  /** {@inheritDoc} */
-  public HRegionInfo getRegionInfo(final Text regionName)
-    throws NotServingRegionException {
-    
-    requestCount.incrementAndGet();
-    return getRegion(regionName).getRegionInfo();
-  }
-
-  /** {@inheritDoc} */
-  public byte [] get(final Text regionName, final Text row,
-      final Text column) throws IOException {
-
-    checkOpen();
-    requestCount.incrementAndGet();
-    try {
-      return getRegion(regionName).get(row, column);
-      
-    } catch (IOException e) {
-      checkFileSystem();
-      throw e;
-    }
-  }
-
-  /** {@inheritDoc} */
-  public byte [][] get(final Text regionName, final Text row,
-      final Text column, final int numVersions) throws IOException {
-
-    checkOpen();
-    requestCount.incrementAndGet();
-    try {
-      return getRegion(regionName).get(row, column, numVersions);
-      
-    } catch (IOException e) {
-      checkFileSystem();
-      throw e;
-    }
-  }
-
-  /** {@inheritDoc} */
-  public byte [][] get(final Text regionName, final Text row, final Text column, 
-      final long timestamp, final int numVersions) throws IOException {
-
-    checkOpen();
-    requestCount.incrementAndGet();
-    try {
-      return getRegion(regionName).get(row, column, timestamp, numVersions);
-      
-    } catch (IOException e) {
-      checkFileSystem();
-      throw e;
-    }
-  }
-
-  /** {@inheritDoc} */
-  public HbaseMapWritable getRow(final Text regionName, final Text row)
-    throws IOException {
-    return getRow(regionName, row, HConstants.LATEST_TIMESTAMP);
-  }
-
-  /** {@inheritDoc} */
-  public HbaseMapWritable getRow(final Text regionName, final Text row, final long ts)
-    throws IOException {
-
-    checkOpen();
-    requestCount.incrementAndGet();
-    try {
-      HRegion region = getRegion(regionName);
-      HbaseMapWritable result = new HbaseMapWritable();
-      Map<Text, byte[]> map = region.getFull(row, ts);
-      for (Map.Entry<Text, byte []> es: map.entrySet()) {
-        result.put(new HStoreKey(row, es.getKey()),
-            new ImmutableBytesWritable(es.getValue()));
-      }
-      return result;
-      
-    } catch (IOException e) {
-      checkFileSystem();
-      throw e;
-    }
-  }
-
-  /** {@inheritDoc} */
-  public HbaseMapWritable getClosestRowBefore(final Text regionName, 
-    final Text row)
-  throws IOException {
-    return getClosestRowBefore(regionName, row, HConstants.LATEST_TIMESTAMP);
-  }
-
-  /** {@inheritDoc} */
-  public HbaseMapWritable getClosestRowBefore(final Text regionName, 
-    final Text row, final long ts)
-  throws IOException {
-
-    checkOpen();
-    requestCount.incrementAndGet();
-    try {
-      // locate the region we're operating on
-      HRegion region = getRegion(regionName);
-      HbaseMapWritable result = new HbaseMapWritable();
-      // ask the region for all the data 
-      Map<Text, byte[]> map = region.getClosestRowBefore(row, ts);
-      // convert to a MapWritable
-      if (map == null) {
-        return null;
-      }
-      for (Map.Entry<Text, byte []> es: map.entrySet()) {
-        result.put(new HStoreKey(row, es.getKey()),
-            new ImmutableBytesWritable(es.getValue()));
-      }
-      return result;
-      
-    } catch (IOException e) {
-      checkFileSystem();
-      throw e;
-    }
-  }
-
-  /** {@inheritDoc} */
-  public HbaseMapWritable next(final long scannerId) throws IOException {
-
-    checkOpen();
-    requestCount.incrementAndGet();
-    try {
-      String scannerName = String.valueOf(scannerId);
-      HScannerInterface s = scanners.get(scannerName);
-      if (s == null) {
-        throw new UnknownScannerException("Name: " + scannerName);
-      }
-      this.leases.renewLease(scannerId, scannerId);
-
-      // Collect values to be returned here
-      HbaseMapWritable values = new HbaseMapWritable();
-      HStoreKey key = new HStoreKey();
-      TreeMap<Text, byte []> results = new TreeMap<Text, byte []>();
-      while (s.next(key, results)) {
-        for(Map.Entry<Text, byte []> e: results.entrySet()) {
-          values.put(new HStoreKey(key.getRow(), e.getKey(), key.getTimestamp()),
-            new ImmutableBytesWritable(e.getValue()));
-        }
-
-        if(values.size() > 0) {
-          // Row has something in it. Return the value.
-          break;
-        }
-
-        // No data for this row, go get another.
-        results.clear();
-      }
-      return values;
-      
-    } catch (IOException e) {
-      checkFileSystem();
-      throw e;
-    }
-  }
-
-  /** {@inheritDoc} */
-  public void batchUpdate(Text regionName, long timestamp, BatchUpdate b)
-    throws IOException {
-    checkOpen();
-    this.requestCount.incrementAndGet();
-    HRegion region = getRegion(regionName);
-    try {
-      region.batchUpdate(timestamp, b);
-    } catch (IOException e) {
-      checkFileSystem();
-      throw e;
-    }
-  }
-  
-  //
-  // remote scanner interface
-  //
-
-  /** {@inheritDoc} */
-  public long openScanner(Text regionName, Text[] cols, Text firstRow,
-      final long timestamp, final RowFilterInterface filter)
-    throws IOException {
-    checkOpen();
-    requestCount.incrementAndGet();
-    try {
-      HRegion r = getRegion(regionName);
-      long scannerId = -1L;
-      HScannerInterface s =
-        r.getScanner(cols, firstRow, timestamp, filter);
-      scannerId = rand.nextLong();
-      String scannerName = String.valueOf(scannerId);
-      synchronized(scanners) {
-        scanners.put(scannerName, s);
-      }
-      this.leases.
-        createLease(scannerId, scannerId, new ScannerListener(scannerName));
-      return scannerId;
-    } catch (IOException e) {
-      LOG.error("Error opening scanner (fsOk: " + this.fsOk + ")",
-          RemoteExceptionHandler.checkIOException(e));
-      checkFileSystem();
-      throw e;
-    }
-  }
-  
-  /** {@inheritDoc} */
-  public void close(final long scannerId) throws IOException {
-    checkOpen();
-    requestCount.incrementAndGet();
-    try {
-      String scannerName = String.valueOf(scannerId);
-      HScannerInterface s = null;
-      synchronized(scanners) {
-        s = scanners.remove(scannerName);
-      }
-      if(s == null) {
-        throw new UnknownScannerException(scannerName);
-      }
-      s.close();
-      this.leases.cancelLease(scannerId, scannerId);
-    } catch (IOException e) {
-      checkFileSystem();
-      throw e;
-    }
-  }
-
-  Map<String, HScannerInterface> scanners =
-    Collections.synchronizedMap(new HashMap<String, HScannerInterface>());
-
-  /** 
-   * Instantiated as a scanner lease.
-   * If the lease times out, the scanner is closed
-   */
-  private class ScannerListener implements LeaseListener {
-    private final String scannerName;
-    
-    ScannerListener(final String n) {
-      this.scannerName = n;
-    }
-    
-    /** {@inheritDoc} */
-    public void leaseExpired() {
-      LOG.info("Scanner " + this.scannerName + " lease expired");
-      HScannerInterface s = null;
-      synchronized(scanners) {
-        s = scanners.remove(this.scannerName);
-      }
-      if (s != null) {
-        try {
-          s.close();
-        } catch (IOException e) {
-          LOG.error("Closing scanner", e);
-        }
-      }
-    }
-  }
-  
-  //
-  // Methods that do the actual work for the remote API
-  //
-  
-  /** {@inheritDoc} */
-  public void deleteAll(final Text regionName, final Text row,
-      final Text column, final long timestamp) 
-  throws IOException {
-    HRegion region = getRegion(regionName);
-    region.deleteAll(row, column, timestamp);
-  }
-
-  /** {@inheritDoc} */
-  public void deleteAll(final Text regionName, final Text row,
-      final long timestamp) 
-  throws IOException {
-    HRegion region = getRegion(regionName);
-    region.deleteAll(row, timestamp);
-  }
-
-  /** {@inheritDoc} */
-  public void deleteFamily(Text regionName, Text row, Text family, 
-    long timestamp) throws IOException{
-    getRegion(regionName).deleteFamily(row, family, timestamp);
-  }
-
-
-  /**
-   * @return Info on this server.
-   */
-  public HServerInfo getServerInfo() {
-    return this.serverInfo;
-  }
-
-  /**
-   * @return Immutable list of this servers regions.
-   */
-  public SortedMap<Text, HRegion> getOnlineRegions() {
-    return Collections.unmodifiableSortedMap(this.onlineRegions);
-  }
-
-  /** @return the request count */
-  public AtomicInteger getRequestCount() {
-    return this.requestCount;
-  }
-
-  /** @return reference to CacheFlushListener */
-  public CacheFlushListener getCacheFlushListener() {
-    return this.cacheFlusher;
-  }
-  
-  /** 
-   * Protected utility method for safely obtaining an HRegion handle.
-   * @param regionName Name of online {@link HRegion} to return
-   * @return {@link HRegion} for <code>regionName</code>
-   * @throws NotServingRegionException
-   */
-  protected HRegion getRegion(final Text regionName)
-  throws NotServingRegionException {
-    return getRegion(regionName, false);
-  }
-  
-  /** 
-   * Protected utility method for safely obtaining an HRegion handle.
-   * @param regionName Name of online {@link HRegion} to return
-   * @param checkRetiringRegions Set true if we're to check retiring regions
-   * as well as online regions.
-   * @return {@link HRegion} for <code>regionName</code>
-   * @throws NotServingRegionException
-   */
-  protected HRegion getRegion(final Text regionName,
-      final boolean checkRetiringRegions)
-  throws NotServingRegionException {
-    HRegion region = null;
-    this.lock.readLock().lock();
-    try {
-      region = onlineRegions.get(regionName);
-      if (region == null && checkRetiringRegions) {
-        region = this.retiringRegions.get(regionName);
-        if (LOG.isDebugEnabled()) {
-          if (region != null) {
-            LOG.debug("Found region " + regionName + " in retiringRegions");
-          }
-        }
-      }
-
-      if (region == null) {
-        throw new NotServingRegionException(regionName.toString());
-      }
-      
-      return region;
-    } finally {
-      this.lock.readLock().unlock();
-    }
-  }
-
-  /**
-   * Called to verify that this server is up and running.
-   * 
-   * @throws IOException
-   */
-  private void checkOpen() throws IOException {
-    if (this.stopRequested.get() || this.abortRequested) {
-      throw new IOException("Server not running");
-    }
-    if (!fsOk) {
-      throw new IOException("File system not available");
-    }
-  }
-  
-  /**
-   * Checks to see if the file system is still accessible.
-   * If not, sets abortRequested and stopRequested
-   * 
-   * @return false if file system is not available
-   */
-  protected boolean checkFileSystem() {
-    if (this.fsOk) {
-      try {
-        if (fs != null && !FSUtils.isFileSystemAvailable(fs)) {
-          LOG.fatal("Shutting down HRegionServer: file system not available");
-          this.abortRequested = true;
-          this.stopRequested.set(true);
-          fsOk = false;
-        }
-      } catch (Exception e) {
-        LOG.error("Failed get of filesystem", e);
-        LOG.fatal("Shutting down HRegionServer: file system not available");
-        this.abortRequested = true;
-        this.stopRequested.set(true);
-        fsOk = false;
-      }
-    }
-    return this.fsOk;
-  }
- 
-  /**
-   * @return Returns list of non-closed regions hosted on this server.  If no
-   * regions to check, returns an empty list.
-   */
-  protected Set<HRegion> getRegionsToCheck() {
-    HashSet<HRegion> regionsToCheck = new HashSet<HRegion>();
-    //TODO: is this locking necessary? 
-    lock.readLock().lock();
-    try {
-      regionsToCheck.addAll(this.onlineRegions.values());
-    } finally {
-      lock.readLock().unlock();
-    }
-    // Purge closed regions.
-    for (final Iterator<HRegion> i = regionsToCheck.iterator(); i.hasNext();) {
-      HRegion r = i.next();
-      if (r.isClosed()) {
-        i.remove();
-      }
-    }
-    return regionsToCheck;
-  }
-
-  /** {@inheritDoc} */
-  public long getProtocolVersion(final String protocol, 
-      @SuppressWarnings("unused") final long clientVersion)
-  throws IOException {  
-    if (protocol.equals(HRegionInterface.class.getName())) {
-      return HRegionInterface.versionID;
-    }
-    throw new IOException("Unknown protocol to region server: " + protocol);
-  }
-
-  //
-  // Main program and support routines
-  //
-  
-  private static void printUsageAndExit() {
-    printUsageAndExit(null);
-  }
-  
-  private static void printUsageAndExit(final String message) {
-    if (message != null) {
-      System.err.println(message);
-    }
-    System.err.println("Usage: java " +
-        "org.apache.hbase.HRegionServer [--bind=hostname:port] start");
-    System.exit(0);
-  }
-  
-  /**
-   * Do class main.
-   * @param args
-   * @param regionServerClass HRegionServer to instantiate.
-   */
-  protected static void doMain(final String [] args,
-      final Class<? extends HRegionServer> regionServerClass) {
-    if (args.length < 1) {
-      printUsageAndExit();
-    }
-    Configuration conf = new HBaseConfiguration();
-    
-    // Process command-line args. TODO: Better cmd-line processing
-    // (but hopefully something not as painful as cli options).
-    final String addressArgKey = "--bind=";
-    for (String cmd: args) {
-      if (cmd.startsWith(addressArgKey)) {
-        conf.set(REGIONSERVER_ADDRESS, cmd.substring(addressArgKey.length()));
-        continue;
-      }
-      
-      if (cmd.equals("start")) {
-        try {
-          // If 'local', don't start a region server here.  Defer to
-          // LocalHBaseCluster.  It manages 'local' clusters.
-          if (LocalHBaseCluster.isLocal(conf)) {
-            LOG.warn("Not starting a distinct region server because " +
-              "hbase.master is set to 'local' mode");
-          } else {
-            Constructor<? extends HRegionServer> c =
-              regionServerClass.getConstructor(HBaseConfiguration.class);
-            HRegionServer hrs = c.newInstance(conf);
-            Thread t = new Thread(hrs);
-            t.setName("regionserver" + hrs.server.getListenerAddress());
-            t.start();
-          }
-        } catch (Throwable t) {
-          LOG.error( "Can not start region server because "+
-              StringUtils.stringifyException(t) );
-          System.exit(-1);
-        }
-        break;
-      }
-      
-      if (cmd.equals("stop")) {
-        printUsageAndExit("To shutdown the regionserver run " +
-        		"bin/hbase-daemon.sh stop regionserver or send a kill signal to" +
-        		"the regionserver pid");
-      }
-      
-      // Print out usage if we get to here.
-      printUsageAndExit();
-    }
-  }
-  
-  /**
-   * @param args
-   */
-  public static void main(String [] args) {
-    doMain(args, HRegionServer.class);
-  }
-}
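
For reference, the removed doMain() above recognizes an optional --bind=hostname:port flag followed by a start (or stop) command. A minimal, self-contained sketch of the flag-plus-start handling (BindArgDemo is a hypothetical name, not part of the removed code):

    public final class BindArgDemo {
      // Walks the arguments the same way the removed doMain() did:
      // an optional --bind=hostname:port flag, then the 'start' command.
      public static void main(String[] args) {
        String bindAddress = null;
        for (String cmd : args) {
          if (cmd.startsWith("--bind=")) {
            bindAddress = cmd.substring("--bind=".length());
            continue;
          }
          if (cmd.equals("start")) {
            System.out.println("would start a region server"
                + (bindAddress == null ? "" : " bound to " + bindAddress));
            return;
          }
          break;
        }
        System.err.println("Usage: java BindArgDemo [--bind=hostname:port] start");
      }
    }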

+ 0 - 55
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HScannerInterface.java

@@ -1,55 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.SortedMap;
-
-import org.apache.hadoop.io.Text;
-
-/**
- * HScannerInterface iterates through a set of rows.  It's implemented by
- * several classes.  Implements {@link Iterable} but be sure to still call
- * {@link #close()} when done with your {@link Iterator}
- */
-public interface HScannerInterface extends Closeable,
-Iterable<Map.Entry<HStoreKey, SortedMap<Text, byte []>>> {
-  /**
-   * Grab the next row's worth of values. The scanner will return the most
-   * recent data value for each row that is not newer than the target time
-   * passed when the scanner was created.
-   * @param key will contain the row and timestamp upon return
-   * @param results will contain an entry for each column family member and its
-   * value
-   * @return true if data was returned
-   * @throws IOException
-   */
-  public boolean next(HStoreKey key, SortedMap<Text, byte[]> results)
-  throws IOException;
-  
-  /**
-   * Closes a scanner and releases any resources it has allocated
-   * @throws IOException
-   */
-  public void close() throws IOException;
-}
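
The removed javadoc above stresses that scanner implementations are Iterable but must still be closed explicitly. A hedged usage sketch against this interface (ScannerUsage and countRows are hypothetical names; it assumes the removed HScannerInterface and HStoreKey classes are on the classpath):

    import java.io.IOException;
    import java.util.SortedMap;
    import java.util.TreeMap;

    import org.apache.hadoop.io.Text;

    final class ScannerUsage {
      // Drains a scanner row by row and guarantees close() runs even on error.
      static int countRows(HScannerInterface scanner) throws IOException {
        int rows = 0;
        HStoreKey key = new HStoreKey();
        SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
        try {
          while (scanner.next(key, results)) {
            rows++;
            results.clear();   // reuse the map for the next row's columns
          }
        } finally {
          scanner.close();     // required even though the interface is Iterable
        }
        return rows;
      }
    }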

+ 0 - 179
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java

@@ -1,179 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.hadoop.io.*;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-
-/**
- * HServerAddress is a "label" for an HBase server that combines the host
- * name and port number.
- */
-public class HServerAddress implements WritableComparable {
-  private InetSocketAddress address;
-  String stringValue;
-
-  /** Empty constructor, used for Writable */
-  public HServerAddress() {
-    this.address = null;
-    this.stringValue = null;
-  }
-
-  /**
-   * Construct an HServerAddress from an InetSocketAddress
-   * @param address InetSocketAddress of server
-   */
-  public HServerAddress(InetSocketAddress address) {
-    this.address = address;
-    this.stringValue = address.getAddress().getHostAddress() + ":" +
-      address.getPort();
-  }
-  
-  /**
-   * Construct an HServerAddress from a string of the form hostname:port
-   * 
-   * @param hostAndPort format 'hostname:port'
-   */
-  public HServerAddress(String hostAndPort) {
-    int colonIndex = hostAndPort.lastIndexOf(':');
-    if(colonIndex < 0) {
-      throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort);
-    }
-    String host = hostAndPort.substring(0, colonIndex);
-    int port =
-      Integer.valueOf(hostAndPort.substring(colonIndex + 1)).intValue();
-    this.address = new InetSocketAddress(host, port);
-    this.stringValue = hostAndPort;
-  }
-  
-  /**
-   * Construct an HServerAddress from a hostname and port number
-   * @param bindAddress host name
-   * @param port port number
-   */
-  public HServerAddress(String bindAddress, int port) {
-    this.address = new InetSocketAddress(bindAddress, port);
-    this.stringValue = bindAddress + ":" + port;
-  }
-  
-  /**
-   * Construct an HServerAddress from another HServerAddress
-   * 
-   * @param other the HServerAddress to copy from
-   */
-  public HServerAddress(HServerAddress other) {
-    String bindAddress = other.getBindAddress();
-    int port = other.getPort();
-    address = new InetSocketAddress(bindAddress, port);
-    stringValue = bindAddress + ":" + port;
-  }
-
-  /** @return host name */
-  public String getBindAddress() {
-    return address.getAddress().getHostAddress();
-  }
-
-  /** @return port number */
-  public int getPort() {
-    return address.getPort();
-  }
-
-  /** @return the InetSocketAddress */
-  public InetSocketAddress getInetSocketAddress() {
-    return address;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String toString() {
-    return (stringValue == null ? "" : stringValue);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean equals(Object o) {
-    return this.compareTo(o) == 0;
-  }
-  
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public int hashCode() {
-    int result = this.address.hashCode();
-    result ^= this.stringValue.hashCode();
-    return result;
-  }
-  
-  //
-  // Writable
-  //
-
-  /**
-   * {@inheritDoc}
-   */
-  public void readFields(DataInput in) throws IOException {
-    String bindAddress = in.readUTF();
-    int port = in.readInt();
-    
-    if(bindAddress == null || bindAddress.length() == 0) {
-      address = null;
-      stringValue = null;
-      
-    } else {
-      address = new InetSocketAddress(bindAddress, port);
-      stringValue = bindAddress + ":" + port;
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  public void write(DataOutput out) throws IOException {
-    if(address == null) {
-      out.writeUTF("");
-      out.writeInt(0);
-      
-    } else {
-      out.writeUTF(address.getAddress().getHostAddress());
-      out.writeInt(address.getPort());
-    }
-  }
-  
-  //
-  // Comparable
-  //
-  
-  /**
-   * {@inheritDoc}
-   */
-  public int compareTo(Object o) {
-    HServerAddress other = (HServerAddress) o;
-    return this.toString().compareTo(other.toString());
-  }
-}
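
The host-and-port constructor above splits on the last ':' so that the trailing token is always the port. A standalone sketch of the same parsing rule using only java.net (HostPortDemo is a hypothetical name, not part of the removed code):

    import java.net.InetSocketAddress;

    final class HostPortDemo {
      // Mirrors the removed constructor: everything after the last ':' is the port.
      static InetSocketAddress parse(String hostAndPort) {
        int colonIndex = hostAndPort.lastIndexOf(':');
        if (colonIndex < 0) {
          throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort);
        }
        String host = hostAndPort.substring(0, colonIndex);
        int port = Integer.parseInt(hostAndPort.substring(colonIndex + 1));
        return new InetSocketAddress(host, port);
      }

      public static void main(String[] args) {
        InetSocketAddress a = parse("regionserver.example.org:60020");
        System.out.println(a.getHostName() + " / " + a.getPort());
      }
    }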

+ 0 - 158
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerInfo.java

@@ -1,158 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-
-
-/**
- * HServerInfo contains metainfo about an HRegionServer. Currently it carries
- * the server address, start code, load and info port.
- * 
- * In the future it may also carry information about the source machine and
- * richer load statistics.
- */
-public class HServerInfo implements Writable {
-  private HServerAddress serverAddress;
-  private long startCode;
-  private HServerLoad load;
-  private int infoPort;
-
-  /** default constructor - used by Writable */
-  public HServerInfo() {
-    this(new HServerAddress(), 0, HConstants.DEFAULT_REGIONSERVER_INFOPORT);
-  }
-  
-  /**
-   * Constructor
-   * @param serverAddress
-   * @param startCode
-   * @param infoPort Port the info server is listening on.
-   */
-  public HServerInfo(HServerAddress serverAddress, long startCode,
-      final int infoPort) {
-    this.serverAddress = serverAddress;
-    this.startCode = startCode;
-    this.load = new HServerLoad();
-    this.infoPort = infoPort;
-  }
-  
-  /**
-   * Construct a new object using another as input (like a copy constructor)
-   * @param other
-   */
-  public HServerInfo(HServerInfo other) {
-    this.serverAddress = new HServerAddress(other.getServerAddress());
-    this.startCode = other.getStartCode();
-    this.load = other.getLoad();
-    this.infoPort = other.getInfoPort();
-  }
-  
-  /**
-   * @return the load
-   */
-  public HServerLoad getLoad() {
-    return load;
-  }
-
-  /**
-   * @param load the load to set
-   */
-  public void setLoad(HServerLoad load) {
-    this.load = load;
-  }
-
-  /** @return the server address */
-  public HServerAddress getServerAddress() {
-    return serverAddress;
-  }
- 
-  /** @return the server start code */
-  public long getStartCode() {
-    return startCode;
-  }
-  
-  /**
-   * @return Port the info server is listening on.
-   */
-  public int getInfoPort() {
-    return this.infoPort;
-  }
-  
-  /**
-   * @param startCode the startCode to set
-   */
-  public void setStartCode(long startCode) {
-    this.startCode = startCode;
-  }
-
-  /** {@inheritDoc} */
-  @Override
-  public String toString() {
-    return "address: " + this.serverAddress + ", startcode: " + this.startCode
-    + ", load: (" + this.load.toString() + ")";
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!(obj instanceof HServerInfo)) {
-      return false;
-    }
-    HServerInfo that = (HServerInfo)obj;
-    if (!this.serverAddress.equals(that.serverAddress)) {
-      return false;
-    }
-    if (this.infoPort != that.infoPort) {
-      return false;
-    }
-    if (this.startCode != that.startCode) {
-      return false;
-    }
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = this.serverAddress.hashCode();
-    result ^= this.infoPort;
-    result ^= this.startCode;
-    return result;
-  }
-
-
-  // Writable
-  public void readFields(DataInput in) throws IOException {
-    this.serverAddress.readFields(in);
-    this.startCode = in.readLong();
-    this.load.readFields(in);
-    this.infoPort = in.readInt();
-  }
-
-  public void write(DataOutput out) throws IOException {
-    this.serverAddress.write(out);
-    out.writeLong(this.startCode);
-    this.load.write(out);
-    out.writeInt(this.infoPort);
-  }
-}
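
write() above serializes the address, start code, load and info port in that fixed order, and readFields() must consume them in the same order. A round-trip sketch using Hadoop's DataOutputBuffer and DataInputBuffer (HServerInfoRoundTrip is a hypothetical name; it assumes the removed HServerInfo and HServerAddress classes are available):

    import java.io.IOException;

    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;

    final class HServerInfoRoundTrip {
      public static void main(String[] args) throws IOException {
        HServerInfo original =
            new HServerInfo(new HServerAddress("127.0.0.1", 60020), 42L, 60030);

        DataOutputBuffer out = new DataOutputBuffer();
        original.write(out);                  // address, startCode, load, infoPort

        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());

        HServerInfo copy = new HServerInfo(); // no-arg constructor used by Writable
        copy.readFields(in);

        System.out.println(copy + " equal to original: " + original.equals(copy));
      }
    }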

+ 0 - 136
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerLoad.java

@@ -1,136 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.WritableComparable;
-
-/**
- * This class encapsulates metrics for determining the load on a HRegionServer
- */
-public class HServerLoad implements WritableComparable {
-  private int numberOfRequests;         // number of requests since last report
-  private int numberOfRegions;          // number of regions being served
-  
-  /*
-   * TODO: Other metrics that might be considered when the master is actually
-   * doing load balancing instead of merely trying to decide where to assign
-   * a region:
-   * <ul>
-   *   <li># of CPUs, heap size (to determine the "class" of machine). For
-   *       now, we consider them to be homogeneous.</li>
-   *   <li>#requests per region (Map<{String|HRegionInfo}, Integer>)</li>
-   *   <li>#compactions and/or #splits (churn)</li>
-   *   <li>server death rate (maybe there is something wrong with this server)</li>
-   * </ul>
-   */
-  
-  /** default constructor (used by Writable) */
-  public HServerLoad() {}
-  
-  /**
-   * Constructor
-   * @param numberOfRequests
-   * @param numberOfRegions
-   */
-  public HServerLoad(int numberOfRequests, int numberOfRegions) {
-    this.numberOfRequests = numberOfRequests;
-    this.numberOfRegions = numberOfRegions;
-  }
-  
-  /**
-   * @return load factor for this server
-   */
-  public int getLoad() {
-    int load = numberOfRequests == 0 ? 1 : numberOfRequests;
-    load *= numberOfRegions == 0 ? 1 : numberOfRegions;
-    return load;
-  }
-  
-  /** {@inheritDoc} */
-  @Override
-  public String toString() {
-    return "requests: " + numberOfRequests + " regions: " + numberOfRegions;
-  }
-  
-  /** {@inheritDoc} */
-  @Override
-  public boolean equals(Object o) {
-    return compareTo(o) == 0;
-  }
-  
-  /** {@inheritDoc} */
-  @Override
-  public int hashCode() {
-    int result = Integer.valueOf(numberOfRequests).hashCode();
-    result ^= Integer.valueOf(numberOfRegions).hashCode();
-    return result;
-  }
-  
-  // Getters
-  
-  /**
-   * @return the numberOfRegions
-   */
-  public int getNumberOfRegions() {
-    return numberOfRegions;
-  }
-
-  /**
-   * @return the numberOfRequests
-   */
-  public int getNumberOfRequests() {
-    return numberOfRequests;
-  }
-
-  // Setters
-  
-  /**
-   * @param numberOfRegions the numberOfRegions to set
-   */
-  public void setNumberOfRegions(int numberOfRegions) {
-    this.numberOfRegions = numberOfRegions;
-  }
-
-  // Writable
-
-  /** {@inheritDoc} */
-  public void readFields(DataInput in) throws IOException {
-    numberOfRequests = in.readInt();
-    numberOfRegions = in.readInt();
-  }
-
-  /** {@inheritDoc} */
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(numberOfRequests);
-    out.writeInt(numberOfRegions);
-  }
-  
-  // Comparable
-
-  /** {@inheritDoc} */
-  public int compareTo(Object o) {
-    HServerLoad other = (HServerLoad) o;
-    return this.getLoad() - other.getLoad();
-  }
-}
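
getLoad() above multiplies the request count by the region count, substituting 1 for either value when it is zero so that an idle server still compares sensibly against loaded ones. A quick worked check of that rule (LoadFactorDemo is a hypothetical name; plain Java, no HBase types required):

    final class LoadFactorDemo {
      // Same arithmetic as the removed HServerLoad.getLoad().
      static int load(int requests, int regions) {
        int load = requests == 0 ? 1 : requests;
        load *= regions == 0 ? 1 : regions;
        return load;
      }

      public static void main(String[] args) {
        System.out.println(load(0, 0));    // 1    -- completely idle server
        System.out.println(load(0, 10));   // 10   -- regions assigned, no recent requests
        System.out.println(load(200, 10)); // 2000 -- busy server
      }
    }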

+ 0 - 2532
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java

@@ -1,2532 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.Map.Entry;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.filter.RowFilterInterface;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.io.TextSequence;
-import org.apache.hadoop.io.MapFile;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.util.StringUtils;
-import org.onelab.filter.BloomFilter;
-import org.onelab.filter.CountingBloomFilter;
-import org.onelab.filter.Filter;
-import org.onelab.filter.RetouchedBloomFilter;
-
-/**
- * HStore maintains a bunch of data files.  It is responsible for maintaining 
- * the memory/file hierarchy and for periodic flushes to disk and compacting 
- * edits to the file.
- *
- * Locking and transactions are handled at a higher level.  This API should not 
- * be called directly by any writer, but rather by an HRegion manager.
- */
-public class HStore implements HConstants {
-  static final Log LOG = LogFactory.getLog(HStore.class);
-
-  /**
-   * The Memcache holds in-memory modifications to the HRegion.  This is really a
-   * wrapper around a TreeMap that helps us when staging the Memcache out to disk.
-   */
-  static class Memcache {
-
-    // Note that since these structures are always accessed with a lock held,
-    // no additional synchronization is required.
-
-    @SuppressWarnings("hiding")
-    private final SortedMap<HStoreKey, byte[]> memcache =
-      Collections.synchronizedSortedMap(new TreeMap<HStoreKey, byte []>());
-      
-    volatile SortedMap<HStoreKey, byte[]> snapshot;
-      
-    @SuppressWarnings("hiding")
-    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
-
-    /**
-     * Constructor
-     */
-    Memcache() {
-      snapshot = 
-        Collections.synchronizedSortedMap(new TreeMap<HStoreKey, byte []>());
-    }
-
-    /**
-     * Creates a snapshot of the current Memcache
-     */
-    void snapshot() {
-      this.lock.writeLock().lock();
-      try {
-        synchronized (memcache) {
-          if (memcache.size() != 0) {
-            snapshot.putAll(memcache);
-            memcache.clear();
-          }
-        }
-      } finally {
-        this.lock.writeLock().unlock();
-      }
-    }
-    
-    /**
-     * @return memcache snapshot
-     */
-    SortedMap<HStoreKey, byte[]> getSnapshot() {
-      this.lock.writeLock().lock();
-      try {
-        SortedMap<HStoreKey, byte[]> currentSnapshot = snapshot;
-        snapshot = 
-          Collections.synchronizedSortedMap(new TreeMap<HStoreKey, byte []>());
-        
-        return currentSnapshot;
-
-      } finally {
-        this.lock.writeLock().unlock();
-      }
-    }
-    
-    /**
-     * Store a value.  
-     * @param key
-     * @param value
-     */
-    void add(final HStoreKey key, final byte[] value) {
-      this.lock.readLock().lock();
-      try {
-        memcache.put(key, value);
-        
-      } finally {
-        this.lock.readLock().unlock();
-      }
-    }
-
-    /**
-     * Look back through all the backlog TreeMaps to find the target.
-     * @param key
-     * @param numVersions
-     * @return An array of byte arrays ordered by timestamp.
-     */
-    List<byte[]> get(final HStoreKey key, final int numVersions) {
-      this.lock.readLock().lock();
-      try {
-        List<byte []> results;
-        synchronized (memcache) {
-          results = internalGet(memcache, key, numVersions);
-        }
-        synchronized (snapshot) {
-          results.addAll(results.size(),
-              internalGet(snapshot, key, numVersions - results.size()));
-        }
-        return results;
-        
-      } finally {
-        this.lock.readLock().unlock();
-      }
-    }
-
-    /**
-     * Return all the available columns for the given key.  The key indicates a 
-     * row and timestamp, but not a column name.
-     *
-     * The returned object should map column names to byte arrays (byte[]).
-     * @param key
-     * @param results
-     */
-    void getFull(HStoreKey key, SortedMap<Text, byte[]> results) {
-      this.lock.readLock().lock();
-      try {
-        synchronized (memcache) {
-          internalGetFull(memcache, key, results);
-        }
-        synchronized (snapshot) {
-          internalGetFull(snapshot, key, results);
-        }
-
-      } finally {
-        this.lock.readLock().unlock();
-      }
-    }
-
-    private void internalGetFull(SortedMap<HStoreKey, byte []> map, HStoreKey key, 
-        SortedMap<Text, byte []> results) {
-
-      if (map.isEmpty() || key == null) {
-        return;
-      }
-
-      SortedMap<HStoreKey, byte []> tailMap = map.tailMap(key);
-      for (Map.Entry<HStoreKey, byte []> es: tailMap.entrySet()) {
-        HStoreKey itKey = es.getKey();
-        Text itCol = itKey.getColumn();
-        if (results.get(itCol) == null && key.matchesWithoutColumn(itKey)) {
-          byte [] val = tailMap.get(itKey);
-
-          if (!HLogEdit.isDeleted(val)) {
-            results.put(itCol, val);
-          }
-
-        } else if (key.getRow().compareTo(itKey.getRow()) < 0) {
-          break;
-        }
-      }
-    }
-
-    /**
-     * Find the key that matches <i>row</i> exactly, or the one that immediately
-     * precedes it.
-     */
-    public Text getRowKeyAtOrBefore(final Text row, long timestamp)
-    throws IOException{
-      this.lock.readLock().lock();
-      
-      Text key_memcache = null;
-      Text key_snapshot = null;
-      
-      try {
-        synchronized (memcache) {
-          key_memcache = internalGetRowKeyAtOrBefore(memcache, row, timestamp);
-        }
-        synchronized (snapshot) {
-          key_snapshot = internalGetRowKeyAtOrBefore(snapshot, row, timestamp);
-        }
-
-        if (key_memcache == null && key_snapshot == null) {
-          // didn't find any candidates, return null
-          return null;
-        } else if (key_memcache == null && key_snapshot != null) {
-          return key_snapshot;
-        } else if (key_memcache != null && key_snapshot == null) {
-          return key_memcache;
-        } else {
-          // if either is a precise match, return the original row.
-          if ( (key_memcache != null && key_memcache.equals(row)) 
-            || (key_snapshot != null && key_snapshot.equals(row)) ) {
-            return row;
-          } else {
-            // no precise matches, so return the one that is closer to the search
-            // key (greatest)
-            return key_memcache.compareTo(key_snapshot) > 0 ? 
-              key_memcache : key_snapshot;
-          }          
-        }
-      } finally {
-        this.lock.readLock().unlock();
-      }
-    }
-
-    private Text internalGetRowKeyAtOrBefore(SortedMap<HStoreKey, byte []> map, 
-      Text key, long timestamp) {
-      // TODO: account for deleted cells
-
-      HStoreKey search_key = new HStoreKey(key, timestamp);
-
-      // get all the entries that come equal or after our search key
-      SortedMap<HStoreKey, byte []> tailMap = map.tailMap(search_key);
-
-      // if the first item in the tail has a matching row, then we have an 
-      // exact match, and we should return that item
-      if (!tailMap.isEmpty() && tailMap.firstKey().getRow().equals(key)) {
-        // seek forward past any cells that don't fulfill the timestamp
-        // argument
-        Iterator<HStoreKey> key_iterator = tailMap.keySet().iterator();
-        HStoreKey found_key = key_iterator.next();
-        
-        // keep seeking so long as we're in the same row, and the timestamp
-        // isn't as small as we'd like, and there are more cells to check
-        while (found_key.getRow().equals(key)
-          && found_key.getTimestamp() > timestamp && key_iterator.hasNext()) {
-          found_key = key_iterator.next();
-        }
-        
-        // if this check fails, then we've iterated through all the keys that 
-        // match by row, but none match by timestamp, so we fall through to
-        // the headMap case.
-        if (found_key.getTimestamp() <= timestamp) {
-          // we found a cell in this row with an acceptable timestamp, so
-          // return its row
-/*          LOG.debug("Went searching for " + key + ", found " + found_key.getRow());*/
-          return found_key.getRow();
-        }
-      }
-      
-      // the tail didn't contain the key we're searching for, so we should
-      // use the last key in the headmap as the closest before
-      SortedMap<HStoreKey, byte []> headMap = map.headMap(search_key);
-      if (headMap.isEmpty()) {
-/*        LOG.debug("Went searching for " + key + ", found nothing!");*/
-        return null;
-      } else {
-/*        LOG.debug("Went searching for " + key + ", found " + headMap.lastKey().getRow());*/
-        return headMap.lastKey().getRow();
-      }
-    }
-    
-    /**
-     * Examine a single map for the desired key.
-     *
-     * TODO - This is kinda slow.  We need a data structure that allows for 
-     * proximity-searches, not just precise-matches.
-     * 
-     * @param map
-     * @param key
-     * @param numVersions
-     * @return Ordered list of items found in passed <code>map</code>.  If no
-     * matching values, returns an empty list (does not return null).
-     */
-    private ArrayList<byte []> internalGet(
-        final SortedMap<HStoreKey, byte []> map, final HStoreKey key,
-        final int numVersions) {
-
-      ArrayList<byte []> result = new ArrayList<byte []>();
-      // TODO: If get is of a particular version -- numVersions == 1 -- we
-      // should be able to avoid all of the tailmap creations and iterations
-      // below.
-      SortedMap<HStoreKey, byte []> tailMap = map.tailMap(key);
-      for (Map.Entry<HStoreKey, byte []> es: tailMap.entrySet()) {
-        HStoreKey itKey = es.getKey();
-        if (itKey.matchesRowCol(key)) {
-          if (!HLogEdit.isDeleted(es.getValue())) { 
-            result.add(tailMap.get(itKey));
-          }
-        }
-        if (numVersions > 0 && result.size() >= numVersions) {
-          break;
-        }
-      }
-      return result;
-    }
-
-    /**
-     * Get <code>versions</code> keys matching the origin key's
-     * row/column/timestamp and those of an older vintage.
-     * Default access so it can be used from outside {@link HRegionServer}.
-     * @param origin Where to start searching.
-     * @param versions How many versions to return. Pass
-     * {@link HConstants#ALL_VERSIONS} to retrieve all.
-     * @return Ordered list of <code>versions</code> keys going from newest back.
-     * @throws IOException
-     */
-    List<HStoreKey> getKeys(final HStoreKey origin, final int versions) {
-      this.lock.readLock().lock();
-      try {
-        List<HStoreKey> results;
-        synchronized (memcache) {
-          results = internalGetKeys(this.memcache, origin, versions);
-        }
-        synchronized (snapshot) {
-          results.addAll(results.size(), internalGetKeys(snapshot, origin,
-              versions == HConstants.ALL_VERSIONS ? versions :
-                (versions - results.size())));
-        }
-        return results;
-        
-      } finally {
-        this.lock.readLock().unlock();
-      }
-    }
-
-    /*
-     * @param origin Where to start searching.
-     * @param versions How many versions to return. Pass
-     * {@link HConstants#ALL_VERSIONS} to retrieve all.
-     * @return List of all keys that are of the same row and column and of
-     * equal or older timestamp.  If no keys, returns an empty List. Does not
-     * return null.
-     */
-    private List<HStoreKey> internalGetKeys(final SortedMap<HStoreKey, byte []> map,
-        final HStoreKey origin, final int versions) {
-
-      List<HStoreKey> result = new ArrayList<HStoreKey>();
-      SortedMap<HStoreKey, byte []> tailMap = map.tailMap(origin);
-      for (Map.Entry<HStoreKey, byte []> es: tailMap.entrySet()) {
-        HStoreKey key = es.getKey();
-    
-        // if there's no column name, then compare rows and timestamps
-        if (origin.getColumn().toString().equals("")) {
-          // if the current and origin row don't match, then we can jump
-          // out of the loop entirely.
-          if (!key.getRow().equals(origin.getRow())) {
-            break;
-          }
-          // if the rows match but the timestamp is newer, skip it so we can
-          // get to the ones we actually want.
-          if (key.getTimestamp() > origin.getTimestamp()) {
-            continue;
-          }
-        }
-        else{ // compare rows and columns
-          // if the key doesn't match the row and column, then we're done, since 
-          // all the cells are ordered.
-          if (!key.matchesRowCol(origin)) {
-            break;
-          }
-        }
-
-        if (!HLogEdit.isDeleted(es.getValue())) {
-          result.add(key);
-          if (versions != HConstants.ALL_VERSIONS && result.size() >= versions) {
-            // We have enough results.  Return.
-            break;
-          }
-        }
-      }
-      return result;
-    }
-
-
-    /**
-     * @param key
-     * @return True if the entry's value is the delete marker {@link HGlobals#deleteBytes}.
-     * Use when checking values read from the store; on occasion the memcache
-     * holds the fact that the cell has been deleted.
-     */
-    boolean isDeleted(final HStoreKey key) {
-      return HLogEdit.isDeleted(this.memcache.get(key));
-    }
-
-    /**
-     * @return a scanner over the keys in the Memcache
-     */
-    HInternalScannerInterface getScanner(long timestamp,
-        Text targetCols[], Text firstRow) throws IOException {
-
-      // Here we rely on ReentrantReadWriteLock's ability to acquire multiple
-      // locks by the same thread and to be able to downgrade a write lock to
-      // a read lock. We need to hold a lock throughout this method, but only
-      // need the write lock while creating the memcache snapshot
-      
-      this.lock.writeLock().lock(); // hold write lock during memcache snapshot
-      snapshot();                       // snapshot memcache
-      this.lock.readLock().lock();      // acquire read lock
-      this.lock.writeLock().unlock();   // downgrade to read lock
-      try {
-        // Prevent a cache flush while we are constructing the scanner
-
-        return new MemcacheScanner(timestamp, targetCols, firstRow);
-      
-      } finally {
-        this.lock.readLock().unlock();
-      }
-    }
-
-    //////////////////////////////////////////////////////////////////////////////
-    // MemcacheScanner implements the HScannerInterface.
-    // It lets the caller scan the contents of the Memcache.
-    //////////////////////////////////////////////////////////////////////////////
-
-    class MemcacheScanner extends HAbstractScanner {
-      SortedMap<HStoreKey, byte []> backingMap;
-      Iterator<HStoreKey> keyIterator;
-
-      @SuppressWarnings("unchecked")
-      MemcacheScanner(final long timestamp, final Text targetCols[],
-          final Text firstRow) throws IOException {
-
-        super(timestamp, targetCols);
-        try {
-          this.backingMap = new TreeMap<HStoreKey, byte[]>();
-          this.backingMap.putAll(snapshot);
-          this.keys = new HStoreKey[1];
-          this.vals = new byte[1][];
-
-          // Generate list of iterators
-
-          HStoreKey firstKey = new HStoreKey(firstRow);
-          if (firstRow != null && firstRow.getLength() != 0) {
-            keyIterator =
-              backingMap.tailMap(firstKey).keySet().iterator();
-
-          } else {
-            keyIterator = backingMap.keySet().iterator();
-          }
-
-          while (getNext(0)) {
-            if (!findFirstRow(0, firstRow)) {
-              continue;
-            }
-            if (columnMatch(0)) {
-              break;
-            }
-          }
-        } catch (RuntimeException ex) {
-          LOG.error("error initializing Memcache scanner: ", ex);
-          close();
-          IOException e = new IOException("error initializing Memcache scanner");
-          e.initCause(ex);
-          throw e;
-
-        } catch(IOException ex) {
-          LOG.error("error initializing Memcache scanner: ", ex);
-          close();
-          throw ex;
-        }
-      }
-
-      /**
-       * The user didn't want to start scanning at the first row. This method
-       * seeks to the requested row.
-       *
-       * @param i which iterator to advance
-       * @param firstRow seek to this row
-       * @return true if this is the first row
-       */
-      @Override
-      boolean findFirstRow(int i, Text firstRow) {
-        return firstRow.getLength() == 0 ||
-        keys[i].getRow().compareTo(firstRow) >= 0;
-      }
-
-      /**
-       * Get the next value from the specified iterator.
-       * 
-       * @param i Which iterator to fetch next value from
-       * @return true if there is more data available
-       */
-      @Override
-      boolean getNext(int i) {
-        boolean result = false;
-        while (true) {
-          if (!keyIterator.hasNext()) {
-            closeSubScanner(i);
-            break;
-          }
-          // Check that the key's timestamp is <= this scanner's timestamp.
-          HStoreKey hsk = keyIterator.next();
-          if (hsk == null) {
-            throw new NullPointerException("Unexpected null key");
-          }
-          if (hsk.getTimestamp() <= this.timestamp) {
-            this.keys[i] = hsk;
-            this.vals[i] = backingMap.get(keys[i]);
-            result = true;
-            break;
-          }
-        }
-        return result;
-      }
-
-      /** Shut down an individual map iterator. */
-      @Override
-      void closeSubScanner(int i) {
-        keyIterator = null;
-        keys[i] = null;
-        vals[i] = null;
-        backingMap = null;
-      }
-
-      /** Shut down map iterators */
-      public void close() {
-        if (!scannerClosed) {
-          if(keyIterator != null) {
-            closeSubScanner(0);
-          }
-          scannerClosed = true;
-        }
-      }
-    }
-  }
-  
-  /*
-   * Regex that will work for straight filenames and for reference names.
-   * If reference, then the regex has more than just one group.  Group 1 is
-   * this file's id.  Group 2 is the referenced region name, etc.
-   */
-  private static Pattern REF_NAME_PARSER =
-    Pattern.compile("^(\\d+)(?:\\.(.+))?$");
-  
-  private static final String BLOOMFILTER_FILE_NAME = "filter";
-
-  final Memcache memcache = new Memcache();
-  private final Path basedir;
-  private final HRegionInfo info;
-  private final HColumnDescriptor family;
-  private final SequenceFile.CompressionType compression;
-  final FileSystem fs;
-  private final HBaseConfiguration conf;
-  private final Path filterDir;
-  final Filter bloomFilter;
-  private final Path compactionDir;
-
-  private final Integer compactLock = new Integer(0);
-  private final Integer flushLock = new Integer(0);
-
-  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
-  final AtomicInteger activeScanners = new AtomicInteger(0);
-
-  final String storeName;
-
-  /*
-   * Sorted Map of store files keyed by sequence id (most recent should be
-   * last in the list).
-   */
-  final SortedMap<Long, HStoreFile> storefiles =
-    Collections.synchronizedSortedMap(new TreeMap<Long, HStoreFile>());
-  
-  /*
-   * Sorted Map of readers keyed by sequence id (most recent should be
-   * last in the list).
-   */
-  private final SortedMap<Long, MapFile.Reader> readers =
-    new TreeMap<Long, MapFile.Reader>();
-
-  private volatile long maxSeqId;
-  private final int compactionThreshold;
-  private final ReentrantReadWriteLock newScannerLock =
-    new ReentrantReadWriteLock();
-
-  /**
-   * An HStore is a set of zero or more MapFiles, which stretch backwards over 
-   * time.  A given HStore is responsible for a certain set of columns for a
-   * row in the HRegion.
-   *
-   * <p>The HRegion starts writing to its set of HStores when the HRegion's 
-   * memcache is flushed.  This results in a round of new MapFiles, one for
-   * each HStore.
-   *
-   * <p>There's no reason to consider append-logging at this level; all logging 
-   * and locking is handled at the HRegion level.  HStore just provides
-   * services to manage sets of MapFiles.  One of the most important of those
-   * services is MapFile-compaction services.
-   *
-   * <p>The only thing having to do with logs that HStore needs to deal with is
-   * the reconstructionLog.  This is a segment of an HRegion's log that might
-   * NOT be present upon startup.  If the param is NULL, there's nothing to do.
-   * If the param is non-NULL, we need to process the log to reconstruct
-   * a TreeMap that might not have been written to disk before the process
-   * died.
-   *
-   * <p>It's assumed that after this constructor returns, the reconstructionLog
-   * file will be deleted (by whoever has instantiated the HStore).
-   *
-   * @param basedir qualified path under which the region directory lives
-   * @param info HRegionInfo for this region
-   * @param family HColumnDescriptor for this column
-   * @param fs file system object
-   * @param reconstructionLog existing log file to apply if any
-   * @param conf configuration object
-   * @throws IOException
-   */
-  HStore(Path basedir, HRegionInfo info, HColumnDescriptor family,
-      FileSystem fs, Path reconstructionLog, HBaseConfiguration conf)
-      throws IOException {  
-    
-    this.basedir = basedir;
-    this.info = info;
-    this.family = family;
-    this.fs = fs;
-    this.conf = conf;
-    
-    this.compactionDir = HRegion.getCompactionDir(basedir);
-    this.storeName =
-      this.info.getEncodedName() + "/" + this.family.getFamilyName();
-    
-    if (family.getCompression() == HColumnDescriptor.CompressionType.BLOCK) {
-      this.compression = SequenceFile.CompressionType.BLOCK;
-    } else if (family.getCompression() ==
-      HColumnDescriptor.CompressionType.RECORD) {
-      this.compression = SequenceFile.CompressionType.RECORD;
-    } else {
-      this.compression = SequenceFile.CompressionType.NONE;
-    }
-    
-    Path mapdir = HStoreFile.getMapDir(basedir, info.getEncodedName(),
-        family.getFamilyName());
-    if (!fs.exists(mapdir)) {
-      fs.mkdirs(mapdir);
-    }
-    Path infodir = HStoreFile.getInfoDir(basedir, info.getEncodedName(),
-        family.getFamilyName());
-    if (!fs.exists(infodir)) {
-      fs.mkdirs(infodir);
-    }
-    
-    if(family.getBloomFilter() == null) {
-      this.filterDir = null;
-      this.bloomFilter = null;
-    } else {
-      this.filterDir = HStoreFile.getFilterDir(basedir, info.getEncodedName(),
-          family.getFamilyName());
-      if (!fs.exists(filterDir)) {
-        fs.mkdirs(filterDir);
-      }
-      this.bloomFilter = loadOrCreateBloomFilter();
-    }
-
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("starting " + storeName +
-          ((reconstructionLog == null || !fs.exists(reconstructionLog)) ?
-          " (no reconstruction log)" :
-            " with reconstruction log: " + reconstructionLog.toString()));
-    }
-
-    // Go through the 'mapdir' and 'infodir' together, make sure that all 
-    // MapFiles are in a reliable state.  Every entry in 'mapdir' must have a 
-    // corresponding one in 'loginfodir'. Without a corresponding log info
-    // file, the entry in 'mapdir' must be deleted.
-    List<HStoreFile> hstoreFiles = loadHStoreFiles(infodir, mapdir);
-    for(HStoreFile hsf: hstoreFiles) {
-      this.storefiles.put(Long.valueOf(hsf.loadInfo(fs)), hsf);
-    }
-
-    // Now go through all the HSTORE_LOGINFOFILEs and figure out the
-    // most-recent log-seq-ID that's present.  The most-recent such ID means we
-    // can ignore all log messages up to and including that ID (because they're
-    // already reflected in the TreeMaps).
-    //
-    // If the HSTORE_LOGINFOFILE doesn't contain a number, just ignore it. That
-    // means it was built prior to the previous run of HStore, and so it cannot 
-    // contain any updates also contained in the log.
-    
-    this.maxSeqId = getMaxSequenceId(hstoreFiles);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("maximum sequence id for hstore " + storeName + " is " +
-          this.maxSeqId);
-    }
-    
-    doReconstructionLog(reconstructionLog, maxSeqId);
-
-    // By default, we compact if an HStore has at least
-    // 'hbase.hstore.compactionThreshold' (default 3) map files
-    this.compactionThreshold =
-      conf.getInt("hbase.hstore.compactionThreshold", 3);
-    
-    // We used to compact in here before bringing the store online.  Instead
-    // get it online quick even if it needs compactions so we can start
-    // taking updates as soon as possible (Once online, can take updates even
-    // during a compaction).
-
-    // Move maxSeqId on by one. Why here?  And not in HRegion?
-    this.maxSeqId += 1;
-    
-    // Finally, start up all the map readers! (There should be just one at this 
-    // point, as we've compacted them all.)
-    for(Map.Entry<Long, HStoreFile> e: this.storefiles.entrySet()) {
-      this.readers.put(e.getKey(),
-        e.getValue().getReader(this.fs, this.bloomFilter));
-    }
-  }
-  
-  /* 
-   * @param hstoreFiles
-   * @return Maximum sequence number found or -1.
-   * @throws IOException
-   */
-  private long getMaxSequenceId(final List<HStoreFile> hstoreFiles)
-  throws IOException {
-    long maxSeqID = -1;
-    for (HStoreFile hsf : hstoreFiles) {
-      long seqid = hsf.loadInfo(fs);
-      if (seqid > 0) {
-        if (seqid > maxSeqID) {
-          maxSeqID = seqid;
-        }
-      }
-    }
-    return maxSeqID;
-  }
-  
-  long getMaxSequenceId() {
-    return this.maxSeqId;
-  }
-  
-  /*
-   * Read the reconstructionLog to see whether we need to build a brand-new 
-   * MapFile out of non-flushed log entries.  
-   *
-   * We can ignore any log message that has a sequence ID that's equal to or 
-   * lower than maxSeqID.  (Because we know such log messages are already 
-   * reflected in the MapFiles.)
-   */
-  private void doReconstructionLog(final Path reconstructionLog,
-      final long maxSeqID) throws UnsupportedEncodingException, IOException {
-    
-    if (reconstructionLog == null || !fs.exists(reconstructionLog)) {
-      // Nothing to do.
-      return;
-    }
-    long maxSeqIdInLog = -1;
-    TreeMap<HStoreKey, byte []> reconstructedCache =
-      new TreeMap<HStoreKey, byte []>();
-      
-    SequenceFile.Reader logReader = new SequenceFile.Reader(this.fs,
-        reconstructionLog, this.conf);
-    
-    try {
-      HLogKey key = new HLogKey();
-      HLogEdit val = new HLogEdit();
-      long skippedEdits = 0;
-      while (logReader.next(key, val)) {
-        maxSeqIdInLog = Math.max(maxSeqIdInLog, key.getLogSeqNum());
-        if (key.getLogSeqNum() <= maxSeqID) {
-          skippedEdits++;
-          continue;
-        }
-        if (skippedEdits > 0 && LOG.isDebugEnabled()) {
-          LOG.debug("Skipped " + skippedEdits +
-            " edits because sequence id <= " + maxSeqID);
-        }
-        // Check this edit is for me. Also, guard against writing
-        // METACOLUMN info such as HBASE::CACHEFLUSH entries
-        Text column = val.getColumn();
-        if (column.equals(HLog.METACOLUMN)
-            || !key.getRegionName().equals(info.getRegionName())
-            || !HStoreKey.extractFamily(column).equals(family.getFamilyName())) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Passing on edit " + key.getRegionName() + ", " +
-                column.toString() + ": " + 
-                new String(val.getVal(), UTF8_ENCODING) +
-                ", my region: " + info.getRegionName() + ", my column: " +
-                family.getFamilyName());
-          }
-          continue;
-        }
-        HStoreKey k = new HStoreKey(key.getRow(), column, val.getTimestamp());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Applying edit <" + k.toString() + "=" + val.toString() +
-              ">");
-        }
-        reconstructedCache.put(k, val.getVal());
-      }
-    } finally {
-      logReader.close();
-    }
-    
-    if (reconstructedCache.size() > 0) {
-      // We create a "virtual flush" at maxSeqIdInLog+1.
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("flushing reconstructionCache");
-      }
-      internalFlushCache(reconstructedCache, maxSeqIdInLog + 1);
-    }
-  }
-  
-  /*
-   * Creates a series of HStoreFiles loaded from the given directory.
-   * There must be a matching 'mapdir' and 'loginfo' pair of files.
-   * If only one exists, we'll delete it.
-   *
-   * @param infodir qualified path for info file directory
-   * @param mapdir qualified path for map file directory
-   * @throws IOException
-   */
-  private List<HStoreFile> loadHStoreFiles(Path infodir, Path mapdir)
-  throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("infodir: " + infodir.toString() + " mapdir: " +
-          mapdir.toString());
-    }
-    // Look first at info files.  If a reference, these contain info we need
-    // to create the HStoreFile.
-    Path infofiles[] = fs.listPaths(new Path[] {infodir});
-    ArrayList<HStoreFile> results = new ArrayList<HStoreFile>(infofiles.length);
-    ArrayList<Path> mapfiles = new ArrayList<Path>(infofiles.length);
-    for (Path p: infofiles) {
-      Matcher m = REF_NAME_PARSER.matcher(p.getName());
-      /*
-       *  *  *  *  *  N O T E  *  *  *  *  *
-       *  
-       *  We call isReference(Path, Matcher) here because it calls
-       *  Matcher.matches() which must be called before Matcher.group(int)
-       *  and we don't want to call Matcher.matches() twice.
-       *  
-       *  *  *  *  *  N O T E  *  *  *  *  *
-       */
-      boolean isReference = isReference(p, m);
-      long fid = Long.parseLong(m.group(1));
-
-      HStoreFile curfile = null;
-      HStoreFile.Reference reference = null;
-      if (isReference) {
-        reference = readSplitInfo(p, fs);
-      }
-      curfile = new HStoreFile(conf, fs, basedir, info.getEncodedName(),
-          family.getFamilyName(), fid, reference);
-      Path mapfile = curfile.getMapFilePath();
-      if (!fs.exists(mapfile)) {
-        fs.delete(curfile.getInfoFilePath());
-        LOG.warn("Mapfile " + mapfile.toString() + " does not exist. " +
-          "Cleaned up info file.  Continuing...");
-        continue;
-      }
-      
-      // TODO: Confirm referent exists.
-      
-      // Found map and sympathetic info file.  Add this hstorefile to result.
-      results.add(curfile);
-      // Keep list of sympathetic data mapfiles for cleaning info dir in next
-      // section.  Make sure path is fully qualified for compare.
-      mapfiles.add(mapfile);
-    }
-    
-    // In our experience, listPaths returns fully qualified names -- at least
-    // when running on a mini hdfs cluster.
-    Path datfiles[] = fs.listPaths(new Path[] {mapdir});
-    for (int i = 0; i < datfiles.length; i++) {
-      // If does not have sympathetic info file, delete.
-      if (!mapfiles.contains(fs.makeQualified(datfiles[i]))) {
-        fs.delete(datfiles[i]);
-      }
-    }
-    return results;
-  }
-  
-  //////////////////////////////////////////////////////////////////////////////
-  // Bloom filters
-  //////////////////////////////////////////////////////////////////////////////
-
-  /**
-   * Called by constructor if a bloom filter is enabled for this column family.
-   * If the HStore already exists, it will read in the bloom filter saved
-   * previously. Otherwise, it will create a new bloom filter.
-   */
-  private Filter loadOrCreateBloomFilter() throws IOException {
-    Path filterFile = new Path(filterDir, BLOOMFILTER_FILE_NAME);
-    Filter bloomFilter = null;
-    if(fs.exists(filterFile)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("loading bloom filter for " + this.storeName);
-      }
-      
-      BloomFilterDescriptor.BloomFilterType type =
-        family.getBloomFilter().filterType;
-
-      switch(type) {
-      
-      case BLOOMFILTER:
-        bloomFilter = new BloomFilter();
-        break;
-        
-      case COUNTING_BLOOMFILTER:
-        bloomFilter = new CountingBloomFilter();
-        break;
-        
-      case RETOUCHED_BLOOMFILTER:
-        bloomFilter = new RetouchedBloomFilter();
-        break;
-      
-      default:
-        throw new IllegalArgumentException("unknown bloom filter type: " +
-            type);
-      }
-      FSDataInputStream in = fs.open(filterFile);
-      try {
-        bloomFilter.readFields(in);
-      } finally {
-        in.close();
-      }
-    } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("creating bloom filter for " + this.storeName);
-      }
-
-      BloomFilterDescriptor.BloomFilterType type =
-        family.getBloomFilter().filterType;
-
-      switch(type) {
-      
-      case BLOOMFILTER:
-        bloomFilter = new BloomFilter(family.getBloomFilter().vectorSize,
-            family.getBloomFilter().nbHash);
-        break;
-        
-      case COUNTING_BLOOMFILTER:
-        bloomFilter =
-          new CountingBloomFilter(family.getBloomFilter().vectorSize,
-            family.getBloomFilter().nbHash);
-        break;
-        
-      case RETOUCHED_BLOOMFILTER:
-        bloomFilter =
-          new RetouchedBloomFilter(family.getBloomFilter().vectorSize,
-            family.getBloomFilter().nbHash);
-      }
-    }
-    return bloomFilter;
-  }
-
-  /**
-   * Flushes bloom filter to disk
-   * 
-   * @throws IOException
-   */
-  private void flushBloomFilter() throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("flushing bloom filter for " + this.storeName);
-    }
-    FSDataOutputStream out =
-      fs.create(new Path(filterDir, BLOOMFILTER_FILE_NAME));
-    try {
-      bloomFilter.write(out);
-    } finally {
-      out.close();
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("flushed bloom filter for " + this.storeName);
-    }
-  }
-  
-  //////////////////////////////////////////////////////////////////////////////
-  // End bloom filters
-  //////////////////////////////////////////////////////////////////////////////
-
-  /**
-   * Adds a value to the memcache
-   * 
-   * @param key
-   * @param value
-   */
-  void add(HStoreKey key, byte[] value) {
-    lock.readLock().lock();
-    try {
-      this.memcache.add(key, value);
-      
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-  
-  /**
-   * Close all the MapFile readers
-   * 
-   * We don't need to worry about subsequent requests because the HRegion holds
-   * a write lock that will prevent any more reads or writes.
-   * 
-   * @throws IOException
-   */
-  List<HStoreFile> close() throws IOException {
-    ArrayList<HStoreFile> result = null;
-    this.lock.writeLock().lock();
-    try {
-      for (MapFile.Reader reader: this.readers.values()) {
-        reader.close();
-      }
-      this.readers.clear();
-      result = new ArrayList<HStoreFile>(storefiles.values());
-      this.storefiles.clear();
-      LOG.debug("closed " + this.storeName);
-      return result;
-    } finally {
-      this.lock.writeLock().unlock();
-    }
-  }
-
-
-  //////////////////////////////////////////////////////////////////////////////
-  // Flush changes to disk
-  //////////////////////////////////////////////////////////////////////////////
-
-  /**
-   * Prior to doing a cache flush, we need to snapshot the memcache. Locking is
-   * handled by the memcache.
-   */
-  void snapshotMemcache() {
-    this.memcache.snapshot();
-  }
-  
-  /**
-   * Write out a brand-new set of items to the disk.
-   *
-   * We should only store key/vals that are appropriate for the data-columns 
-   * stored in this HStore.
-   *
-   * Also, we are not expecting any reads of this MapFile just yet.
-   *
-   * Return the entire list of HStoreFiles currently used by the HStore.
-   *
-   * @param logCacheFlushId flush sequence number
-   * @throws IOException
-   */
-  void flushCache(final long logCacheFlushId) throws IOException {
-      internalFlushCache(memcache.getSnapshot(), logCacheFlushId);
-  }
-  
-  private void internalFlushCache(SortedMap<HStoreKey, byte []> cache,
-      long logCacheFlushId) throws IOException {
-    
-    synchronized(flushLock) {
-      // A. Write the Maps out to the disk
-      HStoreFile flushedFile = new HStoreFile(conf, fs, basedir,
-        info.getEncodedName(), family.getFamilyName(), -1L, null);
-      String name = flushedFile.toString();
-      MapFile.Writer out = flushedFile.getWriter(this.fs, this.compression,
-        this.bloomFilter);
-      
-      // Here we tried picking up an existing HStoreFile from disk and
-      // interlacing the memcache flush compacting as we go.  The notion was
-      // that interlacing would take as long as a pure flush with the added
-      // benefit of having one less file in the store.  Experiments showed that
-      // it takes two to three times the amount of time flushing -- more column
-      // families makes it so the two timings come closer together -- but it
-      // also complicates the flush. The code was removed.  Needed work picking
-      // which file to interlace (favor references first, etc.)
-      //
-      // Related, looks like 'merging compactions' in BigTable paper interlaces
-      // a memcache flush.  We don't.
-      int entries = 0;
-      try {
-        for (Map.Entry<HStoreKey, byte []> es: cache.entrySet()) {
-          HStoreKey curkey = es.getKey();
-          TextSequence f = HStoreKey.extractFamily(curkey.getColumn());
-          if (f.equals(this.family.getFamilyName())) {
-            entries++;
-            out.append(curkey, new ImmutableBytesWritable(es.getValue()));
-          }
-        }
-      } finally {
-        out.close();
-      }
-
-      // B. Write out the log sequence number that corresponds to this output
-      // MapFile.  The MapFile is current up to and including the log seq num.
-      flushedFile.writeInfo(fs, logCacheFlushId);
-      
-      // C. Flush the bloom filter if any
-      if (bloomFilter != null) {
-        flushBloomFilter();
-      }
-
-      // D. Finally, make the new MapFile available.
-      this.lock.writeLock().lock();
-      try {
-        Long flushid = Long.valueOf(logCacheFlushId);
-        // Open the map file reader.
-        this.readers.put(flushid,
-            flushedFile.getReader(this.fs, this.bloomFilter));
-        this.storefiles.put(flushid, flushedFile);
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("Added " + name + " with " + entries +
-            " entries, sequence id " + logCacheFlushId + ", and size " +
-            StringUtils.humanReadableInt(flushedFile.length()) + " for " +
-            this.storeName);
-        }
-      } finally {
-        this.lock.writeLock().unlock();
-      }
-      return;
-    }
-  }
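The flush path above follows a write-then-publish pattern: the snapshot is written to a brand-new MapFile with no store lock held, the flush sequence id is recorded with the file, and only the final registration of the new reader happens under the write lock. Below is a minimal standalone sketch of that pattern using plain JDK types; all class and method names are illustrative only, not the HBase classes.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Map;
import java.util.SortedMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class FlushSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final SortedMap<Long, Path> storeFiles; // keyed by flush sequence id

  FlushSketch(SortedMap<Long, Path> storeFiles) {
    this.storeFiles = storeFiles;
  }

  void flush(SortedMap<String, byte[]> snapshot, long flushSeqId, Path dir)
      throws IOException {
    // 1. Write the snapshot out to a brand-new file; no store lock is held here.
    //    (Key layout is elided; only the values are appended in this sketch.)
    Path newFile = Files.createTempFile(dir, "flush-" + flushSeqId + "-", ".dat");
    for (Map.Entry<String, byte[]> e : snapshot.entrySet()) {
      Files.write(newFile, e.getValue(), StandardOpenOption.APPEND);
    }
    // 2. Publish the new file under the write lock so concurrent readers always
    //    see a consistent set of store files.
    lock.writeLock().lock();
    try {
      storeFiles.put(flushSeqId, newFile);
    } finally {
      lock.writeLock().unlock();
    }
  }
}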
-
-  //////////////////////////////////////////////////////////////////////////////
-  // Compaction
-  //////////////////////////////////////////////////////////////////////////////
-  
-  /**
-   * @return True if this store needs compaction.
-   */
-  boolean needsCompaction() {
-    return this.storefiles != null &&
-      (this.storefiles.size() >= this.compactionThreshold || hasReferences());
-  }
-  
-  /*
-   * @return True if this store has references.
-   */
-  private boolean hasReferences() {
-    if (this.storefiles != null) {
-      for (HStoreFile hsf: this.storefiles.values()) {
-        if (hsf.isReference()) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Compact the backing HStore files.  This method may take some time, so the calling
-   * thread must be able to block for long periods.
-   * 
-   * <p>During this time, the HStore can work as usual, getting values from
-   * MapFiles and writing new MapFiles from the Memcache.
-   * 
-   * Existing MapFiles are not destroyed until the new compacted TreeMap is 
-   * completely written-out to disk.
-   *
-   * The compactLock prevents multiple simultaneous compactions.
-   * The structureLock prevents us from interfering with other write operations.
-   * 
-   * We don't want to hold the structureLock for the whole time, as a compact() 
-   * can be lengthy and we want to allow cache-flushes during this period.
-   * @throws IOException
-   * 
-   * @return true if compaction completed successfully
-   */
-  boolean compact() throws IOException {
-    synchronized (compactLock) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("started compaction of " + storefiles.size() +
-          " files using " + compactionDir.toString() + " for " +
-          this.storeName);
-      }
-
-      // Storefiles are keyed by sequence id. The oldest file comes first.
-      // We need a list here that has the newest file first.
-      List<HStoreFile> filesToCompact =
-        new ArrayList<HStoreFile>(this.storefiles.values());
-      Collections.reverse(filesToCompact);
-      if (filesToCompact.size() < 1 ||
-        (filesToCompact.size() == 1 && !filesToCompact.get(0).isReference())) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("nothing to compact for " + this.storeName);
-        }
-        return false;
-      }
-
-      if (!fs.exists(compactionDir) && !fs.mkdirs(compactionDir)) {
-        LOG.warn("Mkdir on " + compactionDir.toString() + " failed");
-        return false;
-      }
-
-      // Step through them, writing to the brand-new MapFile
-      HStoreFile compactedOutputFile = new HStoreFile(conf, fs, 
-        this.compactionDir, info.getEncodedName(), family.getFamilyName(),
-        -1L, null);
-      MapFile.Writer compactedOut = compactedOutputFile.getWriter(this.fs,
-        this.compression, this.bloomFilter);
-      try {
-        compactHStoreFiles(compactedOut, filesToCompact);
-      } finally {
-        compactedOut.close();
-      }
-
-      // Now, write out an HSTORE_LOGINFOFILE for the brand-new TreeMap.
-      // Compute max-sequenceID seen in any of the to-be-compacted TreeMaps.
-      long maxId = getMaxSequenceId(filesToCompact);
-      compactedOutputFile.writeInfo(fs, maxId);
-
-      // Move the compaction into place.
-      completeCompaction(filesToCompact, compactedOutputFile);
-      return true;
-    }
-  }
-  
-  /*
-   * Compact passed <code>toCompactFiles</code> into <code>compactedOut</code>.
-   * We create a new set of MapFile.Reader objects so we don't screw up the
-   * caching associated with the currently-loaded ones. Our iteration-based
-   * access pattern is practically designed to ruin the cache.
-   * 
-   * We work by opening a single MapFile.Reader for each file, and iterating
-   * through them in parallel. We always increment the lowest-ranked one.
-   * Updates to a single row/column will appear ranked by timestamp. This allows
-   * us to throw out deleted values or obsolete versions.
-   *
-   * @param compactedOut
-   * @param toCompactFiles
-   * @throws IOException
-   */
-  private void compactHStoreFiles(final MapFile.Writer compactedOut,
-      final List<HStoreFile> toCompactFiles) throws IOException {
-    
-    int size = toCompactFiles.size();
-    CompactionReader[] rdrs = new CompactionReader[size];
-    int index = 0;
-    for (HStoreFile hsf: toCompactFiles) {
-      try {
-        rdrs[index++] =
-          new MapFileCompactionReader(hsf.getReader(fs, bloomFilter));
-      } catch (IOException e) {
-        // Add info about which file threw exception. It may not be in the
-        // exception message so output a message here where we know the
-        // culprit.
-        LOG.warn("Failed with " + e.toString() + ": " + hsf.toString() +
-          (hsf.isReference() ? " " + hsf.getReference().toString() : "") +
-          " for " + this.storeName);
-        closeCompactionReaders(rdrs);
-        throw e;
-      }
-    }
-    try {
-      HStoreKey[] keys = new HStoreKey[rdrs.length];
-      ImmutableBytesWritable[] vals = new ImmutableBytesWritable[rdrs.length];
-      boolean[] done = new boolean[rdrs.length];
-      for(int i = 0; i < rdrs.length; i++) {
-        keys[i] = new HStoreKey();
-        vals[i] = new ImmutableBytesWritable();
-        done[i] = false;
-      }
-
-      // Now, advance through the readers in order.  This will have the
-      // effect of a run-time sort of the entire dataset.
-      int numDone = 0;
-      for(int i = 0; i < rdrs.length; i++) {
-        rdrs[i].reset();
-        done[i] = ! rdrs[i].next(keys[i], vals[i]);
-        if(done[i]) {
-          numDone++;
-        }
-      }
-
-      int timesSeen = 0;
-      Text lastRow = new Text();
-      Text lastColumn = new Text();
-      // Map of the current row's deletes, keyed by column, with a list of timestamps as the value
-      Map<Text, List<Long>> deletes = null;
-      while (numDone < done.length) {
-        // Find the reader with the smallest key.  If two files have same key
-        // but different values -- i.e. one is delete and other is non-delete
-        // value -- we will find the first, the one that was written later and
-        // therefore the one whose value should make it out to the compacted
-        // store file.
-        int smallestKey = -1;
-        for(int i = 0; i < rdrs.length; i++) {
-          if(done[i]) {
-            continue;
-          }
-          if(smallestKey < 0) {
-            smallestKey = i;
-          } else {
-            if(keys[i].compareTo(keys[smallestKey]) < 0) {
-              smallestKey = i;
-            }
-          }
-        }
-
-        // Reflect the current key/val in the output
-        HStoreKey sk = keys[smallestKey];
-        if(lastRow.equals(sk.getRow())
-            && lastColumn.equals(sk.getColumn())) {
-          timesSeen++;
-        } else {
-          timesSeen = 1;
-          // We are on to a new row or column.  Create a new deletes map.
-          deletes = new HashMap<Text, List<Long>>();
-        }
-
-        byte [] value = (vals[smallestKey] == null)?
-          null: vals[smallestKey].get();
-        if (!isDeleted(sk, value, false, deletes) &&
-            timesSeen <= family.getMaxVersions()) {
-          // Keep old versions until we have maxVersions worth.
-          // Then just skip them.
-          if (sk.getRow().getLength() != 0 && sk.getColumn().getLength() != 0) {
-            // Only write out objects which have a non-zero length key and
-            // value
-            compactedOut.append(sk, vals[smallestKey]);
-          }
-        }
-
-        // Update last-seen items
-        lastRow.set(sk.getRow());
-        lastColumn.set(sk.getColumn());
-
-        // Advance the smallest key.  If that reader's all finished, then 
-        // mark it as done.
-        if(!rdrs[smallestKey].next(keys[smallestKey],
-            vals[smallestKey])) {
-          done[smallestKey] = true;
-          rdrs[smallestKey].close();
-          rdrs[smallestKey] = null;
-          numDone++;
-        }
-      }
-    } finally {
-      closeCompactionReaders(rdrs);
-    }
-  }
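The loop above is a k-way merge over the already-sorted store files: each reader exposes its current key, the smallest one is emitted (subject to the delete and max-versions checks), and only that reader is advanced. The code scans the reader array linearly for the smallest key; the standalone sketch below expresses the same idea with a priority queue over plain sorted string lists. It is a simplification for illustration, not the HBase code path.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

final class KWayMergeSketch {

  /** One merge source: a sorted iterator plus its current element. */
  private static final class Source implements Comparable<Source> {
    final Iterator<String> it;
    String current;

    Source(Iterator<String> it) {
      this.it = it;
      this.current = it.hasNext() ? it.next() : null;
    }

    boolean done() { return current == null; }

    void advance() { current = it.hasNext() ? it.next() : null; }

    @Override
    public int compareTo(Source other) { return current.compareTo(other.current); }
  }

  /** Merge already-sorted key lists into one sorted list, smallest key first. */
  static List<String> merge(List<List<String>> sortedInputs) {
    PriorityQueue<Source> heap = new PriorityQueue<>();
    for (List<String> input : sortedInputs) {
      Source s = new Source(input.iterator());
      if (!s.done()) {
        heap.add(s);
      }
    }
    List<String> out = new ArrayList<>();
    while (!heap.isEmpty()) {
      Source smallest = heap.poll();   // reader with the smallest current key
      out.add(smallest.current);       // emit it (version/delete checks would go here)
      smallest.advance();              // advance only that reader
      if (!smallest.done()) {
        heap.add(smallest);
      }
    }
    return out;
  }

  public static void main(String[] args) {
    System.out.println(merge(List.of(
        List.of("a1", "c3"), List.of("b2", "d4"), List.of("a0", "z9"))));
    // -> [a0, a1, b2, c3, d4, z9]
  }
}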
-  
-  private void closeCompactionReaders(final CompactionReader [] rdrs) {
-    for (int i = 0; i < rdrs.length; i++) {
-      if (rdrs[i] != null) {
-        try {
-          rdrs[i].close();
-        } catch (IOException e) {
-          LOG.warn("Exception closing reader for " + this.storeName, e);
-        }
-      }
-    }
-  }
-
-  /** Interface for generic reader for compactions */
-  interface CompactionReader {
-    
-    /**
-     * Closes the reader
-     * @throws IOException
-     */
-    public void close() throws IOException;
-    
-    /**
-     * Get the next key/value pair
-     * 
-     * @param key
-     * @param val
-     * @return true if more data was returned
-     * @throws IOException
-     */
-    public boolean next(WritableComparable key, Writable val)
-    throws IOException;
-    
-    /**
-     * Resets the reader
-     * @throws IOException
-     */
-    public void reset() throws IOException;
-  }
-
-  /** A compaction reader for MapFile */
-  static class MapFileCompactionReader implements CompactionReader {
-    final MapFile.Reader reader;
-    
-    MapFileCompactionReader(final MapFile.Reader r) {
-      this.reader = r;
-    }
-    
-    /** {@inheritDoc} */
-    public void close() throws IOException {
-      this.reader.close();
-    }
-
-    /** {@inheritDoc} */
-    public boolean next(WritableComparable key, Writable val)
-    throws IOException {
-      return this.reader.next(key, val);
-    }
-
-    /** {@inheritDoc} */
-    public void reset() throws IOException {
-      this.reader.reset();
-    }
-  }
-
-  /*
-   * Check if this cell is deleted.
-   * First consult the memcache (if asked to) and the passed deletes map; then
-   * check whether the value is the <code>HGlobals.deleteBytes</code> value.
-   * If the passed value IS deleteBytes, then it is added to the passed
-   * deletes map.
-   * @param hsk
-   * @param value
-   * @param checkMemcache true if the memcache should be consulted
-   * @param deletes Map keyed by column with a list of timestamps as value.
-   * Can be null.  If non-null and the passed value is HGlobals.deleteBytes,
-   * then we add to this map.
-   * @return True if this is a deleted cell.  Adds to the passed deletes map
-   * if the passed value is HGlobals.deleteBytes.
-   */
-  private boolean isDeleted(final HStoreKey hsk, final byte [] value,
-      final boolean checkMemcache, final Map<Text, List<Long>> deletes) {
-    if (checkMemcache && memcache.isDeleted(hsk)) {
-      return true;
-    }
-    List<Long> timestamps =
-      (deletes == null) ? null: deletes.get(hsk.getColumn());
-    if (timestamps != null &&
-        timestamps.contains(Long.valueOf(hsk.getTimestamp()))) {
-      return true;
-    }
-    if (value == null) {
-      // If a null value, shouldn't be in here.  Mark it as deleted cell.
-      return true;
-    }
-    if (!HLogEdit.isDeleted(value)) {
-      return false;
-    }
-    // Cell has delete value.  Save it into deletes.
-    if (deletes != null) {
-      if (timestamps == null) {
-        timestamps = new ArrayList<Long>();
-        deletes.put(hsk.getColumn(), timestamps);
-      }
-      // We know it's not already in the deletes list else we'd have returned
-      // earlier, so no need to test if timestamps already has this value.
-      timestamps.add(Long.valueOf(hsk.getTimestamp()));
-    }
-    return true;
-  }
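isDeleted() records delete markers per column so that an older, surviving version of the same cell, encountered later in an older store file, can be suppressed. Below is a simplified standalone sketch of that bookkeeping, with String columns standing in for Text; the class and parameter names are illustrative only.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class DeleteTrackerSketch {
  // column -> timestamps for which a delete marker has already been seen
  private final Map<String, List<Long>> deletes = new HashMap<>();

  /** Returns true if the cell should be treated as deleted; records new delete markers. */
  boolean isDeleted(String column, long timestamp, byte[] value, boolean isDeleteMarker) {
    List<Long> timestamps = deletes.get(column);
    if (timestamps != null && timestamps.contains(timestamp)) {
      return true;                       // a newer file already deleted this exact cell
    }
    if (value == null) {
      return true;                       // null values are treated as deleted
    }
    if (!isDeleteMarker) {
      return false;                      // ordinary value, keep it
    }
    // Remember the delete so older versions of the same cell are dropped later.
    deletes.computeIfAbsent(column, k -> new ArrayList<>()).add(timestamp);
    return true;
  }
}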
-  
-  /*
-   * It's assumed that the compactLock  will be acquired prior to calling this 
-   * method!  Otherwise, it is not thread-safe!
-   *
-   * It works by processing a compaction that's been written to disk.
-   * 
-   * <p>It is usually invoked at the end of a compaction, but might also be
-   * invoked at HStore startup, if the prior execution died midway through.
-   * 
-   * <p>Moving the compacted TreeMap into place means:
-   * <pre>
-   * 1) Wait for active scanners to exit
-   * 2) Acquire the write-lock
-   * 3) Figure out which MapFiles are going to be replaced
-   * 4) Move the new compacted MapFile into place
-   * 5) Unload all the replaced MapFiles
-   * 6) Delete all the old MapFile files
-   * 7) Load the new TreeMap
-   * 8) Release the write-lock
-   * 9) Allow new scanners to proceed
-   * </pre>
-   * 
-   * @param compactedFiles list of files that were compacted
-   * @param compactedFile HStoreFile that is the result of the compaction
-   * @throws IOException
-   */
-  private void completeCompaction(List<HStoreFile> compactedFiles,
-      HStoreFile compactedFile) throws IOException {
-    
-    // 1. Wait for active scanners to exit
-    
-    newScannerLock.writeLock().lock();                  // prevent new scanners
-    try {
-      synchronized (activeScanners) {
-        while (activeScanners.get() != 0) {
-          try {
-            activeScanners.wait();
-          } catch (InterruptedException e) {
-            // continue
-          }
-        }
-
-        // 2. Acquiring the HStore write-lock
-        this.lock.writeLock().lock();
-      }
-
-      try {
-        // 3. Moving the new MapFile into place.
-        
-        HStoreFile finalCompactedFile = new HStoreFile(conf, fs, basedir,
-            info.getEncodedName(), family.getFamilyName(), -1, null);
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("moving " + compactedFile.toString() + " in " +
-              this.compactionDir.toString() + " to " +
-              finalCompactedFile.toString() + " in " + basedir.toString() +
-              " for " + this.storeName);
-        }
-        if (!compactedFile.rename(this.fs, finalCompactedFile)) {
-          LOG.error("Failed move of compacted file " +
-              finalCompactedFile.toString() + " for " + this.storeName);
-          return;
-        }
-
-        // 4. and 5. Unload all the replaced MapFiles, close and delete.
-        
-        List<Long> toDelete = new ArrayList<Long>();
-        for (Map.Entry<Long, HStoreFile> e: this.storefiles.entrySet()) {
-          if (!compactedFiles.contains(e.getValue())) {
-            continue;
-          }
-          Long key = e.getKey();
-          MapFile.Reader reader = this.readers.remove(key);
-          if (reader != null) {
-            reader.close();
-          }
-          toDelete.add(key);
-        }
-
-        try {
-          for (Long key: toDelete) {
-            HStoreFile hsf = this.storefiles.remove(key);
-            hsf.delete();
-          }
-
-          // 6. Loading the new TreeMap.
-          Long orderVal = Long.valueOf(finalCompactedFile.loadInfo(fs));
-          this.readers.put(orderVal,
-            finalCompactedFile.getReader(this.fs, this.bloomFilter));
-          this.storefiles.put(orderVal, finalCompactedFile);
-        } catch (IOException e) {
-          e = RemoteExceptionHandler.checkIOException(e);
-          LOG.error("Failed replacing compacted files for " + this.storeName +
-              ". Compacted file is " + finalCompactedFile.toString() +
-              ".  Files replaced are " + compactedFiles.toString() +
-              " some of which may have been already removed", e);
-        }
-      } finally {
-        // 7. Releasing the write-lock
-        this.lock.writeLock().unlock();
-      }
-    } finally {
-      // 8. Allow new scanners to proceed.
-      newScannerLock.writeLock().unlock();
-    }
-  }
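completeCompaction() orders its locks deliberately: first block creation of new scanners, then wait for the active-scanner count to drain to zero, and only then take the store write lock and swap files. Here is a condensed sketch of that ordering using JDK primitives; the names are illustrative and the file moves are abstracted behind a Runnable.

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class CompactionSwapSketch {
  private final ReentrantReadWriteLock newScannerLock = new ReentrantReadWriteLock();
  private final ReentrantReadWriteLock storeLock = new ReentrantReadWriteLock();
  private final AtomicInteger activeScanners = new AtomicInteger(0);

  void swapInCompactedFile(Runnable moveFilesIntoPlace) throws InterruptedException {
    newScannerLock.writeLock().lock();          // 1. no new scanners may start
    try {
      synchronized (activeScanners) {
        while (activeScanners.get() != 0) {     // 2. wait for in-flight scanners to finish
          activeScanners.wait();
        }
        storeLock.writeLock().lock();           // 3. now exclude readers/writers of the store
      }
      try {
        moveFilesIntoPlace.run();               // 4-6. rename compacted file, drop replaced ones
      } finally {
        storeLock.writeLock().unlock();         // 7. readers may resume
      }
    } finally {
      newScannerLock.writeLock().unlock();      // 8. new scanners may be created again
    }
  }
}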
-
-  //////////////////////////////////////////////////////////////////////////////
-  // Accessors.  
-  // (This is the only section that is directly useful!)
-  //////////////////////////////////////////////////////////////////////////////
-  
-  /**
-   * Return all the available columns for the given key.  The key indicates a 
-   * row and timestamp, but not a column name.
-   *
-   * The passed <code>results</code> map is filled in, mapping column names to byte arrays (byte[]).
-   */
-  void getFull(HStoreKey key, TreeMap<Text, byte []> results)
-    throws IOException {
-    Map<Text, List<Long>> deletes = new HashMap<Text, List<Long>>();
-    
-    if (key == null) {
-      return;
-    }
-    
-    this.lock.readLock().lock();
-    try {
-      memcache.getFull(key, results);
-      MapFile.Reader[] maparray = getReaders();
-      for (int i = maparray.length - 1; i >= 0; i--) {
-        MapFile.Reader map = maparray[i];
-        synchronized(map) {
-          map.reset();
-          ImmutableBytesWritable readval = new ImmutableBytesWritable();
-          HStoreKey readkey = (HStoreKey)map.getClosest(key, readval);
-          if (readkey == null) {
-            continue;
-          }
-          do {
-            Text readcol = readkey.getColumn();
-            if (results.get(readcol) == null
-                && key.matchesWithoutColumn(readkey)) {
-              if(isDeleted(readkey, readval.get(), true, deletes)) {
-                break;
-              }
-              results.put(new Text(readcol), readval.get());
-              readval = new ImmutableBytesWritable();
-            } else if(key.getRow().compareTo(readkey.getRow()) < 0) {
-              break;
-            }
-            
-          } while(map.next(readkey, readval));
-        }
-      }
-      
-    } finally {
-      this.lock.readLock().unlock();
-    }
-  }
-  
-  MapFile.Reader [] getReaders() {
-    return this.readers.values().
-      toArray(new MapFile.Reader[this.readers.size()]);
-  }
-
-  /**
-   * Get the value for the indicated HStoreKey.  Grab the target value and the 
-   * previous 'numVersions-1' values, as well.
-   *
-   * 'numVersions' must be greater than zero; otherwise an IllegalArgumentException is thrown.
-   * @param key
-   * @param numVersions Number of versions to fetch.  Must be > 0.
-   * @return values for the specified versions
-   * @throws IOException
-   */
-  byte [][] get(HStoreKey key, int numVersions) throws IOException {
-    if (numVersions <= 0) {
-      throw new IllegalArgumentException("Number of versions must be > 0");
-    }
-    
-    this.lock.readLock().lock();
-    try {
-      // Check the memcache
-      List<byte[]> results = this.memcache.get(key, numVersions);
-      // If we got sufficient versions from memcache, return.
-      if (results.size() == numVersions) {
-        return ImmutableBytesWritable.toArray(results);
-      }
-
-      // Keep a list of deleted cell keys.  We need this because as we go through
-      // the store files, the cell with the delete marker may be in one file and
-      // the old non-delete cell value in a later store file. If we don't keep
-      // around the fact that the cell was deleted in a newer record, we end up
-      // returning the old value if user is asking for more than one version.
-      // This map of deletes should not be large since we are only keeping rows
-      // and columns that match those set on the scanner and which have delete
-      // values.  If memory usage becomes an issue, could redo as bloom filter.
-      Map<Text, List<Long>> deletes = new HashMap<Text, List<Long>>();
-      // This code below is very close to the body of the getKeys method.
-      MapFile.Reader[] maparray = getReaders();
-      for(int i = maparray.length - 1; i >= 0; i--) {
-        MapFile.Reader map = maparray[i];
-        synchronized(map) {
-          map.reset();
-          ImmutableBytesWritable readval = new ImmutableBytesWritable();
-          HStoreKey readkey = (HStoreKey)map.getClosest(key, readval);
-          if (readkey == null) {
-            // map.getClosest returns null if the passed key is > than the
-            // last key in the map file.  getClosest is a bit of a misnomer
-            // since it returns exact match or the next closest key AFTER not
-            // BEFORE.
-            continue;
-          }
-          if (!readkey.matchesRowCol(key)) {
-            continue;
-          }
-          if (!isDeleted(readkey, readval.get(), true, deletes)) {
-            results.add(readval.get());
-            // Perhaps only one version is wanted.  I could let this
-            // test happen later in the for loop test but it would cost
-            // the allocation of an ImmutableBytesWritable.
-            if (hasEnoughVersions(numVersions, results)) {
-              break;
-            }
-          }
-          for (readval = new ImmutableBytesWritable();
-              map.next(readkey, readval) &&
-              readkey.matchesRowCol(key) &&
-              !hasEnoughVersions(numVersions, results);
-              readval = new ImmutableBytesWritable()) {
-            if (!isDeleted(readkey, readval.get(), true, deletes)) {
-              results.add(readval.get());
-            }
-          }
-        }
-        if (hasEnoughVersions(numVersions, results)) {
-          break;
-        }
-      }
-      return results.size() == 0 ?
-        null : ImmutableBytesWritable.toArray(results);
-    } finally {
-      this.lock.readLock().unlock();
-    }
-  }
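The comment about MapFile.Reader#getClosest is the key subtlety in get(): it returns the exact match or the next key after the one asked for, never the one before. In JDK terms this is a ceiling lookup, as the small sketch below illustrates; it is a TreeMap analogy, not the MapFile API.

import java.util.TreeMap;

final class GetClosestAnalogy {
  public static void main(String[] args) {
    TreeMap<String, String> map = new TreeMap<>();
    map.put("row1/col1/100", "v1");
    map.put("row3/col1/100", "v3");

    // Exact-or-after lookup, like MapFile.Reader#getClosest(key, val):
    System.out.println(map.ceilingEntry("row2/col1/100")); // row3/... (the key AFTER, not before)
    System.out.println(map.ceilingEntry("row9/col1/100")); // null, like getClosest past the last key
  }
}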
-  
-  private boolean hasEnoughVersions(final int numVersions,
-      final List<byte []> results) {
-    return numVersions > 0 && results.size() >= numVersions;
-  }
-
-  /**
-   * Get <code>versions</code> keys matching the origin key's
-   * row/column/timestamp, plus keys of an older vintage.
-   * Default access so it can be accessed out of {@link HRegionServer}.
-   * @param origin Where to start searching.
-   * @param versions How many versions to return. Pass
-   * {@link HConstants#ALL_VERSIONS} to retrieve all.  Keys found in the
-   * memcache count against this limit.
-   * @return Up to <code>versions</code> matching keys, from the memcache plus
-   * any matching keys found in store files.
-   * @throws IOException
-   */
-  List<HStoreKey> getKeys(final HStoreKey origin, final int versions)
-    throws IOException {
-    
-    List<HStoreKey> keys = this.memcache.getKeys(origin, versions);
-    if (versions != ALL_VERSIONS && keys.size() >= versions) {
-      return keys;
-    }
-    
-    // This code below is very close to the body of the get method.
-    this.lock.readLock().lock();
-    try {
-      MapFile.Reader[] maparray = getReaders();
-      for(int i = maparray.length - 1; i >= 0; i--) {
-        MapFile.Reader map = maparray[i];
-        synchronized(map) {
-          map.reset();
-          
-          // do the priming read
-          ImmutableBytesWritable readval = new ImmutableBytesWritable();
-          HStoreKey readkey = (HStoreKey)map.getClosest(origin, readval);
-          if (readkey == null) {
-            // map.getClosest returns null if the passed key is > than the
-            // last key in the map file.  getClosest is a bit of a misnomer
-            // since it returns exact match or the next closest key AFTER not
-            // BEFORE.
-            continue;
-          }
-          
-          do{
-            // if the row matches, we might want this one.
-            if(rowMatches(origin, readkey)){
-              // if the cell matches, then we definitely want this key.
-              if (cellMatches(origin, readkey)) {
-                // store the key if it isn't deleted or superseded by what's
-                // in the memcache
-                if (!isDeleted(readkey, readval.get(), false, null) &&
-                    !keys.contains(readkey)) {
-                  keys.add(new HStoreKey(readkey));
-
-                  // if we've collected enough versions, then exit the loop.
-                  if (versions != ALL_VERSIONS && keys.size() >= versions) {
-                    break;
-                  }
-                }
-              } else {
-                // the cell doesn't match, but there might be more with different
-                // timestamps, so move to the next key
-                continue;
-              }
-            } else{
-              // the row doesn't match, so we've gone too far.
-              break;
-            }
-          }while(map.next(readkey, readval)); // advance to the next key
-        }
-      }
-      
-      return keys;
-    } finally {
-      this.lock.readLock().unlock();
-    }
-  }
-  
-  /**
-   * Find the key that matches <i>row</i> exactly, or the one that immediately
-   * precedes it.
-   */
-  public Text getRowKeyAtOrBefore(final Text row, final long timestamp)
-  throws IOException{
-    // if the exact key is found, return that key
-    // if we find a key that is greater than our search key, then use the 
-    // last key we processed, and if that was null, return null.
-
-    Text foundKey = memcache.getRowKeyAtOrBefore(row, timestamp);
-    if (foundKey != null) {
-      return foundKey;
-    }
-    
-    // obtain read lock
-    this.lock.readLock().lock();
-    try {
-      MapFile.Reader[] maparray = getReaders();
-      
-      Text bestSoFar = null;
-      
-      HStoreKey rowKey = new HStoreKey(row, timestamp);
-      
-      // process each store file
-      for(int i = maparray.length - 1; i >= 0; i--) {
-        Text row_from_mapfile = 
-          rowAtOrBeforeFromMapFile(maparray[i], row, timestamp);
-
-        // for when we have MapFile.Reader#getClosest before functionality
-/*        Text row_from_mapfile = null;
-        WritableComparable value = null; 
-        
-        HStoreKey hskResult = 
-          (HStoreKey)maparray[i].getClosest(rowKey, value, true);
-        
-        if (hskResult != null) {
-          row_from_mapfile = hskResult.getRow();
-        }*/
-                
-/*        LOG.debug("Best from this mapfile was " + row_from_mapfile);*/
-        
-        // short circuit on an exact match
-        if (row.equals(row_from_mapfile)) {
-          return row;
-        }
-        
-        // check to see if we've found a new closest row key as a result
-        if (bestSoFar == null || bestSoFar.compareTo(row_from_mapfile) < 0) {
-          bestSoFar = row_from_mapfile;
-        }
-      }
-      
-/*      LOG.debug("Went searching for " + row + ", found " + bestSoFar);*/
-      return bestSoFar;
-    } finally {
-      this.lock.readLock().unlock();
-    }
-  }
-  
-  /**
-   * Check an individual MapFile for the row at or before a given key 
-   * and timestamp
-   */
-  private Text rowAtOrBeforeFromMapFile(MapFile.Reader map, Text row, 
-    long timestamp)
-  throws IOException {
-    HStoreKey searchKey = new HStoreKey(row, timestamp);
-    Text previousRow = null;
-    ImmutableBytesWritable readval = new ImmutableBytesWritable();
-    HStoreKey readkey = new HStoreKey();
-    
-    synchronized(map) {
-      // don't bother with the rest of this if the file is empty
-      map.reset();
-      if (!map.next(readkey, readval)) {
-        return null;
-      }
-      
-      HStoreKey finalKey = new HStoreKey(); 
-      map.finalKey(finalKey);
-      if (finalKey.getRow().compareTo(row) < 0) {
-        return finalKey.getRow();
-      }
-      
-      // seek to the exact row, or the one that would be immediately before it
-      readkey = (HStoreKey)map.getClosest(searchKey, readval, true);
-      
-      if (readkey == null) {
-        // didn't find anything that would match, so return null
-        return null;
-      }
-      
-      do {
-        if (readkey.getRow().compareTo(row) == 0) {
-          // exact match on row
-          if (readkey.getTimestamp() <= timestamp) {
-            // timestamp fits, return this key
-            return readkey.getRow();
-          }
-          // getting here means that we matched the row, but the timestamp
-          // is too recent - hopefully one of the next cells will match
-          // better, so keep rolling
-          continue;
-        } else if (readkey.getRow().toString().compareTo(row.toString()) > 0 ) {
-          // if the row key we just read is beyond the key we're searching for,
-          // then we're done; return the last key we saw before this one
-          return previousRow;
-        } else {
-          // so, the row key doesn't match, and we haven't gone past the row
-          // we're seeking yet, so this row is a candidate for closest, as
-          // long as the timestamp is correct.
-          if (readkey.getTimestamp() <= timestamp) {
-            previousRow = new Text(readkey.getRow());
-          }
-          // otherwise, ignore this key, because it doesn't fulfill our 
-          // requirements.
-        }        
-      } while(map.next(readkey, readval));
-    }
-    // getting here means we exhausted all of the cells in the mapfile.
-    // whatever satisfying row we reached previously is the row we should 
-    // return
-    return previousRow;
-  }
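getRowKeyAtOrBefore() has the opposite contract from getClosest: return the exact row, or the closest row before it. Over a sorted set of rows this is a floor lookup; the one-screen TreeSet analogy below shows the semantics. Again this is a JDK analogy for illustration, not the MapFile scan implemented above.

import java.util.TreeSet;

final class AtOrBeforeAnalogy {
  public static void main(String[] args) {
    TreeSet<String> rows = new TreeSet<>();
    rows.add("apple");
    rows.add("mango");

    System.out.println(rows.floor("mango"));    // "mango"  (exact match wins)
    System.out.println(rows.floor("banana"));   // "apple"  (closest row before)
    System.out.println(rows.floor("aardvark")); // null     (nothing at or before)
  }
}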
-  
-  /**
-   * Test that the <i>target</i> matches the <i>origin</i>. If the 
-   * <i>origin</i> has an empty column, then it's assumed to mean any column 
-   * matches and only match on row and timestamp. Otherwise, it compares the
-   * keys with HStoreKey.matchesRowCol().
-   * @param origin The key we're testing against
-   * @param target The key we're testing
-   */
-  private boolean cellMatches(HStoreKey origin, HStoreKey target){
-    // if the origin's column is empty, then we're matching any column
-    if (origin.getColumn().equals(new Text())){
-      // if the row matches, then...
-      if (target.getRow().equals(origin.getRow())) {
-        // check the timestamp
-        return target.getTimestamp() <= origin.getTimestamp();
-      }
-      return false;
-    }
-    // otherwise, we want to match on row and column
-    return target.matchesRowCol(origin);
-  }
-    
-  /**
-   * Test that the <i>target</i> matches the <i>origin</i>. If the <i>origin</i>
-   * has an empty column, then it just tests row equivalence. Otherwise, it uses
-   * HStoreKey.matchesRowCol().
-   * @param origin Key we're testing against
-   * @param target Key we're testing
-   */
-  private boolean rowMatches(HStoreKey origin, HStoreKey target){
-    // if the origin's column is empty, then we're matching any column
-    if (origin.getColumn().equals(new Text())){
-      // if the row matches, then...
-      return target.getRow().equals(origin.getRow());
-    }
-    // otherwise, we want to match on row and column
-    return target.matchesRowCol(origin);
-  }
-  
-  /*
-   * Data structure to hold result of a look at store file sizes.
-   */
-  static class HStoreSize {
-    final long aggregate;
-    final long largest;
-    boolean splitable;
-    
-    HStoreSize(final long a, final long l, final boolean s) {
-      this.aggregate = a;
-      this.largest = l;
-      this.splitable = s;
-    }
-    
-    long getAggregate() {
-      return this.aggregate;
-    }
-    
-    long getLargest() {
-      return this.largest;
-    }
-    
-    boolean isSplitable() {
-      return this.splitable;
-    }
-    
-    void setSplitable(final boolean s) {
-      this.splitable = s;
-    }
-  }
-  
-  /**
-   * Gets size for the store.
-   * 
-   * @param midKey Gets set to the middle key of the largest splitable store
-   * file, or is left empty if the largest store file is not splitable.
-   * @return Sizes for the store.  The passed <code>midKey</code> is set to the
-   * midkey of the largest splitable store file; otherwise it is left empty to
-   * indicate we couldn't find a midkey to split on.
-   */
-  HStoreSize size(Text midKey) {
-    long maxSize = 0L;
-    long aggregateSize = 0L;
-    // Not splitable if we find a reference store file present in the store.
-    boolean splitable = true;
-    if (this.storefiles.size() <= 0) {
-      return new HStoreSize(0, 0, splitable);
-    }
-    
-    this.lock.readLock().lock();
-    try {
-      Long mapIndex = Long.valueOf(0L);
-      // Iterate through all the MapFiles
-      for(Map.Entry<Long, HStoreFile> e: storefiles.entrySet()) {
-        HStoreFile curHSF = e.getValue();
-        long size = curHSF.length();
-        aggregateSize += size;
-        if (maxSize == 0L || size > maxSize) {
-          // This is the largest one so far
-          maxSize = size;
-          mapIndex = e.getKey();
-        }
-        if (splitable) {
-          splitable = !curHSF.isReference();
-        }
-      }
-      MapFile.Reader r = this.readers.get(mapIndex);
-      
-      // seek back to the beginning of mapfile
-      r.reset();
-      
-      // get the first and last keys
-      HStoreKey firstKey = new HStoreKey();
-      HStoreKey lastKey = new HStoreKey();
-      Writable value = new ImmutableBytesWritable();
-      r.next((WritableComparable)firstKey, value);
-      r.finalKey((WritableComparable)lastKey);
-      
-      // get the midkey
-      HStoreKey midkey = (HStoreKey)r.midKey();
-
-      if (midkey != null) {
-        midKey.set(((HStoreKey)midkey).getRow());
-        // if the midkey is the same as the first and last keys, then we cannot
-        // (ever) split this region. 
-        if (midkey.getRow().equals(firstKey.getRow()) && 
-          midkey.getRow().equals(lastKey.getRow())) {
-          return new HStoreSize(aggregateSize, maxSize, false);
-        } 
-      }
-    } catch(IOException e) {
-      LOG.warn("Failed getting store size for " + this.storeName, e);
-    } finally {
-      this.lock.readLock().unlock();
-    }
-    return new HStoreSize(aggregateSize, maxSize, splitable);
-  }
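size() picks the largest store file, asks its MapFile for a midkey, and refuses to split when that midkey equals both the first and last row (the whole file is effectively one row), or when any reference files are present. A compact sketch of just that decision follows; the names are illustrative and the inputs are assumed to come from the largest store file.

final class SplitDecisionSketch {
  /** Returns the split row, or null if the store should not be split. */
  static String chooseSplitRow(String firstRow, String midRow, String lastRow,
      boolean anyReferenceFiles) {
    if (anyReferenceFiles) {
      return null;                    // references must be compacted away before splitting
    }
    if (midRow == null) {
      return null;                    // no midkey available
    }
    if (midRow.equals(firstRow) && midRow.equals(lastRow)) {
      return null;                    // the largest file holds a single row; cannot split
    }
    return midRow;                    // split the region at the midkey's row
  }
}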
-  
-  //////////////////////////////////////////////////////////////////////////////
-  // File administration
-  //////////////////////////////////////////////////////////////////////////////
-
-  /**
-   * Return a scanner for both the memcache and the HStore files
-   */
-  HInternalScannerInterface getScanner(long timestamp, Text targetCols[],
-      Text firstRow, RowFilterInterface filter) throws IOException {
-
-    newScannerLock.readLock().lock();           // ability to create a new
-                                                // scanner during a compaction
-    try {
-      lock.readLock().lock();                   // lock HStore
-      try {
-        return new HStoreScanner(targetCols, firstRow, timestamp, filter);
-
-      } finally {
-        lock.readLock().unlock();
-      }
-    } finally {
-      newScannerLock.readLock().unlock();
-    }
-  }
-
-  /** {@inheritDoc} */
-  @Override
-  public String toString() {
-    return this.storeName;
-  }
-
-  /*
-   * @see HStoreFile#writeSplitInfo(FileSystem fs)
-   */
-  static HStoreFile.Reference readSplitInfo(final Path p, final FileSystem fs)
-  throws IOException {
-    FSDataInputStream in = fs.open(p);
-    try {
-      HStoreFile.Reference r = new HStoreFile.Reference();
-      r.readFields(in);
-      return r;
-    } finally {
-      in.close();
-    }
-  }
-
-  /**
-   * @param p Path to check.
-   * @return True if the path has format of a HStoreFile reference.
-   */
-  public static boolean isReference(final Path p) {
-    return isReference(p, REF_NAME_PARSER.matcher(p.getName()));
-  }
- 
-  private static boolean isReference(final Path p, final Matcher m) {
-    if (m == null || !m.matches()) {
-      LOG.warn("Failed match of store file name " + p.toString());
-      throw new RuntimeException("Failed match of store file name " +
-          p.toString());
-    }
-    return m.groupCount() > 1 && m.group(2) != null;
-  }
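isReference() relies on REF_NAME_PARSER (defined earlier in this class, not shown here) matching two groups: a plain store file name fills only the first group, while a reference name of the form id.encodedRegionName also fills the second. The sketch below shows how such a check might look; the pattern used is only an assumption about the real REF_NAME_PARSER, based on the naming described in the HStoreFile javadoc further down.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

final class ReferenceNameSketch {
  // Stand-in pattern: group(1) = file id, optional group(2) = referenced region name.
  // The real REF_NAME_PARSER may differ; this is illustrative only.
  private static final Pattern REF_NAME = Pattern.compile("^(\\d+)(?:\\.(.+))?$");

  static boolean isReference(String fileName) {
    Matcher m = REF_NAME.matcher(fileName);
    if (!m.matches()) {
      throw new RuntimeException("Failed match of store file name " + fileName);
    }
    return m.group(2) != null;        // second group present only for references
  }

  public static void main(String[] args) {
    System.out.println(isReference("1278437856009925445"));             // false
    System.out.println(isReference("1278437856009925445.otherRegion")); // true
  }
}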
-
-  /**
-   * A scanner that iterates through the HStore files
-   */
-  private class StoreFileScanner extends HAbstractScanner {
-    @SuppressWarnings("hiding")
-    private MapFile.Reader[] readers;
-    
-    StoreFileScanner(long timestamp, Text[] targetCols, Text firstRow)
-    throws IOException {
-      super(timestamp, targetCols);
-      try {
-        this.readers = new MapFile.Reader[storefiles.size()];
-        
-        // Most recent map file should be first
-        int i = readers.length - 1;
-        for(HStoreFile curHSF: storefiles.values()) {
-          readers[i--] = curHSF.getReader(fs, bloomFilter);
-        }
-        
-        this.keys = new HStoreKey[readers.length];
-        this.vals = new byte[readers.length][];
-        
-        // Advance the readers to the first pos.
-        for(i = 0; i < readers.length; i++) {
-          keys[i] = new HStoreKey();
-          
-          if(firstRow.getLength() != 0) {
-            if(findFirstRow(i, firstRow)) {
-              continue;
-            }
-          }
-          
-          while(getNext(i)) {
-            if(columnMatch(i)) {
-              break;
-            }
-          }
-        }
-        
-      } catch (Exception ex) {
-        close();
-        IOException e = new IOException("StoreFileScanner failed construction");
-        e.initCause(ex);
-        throw e;
-      }
-    }
-
-    /**
-     * The user didn't want to start scanning at the first row. This method
-     * seeks to the requested row.
-     *
-     * @param i         - which iterator to advance
-     * @param firstRow  - seek to this row
-     * @return          - true if this is the first row or if the row was not found
-     */
-    @Override
-    boolean findFirstRow(int i, Text firstRow) throws IOException {
-      ImmutableBytesWritable ibw = new ImmutableBytesWritable();
-      HStoreKey firstKey
-        = (HStoreKey)readers[i].getClosest(new HStoreKey(firstRow), ibw);
-      if (firstKey == null) {
-        // Didn't find it. Close the scanner and return TRUE
-        closeSubScanner(i);
-        return true;
-      }
-      this.vals[i] = ibw.get();
-      keys[i].setRow(firstKey.getRow());
-      keys[i].setColumn(firstKey.getColumn());
-      keys[i].setVersion(firstKey.getTimestamp());
-      return columnMatch(i);
-    }
-    
-    /**
-     * Get the next value from the specified reader.
-     * 
-     * @param i - which reader to fetch next value from
-     * @return - true if there is more data available
-     */
-    @Override
-    boolean getNext(int i) throws IOException {
-      boolean result = false;
-      ImmutableBytesWritable ibw = new ImmutableBytesWritable();
-      while (true) {
-        if (!readers[i].next(keys[i], ibw)) {
-          closeSubScanner(i);
-          break;
-        }
-        if (keys[i].getTimestamp() <= this.timestamp) {
-          vals[i] = ibw.get();
-          result = true;
-          break;
-        }
-      }
-      return result;
-    }
-    
-    /** Close down the indicated reader. */
-    @Override
-    void closeSubScanner(int i) {
-      try {
-        if(readers[i] != null) {
-          try {
-            readers[i].close();
-          } catch(IOException e) {
-            LOG.error(storeName + " closing sub-scanner", e);
-          }
-        }
-        
-      } finally {
-        readers[i] = null;
-        keys[i] = null;
-        vals[i] = null;
-      }
-    }
-
-    /** Shut it down! */
-    public void close() {
-      if(! scannerClosed) {
-        try {
-          for(int i = 0; i < readers.length; i++) {
-            if(readers[i] != null) {
-              try {
-                readers[i].close();
-              } catch(IOException e) {
-                LOG.error(storeName + " closing scanner", e);
-              }
-            }
-          }
-          
-        } finally {
-          scannerClosed = true;
-        }
-      }
-    }
-  }
-  
-  /**
-   * Scanner scans both the memcache and the HStore
-   */
-  private class HStoreScanner implements HInternalScannerInterface {
-    private HInternalScannerInterface[] scanners;
-    private TreeMap<Text, byte []>[] resultSets;
-    private HStoreKey[] keys;
-    private boolean wildcardMatch = false;
-    private boolean multipleMatchers = false;
-    private RowFilterInterface dataFilter;
-
-    /** Create an Scanner with a handle on the memcache and HStore files. */
-    @SuppressWarnings("unchecked")
-    HStoreScanner(Text[] targetCols, Text firstRow, long timestamp,
-        RowFilterInterface filter) throws IOException {
-      
-      this.dataFilter = filter;
-      if (null != dataFilter) {
-        dataFilter.reset();
-      }
-      this.scanners = new HInternalScannerInterface[2];
-      this.resultSets = new TreeMap[scanners.length];
-      this.keys = new HStoreKey[scanners.length];
-
-      try {
-        scanners[0] = memcache.getScanner(timestamp, targetCols, firstRow);
-        scanners[1] = new StoreFileScanner(timestamp, targetCols, firstRow);
-        
-        for (int i = 0; i < scanners.length; i++) {
-          if (scanners[i].isWildcardScanner()) {
-            this.wildcardMatch = true;
-          }
-          if (scanners[i].isMultipleMatchScanner()) {
-            this.multipleMatchers = true;
-          }
-        }
-
-      } catch(IOException e) {
-        for (int i = 0; i < this.scanners.length; i++) {
-          if(scanners[i] != null) {
-            closeScanner(i);
-          }
-        }
-        throw e;
-      }
-      
-      // Advance to the first key in each scanner.
-      // All results will match the required column-set and scanTime.
-      
-      for (int i = 0; i < scanners.length; i++) {
-        keys[i] = new HStoreKey();
-        resultSets[i] = new TreeMap<Text, byte []>();
-        if(scanners[i] != null && !scanners[i].next(keys[i], resultSets[i])) {
-          closeScanner(i);
-        }
-      }
-      // As we have now successfully completed initialization, increment the
-      // activeScanner count.
-      activeScanners.incrementAndGet();
-    }
-
-    /** @return true if the scanner is a wild card scanner */
-    public boolean isWildcardScanner() {
-      return wildcardMatch;
-    }
-
-    /** @return true if the scanner is a multiple match scanner */
-    public boolean isMultipleMatchScanner() {
-      return multipleMatchers;
-    }
-
-    /** {@inheritDoc} */
-    public boolean next(HStoreKey key, SortedMap<Text, byte[]> results)
-      throws IOException {
-
-      // Filtered flag is set by filters.  If a cell has been 'filtered out'
-      // -- i.e. it is not to be returned to the caller -- the flag is 'true'.
-      boolean filtered = true;
-      boolean moreToFollow = true;
-      while (filtered && moreToFollow) {
-        // Find the lowest-possible key.
-        Text chosenRow = null;
-        long chosenTimestamp = -1;
-        for (int i = 0; i < this.keys.length; i++) {
-          if (scanners[i] != null &&
-              (chosenRow == null ||
-              (keys[i].getRow().compareTo(chosenRow) < 0) ||
-              ((keys[i].getRow().compareTo(chosenRow) == 0) &&
-              (keys[i].getTimestamp() > chosenTimestamp)))) {
-            chosenRow = new Text(keys[i].getRow());
-            chosenTimestamp = keys[i].getTimestamp();
-          }
-        }
-        
-        // Filter whole row by row key?
-        filtered = dataFilter != null? dataFilter.filter(chosenRow) : false;
-
-        // Store the key and results for each sub-scanner. Merge them as
-        // appropriate.
-        if (chosenTimestamp >= 0 && !filtered) {
-          // Here we are setting the passed in key with current row+timestamp
-          key.setRow(chosenRow);
-          key.setVersion(chosenTimestamp);
-          key.setColumn(HConstants.EMPTY_TEXT);
-          // Keep list of deleted cell keys within this row.  We need this
-          // because as we go through scanners, the delete record may be in an
-          // early scanner and then the same record with a non-delete, non-null
-          // value in a later. Without history of what we've seen, we'll return
-          // deleted values. This List should not ever grow too large since we
-          // are only keeping rows and columns that match those set on the
-          // scanner and which have delete values.  If memory usage becomes a
-          // problem, could redo as bloom filter.
-          List<HStoreKey> deletes = new ArrayList<HStoreKey>();
-          for (int i = 0; i < scanners.length && !filtered; i++) {
-            while ((scanners[i] != null
-                && !filtered
-                && moreToFollow)
-                && (keys[i].getRow().compareTo(chosenRow) == 0)) {
-              // If we are doing a wild card match or there are multiple
-              // matchers per column, we need to scan all the older versions of 
-              // this row to pick up the rest of the family members
-              if (!wildcardMatch
-                  && !multipleMatchers
-                  && (keys[i].getTimestamp() != chosenTimestamp)) {
-                break;
-              }
-
-              // Filter out null criteria columns that are not null
-              if (dataFilter != null) {
-                filtered = dataFilter.filterNotNull(resultSets[i]);
-              }
-
-              // NOTE: We used to do results.putAll(resultSets[i]);
-              // but this had the effect of overwriting newer
-              // values with older ones. So now we only insert
-              // a result if the map does not contain the key.
-              HStoreKey hsk = new HStoreKey(key.getRow(), EMPTY_TEXT,
-                key.getTimestamp());
-              for (Map.Entry<Text, byte[]> e : resultSets[i].entrySet()) {
-                hsk.setColumn(e.getKey());
-                if (HLogEdit.isDeleted(e.getValue())) {
-                  if (!deletes.contains(hsk)) {
-                    // Key changes as we cycle the for loop so add a copy to
-                    // the set of deletes.
-                    deletes.add(new HStoreKey(hsk));
-                  }
-                } else if (!deletes.contains(hsk) &&
-                    !filtered &&
-                    moreToFollow &&
-                    !results.containsKey(e.getKey())) {
-                  if (dataFilter != null) {
-                    // Filter whole row by column data?
-                    filtered =
-                        dataFilter.filter(chosenRow, e.getKey(), e.getValue());
-                    if (filtered) {
-                      results.clear();
-                      break;
-                    }
-                  }
-                  results.put(e.getKey(), e.getValue());
-                }
-              }
-              resultSets[i].clear();
-              if (!scanners[i].next(keys[i], resultSets[i])) {
-                closeScanner(i);
-              }
-            }
-          }          
-        }
-        
-        for (int i = 0; i < scanners.length; i++) {
-          // If the current scanner is non-null AND has a lower-or-equal
-          // row label, then its timestamp is bad. We need to advance it.
-          while ((scanners[i] != null) &&
-              (keys[i].getRow().compareTo(chosenRow) <= 0)) {
-            resultSets[i].clear();
-            if (!scanners[i].next(keys[i], resultSets[i])) {
-              closeScanner(i);
-            }
-          }
-        }
-
-        moreToFollow = chosenTimestamp >= 0;
-        
-        if (dataFilter != null) {
-          if (moreToFollow) {
-            dataFilter.rowProcessed(filtered, chosenRow);
-          }
-          if (dataFilter.filterAllRemaining()) {
-            moreToFollow = false;
-          }
-        }
-        
-        if (results.size() <= 0 && !filtered) {
-          // There were no results found for this row.  Mark it as
-          // 'filtered' out, otherwise we will not move on to the next row.
-          filtered = true;
-        }
-      }
-      
-      // If we got no results, then there is no more to follow.
-      if (results == null || results.size() <= 0) {
-        moreToFollow = false;
-      }
-      
-      // Make sure scanners closed if no more results
-      if (!moreToFollow) {
-        for (int i = 0; i < scanners.length; i++) {
-          if (null != scanners[i]) {
-            closeScanner(i);
-          }
-        }
-      }
-      
-      return moreToFollow;
-    }
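The row-selection step at the top of next() can be read as a small comparator: among the scanners' current keys, pick the smallest row, and among equal rows the newest timestamp. The sketch below isolates just that selection with simplified types; the class and field names are illustrative only.

final class ChooseNextRowSketch {
  static final class Key {
    final String row;
    final long timestamp;
    Key(String row, long timestamp) { this.row = row; this.timestamp = timestamp; }
  }

  /** Index of the scanner whose key should be processed next, or -1 if none remain. */
  static int chooseNext(Key[] currentKeys) {
    int chosen = -1;
    for (int i = 0; i < currentKeys.length; i++) {
      Key k = currentKeys[i];
      if (k == null) {
        continue;                                                   // this scanner is exhausted
      }
      if (chosen == -1
          || k.row.compareTo(currentKeys[chosen].row) < 0           // strictly smaller row
          || (k.row.equals(currentKeys[chosen].row)
              && k.timestamp > currentKeys[chosen].timestamp)) {    // same row, newer first
        chosen = i;
      }
    }
    return chosen;
  }
}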
-
-    
-    /** Shut down a single scanner */
-    void closeScanner(int i) {
-      try {
-        try {
-          scanners[i].close();
-        } catch (IOException e) {
-          LOG.warn(storeName + " failed closing scanner " + i, e);
-        }
-      } finally {
-        scanners[i] = null;
-        keys[i] = null;
-        resultSets[i] = null;
-      }
-    }
-
-    /** {@inheritDoc} */
-    public void close() {
-      try {
-      for(int i = 0; i < scanners.length; i++) {
-        if(scanners[i] != null) {
-          closeScanner(i);
-        }
-      }
-      } finally {
-        synchronized (activeScanners) {
-          int numberOfScanners = activeScanners.decrementAndGet();
-          if (numberOfScanners < 0) {
-            LOG.error(storeName +
-                " number of active scanners less than zero: " +
-                numberOfScanners + " resetting to zero");
-            activeScanners.set(0);
-            numberOfScanners = 0;
-          }
-          if (numberOfScanners == 0) {
-            activeScanners.notifyAll();
-          }
-        }
-
-      }
-    }
-
-    /** {@inheritDoc} */
-    public Iterator<Entry<HStoreKey, SortedMap<Text, byte[]>>> iterator() {
-      throw new UnsupportedOperationException("Unimplemented serverside. " +
-        "next(HStoreKey, StortedMap(...) is more efficient");
-    }
-  }
-}

+ 0 - 878
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java

@@ -1,878 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.DataInput;
-import java.io.DataInputStream;
-import java.io.DataOutput;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.util.List;
-import java.util.Random;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.MapFile;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
-import org.onelab.filter.Filter;
-import org.onelab.filter.Key;
-
-
-/**
- * A HStore data file.  HStores usually have one or more of these files.  They
- * are produced by flushing the memcache to disk.
- *
- * <p>Each HStore maintains a bunch of different data files. The filename is a
- * mix of the parent dir, the region name, the column name, and a file
- * identifier. The name may also be a reference to a store file located
- * elsewhere. This class handles all that path-building stuff for you.
- * 
- * <p>An HStoreFile usually tracks 4 things: its parent dir, the region
- * identifier, the column family, and the file identifier.  If you know those
- * four things, you know how to obtain the right HStoreFile.  HStoreFiles may
-   * also reference store files in another region, serving either
-   * the top half of the remote file or the bottom half.  Such references
-   * are made when splitting regions.
- * 
- * <p>Plain HStoreFiles are named for a randomly generated id as in:
-   * <code>1278437856009925445</code>.  A file by this name is made in both the
-   * <code>mapfiles</code> and <code>info</code> subdirectories of an
-   * HStore column family directory: E.g. If the column family is 'anchor:', then
- * under the region directory there is a subdirectory named 'anchor' within
- * which is a 'mapfiles' and 'info' subdirectory.  In each will be found a
- * file named something like <code>1278437856009925445</code>, one to hold the
- * data in 'mapfiles' and one under 'info' that holds the sequence id for this
- * store file.
- * 
- * <p>References to store files located over in some other region look like
- * this:
- * <code>1278437856009925445.hbaserepository,qAReLZD-OyQORZWq_vqR1k==,959247014679548184</code>:
- * i.e. an id followed by the name of the referenced region.  The data
- * ('mapfiles') of HStoreFile references are empty. The accompanying
- * <code>info</code> file contains the
- * midkey, the id of the remote store we're referencing and whether we're
- * to serve the top or bottom region of the remote store file.  Note, a region
-   * is not splittable if it has instances of store file references (References
- * are cleaned up by compactions).
- * 
- * <p>When merging or splitting HRegions, we might want to modify one of the 
- * params for an HStoreFile (effectively moving it elsewhere).
- */
-public class HStoreFile implements HConstants {
-  static final Log LOG = LogFactory.getLog(HStoreFile.class.getName());
-  static final byte INFO_SEQ_NUM = 0;
-  static final String HSTORE_DATFILE_DIR = "mapfiles";
-  static final String HSTORE_INFO_DIR = "info";
-  static final String HSTORE_FILTER_DIR = "filter";
-  
-  /** 
-   * For split HStoreFiles, specifies if the file covers the lower half or
-   * the upper half of the key range
-   */
-  public static enum Range {
-    /** HStoreFile contains upper half of key range */
-    top,
-    /** HStoreFile contains lower half of key range */
-    bottom
-  }
-  
-  private final static Random rand = new Random();
-
-  private final Path basedir;
-  private final String encodedRegionName;
-  private final Text colFamily;
-  private final long fileId;
-  private final HBaseConfiguration conf;
-  private final FileSystem fs;
-  private final Reference reference;
-
-  /**
-   * Constructor that fully initializes the object
-   * @param conf Configuration object
-   * @param basedir qualified path that is parent of region directory
-   * @param encodedRegionName file name friendly name of the region
-   * @param colFamily name of the column family
-   * @param fileId file identifier
-   * @param ref Reference to another HStoreFile.
-   * @throws IOException
-   */
-  HStoreFile(HBaseConfiguration conf, FileSystem fs, Path basedir,
-      String encodedRegionName, Text colFamily, long fileId,
-      final Reference ref) throws IOException {
-    this.conf = conf;
-    this.fs = fs;
-    this.basedir = basedir;
-    this.encodedRegionName = encodedRegionName;
-    this.colFamily = new Text(colFamily);
-    
-    long id = fileId;
-    if (id == -1) {
-      Path mapdir = HStoreFile.getMapDir(basedir, encodedRegionName, colFamily);
-      Path testpath = null;
-      do {
-        id = Math.abs(rand.nextLong());
-        testpath = new Path(mapdir, createHStoreFilename(id, null));
-      } while(fs.exists(testpath));
-    }
-    this.fileId = id;
-    
-    // If a reference, construction does not write the pointer files.  That's
-    // done by invocations of writeReferenceFiles(hsf, fs).  Happens at fast
-    // split time.
-    this.reference = ref;
-  }
-
-  /** @return true if this store file is a reference to another store file */
-  boolean isReference() {
-    return reference != null;
-  }
-  
-  Reference getReference() {
-    return reference;
-  }
-
-  String getEncodedRegionName() {
-    return encodedRegionName;
-  }
-
-  /** @return the column family */
-  Text getColFamily() {
-    return colFamily;
-  }
-
-  /** @return the file identifier */
-  long getFileId() {
-    return fileId;
-  }
-
-  // Build full filenames from those components
-  
-  /** @return path for MapFile */
-  Path getMapFilePath() {
-    if (isReference()) {
-      return getMapFilePath(encodedRegionName, fileId,
-          reference.getEncodedRegionName());
-    }
-    return getMapFilePath(encodedRegionName, fileId, null);
-  }
-
-  private Path getMapFilePath(final Reference r) {
-    if (r == null) {
-      return getMapFilePath();
-    }
-    return getMapFilePath(r.getEncodedRegionName(), r.getFileId(), null);
-  }
-
-  private Path getMapFilePath(final String encodedName, final long fid,
-      final String ern) {
-    return new Path(HStoreFile.getMapDir(basedir, encodedName, colFamily), 
-      createHStoreFilename(fid, ern));
-  }
-
-  /** @return path for info file */
-  Path getInfoFilePath() {
-    if (isReference()) {
-      return getInfoFilePath(encodedRegionName, fileId,
-          reference.getEncodedRegionName());
- 
-    }
-    return getInfoFilePath(encodedRegionName, fileId, null);
-  }
-  
-  private Path getInfoFilePath(final String encodedName, final long fid,
-      final String ern) {
-    return new Path(HStoreFile.getInfoDir(basedir, encodedName, colFamily), 
-      createHStoreFilename(fid, ern));
-  }
-
-  // File handling
-
-  /*
-   * Split by making two new store files that reference the top and bottom
-   * halves of the original store file.
-   * @param dstA the file which will contain keys from the start of the source
-   * @param dstB the file which will contain keys from the midkey to the end
-   * of the source
-   * @param fs file system
-   * @throws IOException
-   */
-  void splitStoreFile(final HStoreFile dstA, final HStoreFile dstB,
-      final FileSystem fs)
-  throws IOException {
-    dstA.writeReferenceFiles(fs);
-    dstB.writeReferenceFiles(fs);
-  }
-  
-  void writeReferenceFiles(final FileSystem fs)
-  throws IOException {
-    createOrFail(fs, getMapFilePath());
-    writeSplitInfo(fs);
-  }
-  
-  /*
-   * If a reference, create the info file and write out the id of the
-   * referenced store file, the midkey and whether we serve the top or
-   * bottom half of the referent.
-   * @param fs file system
-   * @throws IOException
-   */
-  private void writeSplitInfo(final FileSystem fs) throws IOException {
-    Path p = getInfoFilePath();
-    if (fs.exists(p)) {
-      throw new IOException("File already exists " + p.toString());
-    }
-    FSDataOutputStream out = fs.create(p);
-    try {
-      reference.write(out);
-    } finally {
-      out.close();
-   }
-  }
-  
-  private void createOrFail(final FileSystem fs, final Path p)
-  throws IOException {
-    if (fs.exists(p)) {
-      throw new IOException("File already exists " + p.toString());
-    }
-    if (!fs.createNewFile(p)) {
-      throw new IOException("Failed create of " + p);
-    }
-  }
-
-  /**
-   * Merges the contents of the given source HStoreFiles into a single new one.
-   *
-   * @param srcFiles files to be merged
-   * @param fs file system
-   * @param conf configuration object
-   * @throws IOException
-   */
-  void mergeStoreFiles(List<HStoreFile> srcFiles, FileSystem fs, 
-      @SuppressWarnings("hiding") Configuration conf)
-  throws IOException {
-    // Copy all the source MapFile tuples into this HSF's MapFile
-    MapFile.Writer out = new MapFile.Writer(conf, fs,
-      getMapFilePath().toString(),
-      HStoreKey.class, ImmutableBytesWritable.class);
-    
-    try {
-      for(HStoreFile src: srcFiles) {
-        MapFile.Reader in = src.getReader(fs, null);
-        try {
-          HStoreKey readkey = new HStoreKey();
-          ImmutableBytesWritable readval = new ImmutableBytesWritable();
-          while(in.next(readkey, readval)) {
-            out.append(readkey, readval);
-          }
-          
-        } finally {
-          in.close();
-        }
-      }
-    } finally {
-      out.close();
-    }
-    // Build a unified InfoFile from the source InfoFiles.
-    
-    long unifiedSeqId = -1;
-    for(HStoreFile hsf: srcFiles) {
-      long curSeqId = hsf.loadInfo(fs);
-      if(curSeqId > unifiedSeqId) {
-        unifiedSeqId = curSeqId;
-      }
-    }
-    writeInfo(fs, unifiedSeqId);
-  }
-
-  /** 
-   * Reads in an info file
-   *
-   * @param fs file system
-   * @return The sequence id contained in the info file
-   * @throws IOException
-   */
-  long loadInfo(FileSystem fs) throws IOException {
-    Path p = null;
-    if (isReference()) {
-      p = getInfoFilePath(reference.getEncodedRegionName(),
-          reference.getFileId(), null);
-    } else {
-      p = getInfoFilePath();
-    }
-    DataInputStream in = new DataInputStream(fs.open(p));
-    try {
-      byte flag = in.readByte();
-      if(flag == INFO_SEQ_NUM) {
-        return in.readLong();
-      }
-      throw new IOException("Cannot process log file: " + p);
-    } finally {
-      in.close();
-    }
-  }
-  
-  /**
-   * Writes the sequence id to the info file
-   * 
-   * @param fs file system
-   * @param infonum sequence id to record
-   * @throws IOException
-   */
-  void writeInfo(FileSystem fs, long infonum) throws IOException {
-    Path p = getInfoFilePath();
-    FSDataOutputStream out = fs.create(p);
-    try {
-      out.writeByte(INFO_SEQ_NUM);
-      out.writeLong(infonum);
-    } finally {
-      out.close();
-    }
-  }
-  
-  /**
-   * Delete the store's map file and info file.
-   * @throws IOException 
-   */
-  public void delete() throws IOException {
-    fs.delete(getMapFilePath());
-    fs.delete(getInfoFilePath());
-  }
-  
-  /**
-   * Renames the mapfiles and info directories under the passed
-   * <code>hsf</code> directory.
-   * @param fs
-   * @param hsf
-   * @return True if succeeded.
-   * @throws IOException
-   */
-  public boolean rename(final FileSystem fs, final HStoreFile hsf)
-  throws IOException {
-    Path src = getMapFilePath();
-    if (!fs.exists(src)) {
-      throw new FileNotFoundException(src.toString());
-    }
-    boolean success = fs.rename(src, hsf.getMapFilePath());
-    if (!success) {
-      LOG.warn("Failed rename of " + src + " to " + hsf.getMapFilePath());
-    } else {
-      src = getInfoFilePath();
-      if (!fs.exists(src)) {
-        throw new FileNotFoundException(src.toString());
-      }
-      success = fs.rename(src, hsf.getInfoFilePath());
-      if (!success) {
-        LOG.warn("Failed rename of " + src + " to " + hsf.getInfoFilePath());
-      }
-    }
-    return success;
-  }
-  
-  /**
-   * Get reader for the store file map file.
-   * Client is responsible for closing file when done.
-   * @param fs
-   * @param bloomFilter If null, no filtering is done.
-   * @return MapFile.Reader
-   * @throws IOException
-   */
-  public MapFile.Reader getReader(final FileSystem fs,
-    final Filter bloomFilter)
-  throws IOException {
-    return isReference()?
-      new HStoreFile.HalfMapFileReader(fs, getMapFilePath(reference).toString(),
-        conf, reference.getFileRegion(), reference.getMidkey(), bloomFilter):
-      new BloomFilterMapFile.Reader(fs, getMapFilePath().toString(),
-        conf, bloomFilter);
-  }
-
-  /**
-   * Get a store file writer.
-   * Client is responsible for closing file when done.
-   * @param fs
-   * @param compression Pass <code>SequenceFile.CompressionType.NONE</code>
-   * for none.
-   * @param bloomFilter If null, no filtering is done.
-   * @return MapFile.Writer
-   * @throws IOException
-   */
-  public MapFile.Writer getWriter(final FileSystem fs,
-      final SequenceFile.CompressionType compression,
-      final Filter bloomFilter)
-  throws IOException {
-    if (isReference()) {
-      throw new IOException("Illegal Access: Cannot get a writer on a" +
-        "HStoreFile reference");
-    }
-    return new BloomFilterMapFile.Writer(conf, fs,
-      getMapFilePath().toString(), HStoreKey.class,
-      ImmutableBytesWritable.class, compression, bloomFilter);
-  }
-
-  /**
-   * @return Length of the store map file.  If a reference, size is
-   * approximation.
-   * @throws IOException
-   */
-  public long length() throws IOException {
-    Path p = new Path(getMapFilePath(reference), MapFile.DATA_FILE_NAME);
-    long l = p.getFileSystem(conf).getFileStatus(p).getLen();
-    return (isReference())? l / 2: l;
-  }
-
-  /** {@inheritDoc} */
-  @Override
-  public String toString() {
-    return encodedRegionName + "/" + colFamily + "/" + fileId +
-      (isReference()? "/" + reference.toString(): "");
-  }
-  
-  /**
-   * Custom bloom filter key maker.
-   * @param key
-   * @return Key made of bytes of row and column only.
-   * @throws IOException
-   */
-  static Key getBloomFilterKey(WritableComparable key)
-  throws IOException {
-    HStoreKey hsk = (HStoreKey)key;
-    byte [] bytes = null;
-    try {
-      bytes = (hsk.getRow().toString() + hsk.getColumn().toString()).
-        getBytes(UTF8_ENCODING);
-    } catch (UnsupportedEncodingException e) {
-      throw new IOException(e.toString());
-    }
-    return new Key(bytes);
-  }
-
-  static boolean isTopFileRegion(final Range r) {
-    return r.equals(Range.top);
-  }
-
-  private static String createHStoreFilename(final long fid,
-      final String encodedRegionName) {
-    return Long.toString(fid) +
-      ((encodedRegionName != null) ? "." + encodedRegionName : "");
-  }
-  
-  static Path getMapDir(Path dir, String encodedRegionName, Text colFamily) {
-    return new Path(dir, new Path(encodedRegionName, 
-        new Path(colFamily.toString(), HSTORE_DATFILE_DIR)));
-  }
-
-  /** @return the info directory path */
-  static Path getInfoDir(Path dir, String encodedRegionName, Text colFamily) {
-    return new Path(dir, new Path(encodedRegionName, 
-        new Path(colFamily.toString(), HSTORE_INFO_DIR)));
-  }
-
-  /** @return the bloom filter directory path */
-  static Path getFilterDir(Path dir, String encodedRegionName, Text colFamily) {
-    return new Path(dir, new Path(encodedRegionName,
-        new Path(colFamily.toString(), HSTORE_FILTER_DIR)));
-  }
-
-  /*
-   * Data structure to hold reference to a store file over in another region.
-   */
-  static class Reference implements Writable {
-    private String encodedRegionName;
-    private long fileid;
-    private Range region;
-    private HStoreKey midkey;
-    
-    Reference(final String ern, final long fid, final HStoreKey m,
-        final Range fr) {
-      this.encodedRegionName = ern;
-      this.fileid = fid;
-      this.region = fr;
-      this.midkey = m;
-    }
-    
-    Reference() {
-      this(null, -1, null, Range.bottom);
-    }
-
-    long getFileId() {
-      return fileid;
-    }
-
-    Range getFileRegion() {
-      return region;
-    }
-    
-    HStoreKey getMidkey() {
-      return midkey;
-    }
-    
-    String getEncodedRegionName() {
-      return encodedRegionName;
-    }
-   
-    /** {@inheritDoc} */
-    @Override
-    public String toString() {
-      return encodedRegionName + "/" + fileid + "/" + region;
-    }
-
-    // Make it serializable.
-
-    /** {@inheritDoc} */
-    public void write(DataOutput out) throws IOException {
-      out.writeUTF(encodedRegionName);
-      out.writeLong(fileid);
-      // Write true if we're doing top of the file.
-      out.writeBoolean(isTopFileRegion(region));
-      midkey.write(out);
-    }
-
-    /** {@inheritDoc} */
-    public void readFields(DataInput in) throws IOException {
-      encodedRegionName = in.readUTF();
-      fileid = in.readLong();
-      boolean tmp = in.readBoolean();
-      // If true, set region to top.
-      region = tmp? Range.top: Range.bottom;
-      midkey = new HStoreKey();
-      midkey.readFields(in);
-    }
-  }
-
-  /**
-   * Hbase customizations of MapFile.
-   */
-  static class HbaseMapFile extends MapFile {
-
-    static class HbaseReader extends MapFile.Reader {
-      /**
-       * @param fs
-       * @param dirName
-       * @param conf
-       * @throws IOException
-       */
-      public HbaseReader(FileSystem fs, String dirName, Configuration conf)
-      throws IOException {
-        super(fs, dirName, conf);
-        // Force reading of the mapfile index by calling midKey.
-        // Reading the index brings it into memory here on the client
-        // and then closes the index file, freeing up the socket
-        // connection and resources in the datanode.
-        // Usually the first access on a MapFile.Reader loads the index;
-        // we force the issue for HStoreFile MapFiles because an access
-        // may not happen for some time and meantime we'd be holding
-        // datanode resources.  See HADOOP-2341.
-        midKey();
-      }
-    }
-    
-    static class HbaseWriter extends MapFile.Writer {
-      /**
-       * @param conf
-       * @param fs
-       * @param dirName
-       * @param keyClass
-       * @param valClass
-       * @param compression
-       * @throws IOException
-       */
-      public HbaseWriter(Configuration conf, FileSystem fs, String dirName,
-          Class<Writable> keyClass, Class<Writable> valClass,
-          SequenceFile.CompressionType compression)
-      throws IOException {
-        super(conf, fs, dirName, keyClass, valClass, compression);
-        // The MapFile default index interval is 128.  Indexing more keys
-        // (a smaller hbase.index.interval) makes random reads faster when
-        // we're not 'next'-ing around in the mapfile.
-        setIndexInterval(conf.getInt("hbase.index.interval", 128));
-      }
-    }
-  }
-  
-  /**
-   * On write, all keys are added to a bloom filter.  On read, all keys are
-   * tested first against bloom filter. Keys are HStoreKey.  If passed bloom
-   * filter is null, just passes invocation to parent.
-   */
-  static class BloomFilterMapFile extends HbaseMapFile {
-    static class Reader extends HbaseReader {
-      private final Filter bloomFilter;
-
-      /**
-       * @param fs
-       * @param dirName
-       * @param conf
-       * @param filter
-       * @throws IOException
-       */
-      public Reader(FileSystem fs, String dirName, Configuration conf,
-          final Filter filter)
-      throws IOException {
-        super(fs, dirName, conf);
-        bloomFilter = filter;
-      }
-      
-      /** {@inheritDoc} */
-      @Override
-      public Writable get(WritableComparable key, Writable val)
-      throws IOException {
-        if (bloomFilter == null) {
-          return super.get(key, val);
-        }
-        if(bloomFilter.membershipTest(getBloomFilterKey(key))) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("bloom filter reported that key exists");
-          }
-          return super.get(key, val);
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("bloom filter reported that key does not exist");
-        }
-        return null;
-      }
-
-      /** {@inheritDoc} */
-      @Override
-      public WritableComparable getClosest(WritableComparable key,
-          Writable val) throws IOException {
-        if (bloomFilter == null) {
-          return super.getClosest(key, val);
-        }
-        // Note - the key being passed to us is always a HStoreKey
-        if(bloomFilter.membershipTest(getBloomFilterKey(key))) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("bloom filter reported that key exists");
-          }
-          return super.getClosest(key, val);
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("bloom filter reported that key does not exist");
-        }
-        return null;
-      }
-    }
-    
-    static class Writer extends HbaseWriter {
-      private final Filter bloomFilter;
-      
-      /**
-       * @param conf
-       * @param fs
-       * @param dirName
-       * @param keyClass
-       * @param valClass
-       * @param compression
-       * @param filter
-       * @throws IOException
-       */
-      @SuppressWarnings("unchecked")
-      public Writer(Configuration conf, FileSystem fs, String dirName,
-          Class keyClass, Class valClass,
-          SequenceFile.CompressionType compression, final Filter filter)
-      throws IOException {
-        super(conf, fs, dirName, keyClass, valClass, compression);
-        bloomFilter = filter;
-      }
-      
-      /** {@inheritDoc} */
-      @Override
-      public void append(WritableComparable key, Writable val)
-      throws IOException {
-        if (bloomFilter != null) {
-          bloomFilter.add(getBloomFilterKey(key));
-        }
-        super.append(key, val);
-      }
-    }
-  }
-  
-  /**
-   * A facade for a {@link MapFile.Reader} that serves up either the top or
-   * bottom half of a MapFile (where 'bottom' is the first half of the file
-   * containing the keys that sort lowest and 'top' is the second half of the
-   * file with keys that sort greater than those of the bottom half).
- * Subclasses BloomFilterMapFile.Reader in case the store file has a bloom
- * filter.
- * 
- * <p>This file is not splittable.  Calls to {@link #midKey()} return null.
-   */
-  static class HalfMapFileReader extends BloomFilterMapFile.Reader {
-    private final boolean top;
-    private final WritableComparable midkey;
-    private boolean firstNextCall = true;
-    
-    HalfMapFileReader(final FileSystem fs, final String dirName, 
-        final Configuration conf, final Range r,
-        final WritableComparable midKey)
-    throws IOException {
-      this(fs, dirName, conf, r, midKey, null);
-    }
-    
-    HalfMapFileReader(final FileSystem fs, final String dirName, 
-        final Configuration conf, final Range r,
-        final WritableComparable midKey, final Filter filter)
-    throws IOException {
-      super(fs, dirName, conf, filter);
-      top = isTopFileRegion(r);
-      midkey = midKey;
-    }
-    
-    @SuppressWarnings("unchecked")
-    private void checkKey(final WritableComparable key)
-    throws IOException {
-      if (top) {
-        if (key.compareTo(midkey) < 0) {
-          throw new IOException("Illegal Access: Key is less than midKey of " +
-          "backing mapfile");
-        }
-      } else if (key.compareTo(midkey) >= 0) {
-        throw new IOException("Illegal Access: Key is greater than or equal " +
-        "to midKey of backing mapfile");
-      }
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public synchronized void finalKey(WritableComparable key)
-    throws IOException {
-      if (top) {
-        checkKey(key);
-        super.finalKey(key); 
-      } else {
-        reset();
-        Writable value = new ImmutableBytesWritable();
-        // Copy the located key into the caller's key; assigning to the
-        // parameter alone is not visible to the caller.
-        WritableComparable found = super.getClosest(midkey, value, true);
-        if (found != null) {
-          Writables.copyWritable(found, key);
-        }
-      }
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public synchronized Writable get(WritableComparable key, Writable val)
-        throws IOException {
-      checkKey(key);
-      return super.get(key, val);
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override
-    public synchronized WritableComparable getClosest(WritableComparable key,
-      Writable val)
-    throws IOException {
-      WritableComparable closest = null;
-      if (top) {
-        // If top, the lowest possible key is midkey.  Do not have to check
-        // what comes back from super getClosest.  Will return exact match or
-        // greater.
-        closest = (key.compareTo(this.midkey) < 0)?
-          this.midkey: super.getClosest(key, val);
-      } else {
-        // We're serving bottom of the file.
-        if (key.compareTo(this.midkey) < 0) {
-          // Check key is within range for bottom.
-          closest = super.getClosest(key, val);
-          // midkey was made against largest store file at time of split. Smaller
-          // store files could have anything in them.  Check return value is
-          // not beyond the midkey (getClosest returns exact match or next
-          // after).
-          if (closest != null && closest.compareTo(this.midkey) >= 0) {
-            // Don't let this value out.
-            closest = null;
-          }
-        }
-        // Else, key is > midkey so let out closest = null.
-      }
-      return closest;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unused")
-    @Override
-    public synchronized WritableComparable midKey() throws IOException {
-      // Returns null to indicate file is not splittable.
-      return null;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override
-    public synchronized boolean next(WritableComparable key, Writable val)
-    throws IOException {
-      if (firstNextCall) {
-        firstNextCall = false;
-        if (this.top) {
-          // Seek to midkey.  Midkey may not exist in this file.  That should be
-          // fine.  Then we'll either be positioned at end or start of file.
-          WritableComparable nearest = getClosest(midkey, val);
-          // Now copy the mid key into the passed key.
-          if (nearest != null) {
-            Writables.copyWritable(nearest, key);
-            return true;
-          }
-          return false;
-        }
-      }
-      boolean result = super.next(key, val);
-      if (!top && key.compareTo(midkey) >= 0) {
-        result = false;
-      }
-      return result;
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public synchronized void reset() throws IOException {
-      if (top) {
-        firstNextCall = true;
-        seek(midkey);
-        return;
-      }
-      super.reset();
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public synchronized boolean seek(WritableComparable key)
-    throws IOException {
-      checkKey(key);
-      return super.seek(key);
-    }
-  }
-}
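
Not part of the removed source: a minimal, self-contained sketch (plain java.io, no Hadoop classes) of the info-file layout that writeInfo() and loadInfo() above read and write for a non-reference store file -- one flag byte (INFO_SEQ_NUM = 0) followed by the eight-byte sequence id. The class name and sample value below are illustrative only; for reference store files the info file instead carries the serialized Reference written by writeSplitInfo().

// Illustrative only -- not part of the removed HStoreFile.java.
import java.io.*;

public class InfoFileLayoutSketch {
  static final byte INFO_SEQ_NUM = 0;

  // Same layout as HStoreFile.writeInfo(): flag byte, then sequence id.
  static byte[] writeInfo(long seqId) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeByte(INFO_SEQ_NUM);
    out.writeLong(seqId);
    out.close();
    return bytes.toByteArray();
  }

  // Same checks as HStoreFile.loadInfo(): verify the flag, read the id.
  static long loadInfo(byte[] data) throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
    try {
      if (in.readByte() != INFO_SEQ_NUM) {
        throw new IOException("Unrecognized info file flag");
      }
      return in.readLong();
    } finally {
      in.close();
    }
  }

  public static void main(String[] args) throws IOException {
    long seqId = 959247014679548184L;  // value borrowed from the class javadoc example
    System.out.println(loadInfo(writeInfo(seqId)));  // prints 959247014679548184
  }
}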

+ 0 - 353
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java

@@ -1,353 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.hadoop.hbase.io.TextSequence;
-import org.apache.hadoop.io.*;
-
-import java.io.*;
-import java.nio.ByteBuffer;
-
-/**
- * A Key for a stored row
- */
-public class HStoreKey implements WritableComparable {
-  /**
-   * Colon character in UTF-8
-   */
-  public static final char COLUMN_FAMILY_DELIMITER = ':';
-  
-  private Text row;
-  private Text column;
-  private long timestamp;
-
-
-  /** Default constructor used in conjunction with Writable interface */
-  public HStoreKey() {
-    this(new Text());
-  }
-  
-  /**
-   * Create an HStoreKey specifying only the row
-   * The column defaults to the empty string and the time stamp defaults to
-   * Long.MAX_VALUE
-   * 
-   * @param row - row key
-   */
-  public HStoreKey(Text row) {
-    this(row, Long.MAX_VALUE);
-  }
-  
-  /**
-   * Create an HStoreKey specifying the row and timestamp
-   * The column name defaults to the empty string
-   * 
-   * @param row row key
-   * @param timestamp timestamp value
-   */
-  public HStoreKey(Text row, long timestamp) {
-    this(row, new Text(), timestamp);
-  }
-  
-  /**
-   * Create an HStoreKey specifying the row and column names
-   * The timestamp defaults to LATEST_TIMESTAMP
-   * 
-   * @param row row key
-   * @param column column key
-   */
-  public HStoreKey(Text row, Text column) {
-    this(row, column, HConstants.LATEST_TIMESTAMP);
-  }
-  
-  /**
-   * Create an HStoreKey specifying all the fields
-   * 
-   * @param row row key
-   * @param column column key
-   * @param timestamp timestamp value
-   */
-  public HStoreKey(Text row, Text column, long timestamp) {
-    // Make copies by doing 'new Text(arg)'.
-    this.row = new Text(row);
-    this.column = new Text(column);
-    this.timestamp = timestamp;
-  }
-  
-  /** @return Approximate size in bytes of this key. */
-  public long getSize() {
-    return this.row.getLength() + this.column.getLength() +
-      8 /* There is no sizeof in java. A long is always 8 bytes. */;
-  }
-  
-  /**
-   * Constructs a new HStoreKey from another
-   * 
-   * @param other the source key
-   */
-  public HStoreKey(HStoreKey other) {
-    this(other.row, other.column, other.timestamp);
-  }
-  
-  /**
-   * Change the value of the row key
-   * 
-   * @param newrow new row key value
-   */
-  public void setRow(Text newrow) {
-    this.row.set(newrow);
-  }
-  
-  /**
-   * Change the value of the column key
-   * 
-   * @param newcol new column key value
-   */
-  public void setColumn(Text newcol) {
-    this.column.set(newcol);
-  }
-  
-  /**
-   * Change the value of the timestamp field
-   * 
-   * @param timestamp new timestamp value
-   */
-  public void setVersion(long timestamp) {
-    this.timestamp = timestamp;
-  }
-  
-  /**
-   * Set the value of this HStoreKey from the supplied key
-   * 
-   * @param k key value to copy
-   */
-  public void set(HStoreKey k) {
-    this.row = k.getRow();
-    this.column = k.getColumn();
-    this.timestamp = k.getTimestamp();
-  }
-  
-  /** @return value of row key */
-  public Text getRow() {
-    return row;
-  }
-  
-  /** @return value of column key */
-  public Text getColumn() {
-    return column;
-  }
-  
-  /** @return value of timestamp */
-  public long getTimestamp() {
-    return timestamp;
-  }
-  
-  /**
-   * Compares the row and column of two keys
-   * @param other Key to compare against. Compares row and column.
-   * @return True if same row and column.
-   * @see #matchesWithoutColumn(HStoreKey)
-   * @see #matchesRowFamily(HStoreKey)
-   */ 
-  public boolean matchesRowCol(HStoreKey other) {
-    return this.row.compareTo(other.row) == 0
-      && this.column.compareTo(other.column) == 0;
-  }
-  
-  /**
-   * Compares the row and timestamp of two keys
-   * 
-   * @param other Key to compare against. Compares row and timestamp.
-   * 
-   * @return True if same row and this key's timestamp is greater than or
-   * equal to that of <code>other</code>
-   * @see #matchesRowCol(HStoreKey)
-   * @see #matchesRowFamily(HStoreKey)
-   */
-  public boolean matchesWithoutColumn(HStoreKey other) {
-    return this.row.compareTo(other.row) == 0
-      && this.timestamp >= other.getTimestamp();
-  }
-  
-  /**
-   * Compares the row and column family of two keys
-   * 
-   * @param that Key to compare against. Compares row and column family
-   * 
-   * @return true if same row and column family
-   * @throws InvalidColumnNameException 
-   * @see #matchesRowCol(HStoreKey)
-   * @see #matchesWithoutColumn(HStoreKey)
-   */
-  public boolean matchesRowFamily(HStoreKey that)
-  throws InvalidColumnNameException {
-    return this.row.compareTo(that.row) == 0 &&
-      extractFamily(this.column).
-        compareTo(extractFamily(that.getColumn())) == 0;
-  }
-  
-  /** {@inheritDoc} */
-  @Override
-  public String toString() {
-    return row.toString() + "/" + column.toString() + "/" + timestamp;
-  }
-  
-  /** {@inheritDoc} */
-  @Override
-  public boolean equals(Object obj) {
-    return compareTo(obj) == 0;
-  }
-  
-  /** {@inheritDoc} */
-  @Override
-  public int hashCode() {
-    int result = this.row.hashCode();
-    result ^= this.column.hashCode();
-    result ^= this.timestamp;
-    return result;
-  }
-
-  // Comparable
-
-  public int compareTo(Object o) {
-    HStoreKey other = (HStoreKey)o;
-    int result = this.row.compareTo(other.row);
-    if (result != 0) {
-      return result;
-    }
-    result = this.column.compareTo(other.column);
-    if (result != 0) {
-      return result;
-    }
-    // The below older timestamps sorting ahead of newer timestamps looks
-    // wrong but it is intentional. This way, newer timestamps are first
-    // found when we iterate over a memcache and newer versions are the
-    // first we trip over when reading from a store file.
-    if (this.timestamp < other.timestamp) {
-      result = 1;
-    } else if (this.timestamp > other.timestamp) {
-      result = -1;
-    }
-    return result;
-  }
-
-  // Writable
-
-  /** {@inheritDoc} */
-  public void write(DataOutput out) throws IOException {
-    row.write(out);
-    column.write(out);
-    out.writeLong(timestamp);
-  }
-
-  /** {@inheritDoc} */
-  public void readFields(DataInput in) throws IOException {
-    row.readFields(in);
-    column.readFields(in);
-    timestamp = in.readLong();
-  }
-  
-  // Statics
-  // TODO: Move these utility methods elsewhere (To a Column class?).
-  
-  /**
-   * Extracts the column family name from a column
-   * For example, returns 'info' if the specified column was 'info:server'
-   * @param col name of column
-   * @return column family as a TextSequence based on the passed
-   * <code>col</code>.  If <code>col</code> is reused, make a new Text of
-   * the result by calling {@link TextSequence#toText()}.
-   * @throws InvalidColumnNameException 
-   */
-  public static TextSequence extractFamily(final Text col)
-  throws InvalidColumnNameException {
-    return extractFamily(col, false);
-  }
-  
-  /**
-   * Extracts the column family name from a column
-   * For example, returns 'info' if the specified column was 'info:server'
-   * @param col name of column
-   * @param withColon if true, include the trailing ':' delimiter in the result
-   * @return column family as a TextSequence based on the passed
-   * <code>col</code>.  If <code>col</code> is reused, make a new Text of
-   * the result by calling {@link TextSequence#toText()}.
-   * @throws InvalidColumnNameException 
-   */
-  public static TextSequence extractFamily(final Text col,
-    final boolean withColon)
-  throws InvalidColumnNameException {
-    int offset = getColonOffset(col);
-    // Include ':' in copy?
-    offset += (withColon)? 1: 0;
-    if (offset == col.getLength()) {
-      return new TextSequence(col);
-    }
-    return new TextSequence(col, 0, offset);
-  }
-  
-  /**
-   * Extracts the column qualifier, the portion that follows the colon (':')
-   * family/qualifier separator.
-   * For example, returns 'server' if the specified column was 'info:server'
-   * @param col name of column
-   * @return column qualifier as a TextSequence based on the passed
-   * <code>col</code>.  If <code>col</code> is reused, make a new Text of
-   * the result by calling {@link TextSequence#toText()}.
-   * @throws InvalidColumnNameException 
-   */
-  public static TextSequence extractQualifier(final Text col)
-  throws InvalidColumnNameException {
-    int offset = getColonOffset(col);
-    if (offset + 1 == col.getLength()) {
-      return null;
-    }
-    return new TextSequence(col, offset + 1);
-  }
-  
-  private static int getColonOffset(final Text col)
-  throws InvalidColumnNameException {
-    int offset = -1;
-    ByteBuffer bb = ByteBuffer.wrap(col.getBytes());
-    for (int lastPosition = bb.position(); bb.hasRemaining();
-        lastPosition = bb.position()) {
-      if (Text.bytesToCodePoint(bb) == COLUMN_FAMILY_DELIMITER) {
-        offset = lastPosition;
-        break;
-      }
-    }
-    if(offset < 0) {
-      throw new InvalidColumnNameException(col + " is missing the colon " +
-        "family/qualifier separator");
-    }
-    return offset;
-  }
-
-  /**
-   * Returns row and column bytes out of an HStoreKey.
-   * @param hsk Store key.
-   * @return byte array encoding of HStoreKey
-   * @throws UnsupportedEncodingException
-   */
-  public static byte[] getBytes(final HStoreKey hsk)
-  throws UnsupportedEncodingException {
-    StringBuilder s = new StringBuilder(hsk.getRow().toString());
-    s.append(hsk.getColumn().toString());
-    return s.toString().getBytes(HConstants.UTF8_ENCODING);
-  }
-}
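
Not part of the removed source: a minimal, self-contained sketch of the ordering HStoreKey.compareTo() above implements -- ascending by row, then by column, then descending by timestamp, so the newest version of a cell is encountered first when scanning. SimpleKey and its fields are illustrative stand-ins using String and long rather than the Text-based HStoreKey.

// Illustrative only -- not part of the removed HStoreKey.java.
import java.util.*;

public class KeyOrderSketch {
  // Stand-in for HStoreKey: row and column as Strings, timestamp as a long.
  static final class SimpleKey implements Comparable<SimpleKey> {
    final String row;
    final String column;
    final long timestamp;

    SimpleKey(String row, String column, long timestamp) {
      this.row = row;
      this.column = column;
      this.timestamp = timestamp;
    }

    public int compareTo(SimpleKey other) {
      int result = this.row.compareTo(other.row);
      if (result != 0) {
        return result;
      }
      result = this.column.compareTo(other.column);
      if (result != 0) {
        return result;
      }
      // As in HStoreKey: older timestamps sort after newer ones, so the
      // newest version of a cell comes first.
      if (this.timestamp < other.timestamp) {
        return 1;
      } else if (this.timestamp > other.timestamp) {
        return -1;
      }
      return 0;
    }

    @Override
    public String toString() {
      return row + "/" + column + "/" + timestamp;
    }
  }

  public static void main(String[] args) {
    List<SimpleKey> keys = new ArrayList<SimpleKey>(Arrays.asList(
        new SimpleKey("row1", "info:server", 10L),
        new SimpleKey("row1", "info:server", 30L),
        new SimpleKey("row1", "info:server", 20L)));
    Collections.sort(keys);
    // Prints [row1/info:server/30, row1/info:server/20, row1/info:server/10]
    System.out.println(keys);
  }
}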

+ 0 - 1222
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java

@@ -1,1222 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.Map.Entry;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.filter.RowFilterInterface;
-import org.apache.hadoop.hbase.filter.StopRowFilter;
-import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.io.HbaseMapWritable;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.ipc.RemoteException;
-
-/**
- * Used to communicate with a single HBase table
- */
-public class HTable implements HConstants {
-  protected final Log LOG = LogFactory.getLog(this.getClass().getName());
-
-  protected final HConnection connection;
-  protected final Text tableName;
-  protected final long pause;
-  protected final int numRetries;
-  protected Random rand;
-  protected AtomicReference<BatchUpdate> batch;
-
-  protected volatile boolean tableDoesNotExist;
-  
-  // For row mutation operations
-  
-  protected volatile boolean closed;
-
-  protected void checkClosed() {
-    if (tableDoesNotExist) {
-      throw new IllegalStateException("table does not exist: " + tableName);
-    }
-    if (closed) {
-      throw new IllegalStateException("table is closed");
-    }
-  }
-  
-  /**
-   * Creates an object to access a HBase table
-   * 
-   * @param conf configuration object
-   * @param tableName name of the table
-   * @throws IOException
-   */
-  public HTable(HBaseConfiguration conf, Text tableName) throws IOException {
-    closed = true;
-    tableDoesNotExist = true;
-    this.connection = HConnectionManager.getConnection(conf);
-    this.tableName = tableName;
-    this.pause = conf.getLong("hbase.client.pause", 10 * 1000);
-    this.numRetries = conf.getInt("hbase.client.retries.number", 5);
-    this.rand = new Random();
-    this.batch = new AtomicReference<BatchUpdate>();
-    this.connection.locateRegion(tableName, EMPTY_START_ROW);
-    tableDoesNotExist = false;
-    closed = false;
-  }
-
-  /**
-   * Find region location hosting passed row using cached info
-   * @param row Row to find.
-   * @return Location of row.
-   */
-  HRegionLocation getRegionLocation(Text row) throws IOException {
-    checkClosed();
-    return this.connection.locateRegion(this.tableName, row);
-  }
-
-  /**
-   * Find region location hosting passed row
-   * @param row Row to find.
-   * @param reload If true, ignore the cached location and look the region up
-   * again; otherwise use the cache.
-   * @return Location of row.
-   */
-  HRegionLocation getRegionLocation(Text row, boolean reload) throws IOException {
-    checkClosed();
-    return reload?
-      this.connection.relocateRegion(this.tableName, row):
-      this.connection.locateRegion(tableName, row);
-  }
-
-
-  /** @return the connection */
-  public HConnection getConnection() {
-    checkClosed();
-    return connection;
-  }
-
-  /**
-   * Releases resources associated with this table. After calling close(), all
-   * other methods will throw an IllegalStateException
-   */
-  public synchronized void close() {
-    if (!closed) {
-      closed = true;
-      batch.set(null);
-      connection.close(tableName);
-    }
-  }
-  
-  /**
-   * Verifies that no update is in progress
-   */
-  public synchronized void checkUpdateInProgress() {
-    updateInProgress(false);
-  }
-  
-  /*
-   * Checks to see if an update is in progress
-   * 
-   * @param updateMustBeInProgress
-   *    If true, an update must be in progress. An IllegalStateException will be
-   *    thrown if not.
-   *    
-   *    If false, an update must not be in progress. An IllegalStateException
-   *    will be thrown if an update is in progress.
-   */
-  private void updateInProgress(boolean updateMustBeInProgress) {
-    if (updateMustBeInProgress) {
-      if (batch.get() == null) {
-        throw new IllegalStateException("no update in progress");
-      }
-    } else {
-      if (batch.get() != null) {
-        throw new IllegalStateException("update in progress");
-      }
-    }
-  }
-  
-
-  /** @return the table name */
-  public Text getTableName() {
-    return this.tableName;
-  }
-
-  /**
-   * @return table metadata 
-   * @throws IOException
-   */
-  public HTableDescriptor getMetadata() throws IOException {
-    HTableDescriptor [] metas = this.connection.listTables();
-    HTableDescriptor result = null;
-    for (int i = 0; i < metas.length; i++) {
-      if (metas[i].getName().equals(this.tableName)) {
-        result = metas[i];
-        break;
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Gets the starting row key for every region in the currently open table
-   * @return Array of region starting row keys
-   */
-  public Text[] getStartKeys() throws IOException {
-    checkClosed();
-    List<Text> keyList = new ArrayList<Text>();
-
-    long scannerId = -1L;
-
-    Text startRow = new Text(tableName.toString() + ",,999999999999999");
-    HRegionLocation metaLocation = null;
-    HRegionInterface server;
-    
-    // scan over each meta region
-    do {
-      try{
-        // turn the start row into a location
-        metaLocation = 
-          connection.locateRegion(META_TABLE_NAME, startRow);
-
-        // connect to the server hosting the .META. region
-        server = 
-          connection.getHRegionConnection(metaLocation.getServerAddress());
-
-        // open a scanner over the meta region
-        scannerId = server.openScanner(
-          metaLocation.getRegionInfo().getRegionName(),
-          COLUMN_FAMILY_ARRAY, tableName, LATEST_TIMESTAMP,
-          null);
-        
-        // iterate through the scanner, accumulating this table's region start keys
-        SCANNER_LOOP: while (true) {
-          HbaseMapWritable values = server.next(scannerId);
-          if (values == null || values.size() == 0) {
-            break;
-          }
-          for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-            HStoreKey key = (HStoreKey) e.getKey();
-            if (key.getColumn().equals(COL_REGIONINFO)) {
-              HRegionInfo info = new HRegionInfo();
-              info = (HRegionInfo) Writables.getWritable(
-                  ((ImmutableBytesWritable) e.getValue()).get(), info);
-
-              if (!info.getTableDesc().getName().equals(this.tableName)) {
-                break SCANNER_LOOP;
-              }
-
-              if (info.isOffline()) {
-                continue SCANNER_LOOP;
-              }
-
-              if (info.isSplit()) {
-                continue SCANNER_LOOP;
-              }
-
-              keyList.add(info.getStartKey());
-            }
-          }
-        }
-        
-        // close that remote scanner
-        server.close(scannerId);
-          
-        // advance the startRow to the end key of the current region
-        startRow = metaLocation.getRegionInfo().getEndKey();          
-      } catch (IOException e) {
-        // need retry logic?
-        throw e;
-      }
-    } while (startRow.compareTo(EMPTY_START_ROW) != 0);
-
-    Text[] arr = new Text[keyList.size()];
-    for (int i = 0; i < keyList.size(); i++ ){
-      arr[i] = keyList.get(i);
-    }
-    
-    return arr;
-  }
-  
-  /** 
-   * Get a single value for the specified row and column
-   *
-   * @param row row key
-   * @param column column name
-   * @return value for specified row/column
-   * @throws IOException
-   */
-  public byte[] get(Text row, Text column) throws IOException {
-    checkClosed();
-    byte [] value = null;
-    for(int tries = 0; tries < numRetries; tries++) {
-      HRegionLocation r = getRegionLocation(row);
-      HRegionInterface server =
-        connection.getHRegionConnection(r.getServerAddress());
-      
-      try {
-        value = server.get(r.getRegionInfo().getRegionName(), row, column);
-        break;
-        
-      } catch (IOException e) {
-        if (e instanceof RemoteException) {
-          e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-        }
-        if (tries == numRetries - 1) {
-          throw e;
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("reloading table servers because: " + e.getMessage());
-        }
-        r = getRegionLocation(row, true);
-      }
-      try {
-        Thread.sleep(this.pause);
-      } catch (InterruptedException x) {
-        // continue
-      }
-    }
-    return value;
-  }
- 
-  /** 
-   * Get the specified number of versions of the specified row and column
-   * 
-   * @param row         - row key
-   * @param column      - column name
-   * @param numVersions - number of versions to retrieve
-   * @return            - array of byte values
-   * @throws IOException
-   */
-  public byte[][] get(Text row, Text column, int numVersions) throws IOException {
-    checkClosed();
-    byte [][] values = null;
-    for (int tries = 0; tries < numRetries; tries++) {
-      HRegionLocation r = getRegionLocation(row);
-      HRegionInterface server = 
-        connection.getHRegionConnection(r.getServerAddress());
-      
-      try {
-        values = server.get(r.getRegionInfo().getRegionName(), row, column,
-            numVersions);
-        
-        break;
-        
-      } catch (IOException e) {
-        if (e instanceof RemoteException) {
-          e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-        }
-        if (tries == numRetries - 1) {
-          // No more tries
-          throw e;
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("reloading table servers because: " + e.getMessage());
-        }
-        r = getRegionLocation(row, true);
-      }
-      try {
-        Thread.sleep(this.pause);
-      } catch (InterruptedException x) {
-        // continue
-      }
-    }
-
-    if (values != null) {
-      ArrayList<byte[]> bytes = new ArrayList<byte[]>();
-      for (int i = 0 ; i < values.length; i++) {
-        bytes.add(values[i]);
-      }
-      return bytes.toArray(new byte[values.length][]);
-    }
-    return null;
-  }
-  
-  /** 
-   * Get the specified number of versions of the specified row and column with
-   * the specified timestamp.
-   *
-   * @param row         - row key
-   * @param column      - column name
-   * @param timestamp   - timestamp
-   * @param numVersions - number of versions to retrieve
-   * @return            - array of values that match the above criteria
-   * @throws IOException
-   */
-  public byte[][] get(Text row, Text column, long timestamp, int numVersions)
-  throws IOException {
-    checkClosed();
-    byte [][] values = null;
-    for (int tries = 0; tries < numRetries; tries++) {
-      HRegionLocation r = getRegionLocation(row);
-      HRegionInterface server =
-        connection.getHRegionConnection(r.getServerAddress());
-      
-      try {
-        values = server.get(r.getRegionInfo().getRegionName(), row, column,
-            timestamp, numVersions);
-        
-        break;
-    
-      } catch (IOException e) {
-        if (e instanceof RemoteException) {
-          e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-        }
-        if (tries == numRetries - 1) {
-          // No more tries
-          throw e;
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("reloading table servers because: " + e.getMessage());
-        }
-        r = getRegionLocation(row, true);
-      }
-      try {
-        Thread.sleep(this.pause);
-      } catch (InterruptedException x) {
-        // continue
-      }
-    }
-
-    if (values != null) {
-      ArrayList<byte[]> bytes = new ArrayList<byte[]>();
-      for (int i = 0 ; i < values.length; i++) {
-        bytes.add(values[i]);
-      }
-      return bytes.toArray(new byte[values.length][]);
-    }
-    return null;
-  }
-    
-  /** 
-   * Get all the data for the specified row at the latest timestamp
-   * 
-   * @param row row key
-   * @return Map of columns to values.  Map is empty if row does not exist.
-   * @throws IOException
-   */
-  public SortedMap<Text, byte[]> getRow(Text row) throws IOException {
-    return getRow(row, HConstants.LATEST_TIMESTAMP);
-  }
-
-  /** 
-   * Get all the data for the specified row at a specified timestamp
-   * 
-   * @param row row key
-   * @param ts timestamp
-   * @return Map of columns to values.  Map is empty if row does not exist.
-   * @throws IOException
-   */
-  public SortedMap<Text, byte[]> getRow(Text row, long ts) throws IOException {
-    checkClosed();
-    HbaseMapWritable value = null;
-    for (int tries = 0; tries < numRetries; tries++) {
-      HRegionLocation r = getRegionLocation(row);
-      HRegionInterface server =
-        connection.getHRegionConnection(r.getServerAddress());
-      
-      try {
-        value = server.getRow(r.getRegionInfo().getRegionName(), row, ts);
-        break;
-        
-      } catch (IOException e) {
-        if (e instanceof RemoteException) {
-          e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-        }
-        if (tries == numRetries - 1) {
-          // No more tries
-          throw e;
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("reloading table servers because: " + e.getMessage());
-        }
-        r = getRegionLocation(row, true);
-      }
-      try {
-        Thread.sleep(this.pause);
-      } catch (InterruptedException x) {
-        // continue
-      }
-    }
-    SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-    if (value != null && value.size() != 0) {
-      for (Map.Entry<Writable, Writable> e: value.entrySet()) {
-        HStoreKey key = (HStoreKey) e.getKey();
-        results.put(key.getColumn(),
-            ((ImmutableBytesWritable) e.getValue()).get());
-      }
-    }
-    return results;
-  }
-
-
-  /** 
-   * Get a scanner on the current table starting at the specified row.
-   * Return the specified columns.
-   *
-   * @param columns columns to scan. If column name is a column family, all
-   * columns of the specified column family are returned.  It's also possible
-   * to pass a regex in the column qualifier. A column qualifier is judged to
-   * be a regex if it contains at least one of the following characters:
-   * <code>\+|^&*$[]]}{)(</code>.
-   * @param startRow starting row in table to scan
-   * @return scanner
-   * @throws IOException
-   */
-  public HScannerInterface obtainScanner(Text[] columns, Text startRow)
-  throws IOException {
-    return obtainScanner(columns, startRow, HConstants.LATEST_TIMESTAMP, null);
-  }
-  
-  /** 
-   * Get a scanner on the current table starting at the specified row.
-   * Return the specified columns.
-   *
-   * @param columns columns to scan. If column name is a column family, all
-   * columns of the specified column family are returned.  It's also possible
-   * to pass a regex in the column qualifier. A column qualifier is judged to
-   * be a regex if it contains at least one of the following characters:
-   * <code>\+|^&*$[]]}{)(</code>.
-   * @param startRow starting row in table to scan
-   * @param timestamp only return results whose timestamp <= this value
-   * @return scanner
-   * @throws IOException
-   */
-  public HScannerInterface obtainScanner(Text[] columns, Text startRow,
-      long timestamp)
-  throws IOException {
-    return obtainScanner(columns, startRow, timestamp, null);
-  }
-  
-  /** 
-   * Get a scanner on the current table starting at the specified row.
-   * Return the specified columns.
-   *
-   * @param columns columns to scan. If column name is a column family, all
-   * columns of the specified column family are returned.  It's also possible
-   * to pass a regex in the column qualifier. A column qualifier is judged to
-   * be a regex if it contains at least one of the following characters:
-   * <code>\+|^&*$[]]}{)(</code>.
-   * @param startRow starting row in table to scan
-   * @param filter a row filter using row-key regexp and/or column data filter.
-   * @return scanner
-   * @throws IOException
-   */
-  public HScannerInterface obtainScanner(Text[] columns, Text startRow,
-      RowFilterInterface filter)
-  throws IOException { 
-    return obtainScanner(columns, startRow, HConstants.LATEST_TIMESTAMP, filter);
-  }
-
-  /** 
-   * Get a scanner on the current table starting at the specified row and
-   * ending just before <code>stopRow</code>.
-   * Return the specified columns.
-   *
-   * @param columns columns to scan. If column name is a column family, all
-   * columns of the specified column family are returned.  It's also possible
-   * to pass a regex in the column qualifier. A column qualifier is judged to
-   * be a regex if it contains at least one of the following characters:
-   * <code>\+|^&*$[]]}{)(</code>.
-   * @param startRow starting row in table to scan
-   * @param stopRow Row to stop scanning on. Once we hit this row we stop
-   * returning values; i.e. we return the row before this one but not the
-   * <code>stopRow</code> itself.
-   * @return scanner
-   * @throws IOException
-   */
-  public HScannerInterface obtainScanner(final Text[] columns,
-      final Text startRow, final Text stopRow)
-  throws IOException {
-    return obtainScanner(columns, startRow, stopRow,
-      HConstants.LATEST_TIMESTAMP);
-  }
-
-  /** 
-   * Get a scanner on the current table starting at the specified row and
-   * ending just before <code>stopRow</code>.
-   * Return the specified columns.
-   *
-   * @param columns columns to scan. If column name is a column family, all
-   * columns of the specified column family are returned.  It's also possible
-   * to pass a regex in the column qualifier. A column qualifier is judged to
-   * be a regex if it contains at least one of the following characters:
-   * <code>\+|^&*$[]]}{)(</code>.
-   * @param startRow starting row in table to scan
-   * @param stopRow Row to stop scanning on. Once we hit this row we stop
-   * returning values; i.e. we return the row before this one but not the
-   * <code>stopRow</code> itself.
-   * @param timestamp only return results whose timestamp <= this value
-   * @return scanner
-   * @throws IOException
-   */
-  public HScannerInterface obtainScanner(final Text[] columns,
-      final Text startRow, final Text stopRow, final long timestamp)
-  throws IOException {
-    return obtainScanner(columns, startRow, timestamp,
-      new WhileMatchRowFilter(new StopRowFilter(stopRow)));
-  }
-  
-  /** 
-   * Get a scanner on the current table starting at the specified row.
-   * Return the specified columns.
-   *
-   * @param columns columns to scan. If column name is a column family, all
-   * columns of the specified column family are returned.  It's also possible
-   * to pass a regex in the column qualifier. A column qualifier is judged to
-   * be a regex if it contains at least one of the following characters:
-   * <code>\+|^&*$[]]}{)(</code>.
-   * @param startRow starting row in table to scan
-   * @param timestamp only return results whose timestamp <= this value
-   * @param filter a row filter using row-key regexp and/or column data filter.
-   * @return scanner
-   * @throws IOException
-   */
-  public HScannerInterface obtainScanner(Text[] columns,
-      Text startRow, long timestamp, RowFilterInterface filter)
-  throws IOException {
-    checkClosed();
-    return new ClientScanner(columns, startRow, timestamp, filter);
-  }
-
-  /** 
-   * Start a batch of row insertions/updates.
-   * 
-   * No changes are committed until the call to commitBatchUpdate returns.
-   * A call to abortBatchUpdate will abandon the entire batch.
-   *
-   * @param row name of row to be updated
-   * @return lockid to be used in subsequent put, delete and commit calls
-   * 
-   * @deprecated Batch operations are now the default. startBatchUpdate is now
-   * implemented by {@link #startUpdate(Text)} 
-   */
-  @Deprecated
-  public synchronized long startBatchUpdate(final Text row) {
-    return startUpdate(row);
-  }
-  
-  /** 
-   * Abort a batch mutation
-   * @param lockid lock id returned by startBatchUpdate
-   * 
-   * @deprecated Batch operations are now the default. abortBatch is now 
-   * implemented by {@link #abort(long)}
-   */
-  @Deprecated
-  public synchronized void abortBatch(final long lockid) {
-    abort(lockid);
-  }
-  
-  /** 
-   * Finalize a batch mutation
-   *
-   * @param lockid lock id returned by startBatchUpdate
-   * @throws IOException
-   * 
-   * @deprecated Batch operations are now the default. commitBatch(long) is now
-   * implemented by {@link #commit(long)}
-   */
-  @Deprecated
-  public void commitBatch(final long lockid) throws IOException {
-    commit(lockid, System.currentTimeMillis());
-  }
-
-  /** 
-   * Finalize a batch mutation
-   *
-   * @param lockid lock id returned by startBatchUpdate
-   * @param timestamp time to associate with all the changes
-   * @throws IOException
-   * 
-   * @deprecated Batch operations are now the default. commitBatch(long, long)
-   * is now implemented by {@link #commit(long, long)}
-   */
-  @Deprecated
-  public synchronized void commitBatch(final long lockid, final long timestamp)
-  throws IOException {
-
-    commit(lockid, timestamp);
-  }
-  
-  /** 
-   * Start an atomic row insertion/update.  No changes are committed until the 
-   * call to commit() returns. A call to abort() will abandon any updates in
-   * progress.
-   * 
-   * <p>
-   * Example:
-   * <br>
-   * <pre><span style="font-family: monospace;">
-   * long lockid = table.startUpdate(new Text(article.getName()));
-   * for (File articleInfo: article.listFiles(new NonDirectories())) {
-   *   String article = null;
-   *   try {
-   *     DataInputStream in = new DataInputStream(new FileInputStream(articleInfo));
-   *     article = in.readUTF();
-   *   } catch (IOException e) {
-   *     // Input error - abandon update
-   *     table.abort(lockid);
-   *     throw e;
-   *   }
-   *   try {
-   *     table.put(lockid, columnName(articleInfo.getName()), article.getBytes());
-   *   } catch (RuntimeException e) {
-   *     // Put failed - abandon update
-   *     table.abort(lockid);
-   *     throw e;
-   *   }
-   * }
-   * table.commit(lockid);
-   * </span></pre>
-   *
-   * 
-   * @param row Name of row to start update against.  Note, choose row names
-   * with care.  Rows are sorted lexicographically (comparison is done
-   * using {@link Text#compareTo(Object)}).  If your keys are numeric,
-   * lexicographic sorting means that 46 sorts AFTER 450 (if you want to use
-   * numerics for keys, zero-pad them).
-   * @return Row lock id.
-   * @see #commit(long)
-   * @see #commit(long, long)
-   * @see #abort(long)
-   */
-  public synchronized long startUpdate(final Text row) {
-    checkClosed();
-    updateInProgress(false);
-    batch.set(new BatchUpdate(rand.nextLong()));
-    return batch.get().startUpdate(row);
-  }
-  
-  /** 
-   * Update a value for the specified column.
-   * Runs {@link #abort(long)} if exception thrown.
-   *
-   * @param lockid lock id returned from startUpdate
-   * @param column column whose value is being set
-   * @param val new value for column.  Cannot be null.
-   */
-  public void put(long lockid, Text column, byte val[]) {
-    checkClosed();
-    if (val == null) {
-      throw new IllegalArgumentException("value cannot be null");
-    }
-    updateInProgress(true);
-    batch.get().put(lockid, column, val);
-  }
-  
-  /** 
-   * Update a value for the specified column.
-   * Runs {@link #abort(long)} if exception thrown.
-   *
-   * @param lockid lock id returned from startUpdate
-   * @param column column whose value is being set
-   * @param val new value for column.  Cannot be null.
-   * @throws IOException if the writable can't be
-   * converted into a byte array
-   */
-  public void put(long lockid, Text column, Writable val) throws IOException {    
-    put(lockid, column, Writables.getBytes(val));
-  }
-  
-  /** 
-   * Delete the value for a column.
-   * Deletes the cell whose row/column/commit-timestamp match those of the
-   * delete.
-   * @param lockid lock id returned from startUpdate
-   * @param column name of column whose value is to be deleted
-   */
-  public void delete(long lockid, Text column) {
-    checkClosed();
-    updateInProgress(true);
-    batch.get().delete(lockid, column);
-  }
-  
-  /** 
-   * Delete all cells that match the passed row and column.
-   * @param row Row to update
-   * @param column name of column whose value is to be deleted
-   * @throws IOException 
-   */
-  public void deleteAll(final Text row, final Text column) throws IOException {
-    deleteAll(row, column, LATEST_TIMESTAMP);
-  }
-  
-  /** 
-   * Delete all cells that match the passed row and column and whose
-   * timestamp is equal-to or older than the passed timestamp.
-   * @param row Row to update
-   * @param column name of column whose value is to be deleted
-   * @param ts Delete all cells of the same timestamp or older.
-   * @throws IOException 
-   */
-  public void deleteAll(final Text row, final Text column, final long ts)
-  throws IOException {
-    checkClosed();
-    for(int tries = 0; tries < numRetries; tries++) {
-      HRegionLocation r = getRegionLocation(row);
-      HRegionInterface server =
-        connection.getHRegionConnection(r.getServerAddress());
-      try {
-        server.deleteAll(r.getRegionInfo().getRegionName(), row, column, ts);
-        break;
-        
-      } catch (IOException e) {
-        if (e instanceof RemoteException) {
-          e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-        }
-        if (tries == numRetries - 1) {
-          throw e;
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("reloading table servers because: " + e.getMessage());
-        }
-        r = getRegionLocation(row, true);
-      }
-      try {
-        Thread.sleep(this.pause);
-      } catch (InterruptedException x) {
-        // continue
-      }
-    }
-  }
-  
-  /**
-   * Completely delete the row's cells of the same timestamp or older.
-   *
-   * @param row Key of the row you want to completely delete.
-   * @param ts Timestamp of cells to delete
-   */
-  public void deleteAll(final Text row, long ts)
-  throws IOException {
-    checkClosed();
-    for(int tries = 0; tries < numRetries; tries++) {
-      HRegionLocation r = getRegionLocation(row);
-      HRegionInterface server =
-        connection.getHRegionConnection(r.getServerAddress());
-      try {
-        server.deleteAll(r.getRegionInfo().getRegionName(), row, ts);
-        break;
-
-      } catch (IOException e) {
-        if (e instanceof RemoteException) {
-          e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-        }
-        if (tries == numRetries - 1) {
-          throw e;
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("reloading table servers because: " + e.getMessage());
-        }
-        r = getRegionLocation(row, true);
-      }
-      try {
-        Thread.sleep(this.pause);
-      } catch (InterruptedException x) {
-        // continue
-      }
-    }
-  }
-      
-  /**
-   * Completely delete the row's cells.
-   *
-   * @param row Key of the row you want to completely delete.
-   */
-  public void deleteAll(final Text row)
-  throws IOException {
-    deleteAll(row, HConstants.LATEST_TIMESTAMP);
-  }
-  
-  /**
-   * Delete all cells in a row that belong to the given column family and
-   * whose timestamps are less than or equal to <i>timestamp</i>.
-   *
-   * @param row The row to operate on
-   * @param family The column family to match
-   * @param timestamp Timestamp to match
-   */
-  public void deleteFamily(final Text row, final Text family, long timestamp)
-  throws IOException {
-    checkClosed();
-    for(int tries = 0; tries < numRetries; tries++) {
-      HRegionLocation r = getRegionLocation(row);
-      HRegionInterface server =
-        connection.getHRegionConnection(r.getServerAddress());
-      try {
-        server.deleteFamily(r.getRegionInfo().getRegionName(), row, family, timestamp);
-        break;
-
-      } catch (IOException e) {
-        if (e instanceof RemoteException) {
-          e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-        }
-        if (tries == numRetries - 1) {
-          throw e;
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("reloading table servers because: " + e.getMessage());
-        }
-        r = getRegionLocation(row, true);
-      }
-      try {
-        Thread.sleep(this.pause);
-      } catch (InterruptedException x) {
-        // continue
-      }
-    }
-  }
-
-  /**
-   * Delete all cells in a row that belong to the given column family, at all timestamps.
-   *
-   * @param row The row to operate on
-   * @param family The column family to match
-   */  
-  public void deleteFamily(final Text row, final Text family)
-  throws IOException{
-    deleteFamily(row, family, HConstants.LATEST_TIMESTAMP);
-  }
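
Taken together, the delete methods above form a small hierarchy: the overloads without a timestamp default to LATEST_TIMESTAMP, and deleteFamily widens the scope from a single column to a whole family. A brief sketch, assuming an open HTable named table; the row, column, family, and timestamp values are illustrative:

    void pruneRow(final HTable table) throws IOException {
      Text row = new Text("row1");
      // all versions of a single cell (everything is at or older than LATEST_TIMESTAMP)
      table.deleteAll(row, new Text("info:status"));
      // only versions at or older than an explicit timestamp
      table.deleteAll(row, new Text("info:status"), 1190000000000L);
      // every cell in the row
      table.deleteAll(row);
      // every cell in the row belonging to the 'info:' family
      table.deleteFamily(row, new Text("info:"));
    }
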
-  
-  /** 
-   * Abort a row mutation.
-   * 
-   * This method should be called only when an update has been started and it
-   * is determined that the update should not be committed.
-   * 
-   * Releases resources being held by the update in progress.
-   *
-   * @param lockid lock id returned from startUpdate
-   */
-  public synchronized void abort(long lockid) {
-    checkClosed();
-    if (batch.get() != null && batch.get().getLockid() != lockid) {
-      throw new IllegalArgumentException("invalid lock id " + lockid);
-    }
-    batch.set(null);
-  }
-  
-  /** 
-   * Finalize a row mutation.
-   * 
-   * When this method is used, we pass the server a value that says use
-   * the 'latest' timestamp.  If we are doing a put, on the server side, cells
-   * will be given the server's current timestamp.  If we are committing
-   * deletes, then delete removes the most recently modified cell of the
-   * stipulated column.
-   * 
-   * @see #commit(long, long)
-   * 
-   * @param lockid lock id returned from startUpdate
-   * @throws IOException
-   */
-  public void commit(long lockid) throws IOException {
-    commit(lockid, LATEST_TIMESTAMP);
-  }
-
-  /** 
-   * Finalize a row mutation and release any resources associated with the update.
-   * 
-   * @param lockid lock id returned from startUpdate
-   * @param timestamp time to associate with the change
-   * @throws IOException
-   */
-  public synchronized void commit(long lockid, long timestamp)
-  throws IOException {
-    checkClosed();
-    updateInProgress(true);
-    if (batch.get().getLockid() != lockid) {
-      throw new IllegalArgumentException("invalid lock id " + lockid);
-    }
-    
-    try {
-      for (int tries = 0; tries < numRetries; tries++) {
-        HRegionLocation r = getRegionLocation(batch.get().getRow());
-        HRegionInterface server =
-          connection.getHRegionConnection(r.getServerAddress());
-        try {
-          server.batchUpdate(r.getRegionInfo().getRegionName(), timestamp,
-            batch.get());
-          break;
-        } catch (IOException e) {
-          if (e instanceof RemoteException) {
-            e = RemoteExceptionHandler.decodeRemoteException(
-                (RemoteException) e);
-          }
-          if (tries < numRetries - 1) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("reloading table servers because: " + e.getMessage());
-            }
-            r = getRegionLocation(batch.get().getRow(), true);
-          } else {
-            throw e;
-          }
-        }
-        try {
-          Thread.sleep(pause);
-        } catch (InterruptedException e) {
-          // continue
-        }
-      }
-    } finally {
-      batch.set(null);
-    }
-  }
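
To contrast the two commit overloads documented above: commit(lockid) lets the region server stamp each cell with its own current time, while commit(lockid, timestamp) pins every change in the batch to one caller-chosen version. A small sketch, again assuming an open HTable named table and illustrative column names:

    long ts = System.currentTimeMillis();
    long lockid = table.startUpdate(new Text("row1"));
    table.put(lockid, new Text("info:a"), "a".getBytes());
    table.put(lockid, new Text("info:b"), "b".getBytes());
    // both cells are stored at exactly ts; commit(lockid) would have used LATEST_TIMESTAMP
    table.commit(lockid, ts);
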
-  
-  /**
-   * Renew lease on update
-   * 
-   * @param lockid              - lock id returned from startUpdate
-   * 
-   * @deprecated Batch updates are now the default. Consequently this method
-   * does nothing.
-   */
-  @Deprecated
-  public synchronized void renewLease(@SuppressWarnings("unused") long lockid) {
-    // noop
-  }
-
-  /**
-   * Implements the scanner interface for the HBase client.
-   * If there are multiple regions in a table, this scanner will iterate
-   * through them all.
-   */
-  protected class ClientScanner implements HScannerInterface {
-    private final Text EMPTY_COLUMN = new Text();
-    private Text[] columns;
-    private Text startRow;
-    private long scanTime;
-    @SuppressWarnings("hiding")
-    private boolean closed;
-    private HRegionLocation currentRegionLocation;
-    private HRegionInterface server;
-    private long scannerId;
-    private RowFilterInterface filter;
-    
-    protected ClientScanner(Text[] columns, Text startRow, long timestamp,
-      RowFilterInterface filter) 
-    throws IOException {
-
-      LOG.info("Creating scanner over " + tableName + " starting at key " + startRow);
-
-      // defaults
-      this.closed = false;
-      this.server = null;
-      this.scannerId = -1L;
-    
-      // save off the simple parameters
-      this.columns = columns;
-      this.startRow = startRow;
-      this.scanTime = timestamp;
-      
-      // save the filter, and make sure that the filter applies to the data
-      // we're expecting to pull back
-      this.filter = filter;
-      if (filter != null) {
-        filter.validate(columns);
-      }
-
-      nextScanner();
-    }
-        
-    /*
-     * Gets a scanner for the next region.
-     * Returns false if there are no more scanners.
-     */
-    private boolean nextScanner() throws IOException {
-      checkClosed();
-      
-      // close the previous scanner if it's open
-      if (this.scannerId != -1L) {
-        this.server.close(this.scannerId);
-        this.scannerId = -1L;
-      }
-
-      // if we're at the end of the table, then close and return false
-      // to stop iterating
-      if (this.currentRegionLocation != null){
-        LOG.debug("Advancing forward from region " 
-          + this.currentRegionLocation.getRegionInfo());
-        
-        if (this.currentRegionLocation.getRegionInfo().getEndKey() == null
-          || this.currentRegionLocation.getRegionInfo().getEndKey().equals(EMPTY_TEXT)) {
-            LOG.debug("We're at the end of the region, returning.");
-            close();
-            return false;
-        }
-      } 
-      
-      HRegionLocation oldLocation = this.currentRegionLocation;
-      
-      Text localStartKey = oldLocation == null ? 
-        startRow : oldLocation.getRegionInfo().getEndKey();
-
-      // advance to the region that starts with the current region's end key
-      LOG.debug("Advancing internal scanner to startKey " + localStartKey);
-      this.currentRegionLocation = getRegionLocation(localStartKey);
-      
-      LOG.debug("New region: " + this.currentRegionLocation);
-      
-      try {
-        for (int tries = 0; tries < numRetries; tries++) {
-          // connect to the server
-          server = connection.getHRegionConnection(
-            this.currentRegionLocation.getServerAddress());
-          
-          try {
-            // open a scanner on the region server starting at the 
-            // beginning of the region
-            scannerId = server.openScanner(
-              this.currentRegionLocation.getRegionInfo().getRegionName(),
-              this.columns, localStartKey, scanTime, filter);
-              
-            break;
-          } catch (IOException e) {
-            if (e instanceof RemoteException) {
-              e = RemoteExceptionHandler.decodeRemoteException(
-                  (RemoteException) e);
-            }
-            if (tries == numRetries - 1) {
-              // No more tries
-              throw e;
-            }
-            try {
-              Thread.sleep(pause);
-            } catch (InterruptedException ie) {
-              // continue
-            }
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("reloading table servers because: " + e.getMessage());
-            }
-            currentRegionLocation = getRegionLocation(localStartKey, true);
-          }
-        }
-      } catch (IOException e) {
-        close();
-        if (e instanceof RemoteException) {
-          e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-        }
-        throw e;
-      }
-      return true;
-    }
-
-    public boolean next(HStoreKey key, SortedMap<Text, byte[]> results)
-    throws IOException {
-      checkClosed();
-      if (this.closed) {
-        return false;
-      }
-      HbaseMapWritable values = null;
-      // Clear the results so we don't inherit any values from any previous
-      // calls to next.
-      results.clear();
-      do {
-        values = server.next(scannerId);
-      } while (values != null && values.size() == 0 && nextScanner());
-
-      if (values != null && values.size() != 0) {
-        for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-          HStoreKey k = (HStoreKey) e.getKey();
-          key.setRow(k.getRow());
-          key.setVersion(k.getTimestamp());
-          key.setColumn(EMPTY_COLUMN);
-          results.put(k.getColumn(),
-              ((ImmutableBytesWritable) e.getValue()).get());
-        }
-      }
-      return values == null ? false : values.size() != 0;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    public void close() throws IOException {
-      checkClosed();
-      if (scannerId != -1L) {
-        try {
-          server.close(scannerId);
-          
-        } catch (IOException e) {
-          if (e instanceof RemoteException) {
-            e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-          }
-          if (!(e instanceof NotServingRegionException)) {
-            throw e;
-          }
-        }
-        scannerId = -1L;
-      }
-      server = null;
-      closed = true;
-    }
-
-    /** {@inheritDoc} */
-    public Iterator<Entry<HStoreKey, SortedMap<Text, byte[]>>> iterator() {
-      return new Iterator<Entry<HStoreKey, SortedMap<Text, byte[]>>>() {
-        HStoreKey key = null;
-        SortedMap<Text, byte []> value = null;
-        
-        public boolean hasNext() {
-          boolean hasNext = false;
-          try {
-            this.key = new HStoreKey();
-            this.value = new TreeMap<Text, byte[]>();
-            hasNext = ClientScanner.this.next(key, value);
-          } catch (IOException e) {
-            throw new RuntimeException(e);
-          }
-          return hasNext;
-        }
-
-        public Entry<HStoreKey, SortedMap<Text, byte[]>> next() {
-          return new Map.Entry<HStoreKey, SortedMap<Text, byte[]>>() {
-            public HStoreKey getKey() {
-              return key;
-            }
-
-            public SortedMap<Text, byte[]> getValue() {
-              return value;
-            }
-
-            public SortedMap<Text, byte[]> setValue(@SuppressWarnings("unused")
-            SortedMap<Text, byte[]> value) {
-              throw new UnsupportedOperationException();
-            }
-          };
-        }
-
-        public void remove() {
-          throw new UnsupportedOperationException();
-        }
-      };
-    }
-  }
-}
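
ClientScanner above is normally consumed through the HScannerInterface contract it implements: next(key, results) fills the caller's containers and returns false once every region has been exhausted. A sketch of a full-table scan, assuming an open HTable named table and an obtainScanner(Text[], Text) factory in the portion of this file not reproduced here; the column family is illustrative:

    Text[] columns = { new Text("info:") };
    HScannerInterface scanner = table.obtainScanner(columns, new Text(""));  // empty start row = start of table
    try {
      HStoreKey key = new HStoreKey();
      SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
      while (scanner.next(key, results)) {
        System.out.println(key.getRow() + " -> " + results.keySet());
      }
    } finally {
      scanner.close();
    }
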

+ 0 - 259
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java

@@ -1,259 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableComparable;
-
-/**
- * HTableDescriptor contains the name of an HTable, and its
- * column families.
- */
-public class HTableDescriptor implements WritableComparable {
-  /** table descriptor for root table */
-  public static final HTableDescriptor rootTableDesc =
-    new HTableDescriptor(HConstants.ROOT_TABLE_NAME,
-        new HColumnDescriptor(HConstants.COLUMN_FAMILY, 1,
-            HColumnDescriptor.CompressionType.NONE, false, Integer.MAX_VALUE,
-            null));
-  
-  /** table descriptor for meta table */
-  public static final HTableDescriptor metaTableDesc =
-    new HTableDescriptor(HConstants.META_TABLE_NAME,
-        new HColumnDescriptor(HConstants.COLUMN_FAMILY, 1,
-            HColumnDescriptor.CompressionType.NONE, false, Integer.MAX_VALUE,
-            null));
-  
-  private boolean rootregion;
-  private boolean metaregion;
-  private Text name;
-  // TODO: Does this need to be a treemap?  Can it be a HashMap?
-  private final TreeMap<Text, HColumnDescriptor> families;
-  
-  /*
-   * Legal table names can only contain 'word characters':
-   * i.e. <code>[a-zA-Z_0-9-.]</code>.
-   * Lets be restrictive until a reason to be otherwise. One reason to limit
-   * characters in table name is to ensure table regions as entries in META
-   * regions can be found (See HADOOP-1581 'HBASE: Un-openable tablename bug').
-   */
-  private static final Pattern LEGAL_TABLE_NAME =
-    Pattern.compile("^[\\w-.]+$");
-
-  /** Used to construct the table descriptors for root and meta tables */
-  private HTableDescriptor(Text name, HColumnDescriptor family) {
-    rootregion = name.equals(HConstants.ROOT_TABLE_NAME);
-    this.metaregion = true;
-    this.name = new Text(name);
-    this.families = new TreeMap<Text, HColumnDescriptor>();
-    families.put(family.getName(), family);
-  }
-
-  /**
-   * Constructs an empty object.
-   * For deserializing an HTableDescriptor instance only.
-   * @see #HTableDescriptor(String)
-   */
-  public HTableDescriptor() {
-    this.name = new Text();
-    this.families = new TreeMap<Text, HColumnDescriptor>();
-  }
-
-  /**
-   * Constructor.
-   * @param name Table name.
-   * @throws IllegalArgumentException if passed a table name
-   * that is made of other than 'word' characters: i.e.
-   * <code>[a-zA-Z_0-9.-]</code>
-   */
-  public HTableDescriptor(String name) {
-    this();
-    Matcher m = LEGAL_TABLE_NAME.matcher(name);
-    if (m == null || !m.matches()) {
-      throw new IllegalArgumentException(
-          "Table names can only contain 'word characters': i.e. [a-zA-Z_0-9");
-    }
-    this.name.set(name);
-    this.rootregion = false;
-    this.metaregion = false;
-  }
-  
-  /** @return true if this is the root region */
-  public boolean isRootRegion() {
-    return rootregion;
-  }
-  
-  /** @return true if table is the meta table */
-  public boolean isMetaTable() {
-    return metaregion && !rootregion;
-  }
-  
-  /** @return true if this is a meta region (part of the root or meta tables) */
-  public boolean isMetaRegion() {
-    return metaregion;
-  }
-
-  /** @return name of table */
-  public Text getName() {
-    return name;
-  }
-
-  /**
-   * Adds a column family.
-   * @param family HColumnDescriptor of family to add.
-   */
-  public void addFamily(HColumnDescriptor family) {
-    if (family.getName() == null || family.getName().getLength() <= 0) {
-      throw new NullPointerException("Family name cannot be null or empty");
-    }
-    families.put(family.getName(), family);
-  }
-
-  /**
-   * Checks to see if this table contains the given column family
-   * 
-   * @param family - family name
-   * @return true if the table contains the specified family name
-   */
-  public boolean hasFamily(Text family) {
-    return families.containsKey(family);
-  }
-
-  /** 
-   * All the column families in this table.
-   * 
-   *  TODO: What is this used for? Seems Dangerous to let people play with our
-   *  private members.
-   *  
-   *  @return map of family members
-   */
-  public TreeMap<Text, HColumnDescriptor> families() {
-    return families;
-  }
-
-  /** {@inheritDoc} */
-  @Override
-  public String toString() {
-    return "name: " + this.name.toString() + ", families: " + this.families;
-  }
-  
-  /** {@inheritDoc} */
-  @Override
-  public boolean equals(Object obj) {
-    return compareTo(obj) == 0;
-  }
-  
-  /** {@inheritDoc} */
-  @Override
-  public int hashCode() {
-    // TODO: Cache.
-    int result = this.name.hashCode();
-    if (this.families != null && this.families.size() > 0) {
-      for (Map.Entry<Text,HColumnDescriptor> e: this.families.entrySet()) {
-        result ^= e.hashCode();
-      }
-    }
-    return result;
-  }
-  
-  // Writable
-
-  /** {@inheritDoc} */
-  public void write(DataOutput out) throws IOException {
-    out.writeBoolean(rootregion);
-    out.writeBoolean(metaregion);
-    name.write(out);
-    out.writeInt(families.size());
-    for(Iterator<HColumnDescriptor> it = families.values().iterator();
-        it.hasNext(); ) {
-      it.next().write(out);
-    }
-  }
-
-  /** {@inheritDoc} */
-  public void readFields(DataInput in) throws IOException {
-    this.rootregion = in.readBoolean();
-    this.metaregion = in.readBoolean();
-    this.name.readFields(in);
-    int numCols = in.readInt();
-    families.clear();
-    for(int i = 0; i < numCols; i++) {
-      HColumnDescriptor c = new HColumnDescriptor();
-      c.readFields(in);
-      families.put(c.getName(), c);
-    }
-  }
-
-  // Comparable
-
-  /** {@inheritDoc} */
-  public int compareTo(Object o) {
-    HTableDescriptor other = (HTableDescriptor) o;
-    int result = name.compareTo(other.name);
-    
-    if(result == 0) {
-      result = families.size() - other.families.size();
-    }
-    
-    if(result == 0 && families.size() != other.families.size()) {
-      result = Integer.valueOf(families.size()).compareTo(
-          Integer.valueOf(other.families.size()));
-    }
-    
-    if(result == 0) {
-      for(Iterator<HColumnDescriptor> it = families.values().iterator(),
-          it2 = other.families.values().iterator(); it.hasNext(); ) {
-        result = it.next().compareTo(it2.next());
-        if(result != 0) {
-          break;
-        }
-      }
-    }
-    return result;
-  }
-
-  /**
-   * @return Immutable sorted map of families.
-   */
-  public SortedMap<Text, HColumnDescriptor> getFamilies() {
-    return Collections.unmodifiableSortedMap(this.families);
-  }
-
-  /**
-   * @param rootdir qualified path of HBase root directory
-   * @param tableName name of table
-   * @return path for table
-   */
-  public static Path getTableDir(Path rootdir, Text tableName) {
-    return new Path(rootdir, tableName.toString());
-  }
-}
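
A short sketch of building a descriptor with the API above and registering it through HBaseAdmin (as LocalHBaseCluster's main(), later in this commit, also does). The table name must match LEGAL_TABLE_NAME, i.e. ^[\w-.]+$; the name, the family, and the HColumnDescriptor arguments below are illustrative and simply mirror the rootTableDesc/metaTableDesc definitions at the top of the class:

    // new HTableDescriptor("web table!") would throw IllegalArgumentException
    HTableDescriptor desc = new HTableDescriptor("webtable");
    desc.addFamily(new HColumnDescriptor(new Text("contents:"), 1,
        HColumnDescriptor.CompressionType.NONE, false, Integer.MAX_VALUE, null));
    HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
    admin.createTable(desc);
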

+ 0 - 41
src/contrib/hbase/src/java/org/apache/hadoop/hbase/InvalidColumnNameException.java

@@ -1,41 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-/**
- * Thrown when an invalid column name is encountered
- */
-public class InvalidColumnNameException extends IOException {
-  private static final long serialVersionUID = 1L << 29 - 1L;
-  /** default constructor */
-  public InvalidColumnNameException() {
-    super();
-  }
-
-  /**
-   * Constructor
-   * @param s message
-   */
-  public InvalidColumnNameException(String s) {
-    super(s);
-  }
-}

+ 0 - 34
src/contrib/hbase/src/java/org/apache/hadoop/hbase/LeaseListener.java

@@ -1,34 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-
-/**
- * LeaseListener is an interface meant to be implemented by users of the Leases 
- * class.
- *
- * It receives events from the Leases class about the status of its accompanying
- * lease.  Users of the Leases class can use a LeaseListener subclass to, for 
- * example, clean up resources after a lease has expired.
- */
-public interface LeaseListener {
-  /** When a lease expires, this method is called. */
-  public void leaseExpired();
-}
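
A minimal implementation sketch of the callback; the class name and the clean-up body are illustrative stand-ins for whatever resource the lease actually guards:

    class ScannerLeaseListener implements LeaseListener {
      public void leaseExpired() {
        // release whatever resource was tied to the lease, e.g. close an
        // abandoned scanner, and note the expiry
        System.err.println("lease expired; cleaning up abandoned resource");
      }
    }
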

+ 0 - 377
src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java

@@ -1,377 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import java.io.*;
-import java.util.*;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * Leases
- *
- * There are several server classes in HBase that need to track external
- * clients that occasionally send heartbeats.
- * 
- * <p>These external clients hold resources in the server class.
- * Those resources need to be released if the external client fails to send a
- * heartbeat after some interval of time passes.
- *
- * <p>The Leases class is a general reusable class for this kind of pattern.
- * An instance of the Leases class will create a thread to do its dirty work.  
- * You should close() the instance if you want to clean up the thread properly.
- */
-public class Leases {
-  protected static final Log LOG = LogFactory.getLog(Leases.class.getName());
-
-  protected final int leasePeriod;
-  protected final int leaseCheckFrequency;
-  private final Thread leaseMonitorThread;
-  protected final Map<LeaseName, Lease> leases =
-    new HashMap<LeaseName, Lease>();
-  protected final TreeSet<Lease> sortedLeases = new TreeSet<Lease>();
-  protected AtomicBoolean stop = new AtomicBoolean(false);
-
-  /**
-   * Constructs a Leases instance.
-   * 
-   * @param leasePeriod - length of time (milliseconds) that the lease is valid
-   * @param leaseCheckFrequency - how often the lease should be checked
-   * (milliseconds)
-   */
-  public Leases(final int leasePeriod, final int leaseCheckFrequency) {
-    this.leasePeriod = leasePeriod;
-    this.leaseCheckFrequency = leaseCheckFrequency;
-    this.leaseMonitorThread =
-      new LeaseMonitor(this.leaseCheckFrequency, this.stop);
-    this.leaseMonitorThread.setDaemon(true);
-  }
-  
-  /** Starts the lease monitor */
-  public void start() {
-    leaseMonitorThread.start();
-  }
-  
-  /**
-   * @param name Set name on the lease checking daemon thread.
-   */
-  public void setName(final String name) {
-    this.leaseMonitorThread.setName(name);
-  }
-
-  /**
-   * Shuts down this lease instance when all outstanding leases expire.
-   * Like {@link #close()} but rather than violently end all leases, waits
-   * first on extant leases to finish.  Use this method if the lease holders
-   * could lose data, leak locks, etc.  Presumes the client has shut down
-   * allocation of new leases.
-   */
-  public void closeAfterLeasesExpire() {
-    synchronized(this.leases) {
-      while (this.leases.size() > 0) {
-        LOG.info(Thread.currentThread().getName() + " " +
-          Integer.toString(leases.size()) + " lease(s) " +
-          "outstanding. Waiting for them to expire.");
-        try {
-          this.leases.wait(this.leaseCheckFrequency);
-        } catch (InterruptedException e) {
-          // continue
-        }
-      }
-    }
-    // Now call close since no leases outstanding.
-    close();
-  }
-  
-  /**
-   * Shut down this Leases instance.  All pending leases will be destroyed, 
-   * without any cancellation calls.
-   */
-  public void close() {
-    LOG.info(Thread.currentThread().getName() + " closing leases");
-    this.stop.set(true);
-    while (this.leaseMonitorThread.isAlive()) {
-      try {
-        this.leaseMonitorThread.interrupt();
-        this.leaseMonitorThread.join();
-      } catch (InterruptedException iex) {
-        // Ignore
-      }
-    }
-    synchronized(leases) {
-      synchronized(sortedLeases) {
-        leases.clear();
-        sortedLeases.clear();
-      }
-    }
-    LOG.info(Thread.currentThread().getName() + " closed leases");
-  }
-
-  /* A client obtains a lease... */
-  
-  /**
-   * Obtain a lease
-   * 
-   * @param holderId id of lease holder
-   * @param resourceId id of resource being leased
-   * @param listener listener that will process lease expirations
-   */
-  public void createLease(final long holderId, final long resourceId,
-      final LeaseListener listener) {
-    LeaseName name = null;
-    synchronized(leases) {
-      synchronized(sortedLeases) {
-        Lease lease = new Lease(holderId, resourceId, listener);
-        name = lease.getLeaseName();
-        if(leases.get(name) != null) {
-          throw new AssertionError("Impossible state for createLease(): " +
-            "Lease " + name + " is still held.");
-        }
-        leases.put(name, lease);
-        sortedLeases.add(lease);
-      }
-    }
-//    if (LOG.isDebugEnabled()) {
-//      LOG.debug("Created lease " + name);
-//    }
-  }
-  
-  /* A client renews a lease... */
-  /**
-   * Renew a lease
-   * 
-   * @param holderId id of lease holder
-   * @param resourceId id of resource being leased
-   * @throws IOException
-   */
-  public void renewLease(final long holderId, final long resourceId)
-  throws IOException {
-    LeaseName name = null;
-    synchronized(leases) {
-      synchronized(sortedLeases) {
-        name = createLeaseName(holderId, resourceId);
-        Lease lease = leases.get(name);
-        if (lease == null) {
-          // It's possible that someone tries to renew the lease, but 
-          // it just expired a moment ago.  So fail.
-          throw new IOException("Cannot renew lease that is not held: " +
-            name);
-        }
-        sortedLeases.remove(lease);
-        lease.renew();
-        sortedLeases.add(lease);
-      }
-    }
-//    if (LOG.isDebugEnabled()) {
-//      LOG.debug("Renewed lease " + name);
-//    }
-  }
-
-  /**
-   * Client explicitly cancels a lease.
-   * 
-   * @param holderId id of lease holder
-   * @param resourceId id of resource being leased
-   */
-  public void cancelLease(final long holderId, final long resourceId) {
-    LeaseName name = null;
-    synchronized(leases) {
-      synchronized(sortedLeases) {
-        name = createLeaseName(holderId, resourceId);
-        Lease lease = leases.get(name);
-        if (lease == null) {
-          // It's possible that someone tries to cancel the lease, but 
-          // it just expired a moment ago.  So just skip it.
-          return;
-        }
-        sortedLeases.remove(lease);
-        leases.remove(name);
-      }
-    }
-  }
-
-  /**
-   * LeaseMonitor is a thread that expires Leases that go on too long.
-   * It's a daemon thread.
-   */
-  class LeaseMonitor extends Chore {
-    /**
-     * @param p
-     * @param s
-     */
-    public LeaseMonitor(int p, AtomicBoolean s) {
-      super(p, s);
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    protected void chore() {
-      synchronized(leases) {
-        synchronized(sortedLeases) {
-          Lease top;
-          while((sortedLeases.size() > 0)
-              && ((top = sortedLeases.first()) != null)) {
-            if(top.shouldExpire()) {
-              leases.remove(top.getLeaseName());
-              sortedLeases.remove(top);
-              top.expired();
-            } else {
-              break;
-            }
-          }
-        }
-      }
-    }
-  }
-  
-  /*
-   * A Lease name.
-   * More lightweight than String or Text.
-   */
-  @SuppressWarnings("unchecked")
-  class LeaseName implements Comparable {
-    private final long holderId;
-    private final long resourceId;
-    
-    LeaseName(final long hid, final long rid) {
-      this.holderId = hid;
-      this.resourceId = rid;
-    }
-    
-    /** {@inheritDoc} */
-    @Override
-    public boolean equals(Object obj) {
-      LeaseName other = (LeaseName)obj;
-      return this.holderId == other.holderId &&
-        this.resourceId == other.resourceId;
-    }
-    
-    /** {@inheritDoc} */
-    @Override
-    public int hashCode() {
-      // Copy OR'ing from javadoc for Long#hashCode.
-      int result = (int)(this.holderId ^ (this.holderId >>> 32));
-      result ^= (int)(this.resourceId ^ (this.resourceId >>> 32));
-      return result;
-    }
-    
-    /** {@inheritDoc} */
-    @Override
-    public String toString() {
-      return Long.toString(this.holderId) + "/" +
-        Long.toString(this.resourceId);
-    }
-
-    /** {@inheritDoc} */
-    public int compareTo(Object obj) {
-      LeaseName other = (LeaseName)obj;
-      if (this.holderId < other.holderId) {
-        return -1;
-      }
-      if (this.holderId > other.holderId) {
-        return 1;
-      }
-      // holderIds are equal
-      if (this.resourceId < other.resourceId) {
-        return -1;
-      }
-      if (this.resourceId > other.resourceId) {
-        return 1;
-      }
-      // Objects are equal
-      return 0;
-    }
-  }
-  
-  /** Create a lease id out of the holder and resource ids. */
-  protected LeaseName createLeaseName(final long hid, final long rid) {
-    return new LeaseName(hid, rid);
-  }
-
-  /** This class tracks a single Lease. */
-  @SuppressWarnings("unchecked")
-  private class Lease implements Comparable {
-    final long holderId;
-    final long resourceId;
-    final LeaseListener listener;
-    long lastUpdate;
-    private LeaseName leaseId;
-
-    Lease(final long holderId, final long resourceId,
-        final LeaseListener listener) {
-      this.holderId = holderId;
-      this.resourceId = resourceId;
-      this.listener = listener;
-      renew();
-    }
-    
-    synchronized LeaseName getLeaseName() {
-      if (this.leaseId == null) {
-        this.leaseId = createLeaseName(holderId, resourceId);
-      }
-      return this.leaseId;
-    }
-    
-    boolean shouldExpire() {
-      return (System.currentTimeMillis() - lastUpdate > leasePeriod);
-    }
-    
-    void renew() {
-      this.lastUpdate = System.currentTimeMillis();
-    }
-    
-    void expired() {
-      LOG.info(Thread.currentThread().getName() + " lease expired " +
-        getLeaseName());
-      listener.leaseExpired();
-    }
-    
-    /** {@inheritDoc} */
-    @Override
-    public boolean equals(Object obj) {
-      return compareTo(obj) == 0;
-    }
-    
-    /** {@inheritDoc} */
-    @Override
-    public int hashCode() {
-      int result = this.getLeaseName().hashCode();
-      result ^= this.lastUpdate;
-      return result;
-    }
-    
-    //////////////////////////////////////////////////////////////////////////////
-    // Comparable
-    //////////////////////////////////////////////////////////////////////////////
-
-    /** {@inheritDoc} */
-    public int compareTo(Object o) {
-      Lease other = (Lease) o;
-      if(this.lastUpdate < other.lastUpdate) {
-        return -1;
-      } else if(this.lastUpdate > other.lastUpdate) {
-        return 1;
-      } else {
-        return this.getLeaseName().compareTo(other.getLeaseName());
-      }
-    }
-  }
-}
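
End to end, the lifecycle described in the class comment looks roughly like the sketch below: one lease per (holder, resource) pair, periodic renewal standing in for the client heartbeat, and either explicit cancellation or expiry. The period, thread name, and id values are illustrative:

    void demo() throws IOException {
      final Leases leases = new Leases(30 * 1000, 1000);   // 30 second leases, checked every second
      leases.setName("example.leaseChecker");
      leases.start();

      long holderId = 42L, resourceId = 7L;
      leases.createLease(holderId, resourceId, new LeaseListener() {
        public void leaseExpired() {
          System.err.println("holder 42 stopped heartbeating; release resource 7");
        }
      });

      leases.renewLease(holderId, resourceId);   // a heartbeat arrived in time
      leases.cancelLease(holderId, resourceId);  // the client finished cleanly
      leases.closeAfterLeasesExpire();           // or close() to tear everything down at once
    }
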

+ 0 - 307
src/contrib/hbase/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java

@@ -1,307 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.util.ReflectionUtils;
-
-/**
- * This class creates a single process HBase cluster. One thread is created for
- * a master and one per region server.
- * 
- * Call {@link #startup()} to start the cluster running and {@link #shutdown()}
-   * to close it all down. {@link #join} the cluster if you want to wait on
- * shutdown completion.
- * 
- * <p>Runs master on port 60000 by default.  Because we can't just kill the
- * process -- not till HADOOP-1700 gets fixed and even then.... -- we need to
- * be able to find the master with a remote client to run shutdown.  To use a
- * port other than 60000, set the hbase.master to a value of 'local:PORT':
- * that is 'local', not 'localhost', and the port number the master should use
- * instead of 60000.
- * 
- * <p>To make 'local' mode more responsive, make values such as
- * <code>hbase.regionserver.msginterval</code>,
- * <code>hbase.master.meta.thread.rescanfrequency</code>, and
- * <code>hbase.server.thread.wakefrequency</code> a second or less.
- */
-public class LocalHBaseCluster implements HConstants {
-  static final Log LOG = LogFactory.getLog(LocalHBaseCluster.class);
-  private final HMaster master;
-  private final List<RegionServerThread> regionThreads;
-  private final static int DEFAULT_NO = 1;
-  /** local mode */
-  public static final String LOCAL = "local";
-  /** 'local:' */
-  public static final String LOCAL_COLON = LOCAL + ":";
-  private final HBaseConfiguration conf;
-
-  /**
-   * Constructor.
-   * @param conf
-   * @throws IOException
-   */
-  public LocalHBaseCluster(final HBaseConfiguration conf)
-  throws IOException {
-    this(conf, DEFAULT_NO);
-  }
-
-  /**
-   * Constructor.
-   * @param conf Configuration to use.  After construction it contains the
-   * master's address.
-   * @param noRegionServers Count of regionservers to start.
-   * @throws IOException
-   */
-  public LocalHBaseCluster(final HBaseConfiguration conf,
-    final int noRegionServers)
-  throws IOException {
-    super();
-    this.conf = conf;
-    doLocal(conf);
-    // Create the master
-    this.master = new HMaster(conf);
-    // Set the master's port for the HRegionServers
-    conf.set(MASTER_ADDRESS, this.master.getMasterAddress().toString());
-    // Start the HRegionServers.  Always have region servers come up on
-    // port '0' so there won't be clashes over default port as unit tests
-    // start/stop ports at different times during the life of the test.
-    conf.set(REGIONSERVER_ADDRESS, DEFAULT_HOST + ":0");
-    this.regionThreads = new ArrayList<RegionServerThread>();
-    for (int i = 0; i < noRegionServers; i++) {
-      addRegionServer();
-    }
-  }
-
-  /**
-   * Creates a region server.
-   * Call 'start' on the returned thread to make it run.
-   *
-   * @throws IOException
-   * @return Region server added.
-   */
-  public RegionServerThread addRegionServer() throws IOException {
-    synchronized (regionThreads) {
-      RegionServerThread t = new RegionServerThread(new HRegionServer(conf),
-          this.regionThreads.size());
-      this.regionThreads.add(t);
-      return t;
-    }
-  }
-
-  /** runs region servers */
-  public static class RegionServerThread extends Thread {
-    private final HRegionServer regionServer;
-    
-    RegionServerThread(final HRegionServer r, final int index) {
-      super(r, "RegionServer:" + index);
-      this.regionServer = r;
-    }
-
-    /** @return the region server */
-    public HRegionServer getRegionServer() {
-      return this.regionServer;
-    }
-  }
-
-  /**
-   * @return the HMaster thread
-   */
-  public HMaster getMaster() {
-    return this.master;
-  }
-
-  /**
-   * @return Read-only list of region server threads.
-   */
-  public List<RegionServerThread> getRegionServers() {
-    return Collections.unmodifiableList(this.regionThreads);
-  }
-
-  /**
-   * Wait for the specified region server to stop, and remove its thread
-   * from the list of running threads.
-   * @param serverNumber
-   * @return Name of region server that just went down.
-   */
-  public String waitOnRegionServer(int serverNumber) {
-    RegionServerThread regionServerThread;
-    synchronized (regionThreads) {
-      regionServerThread = this.regionThreads.remove(serverNumber);
-    }
-    while (regionServerThread.isAlive()) {
-      try {
-        LOG.info("Waiting on " +
-            regionServerThread.getRegionServer().serverInfo.toString());
-        regionServerThread.join();
-      } catch (InterruptedException e) {
-        e.printStackTrace();
-      }
-    }
-    return regionServerThread.getName();
-  }
-
-  /**
-   * Wait for Mini HBase Cluster to shut down.
-   * Presumes you've already called {@link #shutdown()}.
-   */
-  public void join() {
-    if (this.regionThreads != null) {
-      synchronized(this.regionThreads) {
-        for(Thread t: this.regionThreads) {
-          if (t.isAlive()) {
-            try {
-              t.join();
-            } catch (InterruptedException e) {
-              // continue
-            }
-          }
-        }
-      }
-    }
-    if (this.master != null && this.master.isAlive()) {
-      try {
-        this.master.join();
-      } catch(InterruptedException e) {
-        // continue
-      }
-    }
-  }
-  
-  /**
-   * Start the cluster.
-   * @return Address to use contacting master.
-   */
-  public String startup() {
-    this.master.start();
-    synchronized (regionThreads) {
-      for (RegionServerThread t: this.regionThreads) {
-        t.start();
-      }
-    }
-    return this.master.getMasterAddress().toString();
-  }
-
-  /**
-   * Shut down the mini HBase cluster
-   */
-  public void shutdown() {
-    LOG.debug("Shutting down HBase Cluster");
-    if(this.master != null) {
-      this.master.shutdown();
-    }
-    // regionThreads can never be null because it is initialized when
-    // the class is constructed.
-    synchronized(this.regionThreads) {
-      for(Thread t: this.regionThreads) {
-        if (t.isAlive()) {
-          try {
-            t.join();
-          } catch (InterruptedException e) {
-            // continue
-          }
-        }
-      }
-    }
-    if (this.master != null) {
-      while (this.master.isAlive()) {
-        try {
-          // The below has been replaced to debug occasional hangs at the end of
-          // tests.
-          // this.master.join():
-          threadDumpingJoin(this.master);
-        } catch(InterruptedException e) {
-          // continue
-        }
-      }
-    }
-    LOG.info("Shutdown " +
-      ((this.master != null)? this.master.getName(): "0 masters") +
-      " " + this.regionThreads.size() + " region server(s)");
-  }
-
-  public void threadDumpingJoin(final Thread t) throws InterruptedException {
-    if (t == null) {
-      return;
-    }
-    long startTime = System.currentTimeMillis();
-    while (t.isAlive()) {
-      Thread.sleep(1000);
-      if (System.currentTimeMillis() - startTime > 60000) {
-        startTime = System.currentTimeMillis();
-        ReflectionUtils.printThreadInfo(new PrintWriter(System.out),
-            "Automatic Stack Trace every 60 seconds waiting on " +
-            t.getName());
-      }
-    }
-  }
-
-  /**
-   * Changes <code>hbase.master</code> from 'local' to 'localhost:PORT' in
-   * passed Configuration instance.
-   * @param c
-   * @return The passed <code>c</code> configuration modified if hbase.master
-   * value was 'local' otherwise, unaltered.
-   */
-  static HBaseConfiguration doLocal(final HBaseConfiguration c) {
-    if (!isLocal(c)) {
-      return c;
-    }
-    // Need to rewrite address in Configuration if not done already.
-    String address = c.get(MASTER_ADDRESS);
-    String port = address.startsWith(LOCAL_COLON)?
-      address.substring(LOCAL_COLON.length()):
-      Integer.toString(DEFAULT_MASTER_PORT);
-    c.set(MASTER_ADDRESS, "localhost:" + port);
-    return c;
-  }
-  
-  /**
-   * @param c Configuration to check.
-   * @return True if a 'local' address in hbase.master value.
-   */
-  public static boolean isLocal(final Configuration c) {
-    String address = c.get(MASTER_ADDRESS);
-    return address == null || address.equals(LOCAL) ||
-      address.startsWith(LOCAL_COLON);
-  }
-  
-  /**
-   * Test things basically work.
-   * @param args
-   * @throws IOException
-   */
-  public static void main(String[] args) throws IOException {
-    HBaseConfiguration conf = new HBaseConfiguration();
-    LocalHBaseCluster cluster = new LocalHBaseCluster(conf);
-    cluster.startup();
-    HBaseAdmin admin = new HBaseAdmin(conf);
-    admin.createTable(new HTableDescriptor(cluster.getClass().getName()));
-    cluster.shutdown();
-  }
-}
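
The class comment above suggests a 'local:PORT' master address and sub-second wake/rescan intervals for a snappier local cluster. A sketch of that setup; the port and the interval values are illustrative (check hbase-default.xml for the units each property expects):

    HBaseConfiguration conf = new HBaseConfiguration();
    conf.set("hbase.master", "local:21212");                      // 'local', not 'localhost'
    conf.set("hbase.regionserver.msginterval", "1");
    conf.set("hbase.master.meta.thread.rescanfrequency", "1000");
    conf.set("hbase.server.thread.wakefrequency", "1000");

    LocalHBaseCluster cluster = new LocalHBaseCluster(conf, 2);   // one master, two region servers
    String masterAddress = cluster.startup();
    System.out.println("master listening at " + masterAddress);
    // ... run clients against the cluster ...
    cluster.shutdown();
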

+ 0 - 41
src/contrib/hbase/src/java/org/apache/hadoop/hbase/LockException.java

@@ -1,41 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-/**
- * Thrown when a locking error is encountered
- */
-public class LockException extends IOException {
-  private static final long serialVersionUID = 1L << 13 - 1L;
-  /** default constructor */
-  public LockException() {
-    super();
-  }
-
-  /**
-   * Constructor
-   * @param s message
-   */
-  public LockException(String s) {
-    super(s);
-  }
-}

+ 0 - 29
src/contrib/hbase/src/java/org/apache/hadoop/hbase/LogRollListener.java

@@ -1,29 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase;
-
-/**
- * Mechanism by which the HLog requests a log roll
- */
-public interface LogRollListener {
-  /** Request that the log be rolled */
-  public void logRollRequested();
-}

+ 0 - 41
src/contrib/hbase/src/java/org/apache/hadoop/hbase/MasterNotRunningException.java

@@ -1,41 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-/**
- * Thrown if the master is not running
- */
-public class MasterNotRunningException extends IOException {
-  private static final long serialVersionUID = 1L << 23 - 1L;
-  /** default constructor */
-  public MasterNotRunningException() {
-    super();
-  }
-
-  /**
-   * Constructor
-   * @param s message
-   */
-  public MasterNotRunningException(String s) {
-    super(s);
-  }
-}

+ 0 - 42
src/contrib/hbase/src/java/org/apache/hadoop/hbase/NoServerForRegionException.java

@@ -1,42 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-/**
- * Thrown when no region server can be found for a region
- */
-public class NoServerForRegionException extends IOException {
-  private static final long serialVersionUID = 1L << 11 - 1L;
-
-  /** default constructor */
-  public NoServerForRegionException() {
-    super();
-  }
-
-  /**
-   * Constructor
-   * @param s message
-   */
-  public NoServerForRegionException(String s) {
-    super(s);
-  }
-}

+ 0 - 44
src/contrib/hbase/src/java/org/apache/hadoop/hbase/NotServingRegionException.java

@@ -1,44 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-/**
- * Thrown by a region server if it is sent a request for a region it is not
- * serving.
- */
-public class NotServingRegionException extends IOException {
-  private static final long serialVersionUID = 1L << 17 - 1L;
-
-  /** default constructor */
-  public NotServingRegionException() {
-    super();
-  }
-
-  /**
-   * Constructor
-   * @param s message
-   */
-  public NotServingRegionException(String s) {
-    super(s);
-  }
-
-}

+ 0 - 44
src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionServerRunningException.java

@@ -1,44 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-/**
- * Thrown if the region server log directory exists (which indicates another
- * region server is running at the same address)
- */
-public class RegionServerRunningException extends IOException {
-  private static final long serialVersionUID = 1L << 31 - 1L;
-  
-  /** Default Constructor */
-  public RegionServerRunningException() {
-    super();
-  }
-
-  /**
-   * Constructs the exception and supplies a string as the message
-   * @param s - message
-   */
-  public RegionServerRunningException(String s) {
-    super(s);
-  }
-
-}

+ 0 - 44
src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionUnavailableListener.java

@@ -1,44 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.hadoop.io.Text;
-
-/**
- * Used as a callback mechanism so that an HRegion can notify the HRegionServer
- * of the different stages of making an HRegion unavailable.  Regions are made
- * unavailable during region split operations.
- */
-public interface RegionUnavailableListener {
-  /**
-   * <code>regionName</code> is closing.
-   * Listener should stop accepting new writes but can continue to service
-   * outstanding transactions.
-   * @param regionName
-   */
-  public void closing(final Text regionName);
-  
-  /**
-   * <code>regionName</code> is closed and no longer available.
-   * Listener should clean up any references to <code>regionName</code>
-   * @param regionName
-   */
-  public void closed(final Text regionName);
-}
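
A minimal sketch of one way the RegionUnavailableListener interface above could be implemented; the class name TrackingRegionListener and the single-region bookkeeping are illustrative only, not code from the HBase tree.

import org.apache.hadoop.hbase.RegionUnavailableListener;
import org.apache.hadoop.io.Text;

public class TrackingRegionListener implements RegionUnavailableListener {
  // Region currently going offline, or null if none.
  private volatile Text unavailableRegion;

  // The region has begun closing: stop routing new writes to it.
  public void closing(final Text regionName) {
    this.unavailableRegion = regionName;
  }

  // The region is fully closed: drop any remaining references to it.
  public void closed(final Text regionName) {
    if (regionName.equals(this.unavailableRegion)) {
      this.unavailableRegion = null;
    }
  }
}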

+ 0 - 106
src/contrib/hbase/src/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java

@@ -1,106 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-
-import org.apache.hadoop.ipc.RemoteException;
-
-/** 
- * An immutable class which contains a static method for handling
- * org.apache.hadoop.ipc.RemoteException exceptions.
- */
-public class RemoteExceptionHandler {
-  /* Not instantiable */
-  private RemoteExceptionHandler() {super();}
-  
-  /**
-   * Examine the passed IOException.  See if it's carrying a RemoteException. If so,
-   * run {@link #decodeRemoteException(RemoteException)} on it.  Otherwise,
-   * pass back <code>e</code> unaltered.
-   * @param e Exception to examine.
-   * @return Decoded RemoteException carried by <code>e</code> or
-   * <code>e</code> unaltered.
-   */
-  public static IOException checkIOException(final IOException e) {
-    IOException result = e;
-    if (e instanceof RemoteException) {
-      try {
-        result = RemoteExceptionHandler.decodeRemoteException(
-            (RemoteException) e);
-      } catch (IOException ex) {
-        result = ex;
-      }
-    }
-    return result;
-  }
-  
-  /**
-   * Converts org.apache.hadoop.ipc.RemoteException into original exception,
-   * if possible. If the original exception is an Error or a RuntimeException,
-   * throws the original exception.
-   * 
-   * @param re original exception
-   * @return decoded RemoteException if it is an instance of or a subclass of
-   *         IOException, or the original RemoteException if it cannot be decoded.
-   * 
-   * @throws IOException indicating a server error occurred if the decoded 
-   *         exception is not an IOException. The decoded exception is set as
-   *         the cause.
-   */
-  @SuppressWarnings("unchecked")
-  public static IOException decodeRemoteException(final RemoteException re)
-  throws IOException {
-    IOException i = re;
-
-    try {
-      Class c = Class.forName(re.getClassName());
-
-      Class[] parameterTypes = { String.class };
-      Constructor ctor = c.getConstructor(parameterTypes);
-      
-      Object[] arguments = { re.getMessage() };
-      Throwable t = (Throwable) ctor.newInstance(arguments);
-      
-      if (t instanceof IOException) {
-        i = (IOException) t;
-
-      } else {
-        i = new IOException("server error");
-        i.initCause(t);
-        throw i;
-      }
-
-    } catch (ClassNotFoundException x) {
-      // continue
-    } catch (NoSuchMethodException x) {
-      // continue
-    } catch (IllegalAccessException x) {
-      // continue
-    } catch (InvocationTargetException x) {
-      // continue
-    } catch (InstantiationException x) {
-      // continue
-    }
-    return i;
-  }
-}
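
A small, self-contained sketch of how checkIOException above unwraps a Hadoop RemoteException back into its original type. The RemoteExceptionDemo class and the simulated failure are assumptions for illustration; only the RemoteExceptionHandler and TableNotFoundException classes come from the removed code.

import java.io.IOException;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.ipc.RemoteException;

public class RemoteExceptionDemo {
  public static void main(String[] args) {
    // Simulate what an RPC proxy would throw: a RemoteException naming the
    // real server-side class (TableNotFoundException appears later in this diff).
    IOException wrapped = new RemoteException(
        "org.apache.hadoop.hbase.TableNotFoundException", "no such table: foo");
    // checkIOException unwraps it back into a TableNotFoundException instance;
    // any other IOException would be passed back unaltered.
    IOException unwrapped = RemoteExceptionHandler.checkIOException(wrapped);
    System.out.println(unwrapped.getClass().getName() + ": " + unwrapped.getMessage());
  }
}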

+ 0 - 141
src/contrib/hbase/src/java/org/apache/hadoop/hbase/Shell.java

@@ -1,141 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
-
-import jline.ConsoleReader;
-
-import org.apache.hadoop.hbase.hql.HQLClient;
-import org.apache.hadoop.hbase.hql.HelpCommand;
-import org.apache.hadoop.hbase.hql.ReturnMsg;
-import org.apache.hadoop.hbase.hql.HQLSecurityManager;
-import org.apache.hadoop.hbase.hql.TableFormatter;
-import org.apache.hadoop.hbase.hql.TableFormatterFactory;
-import org.apache.hadoop.hbase.hql.formatter.HtmlTableFormatter;
-
-/**
- * An hbase shell.
- * 
- * @see <a
- *      href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseShell">HbaseShell</a>
- */
-public class Shell {
-  /** audible keyboard bells */
-  public static final boolean DEFAULT_BELL_ENABLED = true;
-  public static String MASTER_ADDRESS = null;
-  public static String HTML_OPTION = null;
-  public static int RELAUNCH_FLAG = 7;
-  public static int EXIT_FLAG = 9999;
-
-  /** Returns true if the given line completes the current command (contains a ';'). */
-  static boolean isEndOfCommand(String line) {
-    return line.lastIndexOf(';') > -1;
-  }
-
-  /** Returns the prompt string: the primary prompt for a new command, or a continuation prompt. */
-  private static String getPrompt(final StringBuilder queryStr) {
-    return (queryStr.toString().equals("")) ? "hql > " : "  --> ";
-  }
-
-  /**
-   * @param watch true if execution time should be computed and returned
-   * @param start start of time interval
-   * @param end end of time interval
-   * @return a string of code execution time.
-   */
-  public static String executeTime(boolean watch, long start, long end) {
-    return watch ? " ("
-        + String.format("%.2f", Double.valueOf((end - start) * 0.001)) + " sec)"
-        : "";
-  }
-
-  /**
-   * Main method
-   * 
-   * @param args not used
-   * @throws IOException
-   */
-  public static void main(String args[]) throws IOException {
-    argumentParsing(args);
-    if (args.length != 0) {
-      if (args[0].equals("--help") || args[0].equals("-h")) {
-        System.out
-            .println("Usage: ./bin/hbase shell [--master:master_address:port] [--html]\n");
-        System.exit(1);
-      }
-    }
-    
-    HBaseConfiguration conf = new HBaseConfiguration();
-    ConsoleReader reader = new ConsoleReader();
-    System.setSecurityManager(new HQLSecurityManager());
-    reader.setBellEnabled(conf.getBoolean("hbaseshell.jline.bell.enabled",
-        DEFAULT_BELL_ENABLED));
-    Writer out = new OutputStreamWriter(System.out, "UTF-8");
-    TableFormatter tableFormater = new TableFormatterFactory(out, conf).get();
-    if (MASTER_ADDRESS != null) {
-      conf.set("hbase.master", MASTER_ADDRESS.substring(9, MASTER_ADDRESS.length()));
-    }
-    if (HTML_OPTION != null) {
-      tableFormater = new HtmlTableFormatter(out);
-    }
-
-    HelpCommand help = new HelpCommand(out, tableFormater);
-    if (args.length == 0 || !args[0].equals(String.valueOf(Shell.RELAUNCH_FLAG))) {
-      help.printVersion();
-    }
-    StringBuilder queryStr = new StringBuilder();
-    String extendedLine;
-    while ((extendedLine = reader.readLine(getPrompt(queryStr))) != null) {
-      if (isEndOfCommand(extendedLine)) {
-        queryStr.append(" " + extendedLine);
-        long start = System.currentTimeMillis();
-
-        HQLClient hql = new HQLClient(conf, MASTER_ADDRESS, out, tableFormater);
-        ReturnMsg rs = hql.executeQuery(queryStr.toString());
-
-        long end = System.currentTimeMillis();
-        if (rs != null) {
-          if (rs.getType() > -1)
-            System.out.println(rs.getMsg() +
-              executeTime((rs.getType() == 1), start, end));
-          else if (rs.getType() == -9)           
-            System.out.println(rs.getMsg());
-        }
-        queryStr = new StringBuilder();
-      } else {
-        queryStr.append(" " + extendedLine);
-      }
-    }
-    System.out.println();
-  }
-
-  private static void argumentParsing(String[] args) {
-    for (int i = 0; i < args.length; i++) {
-      if (args[i].toLowerCase().startsWith("--master:")) {
-        MASTER_ADDRESS = args[i];
-      } else if (args[i].toLowerCase().startsWith("--html")) {
-        HTML_OPTION = args[i];
-      }
-    }
-  }
-}
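
A stripped-down sketch of the shell's accumulate-until-semicolon loop shown above, using plain standard input instead of jline and a println in place of HQLClient.executeQuery; the MiniShell class is hypothetical.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;

public class MiniShell {
  // Same test the shell uses: a command ends once a line contains a ';'.
  static boolean isEndOfCommand(String line) {
    return line.lastIndexOf(';') > -1;
  }

  public static void main(String[] args) throws IOException {
    BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
    StringBuilder queryStr = new StringBuilder();
    String line;
    while ((line = reader.readLine()) != null) {
      queryStr.append(" ").append(line);
      if (isEndOfCommand(line)) {
        System.out.println("would execute: " + queryStr.toString().trim());
        queryStr = new StringBuilder();   // start accumulating the next command
      }
    }
  }
}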

+ 0 - 38
src/contrib/hbase/src/java/org/apache/hadoop/hbase/TableExistsException.java

@@ -1,38 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-/**
- * Thrown when a table exists but should not
- */
-public class TableExistsException extends IOException {
-  private static final long serialVersionUID = 1L << 7 - 1L;
-  /** default constructor */
-  public TableExistsException() {
-    super();
-  }
-
-  /**
-   * Constructor
-   * 
-   * @param s message
-   */
-  public TableExistsException(String s) {
-    super(s);
-  }
-}

+ 0 - 41
src/contrib/hbase/src/java/org/apache/hadoop/hbase/TableNotDisabledException.java

@@ -1,41 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-/**
- * Thrown if a table should be offline but is not
- */
-public class TableNotDisabledException extends IOException {
-  private static final long serialVersionUID = 1L << 19 - 1L;
-  /** default constructor */
-  public TableNotDisabledException() {
-    super();
-  }
-
-  /**
-   * Constructor
-   * @param s message
-   */
-  public TableNotDisabledException(String s) {
-    super(s);
-  }
-}

+ 0 - 37
src/contrib/hbase/src/java/org/apache/hadoop/hbase/TableNotFoundException.java

@@ -1,37 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-/** Thrown when a table can not be located */
-public class TableNotFoundException extends IOException {
-  private static final long serialVersionUID = 993179627856392526L;
-
-  /** default constructor */
-  public TableNotFoundException() {
-    super();
-  }
-
-  /** @param s message */
-  public TableNotFoundException(String s) {
-    super(s);
-  }
-}

+ 0 - 42
src/contrib/hbase/src/java/org/apache/hadoop/hbase/UnknownScannerException.java

@@ -1,42 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-/**
- * Thrown if a region server is passed an unknown scanner id
- */
-public class UnknownScannerException extends IOException {
-  private static final long serialVersionUID = 993179627856392526L;
-
-  /** constructor */
-  public UnknownScannerException() {
-    super();
-  }
-
-  /**
-   * Constructor
-   * @param s message
-   */
-  public UnknownScannerException(String s) {
-    super(s);
-  }
-}

+ 0 - 42
src/contrib/hbase/src/java/org/apache/hadoop/hbase/WrongRegionException.java

@@ -1,42 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-/**
- * Thrown when a request contains a key which is not part of this region
- */
-public class WrongRegionException extends IOException {
-  private static final long serialVersionUID = 993179627856392526L;
-
-  /** constructor */
-  public WrongRegionException() {
-    super();
-  }
-
-  /**
-   * Constructor
-   * @param s message
-   */
-  public WrongRegionException(String s) {
-    super(s);
-  }
-}

+ 0 - 67
src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java

@@ -1,67 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.filter;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.Text;
-
-
-/*
- * Subclass of StopRowFilter that filters rows > the stop row,
- * making it include up to the last row but no further.
- */
-public class InclusiveStopRowFilter extends StopRowFilter{
-  /**
-   * Default constructor, filters nothing. Required though for RPC
-   * deserialization.
-   */
-  public InclusiveStopRowFilter() {super();}
-
-  /**
-   * Constructor that takes a stopRowKey on which to filter
-   * 
-   * @param stopRowKey rowKey to filter on.
-   */
-  public InclusiveStopRowFilter(final Text stopRowKey) {
-    super(stopRowKey);
-  }
-  
-  public boolean filter(final Text rowKey) {
-    if (rowKey == null) {
-      if (this.stopRowKey == null) {
-        return true;
-      }
-      return false;
-    }    
-    boolean result = this.stopRowKey.compareTo(rowKey) < 0;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Filter result for rowKey: " + rowKey + ".  Result: " + 
-        result);
-    }
-    return result;
-  }
-  
-}
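
A short sketch of the boundary behaviour this subclass changes, assuming the StopRowFilter parent that appears later in this diff; the StopRowBoundaryDemo class and row keys are made up for illustration.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.filter.InclusiveStopRowFilter;
import org.apache.hadoop.hbase.filter.StopRowFilter;

public class StopRowBoundaryDemo {
  public static void main(String[] args) {
    Text stop = new Text("row-100");
    // filter(...) returns true when a row should be excluded.
    System.out.println(new StopRowFilter(stop).filter(new Text("row-100")));          // true: stop row excluded
    System.out.println(new InclusiveStopRowFilter(stop).filter(new Text("row-100"))); // false: stop row included
    System.out.println(new InclusiveStopRowFilter(stop).filter(new Text("row-101"))); // true: past the stop row
  }
}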

+ 0 - 41
src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java

@@ -1,41 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.filter;
-
-/**
- * Used to indicate an invalid RowFilter.
- */
-public class InvalidRowFilterException extends RuntimeException {
-  private static final long serialVersionUID = 2667894046345657865L;
-
-
-  /** constructor */
-  public InvalidRowFilterException() {
-    super();
-  }
-
-  /**
-   * constructor
-   * @param s message
-   */
-  public InvalidRowFilterException(String s) {
-    super(s);
-  }
-}

+ 0 - 159
src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java

@@ -1,159 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.filter;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.Text;
-
-/**
- * Implementation of RowFilterInterface that limits results to a specific page
- * size. It terminates scanning once the number of filter-passed results is >=
- * the given page size.
- * 
- * <p>
- * Note that this filter cannot guarantee that the number of results returned
- * to a client is <= page size. This is because the filter is applied
- * separately on different region servers. It does however optimize the scan of
- * individual HRegions by making sure that the page size is never exceeded
- * locally.
- * </p>
- */
-public class PageRowFilter implements RowFilterInterface {
-
-  private long pageSize = Long.MAX_VALUE;
-  private int rowsAccepted = 0;
-
-  static final Log LOG = LogFactory.getLog(PageRowFilter.class);
-  
-  /**
-   * Default constructor, filters nothing. Required though for RPC
-   * deserialization.
-   */
-  public PageRowFilter() {
-    super();
-  }
-
-  /**
-   * Constructor that takes a maximum page size.
-   * 
-   * @param pageSize Maximum result size.
-   */
-  public PageRowFilter(final long pageSize) {
-    this.pageSize = pageSize;
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public void validate(@SuppressWarnings("unused") final Text[] columns) {
-    // Doesn't filter columns
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public void reset() {
-    rowsAccepted = 0;
-  }
-
-  /** {@inheritDoc} */
-  public void rowProcessed(boolean filtered,
-      @SuppressWarnings("unused") Text rowKey) {
-    if (!filtered) {
-      this.rowsAccepted++;
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("rowProcessed incremented rowsAccepted to " + 
-          this.rowsAccepted);
-      }
-    }
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public boolean processAlways() {
-    return false;
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public boolean filterAllRemaining() {
-    boolean result = this.rowsAccepted > this.pageSize;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("filtering decision is " + result + " with rowsAccepted: " + 
-        this.rowsAccepted);
-    }
-    return result;
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public boolean filter(@SuppressWarnings("unused") final Text rowKey) {
-    return filterAllRemaining();
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public boolean filter(@SuppressWarnings("unused") final Text rowKey,
-    @SuppressWarnings("unused") final Text colKey,
-    @SuppressWarnings("unused") final byte[] data) {
-    return filterAllRemaining();
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public boolean filterNotNull(@SuppressWarnings("unused")
-      final TreeMap<Text, byte[]> columns) {
-    return filterAllRemaining();
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public void readFields(final DataInput in) throws IOException {
-    this.pageSize = in.readLong();
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public void write(final DataOutput out) throws IOException {
-    out.writeLong(pageSize);
-  }
-}
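
A minimal sketch of how the page-size bookkeeping above behaves, driving rowProcessed and filterAllRemaining directly rather than through a real region scan; the PageRowFilterDemo class and row keys are illustrative.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.filter.PageRowFilter;

public class PageRowFilterDemo {
  public static void main(String[] args) {
    PageRowFilter filter = new PageRowFilter(2);      // at most two rows per region scan
    filter.rowProcessed(false, new Text("row-1"));    // accepted, count = 1
    filter.rowProcessed(false, new Text("row-2"));    // accepted, count = 2
    System.out.println(filter.filterAllRemaining());  // false: page not yet exceeded
    filter.rowProcessed(false, new Text("row-3"));    // accepted, count = 3
    System.out.println(filter.filterAllRemaining());  // true: stop scanning this region
  }
}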

+ 0 - 340
src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java

@@ -1,340 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.filter;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.Map.Entry;
-import java.util.regex.Pattern;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.Text;
-
-import org.apache.hadoop.hbase.HLogEdit;
-
-/**
- * Implementation of RowFilterInterface that can filter by rowkey regular
- * expression and/or individual column values (equals comparison only).
- * Multiple column filters imply an implicit conjunction of filter criteria.
- */
-public class RegExpRowFilter implements RowFilterInterface {
-
-  private Pattern rowKeyPattern = null;
-  private String rowKeyRegExp = null;
-
-  private Map<Text, byte[]> equalsMap = new HashMap<Text, byte[]>();
-  private Set<Text> nullColumns = new HashSet<Text>();
-
-  static final Log LOG = LogFactory.getLog(RegExpRowFilter.class);
-  
-  /**
-   * Default constructor, filters nothing. Required though for RPC
-   * deserialization.
-   */
-  public RegExpRowFilter() {
-    super();
-  }
-
-  /**
-   * Constructor that takes a row key regular expression to filter on.
-   * 
-   * @param rowKeyRegExp
-   */
-  public RegExpRowFilter(final String rowKeyRegExp) {
-    this.rowKeyRegExp = rowKeyRegExp;
-  }
-
-  /**
-   * Constructor that takes a row key regular expression to filter on.
-   * 
-   * @param rowKeyRegExp
-   * @param columnFilter
-   */
-  public RegExpRowFilter(final String rowKeyRegExp,
-      final Map<Text, byte[]> columnFilter) {
-    this.rowKeyRegExp = rowKeyRegExp;
-    this.setColumnFilters(columnFilter);
-  }
-  
-  /** {@inheritDoc} */
-  @SuppressWarnings("unused")
-  public void rowProcessed(boolean filtered, Text rowKey) {
-    //doesn't care
-  }
-
-  /** {@inheritDoc} */
-  public boolean processAlways() {
-    return false;
-  }
-  
-  /**
-   * Specify a value that must be matched for the given column.
-   * 
-   * @param colKey
-   *          the column to match on
-   * @param value
-   *          the value that must equal the stored value.
-   */
-  public void setColumnFilter(final Text colKey, final byte[] value) {
-    if (value == null) {
-      nullColumns.add(colKey);
-    } else {
-      equalsMap.put(colKey, value);
-    }
-  }
-
-  /**
-   * Set column filters for a number of columns.
-   * 
-   * @param columnFilter
-   *          Map of columns with value criteria.
-   */
-  public void setColumnFilters(final Map<Text, byte[]> columnFilter) {
-    if (null == columnFilter) {
-      nullColumns.clear();
-      equalsMap.clear();
-    } else {
-      for (Entry<Text, byte[]> entry : columnFilter.entrySet()) {
-        setColumnFilter(entry.getKey(), entry.getValue());
-      }
-    }
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public void reset() {
-    // Nothing to reset
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public boolean filterAllRemaining() {
-    return false;
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public boolean filter(final Text rowKey) {
-    if (filtersByRowKey() && rowKey != null) {
-      boolean result = !getRowKeyPattern().matcher(rowKey.toString()).matches();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("filter returning " + result + " for rowKey: " + rowKey);
-      }
-      return result;
-    }
-    return false;
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public boolean filter(final Text rowKey, final Text colKey,
-      final byte[] data) {
-    if (filter(rowKey)) {
-      return true;
-    }
-    if (filtersByColumnValue()) {
-      byte[] filterValue = equalsMap.get(colKey);
-      if (null != filterValue) {
-        boolean result = !Arrays.equals(filterValue, data);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("filter returning " + result + " for rowKey: " + rowKey + 
-            " colKey: " + colKey);
-        }
-        return result;
-      }
-    }
-    if (nullColumns.contains(colKey)) {
-      if (data != null && !HLogEdit.isDeleted(data)) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("filter returning true for rowKey: " + rowKey + 
-            " colKey: " + colKey);
-        }
-        return true;
-      }
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("filter returning false for rowKey: " + rowKey + " colKey: " + 
-        colKey);
-    }
-    return false;
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public boolean filterNotNull(final TreeMap<Text, byte[]> columns) {
-    for (Entry<Text, byte[]> col : columns.entrySet()) {
-      if (nullColumns.contains(col.getKey())
-          && !HLogEdit.isDeleted(col.getValue())) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("filterNotNull returning true for colKey: " + col.getKey()
-            + ", column should be null.");
-        }
-        return true;
-      }
-    }
-    for (Text col : equalsMap.keySet()) {
-      if (!columns.containsKey(col)) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("filterNotNull returning true for colKey: " + col + 
-            ", column not found in given TreeMap<Text, byte[]>.");
-        }
-        return true;
-      }
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("filterNotNull returning false.");
-    }
-    return false;
-  }
-
-  private boolean filtersByColumnValue() {
-    return equalsMap != null && equalsMap.size() > 0;
-  }
-
-  private boolean filtersByRowKey() {
-    return null != rowKeyPattern || null != rowKeyRegExp;
-  }
-
-  private String getRowKeyRegExp() {
-    if (null == rowKeyRegExp && rowKeyPattern != null) {
-      rowKeyRegExp = rowKeyPattern.toString();
-    }
-    return rowKeyRegExp;
-  }
-
-  private Pattern getRowKeyPattern() {
-    if (rowKeyPattern == null && rowKeyRegExp != null) {
-      rowKeyPattern = Pattern.compile(rowKeyRegExp);
-    }
-    return rowKeyPattern;
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public void readFields(final DataInput in) throws IOException {
-    boolean hasRowKeyPattern = in.readBoolean();
-    if (hasRowKeyPattern) {
-      rowKeyRegExp = in.readUTF();
-    }
-    // equals map
-    equalsMap.clear();
-    int size = in.readInt();
-    for (int i = 0; i < size; i++) {
-      Text key = new Text();
-      key.readFields(in);
-      int len = in.readInt();
-      byte[] value = null;
-      if (len >= 0) {
-        value = new byte[len];
-        in.readFully(value);
-      }
-      setColumnFilter(key, value);
-    }
-    // nullColumns
-    nullColumns.clear();
-    size = in.readInt();
-    for (int i = 0; i < size; i++) {
-      Text key = new Text();
-      key.readFields(in);
-      setColumnFilter(key, null);
-    }
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public void validate(final Text[] columns) {
-    Set<Text> invalids = new HashSet<Text>();
-    for (Text colKey : getFilterColumns()) {
-      boolean found = false;
-      for (Text col : columns) {
-        if (col.equals(colKey)) {
-          found = true;
-          break;
-        }
-      }
-      if (!found) {
-        invalids.add(colKey);
-      }
-    }
-
-    if (invalids.size() > 0) {
-      throw new InvalidRowFilterException(String.format(
-          "RowFilter contains criteria on columns %s not in %s", invalids,
-          Arrays.toString(columns)));
-    }
-  }
-
-  private Set<Text> getFilterColumns() {
-    Set<Text> cols = new HashSet<Text>();
-    cols.addAll(equalsMap.keySet());
-    cols.addAll(nullColumns);
-    return cols;
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public void write(final DataOutput out) throws IOException {
-    if (!filtersByRowKey()) {
-      out.writeBoolean(false);
-    } else {
-      out.writeBoolean(true);
-      out.writeUTF(getRowKeyRegExp());
-    }
-
-    // equalsMap
-    out.writeInt(equalsMap.size());
-    for (Entry<Text, byte[]> entry : equalsMap.entrySet()) {
-      entry.getKey().write(out);
-      byte[] value = entry.getValue();
-      out.writeInt(value.length);
-      out.write(value);
-    }
-
-    // null columns
-    out.writeInt(nullColumns.size());
-    for (Text col : nullColumns) {
-      col.write(out);
-    }
-  }
-}
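
A small usage sketch of the row-key regexp plus column-equals filtering described above; the "user-" key prefix and the info:status column are invented for illustration.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.filter.RegExpRowFilter;

public class RegExpRowFilterDemo {
  public static void main(String[] args) {
    // Keep only rows whose key matches "user-.*" AND whose info:status
    // column equals "active".
    Map<Text, byte[]> columnCriteria = new HashMap<Text, byte[]>();
    columnCriteria.put(new Text("info:status"), "active".getBytes());
    RegExpRowFilter filter = new RegExpRowFilter("user-.*", columnCriteria);

    // filter(...) returns true when the row/cell should be excluded.
    System.out.println(filter.filter(new Text("order-17")));   // true: key does not match
    System.out.println(filter.filter(new Text("user-17"),
        new Text("info:status"), "active".getBytes()));        // false: key and value match
  }
}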

+ 0 - 125
src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java

@@ -1,125 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.filter;
-
-import java.util.TreeMap;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-
-/**
- * 
- * Interface used for row-level filters applied to HRegion.HScanner scan
- * results during calls to next().
- */
-public interface RowFilterInterface extends Writable {
-
-  /**
-   * Resets the state of the filter. Used prior to the start of a Region scan.
-   * 
-   */
-  void reset();
-
-  /**
-   * Called to let the filter know the final decision (to pass or filter) on a
-   * given row.  Without HScanner calling this, the filter cannot know whether a
-   * row ultimately passed filtering, because even if this filter passed the row,
-   * other filters may have failed it. E.g. when this filter is a member of a 
-   * RowFilterSet with an OR operator.
-   * 
-   * @see RowFilterSet
-   * @param filtered
-   * @param key
-   */
-  void rowProcessed(boolean filtered, Text key);
-
-  /**
-   * Returns whether or not the filter should always be processed in any 
-   * filtering call.  This precaution is necessary for filters that maintain 
-   * state and need to be updated according to their response to filtering 
-   * calls (see WhileMatchRowFilter for an example).  At times, filters nested 
-   * in RowFilterSets may or may not be called because the RowFilterSet 
-   * determines a result as fast as possible.  Returning true for 
-   * processAlways() ensures that the filter will always be called.
-   * 
-   * @return whether or not to always process the filter
-   */
-  boolean processAlways();
-  
-  /**
-   * Determines if the filter has decided that all remaining results should be
-   * filtered (skipped). This is used to prevent the scanner from scanning
-   * the rest of the HRegion when the filter is certain to exclude all
-   * remaining rows.
-   * 
-   * @return true if the filter intends to filter all remaining rows.
-   */
-  boolean filterAllRemaining();
-
-  /**
-   * Filters on just a row key.
-   * 
-   * @param rowKey
-   * @return true if given row key is filtered and row should not be processed.
-   */
-  boolean filter(final Text rowKey);
-
-  /**
-   * Filters on row key and/or a column key.
-   * 
-   * @param rowKey
-   *          row key to filter on. May be null for no filtering of row key.
-   * @param colKey
-   *          column whose data will be filtered
-   * @param data
-   *          column value
-   * @return true if row filtered and should not be processed.
-   */
-  boolean filter(final Text rowKey, final Text colKey, final byte[] data);
-
-  /**
-   * Filters a row if:
-   * 1) The given row (@param columns) has a columnKey expected to be null AND 
-   * the value associated with that columnKey is non-null.
-   * 2) The filter has a criterion for a particular columnKey, but that 
-   * columnKey is not in the given row (@param columns).
-   * 
-   * Note that filterNotNull does not care whether the values associated with a 
-   * columnKey match.  Also note that a "null value" associated with a columnKey 
-   * is expressed as HConstants.DELETE_BYTES.
-   * 
-   * @param columns
-   * @return true if null/non-null criteria not met.
-   */
-  boolean filterNotNull(final TreeMap<Text, byte[]> columns);
-
-  /**
-   * Validates that this filter applies only to a subset of the given columns.
-   * This check is done prior to opening of scanner due to the limitation that
-   * filtering of columns is dependent on the retrieval of those columns within
-   * the HRegion. Criteria on columns that are not part of a scanner's column
-   * list will be ignored. In the case of null-value filters, all rows would then
-   * pass the filter. Because that behavior would appear undefined to the user,
-   * it is not permitted.
-   * 
-   * @param columns
-   */
-  void validate(final Text[] columns);
-}
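
As a sketch of the contract above, here is a minimal hypothetical implementation that excludes rows with a given key prefix and keeps no per-scan state; PrefixExcludeRowFilter is not part of the removed code.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.TreeMap;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.filter.RowFilterInterface;

public class PrefixExcludeRowFilter implements RowFilterInterface {
  private Text prefix = new Text();

  public PrefixExcludeRowFilter() {}                      // required for RPC deserialization
  public PrefixExcludeRowFilter(String prefix) { this.prefix = new Text(prefix); }

  public void reset() {}                                  // no per-scan state to clear
  public void rowProcessed(boolean filtered, Text key) {} // does not track scan outcomes
  public boolean processAlways() { return false; }
  public boolean filterAllRemaining() { return false; }   // never gives up on a scan early

  public boolean filter(Text rowKey) {
    return rowKey != null && rowKey.toString().startsWith(prefix.toString());
  }
  public boolean filter(Text rowKey, Text colKey, byte[] data) {
    return filter(rowKey);                                // column values are ignored
  }
  public boolean filterNotNull(TreeMap<Text, byte[]> columns) {
    return false;                                         // no null/non-null criteria
  }
  public void validate(Text[] columns) {}                 // no column criteria to check

  public void readFields(DataInput in) throws IOException { prefix.readFields(in); }
  public void write(DataOutput out) throws IOException { prefix.write(out); }
}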

+ 0 - 313
src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java

@@ -1,313 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.filter;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.Text;
-
-/**
- * Implementation of RowFilterInterface that represents a set of RowFilters
- * which will be evaluated with a specified boolean operator MUST_PASS_ALL 
- * (!AND) or MUST_PASS_ONE (!OR).  Since you can use RowFilterSets as children 
- * of RowFilterSet, you can create a hierarchy of filters to be evaluated.
- */
-public class RowFilterSet implements RowFilterInterface {
-
-  /** set operator */
-  public static enum Operator {
-    /** !AND */
-    MUST_PASS_ALL,
-    /** !OR */
-    MUST_PASS_ONE
-  }
-
-  private Operator operator = Operator.MUST_PASS_ALL;
-  private Set<RowFilterInterface> filters = new HashSet<RowFilterInterface>();
-
-  static final Log LOG = LogFactory.getLog(RowFilterSet.class);
-  
-  /**
-   * Default constructor, filters nothing. Required though for RPC
-   * deserialization.
-   */
-  public RowFilterSet() {
-    super();
-  }
-
-  /**
-   * Constructor that takes a set of RowFilters. The default operator 
-   * MUST_PASS_ALL is assumed.
-   * 
-   * @param rowFilters
-   */
-  public RowFilterSet(final Set<RowFilterInterface> rowFilters) {
-    this.filters = rowFilters;
-  }
-
-  /**
-   * Constructor that takes a set of RowFilters and an operator.
-   * 
-   * @param operator Operator to process filter set with.
-   * @param rowFilters Set of row filters.
-   */
-  public RowFilterSet(final Operator operator,
-      final Set<RowFilterInterface> rowFilters) {
-    this.filters = rowFilters;
-    this.operator = operator;
-  }
-
-  /** {@inheritDoc} */
-  public void validate(final Text[] columns) {
-    for (RowFilterInterface filter : filters) {
-      filter.validate(columns);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Validated subfilter of type " + 
-          filter.getClass().getSimpleName());
-      }
-    }
-  }
-
-  /** {@inheritDoc} */
-  public void reset() {
-    for (RowFilterInterface filter : filters) {
-      filter.reset();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Reset subfilter of type " + 
-          filter.getClass().getSimpleName());
-      }
-    }
-  }
-
-  /** {@inheritDoc} */
-  public void rowProcessed(boolean filtered, Text rowKey) {
-    for (RowFilterInterface filter : filters) {
-      filter.rowProcessed(filtered, rowKey);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Called rowProcessed on subfilter of type " + 
-          filter.getClass().getSimpleName());
-      }
-    }
-  }
-
-  /** {@inheritDoc} */
-  public boolean processAlways() {
-    for (RowFilterInterface filter : filters) {
-      if (filter.processAlways()) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("processAlways() is true due to subfilter of type " + 
-            filter.getClass().getSimpleName());
-        }
-        return true;
-      }
-    }
-    return false;
-  }
-  
-  /** {@inheritDoc} */
-  public boolean filterAllRemaining() {
-    boolean result = operator == Operator.MUST_PASS_ONE;
-    for (RowFilterInterface filter : filters) {
-      if (operator == Operator.MUST_PASS_ALL) {
-        if (filter.filterAllRemaining()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("op.MPALL filterAllRemaining returning true due" + 
-              " to subfilter of type " + filter.getClass().getSimpleName());
-          }
-          return true;
-        }
-      } else if (operator == Operator.MUST_PASS_ONE) {
-        if (!filter.filterAllRemaining()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("op.MPONE filterAllRemaining returning false due" + 
-              " to subfilter of type " + filter.getClass().getSimpleName());
-          }
-          return false;
-        }
-      }
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("filterAllRemaining default returning " + result);
-    }
-    return result;
-  }
-
-  /** {@inheritDoc} */
-  public boolean filter(final Text rowKey) {
-    boolean resultFound = false;
-    boolean result = operator == Operator.MUST_PASS_ONE;
-    for (RowFilterInterface filter : filters) {
-      if (!resultFound) {
-        if (operator == Operator.MUST_PASS_ALL) {
-          if (filter.filterAllRemaining() || filter.filter(rowKey)) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("op.MPALL filter(Text) will return true due" + 
-                " to subfilter of type " + filter.getClass().getSimpleName());
-            }
-            result = true;
-            resultFound = true;
-          }
-        } else if (operator == Operator.MUST_PASS_ONE) {
-          if (!filter.filterAllRemaining() && !filter.filter(rowKey)) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("op.MPONE filter(Text) will return false due" + 
-                " to subfilter of type " + filter.getClass().getSimpleName());
-            }
-            result = false;
-            resultFound = true;
-          }
-        }
-      } else if (filter.processAlways()) {
-        filter.filter(rowKey);
-      }
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("filter(Text) returning " + result);
-    }
-    return result;
-  }
-
-  /** {@inheritDoc} */
-  public boolean filter(final Text rowKey, final Text colKey, 
-    final byte[] data) {
-    boolean resultFound = false;
-    boolean result = operator == Operator.MUST_PASS_ONE;
-    for (RowFilterInterface filter : filters) {
-      if (!resultFound) {
-        if (operator == Operator.MUST_PASS_ALL) {
-          if (filter.filterAllRemaining() || 
-            filter.filter(rowKey, colKey, data)) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("op.MPALL filter(Text, Text, byte[]) will" + 
-                " return true due to subfilter of type " + 
-                filter.getClass().getSimpleName());
-            }
-            result = true;
-            resultFound = true;
-          }
-        } else if (operator == Operator.MUST_PASS_ONE) {
-          if (!filter.filterAllRemaining() && 
-            !filter.filter(rowKey, colKey, data)) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("op.MPONE filter(Text, Text, byte[]) will" + 
-                " return false due to subfilter of type " + 
-                filter.getClass().getSimpleName());
-            }
-            result = false;
-            resultFound = true;
-          }
-        }
-      } else if (filter.processAlways()) {
-        filter.filter(rowKey, colKey, data);
-      }
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("filter(Text, Text, byte[]) returning " + result);
-    }
-    return result;
-  }
-
-  /** {@inheritDoc} */
-  public boolean filterNotNull(final TreeMap<Text, byte[]> columns) {
-    boolean resultFound = false;
-    boolean result = operator == Operator.MUST_PASS_ONE;
-    for (RowFilterInterface filter : filters) {
-      if (!resultFound) {
-        if (operator == Operator.MUST_PASS_ALL) {
-          if (filter.filterAllRemaining() || filter.filterNotNull(columns)) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("op.MPALL filterNotNull will return true due" + 
-                " to subfilter of type " + filter.getClass().getSimpleName());
-            }
-            result = true;
-            resultFound = true;
-          }
-        } else if (operator == Operator.MUST_PASS_ONE) {
-          if (!filter.filterAllRemaining() && !filter.filterNotNull(columns)) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("op.MPONE filterNotNull will return false due" + 
-                " to subfilter of type " + filter.getClass().getSimpleName());
-            }
-            result = false;
-            resultFound = true;
-          }
-        }
-      } else if (filter.processAlways()) {
-        filter.filterNotNull(columns);
-      }
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("filterNotNull returning " + result);
-    }
-    return result;
-  }
-
-  /** {@inheritDoc} */
-  public void readFields(final DataInput in) throws IOException {
-    byte opByte = in.readByte();
-    operator = Operator.values()[opByte];
-    int size = in.readInt();
-    if (size > 0) {
-      filters = new HashSet<RowFilterInterface>();
-      try {
-        for (int i = 0; i < size; i++) {
-          String className = in.readUTF();
-          Class<?> clazz = Class.forName(className);
-          RowFilterInterface filter;
-          filter = (RowFilterInterface) clazz.newInstance();
-          filter.readFields(in);
-          filters.add(filter);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Successfully read in subfilter of type " + 
-              filter.getClass().getSimpleName());
-          }
-        }
-      } catch (InstantiationException e) {
-        throw new RuntimeException("Failed to deserialize RowFilterInterface.",
-            e);
-      } catch (IllegalAccessException e) {
-        throw new RuntimeException("Failed to deserialize RowFilterInterface.",
-            e);
-      } catch (ClassNotFoundException e) {
-        throw new RuntimeException("Failed to deserialize RowFilterInterface.",
-            e);
-      }
-    }
-
-  }
-
-  /** {@inheritDoc} */
-  public void write(final DataOutput out) throws IOException {
-    out.writeByte(operator.ordinal());
-    out.writeInt(filters.size());
-    for (RowFilterInterface filter : filters) {
-      out.writeUTF(filter.getClass().getName());
-      filter.write(out);
-    }
-  }
-
-}
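
A brief sketch of composing subfilters with MUST_PASS_ALL as described above; the particular subfilters, page size, and row keys chosen here are illustrative.

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.filter.PageRowFilter;
import org.apache.hadoop.hbase.filter.RegExpRowFilter;
import org.apache.hadoop.hbase.filter.RowFilterInterface;
import org.apache.hadoop.hbase.filter.RowFilterSet;

public class RowFilterSetDemo {
  public static void main(String[] args) {
    // A row passes only if its key matches "user-.*" and the page limit of 50
    // rows per region has not yet been exceeded.
    Set<RowFilterInterface> filters = new HashSet<RowFilterInterface>();
    filters.add(new RegExpRowFilter("user-.*"));
    filters.add(new PageRowFilter(50));
    RowFilterSet set =
        new RowFilterSet(RowFilterSet.Operator.MUST_PASS_ALL, filters);

    System.out.println(set.filter(new Text("user-1")));  // false: passes both subfilters
    System.out.println(set.filter(new Text("order-1"))); // true: fails the regexp subfilter
  }
}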

+ 0 - 148
src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java

@@ -1,148 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.filter;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.Text;
-
-/**
- * Implementation of RowFilterInterface that filters out rows greater than or 
- * equal to a specified rowKey.
- */
-public class StopRowFilter implements RowFilterInterface {
-
-  protected Text stopRowKey;
-  
-  static final Log LOG = LogFactory.getLog(StopRowFilter.class);
-  
-  /**
-   * Default constructor, filters nothing. Required though for RPC
-   * deserialization.
-   */
-  public StopRowFilter() {
-    super();
-  }
-
-  /**
-   * Constructor that takes a stopRowKey on which to filter
-   * 
-   * @param stopRowKey rowKey to filter on.
-   */
-  public StopRowFilter(final Text stopRowKey) {
-    this.stopRowKey = stopRowKey;
-  }
-  
-  /**
-   * An accessor for the stopRowKey
-   * 
-   * @return the filter's stopRowKey
-   */
-  public Text getStopRowKey() {
-    return this.stopRowKey;
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public void validate(@SuppressWarnings("unused") final Text[] columns) {
-    // Doesn't filter columns
-  }
-
-  /**
-   * 
-   * {@inheritDoc}
-   */
-  public void reset() {
-    // Nothing to reset
-  }
-
-  /** {@inheritDoc} */
-  @SuppressWarnings("unused")
-  public void rowProcessed(boolean filtered, Text rowKey) {
-    // Doesn't care
-  }
-
-  /** {@inheritDoc} */
-  public boolean processAlways() {
-    return false;
-  }
-  
-  /** {@inheritDoc} */
-  public boolean filterAllRemaining() {
-    return false;
-  }
-
-  /** {@inheritDoc} */
-  public boolean filter(final Text rowKey) {
-    if (rowKey == null) {
-      if (this.stopRowKey == null) {
-        return true;
-      }
-      return false;
-    }
-    boolean result = this.stopRowKey.compareTo(rowKey) <= 0;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Filter result for rowKey: " + rowKey + ".  Result: " + 
-        result);
-    }
-    return result;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * Because StopRowFilter does not examine column information, this method 
-   * defaults to calling the rowKey-only version of filter.
-   */
-  public boolean filter(@SuppressWarnings("unused") final Text rowKey,
-    @SuppressWarnings("unused") final Text colKey,
-    @SuppressWarnings("unused") final byte[] data) {
-    return filter(rowKey);
-  }
-
-  /** {@inheritDoc}
-   *
-   * Because StopRowFilter does not examine column information, this method 
-   * defaults to calling filterAllRemaining().
-   * 
-   * @param columns
-   */
-  public boolean filterNotNull(@SuppressWarnings("unused")
-      final TreeMap<Text, byte[]> columns) {
-    return filterAllRemaining();
-  }
-
-  /** {@inheritDoc} */
-  public void readFields(DataInput in) throws IOException {
-    stopRowKey = new Text(in.readUTF());
-  }
-
-  /** {@inheritDoc} */
-  public void write(DataOutput out) throws IOException {
-    out.writeUTF(stopRowKey.toString());
-  }
-}
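
A short sketch of the Writable round trip the RPC layer relies on for this filter, plus its at-or-after-the-stop-row semantics; the StopRowFilterWritableDemo class and row keys are assumptions for illustration.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.filter.StopRowFilter;

public class StopRowFilterWritableDemo {
  public static void main(String[] args) throws IOException {
    // Serialize the filter the way the RPC layer would...
    StopRowFilter original = new StopRowFilter(new Text("row-500"));
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // ...then rebuild it on the receiving side using the no-arg constructor.
    StopRowFilter copy = new StopRowFilter();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(copy.getStopRowKey());             // row-500
    System.out.println(copy.filter(new Text("row-500"))); // true: at/after the stop row
    System.out.println(copy.filter(new Text("row-499"))); // false: before the stop row
  }
}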

+ 0 - 181
src/contrib/hbase/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java

@@ -1,181 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.filter;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.Text;
-
-/**
- * WhileMatchRowFilter is a wrapper filter that filters everything after the 
- * first filtered row.  Once the nested filter returns true for either of its 
- * filter(..) methods or filterNotNull(TreeMap<Text, byte[]>), this wrapper's 
- * filterAllRemaining() will return true.  All filtering methods will 
- * thereafter defer to the result of filterAllRemaining().
- */
-public class WhileMatchRowFilter implements RowFilterInterface {
-  
-  private boolean filterAllRemaining = false;
-  private RowFilterInterface filter;
-
-  static final Log LOG = LogFactory.getLog(WhileMatchRowFilter.class);
-  
-  /**
-   * Default constructor, filters nothing. Required though for RPC
-   * deserialization.
-   */
-  public WhileMatchRowFilter() {
-    super();
-  }
-  
-  /**
-   * Constructor
-   * @param filter
-   */
-  public WhileMatchRowFilter(RowFilterInterface filter) {
-    this.filter = filter;
-  }
-  
-  /**
-   * Returns the internal filter being wrapped
-   * 
-   * @return the internal filter
-   */
-  public RowFilterInterface getInternalFilter() {
-    return this.filter;
-  }
-  
-  /** {@inheritDoc} */
-  public void reset() {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Resetting.");
-    }
-    this.filterAllRemaining = false;
-    this.filter.reset();
-  }
-
-  /** {@inheritDoc} */
-  public boolean processAlways() {
-    return true;
-  }
-  
-  /**
-   * Returns true once the nested filter has filtered out a row (returned true 
-   * on a call to one of its filtering methods).  Until then it returns false.
-   * 
-   * @return true/false whether the nested filter has returned true on a filter 
-   * call.
-   */
-  public boolean filterAllRemaining() {
-    return this.filterAllRemaining || this.filter.filterAllRemaining();
-  }
-  
-  /** {@inheritDoc} */
-  public boolean filter(final Text rowKey) {
-    changeFAR(this.filter.filter(rowKey));
-    boolean result = filterAllRemaining();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Filter on rowKey:" + rowKey + ".  Result = " + result);
-    }
-    return result;
-  }
-  
-  /** {@inheritDoc} */
-  public boolean filter(final Text rowKey, final Text colKey,
-    final byte[] data) {
-    changeFAR(this.filter.filter(rowKey, colKey, data));
-    boolean result = filterAllRemaining();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Filter on rowKey:" + rowKey + ", colKey: " + colKey + 
-        ", data: " + data + ".  Result = " + result);
-    }
-    return result;
-  }
-  
-  /** {@inheritDoc} */
-  public boolean filterNotNull(final TreeMap<Text, byte[]> columns) {
-    changeFAR(this.filter.filterNotNull(columns));
-    boolean result = filterAllRemaining();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("FilterNotNull on cols:" + columns + ".  Result = " + 
-        result);
-    }
-    return result;
-  }
-  
-  /**
-   * Change filterAllRemaining from false to true if value is true, otherwise 
-   * leave as is.
-   * 
-   * @param value
-   */
-  private void changeFAR(boolean value) {
-    this.filterAllRemaining = this.filterAllRemaining || value;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("this.filterAllRemaining is now: " + 
-        this.filterAllRemaining);
-    }
-  }
-
-  /** {@inheritDoc} */
-  public void rowProcessed(boolean filtered, Text rowKey) {
-    this.filter.rowProcessed(filtered, rowKey);
-  }
-  
-  /** {@inheritDoc} */
-  public void validate(Text[] columns) {
-    this.filter.validate(columns);
-  }
-  
-  /** {@inheritDoc} */
-  public void readFields(DataInput in) throws IOException {
-    String className = in.readUTF();
-    
-    try {
-      this.filter = (RowFilterInterface)(Class.forName(className).
-        newInstance());
-      this.filter.readFields(in);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Successfully read a sub-filter of type: " + 
-          className);
-      }
-    } catch (InstantiationException e) {
-      throw new RuntimeException("Failed to deserialize WhileMatchRowFilter.",
-          e);
-    } catch (IllegalAccessException e) {
-      throw new RuntimeException("Failed to deserialize WhileMatchRowFilter.",
-          e);
-    } catch (ClassNotFoundException e) {
-      throw new RuntimeException("Failed to deserialize WhileMatchRowFilter.",
-          e);
-    }
-  }
-  
-  /** {@inheritDoc} */
-  public void write(DataOutput out) throws IOException {
-    out.writeUTF(this.filter.getClass().getName());
-    this.filter.write(out);
-  }
-}
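
A hedged usage sketch of the wrapper removed above. It assumes the StopRowFilter deleted earlier in this change exposes a constructor taking the stop key as a Text (that constructor is not shown in this excerpt) and that the 0.16-era hbase jars are on the classpath:

    import org.apache.hadoop.hbase.filter.RowFilterInterface;
    import org.apache.hadoop.hbase.filter.StopRowFilter;
    import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
    import org.apache.hadoop.io.Text;

    public class WhileMatchExample {
      public static void main(String[] args) {
        // Assumption: StopRowFilter(Text) rejects rows at or beyond the stop key.
        RowFilterInterface stop = new StopRowFilter(new Text("row-100"));
        WhileMatchRowFilter filter = new WhileMatchRowFilter(stop);

        for (String key : new String[] { "row-050", "row-100", "row-150" }) {
          boolean rejected = filter.filter(new Text(key));
          // Once the wrapped filter rejects a row, filterAllRemaining() stays true
          // for every later row, which is the wrapper's whole purpose.
          System.out.println(key + " rejected=" + rejected
              + " allRemaining=" + filter.filterAllRemaining());
        }
      }
    }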

+ 0 - 93
src/contrib/hbase/src/java/org/apache/hadoop/hbase/generated/master/hql_jsp.java

@@ -1,93 +0,0 @@
-package org.apache.hadoop.hbase.generated.master;
-
-import javax.servlet.*;
-import javax.servlet.http.*;
-import javax.servlet.jsp.*;
-import java.util.*;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.hql.TableFormatter;
-import org.apache.hadoop.hbase.hql.ReturnMsg;
-import org.apache.hadoop.hbase.hql.generated.HQLParser;
-import org.apache.hadoop.hbase.hql.Command;
-import org.apache.hadoop.hbase.hql.formatter.HtmlTableFormatter;
-
-public final class hql_jsp extends org.apache.jasper.runtime.HttpJspBase
-    implements org.apache.jasper.runtime.JspSourceDependent {
-
-  private static java.util.Vector _jspx_dependants;
-
-  public java.util.List getDependants() {
-    return _jspx_dependants;
-  }
-
-  public void _jspService(HttpServletRequest request, HttpServletResponse response)
-        throws java.io.IOException, ServletException {
-
-    JspFactory _jspxFactory = null;
-    PageContext pageContext = null;
-    HttpSession session = null;
-    ServletContext application = null;
-    ServletConfig config = null;
-    JspWriter out = null;
-    Object page = this;
-    JspWriter _jspx_out = null;
-    PageContext _jspx_page_context = null;
-
-
-    try {
-      _jspxFactory = JspFactory.getDefaultFactory();
-      response.setContentType("text/html;charset=UTF-8");
-      pageContext = _jspxFactory.getPageContext(this, request, response,
-      			null, true, 8192, true);
-      _jspx_page_context = pageContext;
-      application = pageContext.getServletContext();
-      config = pageContext.getServletConfig();
-      session = pageContext.getSession();
-      out = pageContext.getOut();
-      _jspx_out = out;
-
-      out.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \n  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\"> \n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n<head><meta http-equiv=\"Content-Type\" content=\"text/html;charset=UTF-8\"/>\n<title>HQL</title>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/hbase.css\" />\n</head>\n\n<body>\n<a id=\"logo\" href=\"http://wiki.apache.org/lucene-hadoop/Hbase\"><img src=\"/static/hbase_logo_med.gif\" alt=\"Hbase Logo\" title=\"Hbase Logo\" /></a>\n<h1 id=\"page_title\"><a href=\"http://wiki.apache.org/lucene-hadoop/Hbase/HbaseShell\">HQL</a></h1>\n<p id=\"links_menu\"><a href=\"/master.jsp\">Home</a></p>\n<hr id=\"head_rule\" />\n");
- String query = request.getParameter("q");
-   if (query == null) {
-     query = "";
-   }
-
-      out.write("\n<form action=\"/hql.jsp\" method=\"get\">\n    <p>\n    <label for=\"query\">Query: </label>\n    <input type=\"text\" name=\"q\" id=\"q\" size=\"60\" value=\"");
-      out.print( query );
-      out.write("\" />\n    <input type=\"submit\" value=\"submit\" />\n    </p>\n </form>\n <p>Enter 'help;' -- thats 'help' plus a semi-colon -- for the list of <em>HQL</em> commands.\n Data Definition, SHELL, INSERTS, DELETES, and UPDATE commands are disabled in this interface\n </p>\n \n ");
-
-  if (query.length() > 0) {
- 
-      out.write("\n <hr/>\n ");
-
-    HQLParser parser = new HQLParser(query, out, new HtmlTableFormatter(out));
-    Command cmd = parser.terminatedCommand();
-    if (cmd.getCommandType() != Command.CommandType.SELECT) {
- 
-      out.write("\n  <p>");
-      out.print( cmd.getCommandType() );
-      out.write("-type commands are disabled in this interface.</p>\n ");
-
-    } else { 
-      ReturnMsg rm = cmd.execute(new HBaseConfiguration());
-      String summary = rm == null? "": rm.toString();
- 
-      out.write("\n  <p>");
-      out.print( summary );
-      out.write("</p>\n ");
- } 
-  }
- 
-      out.write("\n</body>\n</html>\n");
-    } catch (Throwable t) {
-      if (!(t instanceof SkipPageException)){
-        out = _jspx_out;
-        if (out != null && out.getBufferSize() != 0)
-          out.clearBuffer();
-        if (_jspx_page_context != null) _jspx_page_context.handlePageException(t);
-      }
-    } finally {
-      if (_jspxFactory != null) _jspxFactory.releasePageContext(_jspx_page_context);
-    }
-  }
-}
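
The servlet above boils down to a four-call sequence: parse the query, check the command type, execute it, print the ReturnMsg. A sketch of that sequence outside the JSP, assuming HQLParser and HtmlTableFormatter accept any java.io.Writer (the JspWriter usage above suggests this, but the constructors are not shown here) and using a hypothetical table name:

    import java.io.PrintWriter;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.hql.Command;
    import org.apache.hadoop.hbase.hql.ReturnMsg;
    import org.apache.hadoop.hbase.hql.formatter.HtmlTableFormatter;
    import org.apache.hadoop.hbase.hql.generated.HQLParser;

    public class HqlSelectExample {
      public static void main(String[] args) throws Exception {
        PrintWriter out = new PrintWriter(System.out, true);
        // Same call sequence the JSP uses; 'webtable' is a hypothetical table.
        HQLParser parser = new HQLParser("select * from webtable;", out,
            new HtmlTableFormatter(out));
        Command cmd = parser.terminatedCommand();
        if (cmd.getCommandType() == Command.CommandType.SELECT) {
          ReturnMsg msg = cmd.execute(new HBaseConfiguration());
          out.println(msg == null ? "" : msg.toString());
        }
      }
    }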

+ 0 - 169
src/contrib/hbase/src/java/org/apache/hadoop/hbase/generated/master/master_jsp.java

@@ -1,169 +0,0 @@
-package org.apache.hadoop.hbase.generated.master;
-
-import javax.servlet.*;
-import javax.servlet.http.*;
-import javax.servlet.jsp.*;
-import java.util.*;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.util.VersionInfo;
-import org.apache.hadoop.hbase.HMaster;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HMaster.MetaRegion;
-import org.apache.hadoop.hbase.HBaseAdmin;
-import org.apache.hadoop.hbase.HServerInfo;
-import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.hql.ShowCommand;
-import org.apache.hadoop.hbase.hql.TableFormatter;
-import org.apache.hadoop.hbase.hql.ReturnMsg;
-import org.apache.hadoop.hbase.hql.formatter.HtmlTableFormatter;
-import org.apache.hadoop.hbase.HTableDescriptor;
-
-public final class master_jsp extends org.apache.jasper.runtime.HttpJspBase
-    implements org.apache.jasper.runtime.JspSourceDependent {
-
-  private static java.util.Vector _jspx_dependants;
-
-  public java.util.List getDependants() {
-    return _jspx_dependants;
-  }
-
-  public void _jspService(HttpServletRequest request, HttpServletResponse response)
-        throws java.io.IOException, ServletException {
-
-    JspFactory _jspxFactory = null;
-    PageContext pageContext = null;
-    HttpSession session = null;
-    ServletContext application = null;
-    ServletConfig config = null;
-    JspWriter out = null;
-    Object page = this;
-    JspWriter _jspx_out = null;
-    PageContext _jspx_page_context = null;
-
-
-    try {
-      _jspxFactory = JspFactory.getDefaultFactory();
-      response.setContentType("text/html;charset=UTF-8");
-      pageContext = _jspxFactory.getPageContext(this, request, response,
-      			null, true, 8192, true);
-      _jspx_page_context = pageContext;
-      application = pageContext.getServletContext();
-      config = pageContext.getServletConfig();
-      session = pageContext.getSession();
-      out = pageContext.getOut();
-      _jspx_out = out;
-
-
-  HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
-  HBaseConfiguration conf = master.getConfiguration();
-  TableFormatter formatter = new HtmlTableFormatter(out);
-  ShowCommand show = new ShowCommand(out, formatter, "tables");
-  HServerAddress rootLocation = master.getRootRegionLocation();
-  Map<Text, MetaRegion> onlineRegions = master.getOnlineMetaRegions();
-  Map<String, HServerInfo> serverToServerInfos =
-    master.getServersToServerInfo();
-  int interval = conf.getInt("hbase.regionserver.msginterval", 6000)/1000;
-
-      out.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \n  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\"> \n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n<head><meta http-equiv=\"Content-Type\" content=\"text/html;charset=UTF-8\"/>\n<title>Hbase Master: ");
-      out.print( master.getMasterAddress());
-      out.write("</title>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/hbase.css\" />\n</head>\n\n<body>\n\n<a id=\"logo\" href=\"http://wiki.apache.org/lucene-hadoop/Hbase\"><img src=\"/static/hbase_logo_med.gif\" alt=\"Hbase Logo\" title=\"Hbase Logo\" /></a>\n<h1 id=\"page_title\">Master: ");
-      out.print(master.getMasterAddress());
-      out.write("</h1>\n<p id=\"links_menu\"><a href=\"/hql.jsp\">HQL</a>, <a href=\"/logs/\">Local logs</a>, <a href=\"/stacks\">Thread Dump</a>, <a href=\"/logLevel\">Log Level</a></p>\n<hr id=\"head_rule\" />\n\n<h2>Master Attributes</h2>\n<table>\n<tr><th>Attribute Name</th><th>Value</th><th>Description</th></tr>\n<tr><td>Version</td><td>");
-      out.print( VersionInfo.getVersion() );
-      out.write(',');
-      out.write(' ');
-      out.write('r');
-      out.print( VersionInfo.getRevision() );
-      out.write("</td><td>Hbase version and svn revision</td></tr>\n<tr><td>Compiled</td><td>");
-      out.print( VersionInfo.getDate() );
-      out.write(',');
-      out.write(' ');
-      out.print( VersionInfo.getUser() );
-      out.write("</td><td>When this version was compiled and by whom</td></tr>\n<tr><td>Filesystem</td><td>");
-      out.print( conf.get("fs.default.name") );
-      out.write("</td><td>Filesystem hbase is running on</td></tr>\n<tr><td>Hbase Root Directory</td><td>");
-      out.print( master.getRootDir().toString() );
-      out.write("</td><td>Location of hbase home directory</td></tr>\n</table>\n\n<h2>Online META Regions</h2>\n");
- if (rootLocation != null) { 
-      out.write("\n<table>\n<tr><th>Name</th><th>Server</th></tr>\n<tr><td>");
-      out.print( HConstants.ROOT_TABLE_NAME.toString() );
-      out.write("</td><td>");
-      out.print( rootLocation.toString() );
-      out.write("</td></tr>\n");
-
-  if (onlineRegions != null && onlineRegions.size() > 0) { 
-      out.write('\n');
-      out.write(' ');
-      out.write(' ');
- for (Map.Entry<Text, HMaster.MetaRegion> e: onlineRegions.entrySet()) {
-    MetaRegion meta = e.getValue();
-  
-      out.write("\n  <tr><td>");
-      out.print( meta.getRegionName().toString() );
-      out.write("</td><td>");
-      out.print( meta.getServer().toString() );
-      out.write("</td></tr>\n  ");
- }
-  } 
-      out.write("\n</table>\n");
- } 
-      out.write("\n\n<h2>Tables</h2>\n");
- ReturnMsg msg = show.execute(conf); 
-      out.write("\n<p>");
-      out.print(msg );
-      out.write("</p>\n\n<h2>Region Servers</h2>\n");
- if (serverToServerInfos != null && serverToServerInfos.size() > 0) { 
-      out.write('\n');
- int totalRegions = 0;
-   int totalRequests = 0; 
-
-      out.write("\n\n<table>\n<tr><th rowspan=");
-      out.print( serverToServerInfos.size() + 1);
-      out.write("></th><th>Address</th><th>Start Code</th><th>Load</th></tr>\n\n");
-   for (Map.Entry<String, HServerInfo> e: serverToServerInfos.entrySet()) {
-       HServerInfo hsi = e.getValue();
-       String url = "http://" +
-         hsi.getServerAddress().getBindAddress().toString() + ":" +
-         hsi.getInfoPort() + "/";
-       String load = hsi.getLoad().toString();
-       totalRegions += hsi.getLoad().getNumberOfRegions();
-       totalRequests += hsi.getLoad().getNumberOfRequests();
-       long startCode = hsi.getStartCode();
-       String address = hsi.getServerAddress().toString();
-
-      out.write("\n<tr><td><a href=\"");
-      out.print( url );
-      out.write('"');
-      out.write('>');
-      out.print( address );
-      out.write("</a></td><td>");
-      out.print( startCode );
-      out.write("</td><td>");
-      out.print( load );
-      out.write("</td></tr>\n");
-   } 
-      out.write("\n<tr><th>Total: </th><td>servers: ");
-      out.print( serverToServerInfos.size() );
-      out.write("</td><td>&nbsp;</td><td>requests: ");
-      out.print( totalRequests );
-      out.write(" regions: ");
-      out.print( totalRegions );
-      out.write("</td></tr>\n</table>\n\n<p>Load is requests per <em>hbase.regionsserver.msginterval</em> (");
-      out.print(interval);
-      out.write(" second(s)) and count of regions loaded</p>\n");
- } 
-      out.write("\n</body>\n</html>\n");
-    } catch (Throwable t) {
-      if (!(t instanceof SkipPageException)){
-        out = _jspx_out;
-        if (out != null && out.getBufferSize() != 0)
-          out.clearBuffer();
-        if (_jspx_page_context != null) _jspx_page_context.handlePageException(t);
-      }
-    } finally {
-      if (_jspxFactory != null) _jspxFactory.releasePageContext(_jspx_page_context);
-    }
-  }
-}

+ 0 - 100
src/contrib/hbase/src/java/org/apache/hadoop/hbase/generated/regionserver/regionserver_jsp.java

@@ -1,100 +0,0 @@
-package org.apache.hadoop.hbase.generated.regionserver;
-
-import javax.servlet.*;
-import javax.servlet.http.*;
-import javax.servlet.jsp.*;
-import java.util.*;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.util.VersionInfo;
-import org.apache.hadoop.hbase.HRegionServer;
-import org.apache.hadoop.hbase.HRegion;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HServerInfo;
-import org.apache.hadoop.hbase.HRegionInfo;
-
-public final class regionserver_jsp extends org.apache.jasper.runtime.HttpJspBase
-    implements org.apache.jasper.runtime.JspSourceDependent {
-
-  private static java.util.Vector _jspx_dependants;
-
-  public java.util.List getDependants() {
-    return _jspx_dependants;
-  }
-
-  public void _jspService(HttpServletRequest request, HttpServletResponse response)
-        throws java.io.IOException, ServletException {
-
-    JspFactory _jspxFactory = null;
-    PageContext pageContext = null;
-    HttpSession session = null;
-    ServletContext application = null;
-    ServletConfig config = null;
-    JspWriter out = null;
-    Object page = this;
-    JspWriter _jspx_out = null;
-    PageContext _jspx_page_context = null;
-
-
-    try {
-      _jspxFactory = JspFactory.getDefaultFactory();
-      response.setContentType("text/html;charset=UTF-8");
-      pageContext = _jspxFactory.getPageContext(this, request, response,
-      			null, true, 8192, true);
-      _jspx_page_context = pageContext;
-      application = pageContext.getServletContext();
-      config = pageContext.getServletConfig();
-      session = pageContext.getSession();
-      out = pageContext.getOut();
-      _jspx_out = out;
-
-
-  HRegionServer regionServer = (HRegionServer)getServletContext().getAttribute(HRegionServer.REGIONSERVER);
-  HServerInfo serverInfo = regionServer.getServerInfo();
-  SortedMap<Text, HRegion> onlineRegions = regionServer.getOnlineRegions();
-
-      out.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \n  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\"> \n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n<head><meta http-equiv=\"Content-Type\" content=\"text/html;charset=UTF-8\"/>\n<title>Hbase Region Server: ");
-      out.print( serverInfo.getServerAddress().toString() );
-      out.write("</title>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/hbase.css\" />\n</head>\n\n<body>\n<a id=\"logo\" href=\"http://wiki.apache.org/lucene-hadoop/Hbase\"><img src=\"/static/hbase_logo_med.gif\" alt=\"Hbase Logo\" title=\"Hbase Logo\" /></a>\n<h1 id=\"page_title\">Region Server: ");
-      out.print( serverInfo.getServerAddress().toString() );
-      out.write("</h1>\n<p id=\"links_menu\"><a href=\"/logs/\">Local logs</a>, <a href=\"/stacks\">Thread Dump</a>, <a href=\"/logLevel\">Log Level</a></p>\n<hr id=\"head_rule\" />\n\n<h2>Region Server Attributes</h2>\n<table>\n<tr><th>Attribute Name</th><th>Value</th><th>Description</th></tr>\n<tr><td>Version</td><td>");
-      out.print( VersionInfo.getVersion() );
-      out.write(',');
-      out.write(' ');
-      out.write('r');
-      out.print( VersionInfo.getRevision() );
-      out.write("</td><td>Hbase version and svn revision</td></tr>\n<tr><td>Compiled</td><td>");
-      out.print( VersionInfo.getDate() );
-      out.write(',');
-      out.write(' ');
-      out.print( VersionInfo.getUser() );
-      out.write("</td><td>When this version was compiled and by whom</td></tr>\n<tr><td>Load</td><td>");
-      out.print( serverInfo.getLoad().toString() );
-      out.write("</td><td>Requests/<em>hbase.regionserver.msginterval</em> + count of loaded regions</td></tr>\n</table>\n\n<h2>Online Regions</h2>\n");
- if (onlineRegions != null && onlineRegions.size() > 0) { 
-      out.write("\n<table>\n<tr><th>Region Name</th><th>Start Key</th><th>End Key</th></tr>\n");
-   for (HRegion r: onlineRegions.values()) { 
-      out.write("\n<tr><td>");
-      out.print( r.getRegionName().toString() );
-      out.write("</td><td>");
-      out.print( r.getStartKey().toString() );
-      out.write("</td><td>");
-      out.print( r.getEndKey().toString() );
-      out.write("</td></tr>\n");
-   } 
-      out.write("\n</table>\n<p>Region names are made of the containing table's name, a comma,\nthe start key, a comma, and a randomly generated region id.  To illustrate,\nthe region named\n<em>domains,apache.org,5464829424211263407</em> is party to the table \n<em>domains</em>, has an id of <em>5464829424211263407</em> and the first key\nin the region is <em>apache.org</em>.  The <em>-ROOT-</em>\nand <em>.META.</em> 'tables' are internal sytem tables.\nThe -ROOT- keeps a list of all regions in the .META. table.  The .META. table\nkeeps a list of all regions in the system. The empty key is used to denote\ntable start and table end.  A region with an\nempty start key is the first region in a table.  If region has both an empty\nstart and an empty end key, its the only region in the table.  See\n<a href=\"http://wiki.apache.org/lucene-hadoop/Hbase\">Hbase Home</a> for\nfurther explication.<p>\n");
- } else { 
-      out.write("\n<p>Not serving regions</p>\n");
- } 
-      out.write("\n</body>\n</html>\n");
-    } catch (Throwable t) {
-      if (!(t instanceof SkipPageException)){
-        out = _jspx_out;
-        if (out != null && out.getBufferSize() != 0)
-          out.clearBuffer();
-        if (_jspx_page_context != null) _jspx_page_context.handlePageException(t);
-      }
-    } finally {
-      if (_jspxFactory != null) _jspxFactory.releasePageContext(_jspx_page_context);
-    }
-  }
-}

+ 0 - 248
src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/AlterCommand.java

@@ -1,248 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.hql;
-
-import java.io.IOException;
-import java.io.Writer;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.hbase.HBaseAdmin;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.HConnection;
-import org.apache.hadoop.hbase.HConnectionManager;
-import org.apache.hadoop.io.Text;
-
-import org.apache.hadoop.hbase.BloomFilterDescriptor;
-import org.apache.hadoop.hbase.BloomFilterDescriptor.BloomFilterType;
-
-/**
- * Alters tables.
- */
-public class AlterCommand extends SchemaModificationCommand {
-  public enum OperationType {
-    ADD, DROP, CHANGE, NOOP
-  }
-
-  private OperationType operationType = OperationType.NOOP;
-  private Map<String, Map<String, Object>> columnSpecMap = new HashMap<String, Map<String, Object>>();
-  private String tableName;
-  private String column; // column to be dropped
-
-  public AlterCommand(Writer o) {
-    super(o);
-  }
-
-  @SuppressWarnings("unchecked")
-  public ReturnMsg execute(HBaseConfiguration conf) {
-    try {
-      HConnection conn = HConnectionManager.getConnection(conf);
-      if (!conn.tableExists(new Text(this.tableName))) {
-        return new ReturnMsg(0, "'" + this.tableName + "'" + TABLE_NOT_FOUND);
-      }
-
-      HBaseAdmin admin = new HBaseAdmin(conf);
-      Set<String> columns = null;
-      HColumnDescriptor columnDesc = null;
-      switch (operationType) {
-        case ADD:
-          disableTable(admin, tableName);
-          columns = columnSpecMap.keySet();
-          for (String c : columns) {
-            columnDesc = getColumnDescriptor(c, columnSpecMap.get(c));
-            println("Adding " + c + " to " + tableName + "... Please wait.");
-            admin.addColumn(new Text(tableName), columnDesc);
-          }
-          enableTable(admin, tableName);
-          break;
-        case DROP:
-          disableTable(admin, tableName);
-          println("Dropping " + column + " from " + tableName + "... Please wait.");
-          column = appendDelimiter(column);
-          admin.deleteColumn(new Text(tableName), new Text(column));
-          enableTable(admin, tableName);
-          break;
-        case CHANGE:
-          disableTable(admin, tableName);
-
-          Map.Entry<String, Map<String, Object>> columnEntry = (Map.Entry<String, Map<String, Object>>) columnSpecMap
-              .entrySet().toArray()[0];
-
-          // add the : if there isn't one
-          Text columnName = new Text(
-              columnEntry.getKey().endsWith(":") ? columnEntry.getKey()
-                  : columnEntry.getKey() + ":");
-
-          // get the table descriptor so we can get the old column descriptor
-          HTableDescriptor tDesc = getTableDescByName(admin, tableName);
-          HColumnDescriptor oldColumnDesc = tDesc.families().get(columnName);
-
-          // combine the options specified in the shell with the options
-          // from the existing descriptor to produce the new descriptor
-          columnDesc = getColumnDescriptor(columnName.toString(), columnEntry
-              .getValue(), oldColumnDesc);
-
-          // send the changes out to the master
-          admin.modifyColumn(new Text(tableName), columnName, columnDesc);
-
-          enableTable(admin, tableName);
-          break;
-        case NOOP:
-          return new ReturnMsg(0, "Invalid operation type.");
-      }
-      return new ReturnMsg(0, "Table altered successfully.");
-    } catch (Exception e) {
-      return new ReturnMsg(0, extractErrMsg(e));
-    }
-  }
-
-  private void disableTable(HBaseAdmin admin, String t) throws IOException {
-    println("Disabling " + t + "... Please wait.");
-    admin.disableTable(new Text(t));
-  }
-
-  private void enableTable(HBaseAdmin admin, String t) throws IOException {
-    println("Enabling " + t + "... Please wait.");
-    admin.enableTable(new Text(t));
-  }
-
-  /**
-   * Sets the table to be altered.
-   * 
-   * @param t Table to be altered.
-   */
-  public void setTable(String t) {
-    this.tableName = t;
-  }
-
-  /**
-   * Adds a column specification.
-   * 
-   * @param c Column name
-   * @param columnSpec Column specification
-   */
-  public void addColumnSpec(String c, Map<String, Object> columnSpec) {
-    columnSpecMap.put(c, columnSpec);
-  }
-
-  /**
-   * Sets the column to be dropped. Only applicable to the DROP operation.
-   * 
-   * @param c Column to be dropped.
-   */
-  public void setColumn(String c) {
-    this.column = c;
-  }
-
-  /**
-   * Sets the operation type of this alteration.
-   * 
-   * @param operationType Operation type
-   * @see OperationType
-   */
-  public void setOperationType(OperationType operationType) {
-    this.operationType = operationType;
-  }
-
-  @Override
-  public CommandType getCommandType() {
-    return CommandType.DDL;
-  }
-
-  private HTableDescriptor getTableDescByName(HBaseAdmin admin, String tableName)
-      throws IOException {
-    HTableDescriptor[] tables = admin.listTables();
-    for (HTableDescriptor tDesc : tables) {
-      if (tDesc.getName().toString().equals(tableName)) {
-        return tDesc;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Given a column name, column spec, and original descriptor, returns an
-   * instance of HColumnDescriptor representing the column spec, with any
-   * unspecified values drawn from the original descriptor as defaults.
-   */
-  protected HColumnDescriptor getColumnDescriptor(String column,
-      Map<String, Object> columnSpec, HColumnDescriptor original)
-      throws IllegalArgumentException {
-    initOptions(original);
-
-    Set<String> specs = columnSpec.keySet();
-    for (String spec : specs) {
-      spec = spec.toUpperCase();
-
-      if (spec.equals("MAX_VERSIONS")) {
-        maxVersions = (Integer) columnSpec.get(spec);
-      } else if (spec.equals("MAX_LENGTH")) {
-        maxLength = (Integer) columnSpec.get(spec);
-      } else if (spec.equals("COMPRESSION")) {
-        compression = HColumnDescriptor.CompressionType.valueOf(((String) columnSpec
-            .get(spec)).toUpperCase());
-      } else if (spec.equals("IN_MEMORY")) {
-        inMemory = (Boolean) columnSpec.get(spec);
-      } else if (spec.equals("BLOOMFILTER")) {
-        bloomFilterType = BloomFilterType.valueOf(((String) columnSpec.get(spec))
-            .toUpperCase());
-      } else if (spec.equals("VECTOR_SIZE")) {
-        vectorSize = (Integer) columnSpec.get(spec);
-      } else if (spec.equals("NUM_HASH")) {
-        numHash = (Integer) columnSpec.get(spec);
-      } else if (spec.equals("NUM_ENTRIES")) {
-        numEntries = (Integer) columnSpec.get(spec);
-      } else {
-        throw new IllegalArgumentException("Invalid option: " + spec);
-      }
-    }
-
-    // Now we gather all the specified options for this column.
-    if (bloomFilterType != null) {
-      if (specs.contains("NUM_ENTRIES")) {
-        bloomFilterDesc = new BloomFilterDescriptor(bloomFilterType, numEntries);
-      } else {
-        bloomFilterDesc = new BloomFilterDescriptor(bloomFilterType, vectorSize,
-            numHash);
-      }
-    }
-
-    column = appendDelimiter(column);
-
-    HColumnDescriptor columnDesc = new HColumnDescriptor(new Text(column),
-        maxVersions, compression, inMemory, maxLength, bloomFilterDesc);
-
-    return columnDesc;
-  }
-
-  private void initOptions(HColumnDescriptor original) {
-    if (original == null) {
-      initOptions();
-      return;
-    }
-    maxVersions = original.getMaxVersions();
-    maxLength = original.getMaxValueLength();
-    compression = original.getCompression();
-    inMemory = original.isInMemory();
-    bloomFilterDesc = original.getBloomFilter();
-  }
-}
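
The methods above give AlterCommand a small programmatic surface: set the table, queue column specs (or a column to drop), pick the operation, then execute. A sketch of an ADD operation against a running 0.16-era HBase instance, using only the setters shown above; the table name, family name, and spec values are hypothetical, and the spec keys are the ones getColumnDescriptor recognizes:

    import java.io.OutputStreamWriter;
    import java.io.Writer;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.hql.AlterCommand;
    import org.apache.hadoop.hbase.hql.ReturnMsg;

    public class AlterExample {
      public static void main(String[] args) {
        Writer out = new OutputStreamWriter(System.out);
        AlterCommand alter = new AlterCommand(out);
        alter.setTable("webtable");                   // hypothetical table
        Map<String, Object> spec = new HashMap<String, Object>();
        spec.put("MAX_VERSIONS", 5);                  // keys parsed by getColumnDescriptor
        spec.put("IN_MEMORY", Boolean.TRUE);
        alter.addColumnSpec("anchor", spec);          // family; ':' appended if missing
        alter.setOperationType(AlterCommand.OperationType.ADD);
        ReturnMsg msg = alter.execute(new HBaseConfiguration());
        System.out.println(msg);
      }
    }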

+ 0 - 100
src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/BasicCommand.java

@@ -1,100 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.hql;
-
-import java.io.IOException;
-import java.io.Writer;
-
-/**
- * Takes the lowest-common-denominator {@link Writer}, doing its own printlns,
- * etc.
- * 
- * @see <a
- *      href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseShell">HBaseShell</a>
- */
-public abstract class BasicCommand implements Command, CommandFactory {
-  private final Writer out;
-  public final String LINE_SEPARATOR = System.getProperty("line.separator");
-  public final String TABLE_NOT_FOUND = " is a non-existent table.";
-
-  // Shutdown constructor.
-  @SuppressWarnings("unused")
-  private BasicCommand() {
-    this(null);
-  }
-
-  /**
-   * Constructor
-   * 
-   * @param o A Writer.
-   */
-  public BasicCommand(final Writer o) {
-    this.out = o;
-  }
-
-  public BasicCommand getBasicCommand() {
-    return this;
-  }
-
-  /** basic commands are their own factories. */
-  public Command getCommand() {
-    return this;
-  }
-
-  protected String extractErrMsg(String msg) {
-    int index = msg.indexOf(":");
-    int eofIndex = msg.indexOf("\n");
-    return msg.substring(index + 1, eofIndex);
-  }
-
-  protected String extractErrMsg(Exception e) {
-    return extractErrMsg(e.getMessage());
-  }
-
-  /**
-   * Appends a delimiter (colon) to the end of the column name if one is not
-   * already present.
-   */
-  protected String appendDelimiter(String column) {
-    return (!column.endsWith(FAMILY_INDICATOR) && column
-        .indexOf(FAMILY_INDICATOR) == -1) ? column + FAMILY_INDICATOR : column;
-  }
-
-  /**
-   * @return Writer to use for output.
-   */
-  public Writer getOut() {
-    return this.out;
-  }
-
-  public void print(final String msg) throws IOException {
-    this.out.write(msg);
-  }
-
-  public void println(final String msg) throws IOException {
-    print(msg);
-    print(LINE_SEPARATOR);
-    this.out.flush();
-  }
-
-  public CommandType getCommandType() {
-    return CommandType.SELECT;
-  }
-}

+ 0 - 66
src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/ClearCommand.java

@@ -1,66 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.hql;
-
-import java.io.IOException;
-import java.io.Writer;
-
-import org.apache.hadoop.hbase.HBaseConfiguration;
-
-/**
- * Clears the console screen.
- */
-public class ClearCommand extends BasicCommand {
-  public ClearCommand(Writer o) {
-    super(o);
-  }
-
-  public ReturnMsg execute(@SuppressWarnings("unused")
-  HBaseConfiguration conf) {
-    clear();
-    return null;
-  }
-
-  private void clear() {
-    String osName = System.getProperty("os.name");
-    if (osName.length() > 7 && osName.subSequence(0, 7).equals("Windows")) {
-      try {
-        Runtime.getRuntime().exec("cmd /C cls");
-      } catch (IOException e) {
-        try {
-          println("Can't clear." + e.toString());
-        } catch (IOException e1) {
-          e1.printStackTrace();
-        }
-      }
-    } else {
-      try {
-        print("\033c");
-      } catch (IOException e) {
-        e.printStackTrace();
-      }
-    }
-  }
-
-  @Override
-  public CommandType getCommandType() {
-    return CommandType.SHELL;
-  }
-}

+ 0 - 45
src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/Command.java

@@ -1,45 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.hql;
-
-import org.apache.hadoop.hbase.HBaseConfiguration;
-
-public interface Command {
-  /** family indicator */
-  public static final String FAMILY_INDICATOR = ":";
-
-  public enum CommandType {
-    DDL, UPDATE, SELECT, INSERT, DELETE, SHELL
-  }
-
-  /**
-   * Execute a command
-   * 
-   * @param conf Configuration
-   * @return Result of command execution
-   */
-  public ReturnMsg execute(final HBaseConfiguration conf);
-
-  /**
-   * @return Type of this command whether DDL, SELECT, INSERT, UPDATE, DELETE,
-   *         or SHELL.
-   */
-  public CommandType getCommandType();
-}
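
The interface above is small enough that a complete implementation fits in a few lines. A minimal sketch of a no-op SHELL command, illustrative only, using the execute/getCommandType contract and the ReturnMsg constructor already visible elsewhere in this diff:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.hql.Command;
    import org.apache.hadoop.hbase.hql.ReturnMsg;

    /** Illustrative only: a command that does nothing but report success. */
    public class NoopCommand implements Command {
      public ReturnMsg execute(final HBaseConfiguration conf) {
        return new ReturnMsg(1, "noop executed.");
      }

      public CommandType getCommandType() {
        return CommandType.SHELL;
      }
    }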

+ 0 - 27
src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/CommandFactory.java

@@ -1,27 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.hql;
-
-/**
- * Parser uses command factories to create commands.
- */
-public interface CommandFactory {
-  Command getCommand();
-}

+ 0 - 93
src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/CreateCommand.java

@@ -1,93 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.hql;
-
-import java.io.Writer;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.hbase.HBaseAdmin;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConnection;
-import org.apache.hadoop.hbase.HConnectionManager;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.io.Text;
-
-/**
- * Creates tables.
- */
-public class CreateCommand extends SchemaModificationCommand {
-  private Text tableName;
-  private Map<String, Map<String, Object>> columnSpecMap = new HashMap<String, Map<String, Object>>();
-
-  public CreateCommand(Writer o) {
-    super(o);
-  }
-
-  public ReturnMsg execute(HBaseConfiguration conf) {
-    try {
-      HConnection conn = HConnectionManager.getConnection(conf);
-      if (conn.tableExists(tableName)) {
-        return new ReturnMsg(0, "'" + tableName + "' table already exist.");
-      }
-
-      HBaseAdmin admin = new HBaseAdmin(conf);
-      HTableDescriptor tableDesc = new HTableDescriptor(tableName.toString());
-      HColumnDescriptor columnDesc = null;
-      Set<String> columns = columnSpecMap.keySet();
-      for (String column : columns) {
-        columnDesc = getColumnDescriptor(column, columnSpecMap.get(column));
-        tableDesc.addFamily(columnDesc);
-      }
-
-      println("Creating table... Please wait.");
-
-      admin.createTable(tableDesc);
-      return new ReturnMsg(0, "Table created successfully.");
-    } catch (Exception e) {
-      return new ReturnMsg(0, extractErrMsg(e));
-    }
-  }
-
-  /**
-   * Sets the table to be created.
-   * 
-   * @param tableName Table to be created
-   */
-  public void setTable(String tableName) {
-    this.tableName = new Text(tableName);
-  }
-
-  /**
-   * Adds a column specification.
-   * 
-   * @param column Column name
-   * @param columnSpec Column specification
-   */
-  public void addColumnSpec(String column, Map<String, Object> columnSpec) {
-    columnSpecMap.put(column, columnSpec);
-  }
-
-  @Override
-  public CommandType getCommandType() {
-    return CommandType.DDL;
-  }
-}
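
A usage sketch mirroring the CreateCommand API above, again assuming a running 0.16-era HBase instance; the table name, family name, and spec value are hypothetical, and the spec key is one that AlterCommand's getColumnDescriptor (earlier in this diff) accepts:

    import java.io.OutputStreamWriter;
    import java.io.Writer;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.hql.CreateCommand;
    import org.apache.hadoop.hbase.hql.ReturnMsg;

    public class CreateExample {
      public static void main(String[] args) {
        Writer out = new OutputStreamWriter(System.out);
        CreateCommand create = new CreateCommand(out);
        create.setTable("webtable");                  // hypothetical table name
        Map<String, Object> spec = new HashMap<String, Object>();
        spec.put("MAX_VERSIONS", 3);                  // assumed valid, as in AlterCommand
        create.addColumnSpec("contents", spec);       // family; ':' appended if missing
        ReturnMsg msg = create.execute(new HBaseConfiguration());
        System.out.println(msg);
      }
    }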

+ 0 - 131
src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/DeleteCommand.java

@@ -1,131 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.hql;
-
-import java.io.IOException;
-import java.io.Writer;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hbase.HBaseAdmin;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConnection;
-import org.apache.hadoop.hbase.HConnectionManager;
-import org.apache.hadoop.hbase.HTable;
-import org.apache.hadoop.io.Text;
-
-/**
- * Deletes values from tables.
- */
-public class DeleteCommand extends BasicCommand {
-  public DeleteCommand(Writer o) {
-    super(o);
-  }
-
-  private Text tableName;
-  private Text rowKey;
-  private List<String> columnList;
-
-  public ReturnMsg execute(HBaseConfiguration conf) {
-    if (columnList == null) {
-      throw new IllegalArgumentException("Column list is null");
-    }
-    try {
-      HConnection conn = HConnectionManager.getConnection(conf);
-      if (!conn.tableExists(tableName)) {
-        return new ReturnMsg(0, "'" + tableName + "'" + TABLE_NOT_FOUND);
-      }
-
-      HBaseAdmin admin = new HBaseAdmin(conf);
-      HTable hTable = new HTable(conf, tableName);
-
-      if (rowKey != null) {
-        long lockID = hTable.startUpdate(rowKey);
-        for (Text column : getColumnList(admin, hTable)) {
-          hTable.delete(lockID, new Text(column));
-        }
-        hTable.commit(lockID);
-      } else {
-        admin.disableTable(tableName);
-        for (Text column : getColumnList(admin, hTable)) {
-          admin.deleteColumn(tableName, new Text(column));
-        }
-        admin.enableTable(tableName);
-      }
-
-      return new ReturnMsg(1, "Column(s) deleted successfully.");
-    } catch (IOException e) {
-      String[] msg = e.getMessage().split("[\n]");
-      return new ReturnMsg(0, msg[0]);
-    }
-  }
-
-  public void setTable(String tableName) {
-    this.tableName = new Text(tableName);
-  }
-
-  public void setRow(String row) {
-    this.rowKey = new Text(row);
-  }
-
-  /**
-   * Sets the column list.
-   * 
-   * @param columnList
-   */
-  public void setColumnList(List<String> columnList) {
-    this.columnList = columnList;
-  }
-
-  /**
-   * @param admin
-   * @param hTable
-   * @return the column list.
-   */
-  public Text[] getColumnList(HBaseAdmin admin, HTable hTable) {
-    Text[] columns = null;
-    try {
-      if (columnList.contains("*")) {
-        columns = hTable.getRow(new Text(this.rowKey)).keySet().toArray(
-            new Text[] {});
-      } else {
-        List<Text> tmpList = new ArrayList<Text>();
-        for (int i = 0; i < columnList.size(); i++) {
-          Text column = null;
-          if (columnList.get(i).contains(":"))
-            column = new Text(columnList.get(i));
-          else
-            column = new Text(columnList.get(i) + ":");
-
-          tmpList.add(column);
-        }
-        columns = tmpList.toArray(new Text[] {});
-      }
-    } catch (IOException e) {
-      e.printStackTrace();
-    }
-    return columns;
-  }
-
-  @Override
-  public CommandType getCommandType() {
-    return CommandType.DELETE;
-  }
-}
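
Two distinct behaviors are folded into execute() above: with a row key set it deletes cells from that row, and without one it drops whole column families. A sketch of the row-level path using the setters shown above; all names are hypothetical:

    import java.io.OutputStreamWriter;
    import java.io.Writer;
    import java.util.Arrays;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.hql.DeleteCommand;
    import org.apache.hadoop.hbase.hql.ReturnMsg;

    public class DeleteExample {
      public static void main(String[] args) {
        Writer out = new OutputStreamWriter(System.out);
        DeleteCommand delete = new DeleteCommand(out);
        delete.setTable("webtable");                  // hypothetical table
        delete.setRow("row-100");                     // row-level delete path
        delete.setColumnList(Arrays.asList("anchor:", "contents:"));
        ReturnMsg msg = delete.execute(new HBaseConfiguration());
        System.out.println(msg);
      }
    }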

+ 0 - 90
src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/DescCommand.java

@@ -1,90 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.hql;
-
-import java.io.IOException;
-import java.io.Writer;
-
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConnection;
-import org.apache.hadoop.hbase.HConnectionManager;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.io.Text;
-
-/**
- * Prints information about tables.
- */
-public class DescCommand extends BasicCommand {
-  private static final String[] HEADER = new String[] { "Column Family Descriptor" };
-  private Text tableName;
-  private final TableFormatter formatter;
-
-  // Not instantiable
-  @SuppressWarnings("unused")
-  private DescCommand() {
-    this(null, null);
-  }
-
-  public DescCommand(final Writer o, final TableFormatter f) {
-    super(o);
-    this.formatter = f;
-  }
-
-  public ReturnMsg execute(final HBaseConfiguration conf) {
-    if (tableName == null)
-      return new ReturnMsg(0, "Syntax error : Please check 'Describe' syntax.");
-    try {
-      HConnection conn = HConnectionManager.getConnection(conf);
-      if (!conn.tableExists(tableName)) {
-        return new ReturnMsg(0, "Table not found.");
-      }
-      HTableDescriptor[] tables = conn.listTables();
-      HColumnDescriptor[] columns = null;
-      for (int i = 0; i < tables.length; i++) {
-        if (tables[i].getName().equals(tableName)) {
-          columns = tables[i].getFamilies().values().toArray(
-              new HColumnDescriptor[] {});
-          break;
-        }
-      }
-      formatter.header(HEADER);
-      // Do a toString on the HColumnDescriptors
-      String[] columnStrs = new String[columns.length];
-      for (int i = 0; i < columns.length; i++) {
-        String tmp = columns[i].toString();
-        // Strip the curly-brackets if present.
-        if (tmp.length() > 2 && tmp.startsWith("{") && tmp.endsWith("}")) {
-          tmp = tmp.substring(1, tmp.length() - 1);
-        }
-        columnStrs[i] = tmp;
-        formatter.row(new String[] { columnStrs[i] });
-      }
-      formatter.footer();
-      return new ReturnMsg(1, columns.length + " columnfamily(s) in set.");
-    } catch (IOException e) {
-      return new ReturnMsg(0, "error msg : " + e.toString());
-    }
-  }
-
-  public void setArgument(String table) {
-    this.tableName = new Text(table);
-  }
-}

+ 0 - 68
src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/DisableCommand.java

@@ -1,68 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.hql;
-
-import java.io.IOException;
-import java.io.Writer;
-
-import org.apache.hadoop.hbase.HBaseAdmin;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConnection;
-import org.apache.hadoop.hbase.HConnectionManager;
-import org.apache.hadoop.io.Text;
-
-/**
- * Disables tables.
- */
-public class DisableCommand extends BasicCommand {
-  private String tableName;
-
-  public DisableCommand(Writer o) {
-    super(o);
-  }
-
-  public ReturnMsg execute(HBaseConfiguration conf) {
-    assert tableName != null;
-
-    try {
-      HConnection conn = HConnectionManager.getConnection(conf);
-      if (!conn.tableExists(new Text(tableName))) {
-        return new ReturnMsg(0, "'" + tableName + "'" + TABLE_NOT_FOUND);
-      }
-
-      HBaseAdmin admin = new HBaseAdmin(conf);
-      admin.disableTable(new Text(tableName));
-
-      return new ReturnMsg(1, "Table disabled successfully.");
-    } catch (IOException e) {
-      String[] msg = e.getMessage().split("[\n]");
-      return new ReturnMsg(0, msg[0]);
-    }
-  }
-
-  public void setTable(String table) {
-    this.tableName = table;
-  }
-
-  @Override
-  public CommandType getCommandType() {
-    return CommandType.DDL;
-  }
-}

+ 0 - 80
src/contrib/hbase/src/java/org/apache/hadoop/hbase/hql/DropCommand.java

@@ -1,80 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.hql;
-
-import java.io.IOException;
-import java.io.Writer;
-import java.util.List;
-
-import org.apache.hadoop.hbase.HBaseAdmin;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConnection;
-import org.apache.hadoop.hbase.HConnectionManager;
-import org.apache.hadoop.io.Text;
-
-/**
- * Drops tables.
- */
-public class DropCommand extends BasicCommand {
-  private List<String> tableList;
-
-  public DropCommand(Writer o) {
-    super(o);
-  }
-
-  public ReturnMsg execute(HBaseConfiguration conf) {
-    if (tableList == null) {
-      throw new IllegalArgumentException("List of tables is null.");
-    }
-
-    try {
-      HBaseAdmin admin = new HBaseAdmin(conf);
-      HConnection conn = HConnectionManager.getConnection(conf);
-
-      int count = 0;
-      for (String table : tableList) {
-        if (!conn.tableExists(new Text(table))) {
-          println("'" + table + "' table not found.");
-        } else {
-          println("Dropping " + table + "... Please wait.");
-          admin.deleteTable(new Text(table));
-          count++;
-        }
-      }
-
-      if (count > 0) {
-        return new ReturnMsg(1, count + " table(s) dropped successfully.");
-      } else {
-        return new ReturnMsg(0, count + " table(s) dropped.");
-      }
-    } catch (IOException e) {
-      return new ReturnMsg(0, extractErrMsg(e));
-    }
-  }
-
-  public void setTableList(List<String> tableList) {
-    this.tableList = tableList;
-  }
-
-  @Override
-  public CommandType getCommandType() {
-    return CommandType.DDL;
-  }
-}
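
DropCommand takes a list of tables and drops whichever of them exist. A usage sketch with hypothetical table names, assuming a running 0.16-era HBase instance:

    import java.io.OutputStreamWriter;
    import java.io.Writer;
    import java.util.Arrays;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.hql.DropCommand;
    import org.apache.hadoop.hbase.hql.ReturnMsg;

    public class DropExample {
      public static void main(String[] args) {
        Writer out = new OutputStreamWriter(System.out);
        DropCommand drop = new DropCommand(out);
        drop.setTableList(Arrays.asList("webtable", "oldtable"));  // hypothetical names
        ReturnMsg msg = drop.execute(new HBaseConfiguration());
        System.out.println(msg);
      }
    }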

Some files were not shown because too many files changed in this diff