
HADOOP-1391. Part 1: includes create/delete table; enable/disable table; add/remove column.

Patch has been tested locally and a new test has been added for administrative functions.

Still to do: merge regions.


git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@542592 13f79535-47bb-0310-9956-ffa450edef68
Jim Kellerman 18 years ago
parent
commit
d569973211
24 changed files with 1897 additions and 616 deletions
  1. +1 -1    src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java
  2. +527 -172  src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java
  3. +290 -0    src/contrib/hbase/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
  4. +6 -4    src/contrib/hbase/src/java/org/apache/hadoop/hbase/HGlobals.java
  5. +724 -256  src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
  6. +8 -0    src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMasterInterface.java
  7. +1 -1    src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java
  8. +5 -13   src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
  9. +8 -7    src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java
  10. +7 -28   src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
  11. +6 -6    src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegiondirReader.java
  12. +45 -30   src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
  13. +41 -69   src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
  14. +29 -0    src/contrib/hbase/src/java/org/apache/hadoop/hbase/InvalidColumnNameException.java
  15. +29 -0    src/contrib/hbase/src/java/org/apache/hadoop/hbase/MasterNotRunningException.java
  16. +30 -0    src/contrib/hbase/src/java/org/apache/hadoop/hbase/NoServerForRegionException.java
  17. +29 -0    src/contrib/hbase/src/java/org/apache/hadoop/hbase/TableNotDisabledException.java
  18. +2 -2    src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java
  19. +3 -3    src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java
  20. +5 -11   src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java
  21. +3 -3    src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java
  22. +79 -0    src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java
  23. +3 -3    src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java
  24. +16 -7    src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java

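Taken together, the HClient changes below expose a synchronous administrative API: createTable, deleteTable, enableTable and disableTable all block until the first region of the table reflects the change. A minimal usage sketch follows; the Configuration-based HClient constructor and the table/family names are assumptions, while the method signatures are taken from the diff:

package org.apache.hadoop.hbase;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;

public class AdminExample {
  public static void main(String[] args) throws IOException {
    HClient client = new HClient(new Configuration()); // assumed constructor

    // Create a table with one column family; blocks until it is on-line.
    HTableDescriptor desc = new HTableDescriptor("testtable");
    desc.addFamily(new HColumnDescriptor("contents:"));
    client.createTable(desc);

    // Schema changes; enable/disable block on the first region's offLine flag.
    Text table = new Text("testtable");
    client.disableTable(table);
    client.addColumn(table, new HColumnDescriptor("anchor:"));
    client.deleteColumn(table, new Text("contents:"));
    client.enableTable(table);

    // Blocks until the first region disappears from the meta table.
    client.deleteTable(table);
  }
}
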
+ 1 - 1
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java

@@ -62,7 +62,7 @@ public abstract class HAbstractScanner implements HInternalScannerInterface {
       try {
         int colpos = column.indexOf(":") + 1;
         if(colpos == 0) {
-          throw new IllegalArgumentException("Column name has no family indicator.");
+          throw new InvalidColumnNameException("Column name has no family indicator.");
         }
 
         String columnkey = column.substring(colpos);

+ 527 - 172
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java

@@ -15,11 +15,13 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.lang.Class;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.NoSuchElementException;
 import java.util.Random;
+import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
@@ -30,6 +32,7 @@ import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
 
 /**
  * HClient manages a connection to a single HRegionServer.
@@ -41,6 +44,10 @@ public class HClient implements HConstants {
     COLUMN_FAMILY
   };
   
+  private static final Text[] REGIONINFO = {
+    COL_REGIONINFO
+  };
+  
   private static final Text EMPTY_START_ROW = new Text();
   
   private long clientTimeout;
@@ -61,11 +68,11 @@ public class HClient implements HConstants {
   
   // Map tableName -> (Map startRow -> (HRegionInfo, HServerAddress)
   
-  private TreeMap<Text, TreeMap<Text, TableInfo>> tablesToServers;
+  private TreeMap<Text, SortedMap<Text, TableInfo>> tablesToServers;
   
   // For the "current" table: Map startRow -> (HRegionInfo, HServerAddress)
   
-  private TreeMap<Text, TableInfo> tableServers;
+  private SortedMap<Text, TableInfo> tableServers;
   
   // Known region HServerAddress.toString() -> HRegionInterface
   
@@ -87,21 +94,44 @@ public class HClient implements HConstants {
     this.numRetries =  conf.getInt("hbase.client.retries.number", 2);
     
     this.master = null;
-    this.tablesToServers = new TreeMap<Text, TreeMap<Text, TableInfo>>();
+    this.tablesToServers = new TreeMap<Text, SortedMap<Text, TableInfo>>();
     this.tableServers = null;
     this.servers = new TreeMap<String, HRegionInterface>();
     
     // For row mutation operations
-    
+
     this.currentRegion = null;
     this.currentServer = null;
     this.rand = new Random();
   }
   
-  /**
-   * Find the address of the master and connect to it
-   */
-  private void checkMaster() {
+  private void handleRemoteException(RemoteException e) throws IOException {
+    String msg = e.getMessage();
+    if(e.getClassName().equals("org.apache.hadoop.hbase.InvalidColumnNameException")) {
+      throw new InvalidColumnNameException(msg);
+      
+    } else if(e.getClassName().equals("org.apache.hadoop.hbase.LockException")) {
+      throw new LockException(msg);
+      
+    } else if(e.getClassName().equals("org.apache.hadoop.hbase.MasterNotRunningException")) {
+      throw new MasterNotRunningException(msg);
+      
+    } else if(e.getClassName().equals("org.apache.hadoop.hbase.NoServerForRegionException")) {
+      throw new NoServerForRegionException(msg);
+      
+    } else if(e.getClassName().equals("org.apache.hadoop.hbase.NotServingRegionException")) {
+      throw new NotServingRegionException(msg);
+      
+    } else if(e.getClassName().equals("org.apache.hadoop.hbase.TableNotDisabledException")) {
+      throw new TableNotDisabledException(msg);
+      
+    } else {
+      throw e;
+    }
+  }
+  
+  /* Find the address of the master and connect to it */
+  private void checkMaster() throws IOException {
     if (this.master != null) {
       return;
     }
@@ -136,103 +166,406 @@ public class HClient implements HConstants {
       }
     }
     if(this.master == null) {
-      throw new IllegalStateException("Master is not running");
+      throw new MasterNotRunningException();
     }
   }
 
+  //////////////////////////////////////////////////////////////////////////////
+  // Administrative methods
+  //////////////////////////////////////////////////////////////////////////////
+
+  /**
+   * Creates a new table
+   * 
+   * @param desc - table descriptor for table
+   * 
+   * @throws IllegalArgumentException - if the table name is reserved
+   * @throws MasterNotRunningException - if master is not running
+   * @throws NoServerForRegionException - if root region is not being served
+   * @throws IOException
+   */
   public synchronized void createTable(HTableDescriptor desc) throws IOException {
-    if(desc.getName().equals(ROOT_TABLE_NAME)
-        || desc.getName().equals(META_TABLE_NAME)) {
+    checkReservedTableName(desc.getName());
+    checkMaster();
+    try {
+      this.master.createTable(desc);
       
-      throw new IllegalArgumentException(desc.getName().toString()
-          + " is a reserved table name");
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+
+    // Save the current table
+    
+    SortedMap<Text, TableInfo> oldServers = this.tableServers;
+
+    try {
+      // Wait for new table to come on-line
+
+      findServersForTable(desc.getName());
+      
+    } finally {
+      if(oldServers != null && oldServers.size() != 0) {
+        // Restore old current table if there was one
+      
+        this.tableServers = oldServers;
+      }
     }
-    checkMaster();
-    locateRootRegion();
-    this.master.createTable(desc);
   }
 
   public synchronized void deleteTable(Text tableName) throws IOException {
+    checkReservedTableName(tableName);
     checkMaster();
-    locateRootRegion();
-    this.master.deleteTable(tableName);
+    TableInfo firstMetaServer = getFirstMetaServerForTable(tableName);
+
+    try {
+      this.master.deleteTable(tableName);
+      
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+
+    // Wait until first region is deleted
+    
+    HRegionInterface server = getHRegionConnection(firstMetaServer.serverAddress);
+
+    DataInputBuffer inbuf = new DataInputBuffer();
+    HStoreKey key = new HStoreKey();
+    HRegionInfo info = new HRegionInfo();
+    for(int tries = 0; tries < numRetries; tries++) {
+      long scannerId = -1L;
+      try {
+        scannerId = server.openScanner(firstMetaServer.regionInfo.regionName,
+            REGIONINFO, tableName);
+        LabelledData[] values = server.next(scannerId, key);
+        if(values == null || values.length == 0) {
+          break;
+        }
+        boolean found = false;
+        for(int j = 0; j < values.length; j++) {
+          if(values[j].getLabel().equals(COL_REGIONINFO)) {
+            byte[] bytes = new byte[values[j].getData().getSize()];
+            System.arraycopy(values[j].getData().get(), 0, bytes, 0, bytes.length);
+            inbuf.reset(bytes, bytes.length);
+            info.readFields(inbuf);
+            if(info.tableDesc.getName().equals(tableName)) {
+              found = true;
+            }
+          }
+        }
+        if(!found) {
+          break;
+        }
+        
+      } finally {
+        if(scannerId != -1L) {
+          try {
+            server.close(scannerId);
+            
+          } catch(Exception e) {
+            LOG.warn(e);
+          }
+        }
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Sleep. Waiting for first region to be deleted from " + tableName);
+      }
+      try {
+        Thread.sleep(clientTimeout);
+        
+      } catch(InterruptedException e) {
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Wake. Waiting for first region to be deleted from " + tableName);
+      }
+    }
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("table deleted " + tableName);
+    }
+  }
+
+  public synchronized void addColumn(Text tableName, HColumnDescriptor column) throws IOException {
+    checkReservedTableName(tableName);
+    checkMaster();
+    try {
+      this.master.addColumn(tableName, column);
+      
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+  }
+
+  public synchronized void deleteColumn(Text tableName, Text columnName) throws IOException {
+    checkReservedTableName(tableName);
+    checkMaster();
+    try {
+      this.master.deleteColumn(tableName, columnName);
+      
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+  }
+  
+  public synchronized void mergeRegions(Text regionName1, Text regionName2) throws IOException {
+    
+  }
+  
+  public synchronized void enableTable(Text tableName) throws IOException {
+    checkReservedTableName(tableName);
+    checkMaster();
+    TableInfo firstMetaServer = getFirstMetaServerForTable(tableName);
+    
+    try {
+      this.master.enableTable(tableName);
+      
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+
+    // Wait until first region is enabled
+    
+    HRegionInterface server = getHRegionConnection(firstMetaServer.serverAddress);
+
+    DataInputBuffer inbuf = new DataInputBuffer();
+    HStoreKey key = new HStoreKey();
+    HRegionInfo info = new HRegionInfo();
+    for(int tries = 0; tries < numRetries; tries++) {
+      int valuesfound = 0;
+      long scannerId = -1L;
+      try {
+        scannerId = server.openScanner(firstMetaServer.regionInfo.regionName,
+            REGIONINFO, tableName);
+        LabelledData[] values = server.next(scannerId, key);
+        if(values == null || values.length == 0) {
+          if(valuesfound == 0) {
+            throw new NoSuchElementException("table " + tableName + " not found");
+          }
+        }
+        valuesfound += 1;
+        boolean isenabled = false;
+        for(int j = 0; j < values.length; j++) {
+          if(values[j].getLabel().equals(COL_REGIONINFO)) {
+            byte[] bytes = new byte[values[j].getData().getSize()];
+            System.arraycopy(values[j].getData().get(), 0, bytes, 0, bytes.length);
+            inbuf.reset(bytes, bytes.length);
+            info.readFields(inbuf);
+            isenabled = !info.offLine;
+          }
+        }
+        if(isenabled) {
+          break;
+        }
+        
+      } finally {
+        if(scannerId != -1L) {
+          try {
+            server.close(scannerId);
+            
+          } catch(Exception e) {
+            LOG.warn(e);
+          }
+        }
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Sleep. Waiting for first region to be enabled from " + tableName);
+      }
+      try {
+        Thread.sleep(clientTimeout);
+        
+      } catch(InterruptedException e) {
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Wake. Waiting for first region to be enabled from " + tableName);
+      }
+    }
+  }
+
+  public synchronized void disableTable(Text tableName) throws IOException {
+    checkReservedTableName(tableName);
+    checkMaster();
+    TableInfo firstMetaServer = getFirstMetaServerForTable(tableName);
+
+    try {
+      this.master.disableTable(tableName);
+      
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+
+    // Wait until first region is disabled
+    
+    HRegionInterface server = getHRegionConnection(firstMetaServer.serverAddress);
+
+    DataInputBuffer inbuf = new DataInputBuffer();
+    HStoreKey key = new HStoreKey();
+    HRegionInfo info = new HRegionInfo();
+    for(int tries = 0; tries < numRetries; tries++) {
+      int valuesfound = 0;
+      long scannerId = -1L;
+      try {
+        scannerId = server.openScanner(firstMetaServer.regionInfo.regionName,
+            REGIONINFO, tableName);
+        LabelledData[] values = server.next(scannerId, key);
+        if(values == null || values.length == 0) {
+          if(valuesfound == 0) {
+            throw new NoSuchElementException("table " + tableName + " not found");
+          }
+        }
+        valuesfound += 1;
+        boolean disabled = false;
+        for(int j = 0; j < values.length; j++) {
+          if(values[j].getLabel().equals(COL_REGIONINFO)) {
+            byte[] bytes = new byte[values[j].getData().getSize()];
+            System.arraycopy(values[j].getData().get(), 0, bytes, 0, bytes.length);
+            inbuf.reset(bytes, bytes.length);
+            info.readFields(inbuf);
+            disabled = info.offLine;
+          }
+        }
+        if(disabled) {
+          break;
+        }
+        
+      } finally {
+        if(scannerId != -1L) {
+          try {
+            server.close(scannerId);
+            
+          } catch(Exception e) {
+            LOG.warn(e);
+          }
+        }
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Sleep. Waiting for first region to be disabled from " + tableName);
+      }
+      try {
+        Thread.sleep(clientTimeout);
+        
+      } catch(InterruptedException e) {
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Wake. Waiting for first region to be disabled from " + tableName);
+      }
+    }
   }
   
   public synchronized void shutdown() throws IOException {
     checkMaster();
     this.master.shutdown();
   }
+
+  /*
+   * Verifies that the specified table name is not a reserved name
+   * @param tableName - the table name to be checked
+   * @throws IllegalArgumentException - if the table name is reserved
+   */
+  private void checkReservedTableName(Text tableName) {
+    if(tableName.equals(ROOT_TABLE_NAME)
+        || tableName.equals(META_TABLE_NAME)) {
+      
+      throw new IllegalArgumentException(tableName + " is a reserved table name");
+    }
+  }
+  
+  private TableInfo getFirstMetaServerForTable(Text tableName) throws IOException {
+    SortedMap<Text, TableInfo> metaservers = findMetaServersForTable(tableName);
+    return metaservers.get(metaservers.firstKey());
+  }
   
+  //////////////////////////////////////////////////////////////////////////////
+  // Client API
+  //////////////////////////////////////////////////////////////////////////////
+
+  /**
+   * Loads information so that a table can be manipulated.
+   * 
+   * @param tableName - the table to be located
+   * @throws IOException - if the table can not be located after retrying
+   */
   public synchronized void openTable(Text tableName) throws IOException {
     if(tableName == null || tableName.getLength() == 0) {
       throw new IllegalArgumentException("table name cannot be null or zero length");
     }
     this.tableServers = tablesToServers.get(tableName);
-    if(this.tableServers == null ) {            // We don't know where the table is
-      findTableInMeta(tableName);               // Load the information from meta
+    if(this.tableServers == null ) {
+      // We don't know where the table is.
+      // Load the information from meta.
+      this.tableServers = findServersForTable(tableName);
     }
   }
 
-  private void findTableInMeta(Text tableName) throws IOException {
-    TreeMap<Text, TableInfo> metaServers =
-      this.tablesToServers.get(META_TABLE_NAME);
-    
-    if (metaServers == null) {                // Don't know where the meta is
-      loadMetaFromRoot(tableName);
-      if (tableName.equals(META_TABLE_NAME) || tableName.equals(ROOT_TABLE_NAME)) {
-        // All we really wanted was the meta or root table
-        return;
-      }
-      metaServers = this.tablesToServers.get(META_TABLE_NAME);
-    }
+  /*
+   * Locates a table by searching the META region
+   * 
+   * @param tableName - name of table to find servers for
+   * @return - map of first row to table info for all regions in the table
+   * @throws IOException
+   */
+  private SortedMap<Text, TableInfo> findServersForTable(Text tableName)
+      throws IOException {
 
-    this.tableServers = new TreeMap<Text, TableInfo>();
-    for(int tries = 0;
-        this.tableServers.size() == 0 && tries < this.numRetries;
-        tries++) {
-      Text firstMetaRegion = (metaServers.containsKey(tableName))?
-        tableName: metaServers.headMap(tableName).lastKey();
-      for(TableInfo t: metaServers.tailMap(firstMetaRegion).values()) {
-        scanOneMetaRegion(t, tableName);
-      }
-      if (this.tableServers.size() == 0) {
-        // Table not assigned. Sleep and try again
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Sleeping. Table " + tableName
-              + " not currently being served.");
-        }
-        try {
-          Thread.sleep(this.clientTimeout);
-        } catch(InterruptedException e) {
-        }
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("Wake. Retry finding table " + tableName);
-        }
+    SortedMap<Text, TableInfo> servers = null;
+    if(tableName.equals(ROOT_TABLE_NAME)) {
+      servers = locateRootRegion();
+
+    } else if(tableName.equals(META_TABLE_NAME)) {
+      servers = loadMetaFromRoot();
+      
+    } else {
+      servers = new TreeMap<Text, TableInfo>();
+      for(TableInfo t: findMetaServersForTable(tableName).values()) {
+        servers.putAll(scanOneMetaRegion(t, tableName));
       }
+      this.tablesToServers.put(tableName, servers);
     }
-    if (this.tableServers.size() == 0) {
-      throw new IOException("failed to scan " + META_TABLE_NAME + " after "
-          + this.numRetries + " retries");
+    return servers;
+  }
+
+  /*
+   * Finds the meta servers that contain information about the specified table
+   * @param tableName - the name of the table to get information about
+   * @return - a SortedMap of the meta servers
+   * @throws IOException
+   */
+  private SortedMap<Text, TableInfo> findMetaServersForTable(Text tableName)
+      throws IOException {
+    
+    SortedMap<Text, TableInfo> metaServers = 
+      this.tablesToServers.get(META_TABLE_NAME);
+    
+    if(metaServers == null) {                 // Don't know where the meta is
+      metaServers = loadMetaFromRoot();
     }
-    this.tablesToServers.put(tableName, this.tableServers);
+    Text firstMetaRegion = (metaServers.containsKey(tableName)) ?
+        tableName : metaServers.headMap(tableName).lastKey();
+
+    return metaServers.tailMap(firstMetaRegion);
   }
 
   /*
    * Load the meta table from the root table.
+   * 
+   * @return map of first row to TableInfo for all meta regions
+   * @throws IOException
    */
-  private void loadMetaFromRoot(Text tableName) throws IOException {
-    locateRootRegion();
-    if(tableName.equals(ROOT_TABLE_NAME)) {   // All we really wanted was the root
-      return;
+  private TreeMap<Text, TableInfo> loadMetaFromRoot() throws IOException {
+    SortedMap<Text, TableInfo> rootRegion =
+      this.tablesToServers.get(ROOT_TABLE_NAME);
+    
+    if(rootRegion == null) {
+      rootRegion = locateRootRegion();
     }
-    scanRoot();
+    return scanRoot(rootRegion.get(rootRegion.firstKey()));
   }
   
   /*
-   * Repeatedly try to find the root region by asking the HMaster for where it
-   * could be.
+   * Repeatedly try to find the root region by asking the master for where it is
+   * 
+   * @return TreeMap<Text, TableInfo> for root region if found
+   * @throws NoServerForRegionException - if the root region can not be located after retrying
+   * @throws IOException 
    */
-  private void locateRootRegion() throws IOException {
+  private TreeMap<Text, TableInfo> locateRootRegion() throws IOException {
     checkMaster();
     
     HServerAddress rootRegionLocation = null;
@@ -256,18 +589,14 @@ public class HClient implements HConstants {
         }
       }
       if(rootRegionLocation == null) {
-        throw new IOException("Timed out trying to locate root region");
+        throw new NoServerForRegionException(
+            "Timed out trying to locate root region");
       }
       
       HRegionInterface rootRegion = getHRegionConnection(rootRegionLocation);
 
       try {
         rootRegion.getRegionInfo(HGlobals.rootRegionInfo.regionName);
-        this.tableServers = new TreeMap<Text, TableInfo>();
-        this.tableServers.put(EMPTY_START_ROW,
-            new TableInfo(HGlobals.rootRegionInfo, rootRegionLocation));
-        
-        this.tablesToServers.put(ROOT_TABLE_NAME, this.tableServers);
         break;
         
       } catch(NotServingRegionException e) {
@@ -293,120 +622,149 @@ public class HClient implements HConstants {
     }
     
     if (rootRegionLocation == null) {
-      throw new IOException("unable to locate root region server");
+      throw new NoServerForRegionException(
+          "unable to locate root region server");
     }
+    
+    TreeMap<Text, TableInfo> rootServer = new TreeMap<Text, TableInfo>();
+    rootServer.put(EMPTY_START_ROW,
+        new TableInfo(HGlobals.rootRegionInfo, rootRegionLocation));
+    
+    this.tablesToServers.put(ROOT_TABLE_NAME, rootServer);
+    return rootServer;
   }
 
-  /*
+  /* 
    * Scans the root region to find all the meta regions
+   * @return - TreeMap of meta region servers
+   * @throws IOException
    */
-  private void scanRoot() throws IOException {
-    this.tableServers = new TreeMap<Text, TableInfo>();
-    TableInfo t = this.tablesToServers.get(ROOT_TABLE_NAME).get(EMPTY_START_ROW);
-    for(int tries = 0;
-        scanOneMetaRegion(t, META_TABLE_NAME) == 0 && tries < this.numRetries;
-        tries++) {
-      
-      // The table is not yet being served. Sleep and retry.
-      
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Sleeping. Table " + META_TABLE_NAME
-            + " not currently being served.");
-      }
-      try {
-        Thread.sleep(this.clientTimeout);
-        
-      } catch(InterruptedException e) {
-      }
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Wake. Retry finding table " + META_TABLE_NAME);
-      }
-    }
-    if(this.tableServers.size() == 0) {
-      throw new IOException("failed to scan " + ROOT_TABLE_NAME + " after "
-          + this.numRetries + " retries");
-    }
-    this.tablesToServers.put(META_TABLE_NAME, this.tableServers);
+  private TreeMap<Text, TableInfo> scanRoot(TableInfo rootRegion)
+      throws IOException {
+    
+    TreeMap<Text, TableInfo> metaservers =
+      scanOneMetaRegion(rootRegion, META_TABLE_NAME);
+    this.tablesToServers.put(META_TABLE_NAME, metaservers);
+    return metaservers;
   }
 
   /*
    * Scans a single meta region
-   * @param t the table we're going to scan
+   * @param t the meta region we're going to scan
    * @param tableName the name of the table we're looking for
-   * @return returns the number of servers that are serving the table
+   * @return a map of startingRow to TableInfo
+   * @throws NoSuchElementException - if table does not exist
+   * @throws IllegalStateException - if table is offline
+   * @throws NoServerForRegionException - if table can not be found after retrying
+   * @throws IOException 
    */
-  private int scanOneMetaRegion(TableInfo t, Text tableName)
+  private TreeMap<Text, TableInfo> scanOneMetaRegion(TableInfo t, Text tableName)
       throws IOException {
     
     HRegionInterface server = getHRegionConnection(t.serverAddress);
-    int servers = 0;
-    long scannerId = -1L;
-    try {
-      scannerId =
-        server.openScanner(t.regionInfo.regionName, META_COLUMNS, tableName);
-      
-      DataInputBuffer inbuf = new DataInputBuffer();
-      while(true) {
-        HRegionInfo regionInfo = null;
-        String serverAddress = null;
-        HStoreKey key = new HStoreKey();
-        LabelledData[] values = server.next(scannerId, key);
-        if(values.length == 0) {
-          if(servers == 0) {
-            // If we didn't find any servers then the table does not exist
-            
-            throw new NoSuchElementException("table '" + tableName
-                + "' does not exist");
+    TreeMap<Text, TableInfo> servers = new TreeMap<Text, TableInfo>();
+    for(int tries = 0; servers.size() == 0 && tries < this.numRetries;
+        tries++) {
+  
+      long scannerId = -1L;
+      try {
+        scannerId =
+          server.openScanner(t.regionInfo.regionName, META_COLUMNS, tableName);
+
+        DataInputBuffer inbuf = new DataInputBuffer();
+        while(true) {
+          HRegionInfo regionInfo = null;
+          String serverAddress = null;
+          HStoreKey key = new HStoreKey();
+          LabelledData[] values = server.next(scannerId, key);
+          if(values.length == 0) {
+            if(servers.size() == 0) {
+              // If we didn't find any servers then the table does not exist
+
+              throw new NoSuchElementException("table '" + tableName
+                  + "' does not exist");
+            }
+
+            // We found at least one server for the table and now we're done.
+
+            break;
           }
-          
-          // We found at least one server for the table and now we're done.
-          
-          break;
-        }
 
-        byte[] bytes = null;
-        TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-        for(int i = 0; i < values.length; i++) {
-          bytes = new byte[values[i].getData().getSize()];
-          System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length);
-          results.put(values[i].getLabel(), bytes);
-        }
-        regionInfo = new HRegionInfo();
-        bytes = results.get(COL_REGIONINFO);
-        inbuf.reset(bytes, bytes.length);
-        regionInfo.readFields(inbuf);
+          byte[] bytes = null;
+          TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+          for(int i = 0; i < values.length; i++) {
+            bytes = new byte[values[i].getData().getSize()];
+            System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length);
+            results.put(values[i].getLabel(), bytes);
+          }
+          regionInfo = new HRegionInfo();
+          bytes = results.get(COL_REGIONINFO);
+          inbuf.reset(bytes, bytes.length);
+          regionInfo.readFields(inbuf);
 
-        if(!regionInfo.tableDesc.getName().equals(tableName)) {
-          // We're done
-          break;
+          if(!regionInfo.tableDesc.getName().equals(tableName)) {
+            // We're done
+            break;
+          }
+
+          if(regionInfo.offLine) {
+            throw new IllegalStateException("table offline: " + tableName);
+          }
+
+          bytes = results.get(COL_SERVER);
+          if(bytes == null || bytes.length == 0) {
+            // We need to rescan because the table we want is unassigned.
+
+            if(LOG.isDebugEnabled()) {
+              LOG.debug("no server address for " + regionInfo.toString());
+            }
+            servers.clear();
+            break;
+          }
+          serverAddress = new String(bytes, UTF8_ENCODING);
+
+          servers.put(regionInfo.startKey, 
+              new TableInfo(regionInfo, new HServerAddress(serverAddress)));
         }
+      } finally {
+        if(scannerId != -1L) {
+          try {
+            server.close(scannerId);
 
-        bytes = results.get(COL_SERVER);
-        if(bytes == null || bytes.length == 0) {
-          // We need to rescan because the table we want is unassigned.
-          
-          if(LOG.isDebugEnabled()) {
-            LOG.debug("no server address for " + regionInfo.toString());
+          } catch(Exception e) {
+            LOG.warn(e);
           }
-          servers = 0;
-          this.tableServers.clear();
-          break;
         }
-        servers += 1;
-        serverAddress = new String(bytes, UTF8_ENCODING);
+      }
+        
+      if(servers.size() == 0 && tries == this.numRetries - 1) {
+        throw new NoServerForRegionException("failed to find server for "
+            + tableName + " after " + this.numRetries + " retries");
+      }
 
-        this.tableServers.put(regionInfo.startKey, 
-            new TableInfo(regionInfo, new HServerAddress(serverAddress)));
+      // The table is not yet being served. Sleep and retry.
+
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Sleeping. Table " + tableName
+            + " not currently being served.");
       }
-      return servers;
-      
-    } finally {
-      if(scannerId != -1L) {
-        server.close(scannerId);
+      try {
+        Thread.sleep(this.clientTimeout);
+
+      } catch(InterruptedException e) {
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Wake. Retry finding table " + tableName);
       }
     }
+    return servers;
   }
 
+  /* 
+   * Establishes a connection to the region server at the specified address
+   * @param regionServer - the server to connect to
+   * @throws IOException
+   */
   synchronized HRegionInterface getHRegionConnection(HServerAddress regionServer)
       throws IOException {
 
@@ -436,13 +794,12 @@ public class HClient implements HConstants {
       throws IOException {
     TreeSet<HTableDescriptor> uniqueTables = new TreeSet<HTableDescriptor>();
     
-    TreeMap<Text, TableInfo> metaTables =
+    SortedMap<Text, TableInfo> metaTables =
       this.tablesToServers.get(META_TABLE_NAME);
     
     if(metaTables == null) {
       // Meta is not loaded yet so go do that
-      loadMetaFromRoot(META_TABLE_NAME);
-      metaTables = tablesToServers.get(META_TABLE_NAME);
+      metaTables = loadMetaFromRoot();
     }
 
     for (TableInfo t: metaTables.values()) {
@@ -512,10 +869,11 @@ public class HClient implements HConstants {
     
     // Reload information for the whole table
     
-    findTableInMeta(info.regionInfo.tableDesc.getName());
+    this.tableServers = findServersForTable(info.regionInfo.tableDesc.getName());
     
     if(this.tableServers.get(info.regionInfo.startKey) == null ) {
-      throw new IOException("region " + info.regionInfo.regionName + " does not exist");
+      throw new IOException("region " + info.regionInfo.regionName
+          + " does not exist");
     }
   }
   
@@ -879,8 +1237,7 @@ public class HClient implements HConstants {
     System.err.println("              address is read from configuration.");
     System.err.println("Commands:");
     System.err.println(" shutdown     Shutdown the HBase cluster.");
-    System.err.println(" createTable  Takes table name, column families, " +
-      "and maximum versions.");
+    System.err.println(" createTable  Takes table name, column families,... ");
     System.err.println(" deleteTable  Takes a table name.");
    System.err.println(" listTables   List all tables.");
     System.err.println("Example Usage:");
@@ -928,16 +1285,14 @@ public class HClient implements HConstants {
         }
         
         if (cmd.equals("createTable")) {
-          if (i + 3 > args.length) {
+          if (i + 2 > args.length) {
             throw new IllegalArgumentException("Must supply a table name " +
-              ", at least one column family and maximum number of versions");
+              "and at least one column family");
           }
-          int maxVersions = (Integer.parseInt(args[args.length - 1]));
-          HTableDescriptor desc =
-            new HTableDescriptor(args[i + 1], maxVersions);
+          HTableDescriptor desc = new HTableDescriptor(args[i + 1]);
           boolean addedFamily = false;
           for (int ii = i + 2; ii < (args.length - 1); ii++) {
-            desc.addFamily(new Text(args[ii]));
+            desc.addFamily(new HColumnDescriptor(args[ii]));
             addedFamily = true;
           }
           if (!addedFamily) {

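Since handleRemoteException (earlier in this diff) maps the class name carried inside a Hadoop RemoteException back onto the matching local exception type, callers can catch administrative failures individually. A hedged sketch: whether addColumn actually rejects an enabled table is an assumption here, but the exception classes are the ones this patch adds.

// Hypothetical helper; not part of the patch.
static void addAnchorFamily(HClient client, Text table) throws IOException {
  try {
    client.addColumn(table, new HColumnDescriptor("anchor:"));
  } catch (TableNotDisabledException e) {
    // Assumption: schema changes require the table to be off-line first.
    client.disableTable(table);
    client.addColumn(table, new HColumnDescriptor("anchor:"));
    client.enableTable(table);
  } catch (InvalidColumnNameException e) {
    // Family names must be 'word characters' ending in ':'.
    throw e;
  }
}
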
+ 290 - 0
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java

@@ -0,0 +1,290 @@
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableComparable;
+
+/**
+ * A HColumnDescriptor contains information about a column family such as the
+ * number of versions, compression settings, etc.
+ *
+ */
+public class HColumnDescriptor implements WritableComparable {
+  
+  // For future backward compatibility
+  
+  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte)1;
+  
+  // Legal family names can only contain 'word characters' and end in a colon.
+
+  private static final Pattern LEGAL_FAMILY_NAME = Pattern.compile("\\w+:");
+
+  /** 
+   * The type of compression.
+   * @see org.apache.hadoop.io.SequenceFile.Writer
+   */
+  public static enum CompressionType {
+    /** Do not compress records. */
+    NONE, 
+    /** Compress values only, each separately. */
+    RECORD,
+    /** Compress sequences of records together in blocks. */
+    BLOCK
+  }
+  
+  // Internal values for compression type used for serialization
+  
+  private static final byte COMPRESSION_NONE = (byte)0;
+  private static final byte COMPRESSION_RECORD = (byte)1;
+  private static final byte COMPRESSION_BLOCK = (byte)2;
+  
+  private static final int DEFAULT_N_VERSIONS = 3;
+  
+  Text name;                                    // Column family name
+  int maxVersions;                              // Number of versions to keep
+  byte compressionType;                         // Compression setting if any
+  boolean inMemory;                             // Serve reads from in-memory cache
+  int maxValueLength;                           // Maximum value size
+  boolean bloomFilterEnabled;                   // True if column has a bloom filter
+  byte versionNumber;                           // Version number of this class
+  
+  /**
+   * Default constructor. Must be present for Writable.
+   */
+  public HColumnDescriptor() {
+    this.name = new Text();
+    this.maxVersions = DEFAULT_N_VERSIONS;
+    this.compressionType = COMPRESSION_NONE;
+    this.inMemory = false;
+    this.maxValueLength = Integer.MAX_VALUE;
+    this.bloomFilterEnabled = false;
+    this.versionNumber = COLUMN_DESCRIPTOR_VERSION;
+  }
+  
+  public HColumnDescriptor(String columnName) {
+    this();
+    this.name.set(columnName);
+  }
+  
+  /**
+   * Constructor - specify all parameters.
+   * @param name                - Column family name
+   * @param maxVersions         - Maximum number of versions to keep
+   * @param compression         - Compression type
+   * @param inMemory            - If true, column data should be kept in a
+   *                              HRegionServer's cache
+   * @param maxValueLength      - Restrict values to &lt;= this value
+   * @param bloomFilter         - Enable a bloom filter for this column
+   * 
+   * @throws IllegalArgumentException if passed a family name that is made of 
+   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> and does not
+   * end in a <code>:</code>
+   * @throws IllegalArgumentException if the number of versions is &lt;= 0
+   */
+  public HColumnDescriptor(Text name, int maxVersions, CompressionType compression,
+      boolean inMemory, int maxValueLength, boolean bloomFilter) {
+    String familyStr = name.toString();
+    Matcher m = LEGAL_FAMILY_NAME.matcher(familyStr);
+    if(m == null || !m.matches()) {
+      throw new IllegalArgumentException(
+          "Family names can only contain 'word characters' and must end with a ':'");
+    }
+    this.name = name;
+
+    if(maxVersions <= 0) {
+      // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
+      // Until there is support, consider 0 or < 0 -- a configuration error.
+      throw new IllegalArgumentException("Maximum versions must be positive");
+    }
+    this.maxVersions = maxVersions;
+
+    if(compression == CompressionType.NONE) {
+      this.compressionType = COMPRESSION_NONE;
+
+    } else if(compression == CompressionType.BLOCK) {
+      this.compressionType = COMPRESSION_BLOCK;
+      
+    } else if(compression == CompressionType.RECORD) {
+      this.compressionType = COMPRESSION_RECORD;
+      
+    } else {
+      assert(false);
+    }
+    this.inMemory = inMemory;
+    this.maxValueLength = maxValueLength;
+    this.bloomFilterEnabled = bloomFilter;
+    this.versionNumber = COLUMN_DESCRIPTOR_VERSION;
+  }
+  
+  /**
+   * @return    - name of column family
+   */
+  public Text getName() {
+    return name;
+  }
+  
+  /**
+   * @return    - compression type being used for the column family
+   */
+  public CompressionType getCompression() {
+    CompressionType value = null;
+
+    if(this.compressionType == COMPRESSION_NONE) {
+      value = CompressionType.NONE;
+      
+    } else if(this.compressionType == COMPRESSION_BLOCK) {
+      value = CompressionType.BLOCK;
+      
+    } else if(this.compressionType == COMPRESSION_RECORD) {
+      value = CompressionType.RECORD;
+      
+    } else {
+      assert(false);
+    }
+    return value;
+  }
+  
+  /**
+   * @return    - maximum number of versions
+   */
+  public int getMaxVersions() {
+    return this.maxVersions;
+  }
+  
+  @Override
+  public String toString() {
+    String compression = "none";
+    switch(compressionType) {
+    case COMPRESSION_NONE:
+      break;
+    case COMPRESSION_RECORD:
+      compression = "record";
+      break;
+    case COMPRESSION_BLOCK:
+      compression = "block";
+      break;
+    default:
+      assert(false);
+    }
+    
+    return "(" + name + ", max versions: " + maxVersions + ", compression: "
+      + compression + ", in memory: " + inMemory + ", max value length: "
+      + maxValueLength + ", bloom filter: " + bloomFilterEnabled + ")";
+  }
+  
+  @Override
+  public boolean equals(Object obj) {
+    return compareTo(obj) == 0;
+  }
+  
+  @Override
+  public int hashCode() {
+    int result = this.name.hashCode();
+    result ^= Integer.valueOf(this.maxVersions).hashCode();
+    result ^= Byte.valueOf(this.compressionType).hashCode();
+    result ^= Boolean.valueOf(this.inMemory).hashCode();
+    result ^= Integer.valueOf(this.maxValueLength).hashCode();
+    result ^= Boolean.valueOf(this.bloomFilterEnabled).hashCode();
+    result ^= Byte.valueOf(this.versionNumber).hashCode();
+    return result;
+  }
+  
+  //////////////////////////////////////////////////////////////////////////////
+  // Writable
+  //////////////////////////////////////////////////////////////////////////////
+
+  public void readFields(DataInput in) throws IOException {
+    this.versionNumber = in.readByte();
+    this.name.readFields(in);
+    this.maxVersions = in.readInt();
+    this.compressionType = in.readByte();
+    this.inMemory = in.readBoolean();
+    this.maxValueLength = in.readInt();
+    this.bloomFilterEnabled = in.readBoolean();
+  }
+
+  public void write(DataOutput out) throws IOException {
+    out.writeByte(this.versionNumber);
+    this.name.write(out);
+    out.writeInt(this.maxVersions);
+    out.writeByte(this.compressionType);
+    out.writeBoolean(this.inMemory);
+    out.writeInt(this.maxValueLength);
+    out.writeBoolean(this.bloomFilterEnabled);
+  }
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Comparable
+  //////////////////////////////////////////////////////////////////////////////
+
+  public int compareTo(Object o) {
+    // NOTE: we don't do anything with the version number yet.
+    // Version numbers will come into play when we introduce an incompatible
+    // change in the future such as the addition of access control lists.
+    
+    HColumnDescriptor other = (HColumnDescriptor)o;
+    
+    int result = this.name.compareTo(other.getName());
+    
+    if(result == 0) {
+      result = Integer.valueOf(this.maxVersions).compareTo(
+          Integer.valueOf(other.maxVersions));
+    }
+    
+    if(result == 0) {
+      result = Integer.valueOf(this.compressionType).compareTo(
+          Integer.valueOf(other.compressionType));
+    }
+    
+    if(result == 0) {
+      if(this.inMemory == other.inMemory) {
+        result = 0;
+        
+      } else if(this.inMemory) {
+        result = -1;
+        
+      } else {
+        result = 1;
+      }
+    }
+    
+    if(result == 0) {
+      result = other.maxValueLength - this.maxValueLength;
+    }
+    
+    if(result == 0) {
+      if(this.bloomFilterEnabled == other.bloomFilterEnabled) {
+        result = 0;
+        
+      } else if(this.bloomFilterEnabled) {
+        result = -1;
+        
+      } else {
+        result = 1;
+      }
+    }
+    
+    return result;
+  }
+
+}

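Note that write() puts the version byte first, so a future, incompatible descriptor format can be detected before any other field is read. A round-trip test sketch (harness code, not part of the patch; the constructor arguments mirror the all-parameters constructor above):

package org.apache.hadoop.hbase; // assumption: placed beside the class under test

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Text;

public class ColumnDescriptorRoundTrip {
  public static void main(String[] args) throws IOException {
    HColumnDescriptor col = new HColumnDescriptor(new Text("contents:"), 3,
        HColumnDescriptor.CompressionType.RECORD, false, Integer.MAX_VALUE, true);

    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    col.write(new DataOutputStream(buf));            // version byte, then fields

    HColumnDescriptor copy = new HColumnDescriptor();
    copy.readFields(new DataInputStream(
        new ByteArrayInputStream(buf.toByteArray())));

    // equals() delegates to compareTo(), so a faithful round trip prints true.
    System.out.println(col.equals(copy));
  }
}
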
+ 6 - 4
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HGlobals.java

@@ -25,12 +25,14 @@ public class HGlobals implements HConstants {
   static HTableDescriptor metaTableDesc = null;
 
   static {
-    rootTableDesc = new HTableDescriptor(ROOT_TABLE_NAME.toString(), 1);
-    rootTableDesc.addFamily(COLUMN_FAMILY);
+    rootTableDesc = new HTableDescriptor(ROOT_TABLE_NAME.toString());
+    rootTableDesc.addFamily(new HColumnDescriptor(COLUMN_FAMILY, 1,
+        HColumnDescriptor.CompressionType.NONE, false, Integer.MAX_VALUE, false));
     
     rootRegionInfo = new HRegionInfo(0L, rootTableDesc, null, null);
     
-    metaTableDesc = new HTableDescriptor(META_TABLE_NAME.toString(), 1);
-    metaTableDesc.addFamily(COLUMN_FAMILY);
+    metaTableDesc = new HTableDescriptor(META_TABLE_NAME.toString());
+    metaTableDesc.addFamily(new HColumnDescriptor(COLUMN_FAMILY, 1,
+        HColumnDescriptor.CompressionType.NONE, false, Integer.MAX_VALUE, false));
   }
 }

File diff suppressed because it is too large
+ 724 - 256
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java


+ 8 - 0
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMasterInterface.java

@@ -41,6 +41,14 @@ public interface HMasterInterface extends VersionedProtocol {
   public void createTable(HTableDescriptor desc) throws IOException;
   public void deleteTable(Text tableName) throws IOException;
   
+  public void addColumn(Text tableName, HColumnDescriptor column) throws IOException;
+  public void deleteColumn(Text tableName, Text columnName) throws IOException;
+  
+  public void mergeRegions(Text regionName1, Text regionName2) throws IOException;
+  
+  public void enableTable(Text tableName) throws IOException;
+  public void disableTable(Text tableName) throws IOException;
+  
   /**
    * Shutdown an HBase cluster.
    */

+ 1 - 1
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java

@@ -30,12 +30,12 @@ public class HMsg implements Writable {
   public static final byte MSG_CALL_SERVER_STARTUP = 4;
   public static final byte MSG_REGIONSERVER_STOP = 5;
   public static final byte MSG_REGION_CLOSE_WITHOUT_REPORT = 6;
-  public static final byte MSG_REGION_CLOSE_AND_DELETE = 7;
 
   public static final byte MSG_REPORT_OPEN = 100;
   public static final byte MSG_REPORT_CLOSE = 101;
   public static final byte MSG_REGION_SPLIT = 102;
   public static final byte MSG_NEW_REGION = 103;
+  public static final byte MSG_REPORT_EXITING = 104;
 
   byte msg;
   HRegionInfo info;

+ 5 - 13
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java

@@ -333,12 +333,12 @@ public class HRegion implements HConstants {
     }
 
     // Load in all the HStores.
-    for(Iterator<Text> it = this.regionInfo.tableDesc.families().iterator();
-        it.hasNext(); ) { 
-      Text colFamily = HStoreKey.extractFamily(it.next());
+    for(Map.Entry<Text, HColumnDescriptor> e :
+        this.regionInfo.tableDesc.families().entrySet()) {
+      
+      Text colFamily = HStoreKey.extractFamily(e.getKey());
       stores.put(colFamily, new HStore(dir, this.regionInfo.regionName,
-          colFamily, this.regionInfo.tableDesc.getMaxVersions(), fs,
-          oldLogFile, conf));
+          e.getValue(), fs, oldLogFile, conf));
     }
 
     // Get rid of any splits or merges that were lost in-progress
@@ -378,14 +378,6 @@ public class HRegion implements HConstants {
     return closed;
   }
   
-  /** Closes and deletes this HRegion. Called when doing a table deletion, for example */
-  public void closeAndDelete() throws IOException {
-    LOG.info("deleting region: " + regionInfo.regionName);
-    close();
-    deleteRegion(fs, dir, regionInfo.regionName);
-    LOG.info("region deleted: " + regionInfo.regionName);
-  }
-  
   /**
    * Close down this HRegion.  Flush the cache, shut down each HStore, don't 
    * service any more calls.

+ 8 - 7
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java

@@ -35,6 +35,7 @@ public class HRegionInfo implements Writable {
   public Text startKey;
   public Text endKey;
   public Text regionName;
+  public boolean offLine;
   
   public HRegionInfo() {
     this.regionId = 0;
@@ -42,16 +43,12 @@ public class HRegionInfo implements Writable {
     this.startKey = new Text();
     this.endKey = new Text();
     this.regionName = new Text();
+    this.offLine = false;
   }
   
-  public HRegionInfo(final byte [] serializedBytes) {
+  public HRegionInfo(final byte [] serializedBytes) throws IOException {
     this();
-    try {
-      readFields(new DataInputStream(
-        new ByteArrayInputStream(serializedBytes)));
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
+    readFields(new DataInputStream(new ByteArrayInputStream(serializedBytes)));
   }
 
   public HRegionInfo(long regionId, HTableDescriptor tableDesc,
@@ -79,6 +76,8 @@ public class HRegionInfo implements Writable {
     this.regionName = new Text(tableDesc.getName() + "_" +
       (startKey == null ? "" : startKey.toString()) + "_" +
       regionId);
+    
+    this.offLine = false;
   }
   
   @Override
@@ -98,6 +97,7 @@ public class HRegionInfo implements Writable {
     startKey.write(out);
     endKey.write(out);
     regionName.write(out);
+    out.writeBoolean(offLine);
   }
   
   public void readFields(DataInput in) throws IOException {
@@ -106,5 +106,6 @@ public class HRegionInfo implements Writable {
     this.startKey.readFields(in);
     this.endKey.readFields(in);
     this.regionName.readFields(in);
+    this.offLine = in.readBoolean();
   }
 }

+ 7 - 28
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java

@@ -602,6 +602,13 @@ public class HRegionServer
         }
       }
     }
+    try {
+      HMsg[] exitMsg = { new HMsg(HMsg.MSG_REPORT_EXITING) };
+      hbaseMaster.regionServerReport(info, exitMsg);
+      
+    } catch(IOException e) {
+      LOG.warn(e);
+    }
     try {
       LOG.info("stopping server at: " + info.getServerAddress().toString());
 
@@ -747,13 +754,6 @@ public class HRegionServer
             closeRegion(msg.getRegionInfo(), false);
             break;
 
-          case HMsg.MSG_REGION_CLOSE_AND_DELETE:
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("MSG_REGION_CLOSE_AND_DELETE");
-            }
-            closeAndDeleteRegion(msg.getRegionInfo());
-            break;
-
           default:
             throw new IOException("Impossible state during msg processing.  Instruction: " + msg);
           }
@@ -799,27 +799,6 @@ public class HRegionServer
     }
   }
 
-  private void closeAndDeleteRegion(HRegionInfo info) throws IOException {
-    this.lock.writeLock().lock();
-    HRegion region = null;
-    try {
-      region = regions.remove(info.regionName);
-    } finally {
-      this.lock.writeLock().unlock();
-    }
-    if(region != null) {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("deleting region " + info.regionName);
-      }
-      
-      region.closeAndDelete();
-      
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("region " + info.regionName + " deleted");
-      }
-    }
-  }
-
   /** Called either when the master tells us to restart or from stop() */
   private void closeAllRegions() {
     Vector<HRegion> regionsToClose = new Vector<HRegion>();

+ 6 - 6
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegiondirReader.java

@@ -101,10 +101,10 @@ public class HRegiondirReader {
   private HTableDescriptor getTableDescriptor(final FileSystem fs,
       final Path d, final String tableName)
   throws IOException {
-    HTableDescriptor desc = new HTableDescriptor(tableName, 1);
+    HTableDescriptor desc = new HTableDescriptor(tableName);
     Text [] families = getFamilies(fs, d);
     for (Text f: families) {
-      desc.addFamily(f);
+      desc.addFamily(new HColumnDescriptor(f.toString()));
     }
     return desc;
   }
@@ -163,7 +163,7 @@ public class HRegiondirReader {
   private void dump(final HRegionInfo info) throws IOException {
     HRegion r = new HRegion(this.parentdir, null,
         FileSystem.get(this.conf), conf, info, null, null);
-    Text [] families = info.tableDesc.families().toArray(new Text [] {});
+    Text [] families = info.tableDesc.families().keySet().toArray(new Text [] {});
     HInternalScannerInterface scanner = r.getScanner(families, new Text());
     HStoreKey key = new HStoreKey();
     TreeMap<Text, BytesWritable> results = new TreeMap<Text, BytesWritable>();
@@ -183,8 +183,8 @@ public class HRegiondirReader {
     // followed by cell content.
     while(scanner.next(key, results)) {
       for (Map.Entry<Text, BytesWritable> es: results.entrySet()) {
-    	Text colname = es.getKey();
-    	BytesWritable colvalue = es.getValue();
+      Text colname = es.getKey();
+      BytesWritable colvalue = es.getValue();
         Object value = null;
         byte[] bytes = new byte[colvalue.getSize()];
         if (colname.toString().equals("info:regioninfo")) {
@@ -219,4 +219,4 @@ public class HRegiondirReader {
       }
     }
   }
-}
+}

+ 45 - 30
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java

@@ -52,8 +52,9 @@ public class HStore {
 
   Path dir;
   Text regionName;
-  Text colFamily;
-  int maxVersions;
+  HColumnDescriptor family;
+  Text familyName;
+  SequenceFile.CompressionType compression;
   FileSystem fs;
   Configuration conf;
   Path mapdir;
@@ -98,23 +99,37 @@ public class HStore {
    * <p>It's assumed that after this constructor returns, the reconstructionLog
    * file will be deleted (by whoever has instantiated the HStore).
    */
-  public HStore(Path dir, Text regionName, Text colFamily, int maxVersions, 
+  public HStore(Path dir, Text regionName, HColumnDescriptor family, 
       FileSystem fs, Path reconstructionLog, Configuration conf)
   throws IOException {  
     this.dir = dir;
     this.regionName = regionName;
-    this.colFamily = colFamily;
-    this.maxVersions = maxVersions;
+    this.family = family;
+    this.familyName = HStoreKey.extractFamily(this.family.getName());
+    this.compression = SequenceFile.CompressionType.NONE;
+    
+    if(family.getCompression() != HColumnDescriptor.CompressionType.NONE) {
+      if(family.getCompression() == HColumnDescriptor.CompressionType.BLOCK) {
+        this.compression = SequenceFile.CompressionType.BLOCK;
+        
+      } else if(family.getCompression() == HColumnDescriptor.CompressionType.RECORD) {
+        this.compression = SequenceFile.CompressionType.RECORD;
+        
+      } else {
+        assert(false);
+      }
+    }
+    
     this.fs = fs;
     this.conf = conf;
 
-    this.mapdir = HStoreFile.getMapDir(dir, regionName, colFamily);
+    this.mapdir = HStoreFile.getMapDir(dir, regionName, familyName);
     fs.mkdirs(mapdir);
-    this.loginfodir = HStoreFile.getInfoDir(dir, regionName, colFamily);
+    this.loginfodir = HStoreFile.getInfoDir(dir, regionName, familyName);
     fs.mkdirs(loginfodir);
 
     if(LOG.isDebugEnabled()) {
-      LOG.debug("starting HStore for " + regionName + "/"+ colFamily);
+      LOG.debug("starting HStore for " + regionName + "/"+ familyName);
     }
     
     // Either restart or get rid of any leftover compaction work.  Either way, 
@@ -123,7 +138,7 @@ public class HStore {
 
     this.compactdir = new Path(dir, COMPACTION_DIR);
     Path curCompactStore =
-      HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
+      HStoreFile.getHStoreDir(compactdir, regionName, familyName);
     if(fs.exists(curCompactStore)) {
       processReadyCompaction();
       fs.delete(curCompactStore);
@@ -134,7 +149,7 @@ public class HStore {
     // corresponding one in 'loginfodir'. Without a corresponding log info
     // file, the entry in 'mapdir' must be deleted.
     Vector<HStoreFile> hstoreFiles 
-      = HStoreFile.loadHStoreFiles(conf, dir, regionName, colFamily, fs);
+      = HStoreFile.loadHStoreFiles(conf, dir, regionName, familyName, fs);
     for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext(); ) {
       HStoreFile hsf = it.next();
       mapFiles.put(hsf.loadInfo(fs), hsf);
@@ -187,7 +202,7 @@ public class HStore {
           Text column = val.getColumn();
           if (!key.getRegionName().equals(this.regionName) ||
               column.equals(HLog.METACOLUMN) ||
-              HStoreKey.extractFamily(column).equals(this.colFamily)) {
+              HStoreKey.extractFamily(column).equals(this.familyName)) {
             if (LOG.isDebugEnabled()) {
               LOG.debug("Passing on edit " + key.getRegionName() + ", "
                   + key.getRegionName() + ", " + column.toString() + ": "
@@ -230,12 +245,12 @@ public class HStore {
         new MapFile.Reader(fs, e.getValue().getMapFilePath().toString(), conf));
     }
     
-    LOG.info("HStore online for " + this.regionName + "/" + this.colFamily);
+    LOG.info("HStore online for " + this.regionName + "/" + this.familyName);
   }
 
   /** Turn off all the MapFile readers */
   public void close() throws IOException {
-    LOG.info("closing HStore for " + this.regionName + "/" + this.colFamily);
+    LOG.info("closing HStore for " + this.regionName + "/" + this.familyName);
     this.lock.obtainWriteLock();
     try {
       for (MapFile.Reader map: maps.values()) {
@@ -244,7 +259,7 @@ public class HStore {
       maps.clear();
       mapFiles.clear();
       
-      LOG.info("HStore closed for " + this.regionName + "/" + this.colFamily);
+      LOG.info("HStore closed for " + this.regionName + "/" + this.familyName);
     } finally {
       this.lock.releaseWriteLock();
     }
@@ -276,13 +291,13 @@ public class HStore {
     
     synchronized(flushLock) {
       if(LOG.isDebugEnabled()) {
-        LOG.debug("flushing HStore " + this.regionName + "/" + this.colFamily);
+        LOG.debug("flushing HStore " + this.regionName + "/" + this.familyName);
       }
       
       // A. Write the TreeMap out to the disk
 
       HStoreFile flushedFile 
-        = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, colFamily, fs);
+        = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, familyName, fs);
       
       Path mapfile = flushedFile.getMapFilePath();
       if(LOG.isDebugEnabled()) {
@@ -290,17 +305,17 @@ public class HStore {
       }
       
       MapFile.Writer out = new MapFile.Writer(conf, fs, mapfile.toString(), 
-          HStoreKey.class, BytesWritable.class);
+          HStoreKey.class, BytesWritable.class, compression);
       
       try {
         for (Map.Entry<HStoreKey, BytesWritable> es: inputCache.entrySet()) {
           HStoreKey curkey = es.getKey();
-          if (this.colFamily.equals(HStoreKey.extractFamily(curkey.getColumn()))) {
+          if (this.familyName.equals(HStoreKey.extractFamily(curkey.getColumn()))) {
             out.append(curkey, es.getValue());
           }
         }
         if(LOG.isDebugEnabled()) {
-          LOG.debug("HStore " + this.regionName + "/" + this.colFamily + " flushed");
+          LOG.debug("HStore " + this.regionName + "/" + this.familyName + " flushed");
         }
         
       } finally {
@@ -325,7 +340,7 @@ public class HStore {
           mapFiles.put(logCacheFlushId, flushedFile);
           if(LOG.isDebugEnabled()) {
             LOG.debug("HStore available for " + this.regionName + "/"
-                + this.colFamily + " flush id=" + logCacheFlushId);
+                + this.familyName + " flush id=" + logCacheFlushId);
           }
         
         } finally {
@@ -373,10 +388,10 @@ public class HStore {
   void compactHelper(boolean deleteSequenceInfo) throws IOException {
     synchronized(compactLock) {
       if(LOG.isDebugEnabled()) {
-        LOG.debug("started compaction of " + this.regionName + "/" + this.colFamily);
+        LOG.debug("started compaction of " + this.regionName + "/" + this.familyName);
       }
       
-      Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
+      Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, familyName);
       fs.mkdirs(curCompactStore);
       
       try {
@@ -409,11 +424,11 @@ public class HStore {
         }
         
         HStoreFile compactedOutputFile 
-          = new HStoreFile(conf, compactdir, regionName, colFamily, -1);
+          = new HStoreFile(conf, compactdir, regionName, familyName, -1);
         
         if(toCompactFiles.size() == 1) {
           if(LOG.isDebugEnabled()) {
-            LOG.debug("nothing to compact for " + this.regionName + "/" + this.colFamily);
+            LOG.debug("nothing to compact for " + this.regionName + "/" + this.familyName);
           }
           
           HStoreFile hsf = toCompactFiles.elementAt(0);
@@ -426,7 +441,7 @@ public class HStore {
 
         MapFile.Writer compactedOut = new MapFile.Writer(conf, fs, 
             compactedOutputFile.getMapFilePath().toString(), HStoreKey.class, 
-            BytesWritable.class);
+            BytesWritable.class, compression);
         
         try {
 
@@ -507,7 +522,7 @@ public class HStore {
               timesSeen = 1;
             }
             
-            if(timesSeen <= maxVersions) {
+            if(timesSeen <= family.getMaxVersions()) {
 
               // Keep old versions until we have maxVersions worth.
               // Then just skip them.
@@ -592,7 +607,7 @@ public class HStore {
         processReadyCompaction();
         
         if(LOG.isDebugEnabled()) {
-          LOG.debug("compaction complete for " + this.regionName + "/" + this.colFamily);
+          LOG.debug("compaction complete for " + this.regionName + "/" + this.familyName);
         }
 
       } finally {
@@ -625,7 +640,7 @@ public class HStore {
     // 1. Acquiring the write-lock
 
 
-    Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
+    Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, familyName);
     this.lock.obtainWriteLock();
     try {
       Path doneFile = new Path(curCompactStore, COMPACTION_DONE);
@@ -714,10 +729,10 @@ public class HStore {
       }
       
       HStoreFile compactedFile 
-        = new HStoreFile(conf, compactdir, regionName, colFamily, -1);
+        = new HStoreFile(conf, compactdir, regionName, familyName, -1);
       
       HStoreFile finalCompactedFile 
-        = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, colFamily, fs);
+        = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, familyName, fs);
       
       fs.rename(compactedFile.getMapFilePath(), finalCompactedFile.getMapFilePath());
       

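Reviewer note: the HStore changes above thread the new per-family compression setting through to every MapFile.Writer the store opens. A minimal sketch of the mapping, assuming HColumnDescriptor.CompressionType is a plain Java enum as this patch suggests; the translateCompression helper below is hypothetical, for illustration only:

    import org.apache.hadoop.io.SequenceFile;

    class CompressionSketch {
      // Mirrors the if/else chain in the HStore constructor: map the
      // per-family setting onto the SequenceFile writer setting.
      static SequenceFile.CompressionType translateCompression(
          HColumnDescriptor.CompressionType familySetting) {
        switch (familySetting) {
        case BLOCK:   // compress runs of records together
          return SequenceFile.CompressionType.BLOCK;
        case RECORD:  // compress each value individually
          return SequenceFile.CompressionType.RECORD;
        default:      // NONE (or anything unrecognized): no compression
          return SequenceFile.CompressionType.NONE;
        }
      }
    }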
+ 41 - 69
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java

@@ -19,7 +19,9 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Iterator;
-import java.util.TreeSet;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.TreeMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -27,101 +29,73 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparable;
 
 /**
- * HTableDescriptor contains various facts about an HTable, like
- * column families, maximum number of column versions, etc.
+ * HTableDescriptor contains the name of an HTable and its
+ * column families.
  */
 public class HTableDescriptor implements WritableComparable {
   Text name;
-  int maxVersions;
-  TreeSet<Text> families = new TreeSet<Text>();
+  TreeMap<Text, HColumnDescriptor> families;
   
   /**
    * Legal table names can only contain 'word characters':
    * i.e. <code>[a-zA-Z_0-9]</code>.
    * 
-   * Lets be restrictive until a reason to be otherwise.
+   * Let's be restrictive until there is a reason to be otherwise.
    */
   private static final Pattern LEGAL_TABLE_NAME =
     Pattern.compile("[\\w-]+");
   
-  /**
-   * Legal family names can only contain 'word characters' and
-   * end in a colon.
-   */
-  private static final Pattern LEGAL_FAMILY_NAME =
-    Pattern.compile("\\w+:");
-
   public HTableDescriptor() {
     this.name = new Text();
-    this.families.clear();
+    this.families = new TreeMap<Text, HColumnDescriptor>();
   }
 
   /**
    * Constructor.
    * @param name Table name.
-   * @param maxVersions Number of versions of a column to keep.
    * @throws IllegalArgumentException if passed a table name
    * that is made of other than 'word' characters: i.e.
   * <code>[a-zA-Z_0-9]</code>
    */
-  public HTableDescriptor(String name, int maxVersions) {
+  public HTableDescriptor(String name) {
     Matcher m = LEGAL_TABLE_NAME.matcher(name);
     if (m == null || !m.matches()) {
-      throw new IllegalArgumentException("Table names can only " +
-          "contain 'word characters': i.e. [a-zA-Z_0-9");
-    }
-    if (maxVersions <= 0) {
-      // TODO: Allow maxVersion of 0 to be the way you say
-      // "Keep all versions".  Until there is support, consider
-      // 0 -- or < 0 -- a configuration error.
-      throw new IllegalArgumentException("Maximum versions " +
-        "must be positive");
+      throw new IllegalArgumentException(
+          "Table names can only contain 'word characters': i.e. [a-zA-Z_0-9]");
     }
     this.name = new Text(name);
-    this.maxVersions = maxVersions;
+    this.families = new TreeMap<Text, HColumnDescriptor>();
   }
 
   public Text getName() {
     return name;
   }
 
-  public int getMaxVersions() {
-    return maxVersions;
-  }
-
   /**
    * Add a column family.
-   * @param family Column family name to add.  Column family names
-   * must end in a <code>:</code>
-   * @throws IllegalArgumentException if passed a table name
-   * that is made of other than 'word' characters: i.e.
-   * <code>[a-zA-Z_0-9]
+   * @param family HColumnDescriptor of the family to add.
    */
-  public void addFamily(Text family) {
-    String familyStr = family.toString();
-    Matcher m = LEGAL_FAMILY_NAME.matcher(familyStr);
-    if (m == null || !m.matches()) {
-      throw new IllegalArgumentException("Family names can " +
-          "only contain 'word characters' and must end with a " +
-          "':'");
-    }
-    families.add(family);
+  public void addFamily(HColumnDescriptor family) {
+    families.put(family.getName(), family);
   }
 
   /** Do we contain a given column? */
   public boolean hasFamily(Text family) {
-    return families.contains(family);
+    return families.containsKey(family);
   }
 
-  /** All the column families in this table. */
-  public TreeSet<Text> families() {
+  /** All the column families in this table.
+   * 
+   *  TODO: What is this used for? It seems dangerous to let callers modify our
+   *  private members.
+   */
+  public TreeMap<Text, HColumnDescriptor> families() {
     return families;
   }
 
   @Override
   public String toString() {
-    return "name: " + this.name.toString() +
-      ", maxVersions: " + this.maxVersions + ", families: " + this.families;
+    return "name: " + this.name.toString() + ", families: " + this.families;
   }
   
   @Override
@@ -133,10 +107,9 @@ public class HTableDescriptor implements WritableComparable {
   public int hashCode() {
     // TODO: Cache.
     int result = this.name.hashCode();
-    result ^= Integer.valueOf(this.maxVersions).hashCode();
     if (this.families != null && this.families.size() > 0) {
-      for (Text family: this.families) {
-        result ^= family.hashCode();
+      for (Map.Entry<Text,HColumnDescriptor> e: this.families.entrySet()) {
+        result ^= e.hashCode();
       }
     }
     return result;
@@ -148,22 +121,21 @@ public class HTableDescriptor implements WritableComparable {
 
   public void write(DataOutput out) throws IOException {
     name.write(out);
-    out.writeInt(maxVersions);
     out.writeInt(families.size());
-    for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
+    for(Iterator<HColumnDescriptor> it = families.values().iterator();
+        it.hasNext(); ) {
       it.next().write(out);
     }
   }
 
   public void readFields(DataInput in) throws IOException {
     this.name.readFields(in);
-    this.maxVersions = in.readInt();
     int numCols = in.readInt();
     families.clear();
     for(int i = 0; i < numCols; i++) {
-      Text t = new Text();
-      t.readFields(in);
-      families.add(t);
+      HColumnDescriptor c = new HColumnDescriptor();
+      c.readFields(in);
+      families.put(c.getName(), c);
     }
   }
 
@@ -172,24 +144,19 @@ public class HTableDescriptor implements WritableComparable {
   //////////////////////////////////////////////////////////////////////////////
 
   public int compareTo(Object o) {
-    HTableDescriptor htd = (HTableDescriptor) o;
-    int result = name.compareTo(htd.name);
+    HTableDescriptor other = (HTableDescriptor) o;
+    int result = name.compareTo(other.name);
+    
     if(result == 0) {
-      result = maxVersions - htd.maxVersions;
+      result = families.size() - other.families.size();
     }
     
-    if(result == 0) {
-      result = families.size() - htd.families.size();
-    }
-    
     if(result == 0) {
-      Iterator<Text> it2 = htd.families.iterator();
-      for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
-        Text family1 = it.next();
-        Text family2 = it2.next();
-        result = family1.compareTo(family2);
+      for(Iterator<HColumnDescriptor> it = families.values().iterator(),
+          it2 = other.families.values().iterator(); it.hasNext(); ) {
+        result = it.next().compareTo(it2.next());
         if(result != 0) {
-          return result;
+          break;
         }
       }
     }

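For reviewers, the net effect of the HTableDescriptor rewrite: the table-wide maxVersions setting is gone, and each per-family setting now lives in an HColumnDescriptor keyed by family name. A short usage sketch against the constructors exercised by the tests in this patch; the table and family names are made up:

    import org.apache.hadoop.io.Text;

    class SchemaSketch {
      static HTableDescriptor exampleSchema() {
        HTableDescriptor desc = new HTableDescriptor("webtable");
        // Family with all-default settings (the defaults are visible in the
        // TestToString expectations near the end of this patch).
        desc.addFamily(new HColumnDescriptor("contents:"));
        // Full constructor, as exercised by TestToString below:
        // (name, maxVersions, compression, inMemory, maxValueLength, bloomFilter)
        desc.addFamily(new HColumnDescriptor(new Text("anchor:"), 10,
            HColumnDescriptor.CompressionType.BLOCK, true, 1000, false));
        return desc;   // desc.hasFamily(new Text("anchor:")) is now true
      }
    }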
+ 29 - 0
src/contrib/hbase/src/java/org/apache/hadoop/hbase/InvalidColumnNameException.java

@@ -0,0 +1,29 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class InvalidColumnNameException extends IOException {
+  private static final long serialVersionUID = (1L << 29) - 1L;
+  public InvalidColumnNameException() {
+    super();
+  }
+
+  public InvalidColumnNameException(String s) {
+    super(s);
+  }
+}

+ 29 - 0
src/contrib/hbase/src/java/org/apache/hadoop/hbase/MasterNotRunningException.java

@@ -0,0 +1,29 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class MasterNotRunningException extends IOException {
+  private static final long serialVersionUID = (1L << 23) - 1L;
+  public MasterNotRunningException() {
+    super();
+  }
+
+  public MasterNotRunningException(String s) {
+    super(s);
+  }
+}

+ 30 - 0
src/contrib/hbase/src/java/org/apache/hadoop/hbase/NoServerForRegionException.java

@@ -0,0 +1,30 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class NoServerForRegionException extends IOException {
+  private static final long serialVersionUID = (1L << 11) - 1L;
+
+  public NoServerForRegionException() {
+    super();
+  }
+
+  public NoServerForRegionException(String s) {
+    super(s);
+  }
+}

+ 29 - 0
src/contrib/hbase/src/java/org/apache/hadoop/hbase/TableNotDisabledException.java

@@ -0,0 +1,29 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class TableNotDisabledException extends IOException {
+  private static final long serialVersionUID = (1L << 19) - 1L;
+  public TableNotDisabledException() {
+    super();
+  }
+
+  public TableNotDisabledException(String s) {
+    super(s);
+  }
+}

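The four IOException subclasses added above give HClient callers typed failures for the new administrative calls. A hedged sketch of the intended call pattern, pieced together from the TestMasterAdmin usage further down; the table and column names are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;

    class AdminSketch {
      static void dropColumn(Configuration conf) {
        HClient client = new HClient(conf);
        Text table = new Text("testadmin1");
        try {
          // Schema changes require the table to be offline first.
          client.disableTable(table);
          client.deleteColumn(table, new Text("col2:"));
          client.enableTable(table);
        } catch (MasterNotRunningException e) {
          // No HMaster could be contacted for the administrative call.
        } catch (TableNotDisabledException e) {
          // The schema change was attempted while the table was still enabled.
        } catch (IOException e) {
          // Anything else, e.g. InvalidColumnNameException, or
          // NoServerForRegionException from the region lookup.
        }
      }
    }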
+ 2 - 2
src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java

@@ -53,8 +53,8 @@ public class EvaluationClient implements HConstants {
   private static HTableDescriptor tableDescriptor;
   
   static {
-    tableDescriptor = new HTableDescriptor("TestTable", 1);
-    tableDescriptor.addFamily(COLUMN_FAMILY);
+    tableDescriptor = new HTableDescriptor("TestTable");
+    tableDescriptor.addFamily(new HColumnDescriptor(COLUMN_FAMILY.toString()));
   }
   
   private static enum Test {RANDOM_READ,

+ 3 - 3
src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java

@@ -71,9 +71,9 @@ public class TestGet extends HBaseTestCase {
       Path dir = new Path("/hbase");
       fs.mkdirs(dir);
       
-      HTableDescriptor desc = new HTableDescriptor("test", 1);
-      desc.addFamily(CONTENTS);
-      desc.addFamily(HConstants.COLUMN_FAMILY);
+      HTableDescriptor desc = new HTableDescriptor("test");
+      desc.addFamily(new HColumnDescriptor(CONTENTS.toString()));
+      desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
       
       HRegionInfo info = new HRegionInfo(0L, desc, null, null);
       Path regionDir = HStoreFile.getHRegionDir(dir, info.regionName);

+ 5 - 11
src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java

@@ -17,8 +17,8 @@ package org.apache.hadoop.hbase;
 
 import java.io.IOException;
 import java.util.Iterator;
+import java.util.Set;
 import java.util.TreeMap;
-import java.util.TreeSet;
 
 import org.apache.hadoop.io.Text;
 
@@ -71,9 +71,9 @@ public class TestHBaseCluster extends HBaseClusterTestCase {
 
   private void setup() throws IOException {
     client = new HClient(conf);
-    desc = new HTableDescriptor("test", 3);
-    desc.addFamily(new Text(CONTENTS));
-    desc.addFamily(new Text(ANCHOR));
+    desc = new HTableDescriptor("test");
+    desc.addFamily(new HColumnDescriptor(CONTENTS.toString()));
+    desc.addFamily(new HColumnDescriptor(ANCHOR.toString()));
     client.createTable(desc);
   }
       
@@ -182,7 +182,7 @@ public class TestHBaseCluster extends HBaseClusterTestCase {
     HTableDescriptor[] tables = client.listTables();
     assertEquals(1, tables.length);
     assertEquals(desc.getName(), tables[0].getName());
-    TreeSet<Text> families = tables[0].families();
+    Set<Text> families = tables[0].families().keySet();
     assertEquals(2, families.size());
     assertTrue(families.contains(new Text(CONTENTS)));
     assertTrue(families.contains(new Text(ANCHOR)));
@@ -193,11 +193,5 @@ public class TestHBaseCluster extends HBaseClusterTestCase {
     // Delete the table we created
 
     client.deleteTable(desc.getName());
-    try {
-      Thread.sleep(30000);                    // Wait for table to be deleted
-
-    } catch(InterruptedException e) {
-    }
   }
-  
 }

+ 3 - 3
src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java

@@ -102,9 +102,9 @@ public class TestHRegion extends HBaseTestCase implements RegionUnavailableListe
     oldlogfile = new Path(parentdir, "oldlogfile");
 
     log = new HLog(fs, newlogdir, conf);
-    desc = new HTableDescriptor("test", 3);
-    desc.addFamily(new Text("contents:"));
-    desc.addFamily(new Text("anchor:"));
+    desc = new HTableDescriptor("test");
+    desc.addFamily(new HColumnDescriptor("contents:"));
+    desc.addFamily(new HColumnDescriptor("anchor:"));
     region = new HRegion(parentdir, log, fs, conf, 
         new HRegionInfo(1, desc, null, null), null, oldlogfile);
   }

+ 79 - 0
src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java

@@ -0,0 +1,79 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.io.Text;
+
+public class TestMasterAdmin extends HBaseClusterTestCase {
+  private static final Text COLUMN_NAME = new Text("col1:");
+  private static HTableDescriptor testDesc;
+  static {
+    testDesc = new HTableDescriptor("testadmin1");
+    testDesc.addFamily(new HColumnDescriptor(COLUMN_NAME.toString()));
+  }
+  
+  private HClient client;
+
+  public TestMasterAdmin() {
+    super(true);
+    client = new HClient(conf);
+  }
+  
+  public void testMasterAdmin() {
+    try {
+      client.createTable(testDesc);
+      client.disableTable(testDesc.getName());
+      
+    } catch(Exception e) {
+      e.printStackTrace();
+      fail();
+    }
+
+    try {
+      try {
+        client.openTable(testDesc.getName());
+        fail("opening a disabled table should have thrown IllegalStateException");
+
+      } catch(IllegalStateException e) {
+        // Expected
+      }
+
+      client.addColumn(testDesc.getName(), new HColumnDescriptor("col2:"));
+      client.enableTable(testDesc.getName());
+      try {
+        client.deleteColumn(testDesc.getName(), new Text("col2:"));
+        fail("deleteColumn should fail while the table is enabled");
+        
+      } catch(TableNotDisabledException e) {
+        // Expected
+      }
+
+      client.disableTable(testDesc.getName());
+      client.deleteColumn(testDesc.getName(), new Text("col2:"));
+      
+    } catch(Exception e) {
+      e.printStackTrace();
+      fail();
+      
+    } finally {
+      try {
+        client.deleteTable(testDesc.getName());
+        
+      } catch(Exception e) {
+        e.printStackTrace();
+        fail();
+      }
+    }
+  }
+}

+ 3 - 3
src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java

@@ -50,8 +50,8 @@ public class TestTable extends HBaseClusterTestCase {
       fail();
     }
 
-    HTableDescriptor desc = new HTableDescriptor("test", 1);
-    desc.addFamily(HConstants.COLUMN_FAMILY);
+    HTableDescriptor desc = new HTableDescriptor("test");
+    desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
 
     try {
       client.createTable(desc);
@@ -73,5 +73,5 @@ public class TestTable extends HBaseClusterTestCase {
       e.printStackTrace();
       fail();
     }
-}
+  }
 }

+ 16 - 7
src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java

@@ -15,15 +15,24 @@ public class TestToString extends TestCase {
   }
   
   public void testHRegionInfo() throws Exception {
-    HTableDescriptor htd = new HTableDescriptor("hank", 10);
-    htd.addFamily(new Text("hankfamily:"));
-    htd.addFamily(new Text("hankotherfamily:"));
-    assertEquals("Table descriptor", htd.toString(),
-     "name: hank, maxVersions: 10, families: [hankfamily:, hankotherfamily:]");
+    HTableDescriptor htd = new HTableDescriptor("hank");
+    htd.addFamily(new HColumnDescriptor("hankfamily:"));
+    htd.addFamily(new HColumnDescriptor(new Text("hankotherfamily:"), 10,
+        HColumnDescriptor.CompressionType.BLOCK, true, 1000, false));
+    assertEquals("Table descriptor", "name: hank, families: "
+        + "{hankfamily:=(hankfamily:, max versions: 3, compression: none, "
+        + "in memory: false, max value length: 2147483647, bloom filter:false), "
+        + "hankotherfamily:=(hankotherfamily:, max versions: 10, "
+        + "compression: block, in memory: true, max value length: 1000, "
+        + "bloom filter:false)}", htd.toString());
     HRegionInfo hri = new HRegionInfo(-1, htd, new Text(), new Text("10"));
     assertEquals("HRegionInfo", 
-        "regionname: hank__-1, startKey: <>, tableDesc: {name: hank, " +
-        "maxVersions: 10, families: [hankfamily:, hankotherfamily:]}",
+        "regionname: hank__-1, startKey: <>, tableDesc: {name: hank, "
+        + "families: {hankfamily:=(hankfamily:, max versions: 3, "
+        + "compression: none, in memory: false, max value length: 2147483647, "
+        + "bloom filter:false), hankotherfamily:=(hankotherfamily:, "
+        + "max versions: 10, compression: block, in memory: true, max value "
+        + "length: 1000, bloom filter:false)}}",
         hri.toString());
   }
 }

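As a closing note, the expected strings in TestToString double as documentation of the HColumnDescriptor defaults introduced by this patch; the values in the comments below are read off the assertion text above, not from HColumnDescriptor itself:

    // A family created with the one-argument constructor...
    HColumnDescriptor family = new HColumnDescriptor("hankfamily:");
    // ...carries these defaults, per the expected toString() output:
    //   max versions:     3
    //   compression:      CompressionType.NONE
    //   in memory:        false
    //   max value length: 2147483647 (Integer.MAX_VALUE)
    //   bloom filter:     false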