From 9293bf26ea898ed1cf195ad9c0ef0f7a9cc2e087 Mon Sep 17 00:00:00 2001 From: stack Date: Fri, 6 Feb 2015 14:06:42 -0800 Subject: [PATCH] HBASE-12980 Delete of a table may not clean all rows from hbase:meta Conflicts: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java --- .../master/handler/DeleteTableHandler.java | 47 ++++++++++++++--- .../handler/TestEnableTableHandler.java | 52 +++++++++++++++++++ 2 files changed, 92 insertions(+), 7 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java index 00bbcb8414f..93dcc84b48b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java @@ -18,31 +18,37 @@ */ package org.apache.hadoop.hbase.master.handler; -import java.io.InterruptedIOException; import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.ArrayList; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CoordinatedStateException; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.backup.HFileArchiver; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.HFileArchiver; +import 
org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.executor.EventType; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.RegionState.State; +import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.regionserver.HRegion; @InterfaceAudience.Private public class DeleteTableHandler extends TableEventHandler { @@ -119,6 +125,9 @@ public class DeleteTableHandler extends TableEventHandler { // 5. If entry for this table in zk, and up in AssignmentManager, remove it. LOG.debug("Marking '" + tableName + "' as deleted."); am.getTableStateManager().setDeletedTable(tableName); + + // 6. Clean any remaining rows for this table. + cleanAnyRemainingRows(); } if (cpHost != null) { @@ -126,6 +135,30 @@ public class DeleteTableHandler extends TableEventHandler { } } + /** + * There may be items for this table still up in hbase:meta in the case where the + * info:regioninfo column was empty because of some write error. Remove ALL rows from hbase:meta + * that have to do with this table. See HBASE-12980. 
+ * @throws IOException + */ + private void cleanAnyRemainingRows() throws IOException { + Scan tableScan = MetaTableAccessor.getScanForTableName(tableName); + try (Table metaTable = + this.masterServices.getConnection().getTable(TableName.META_TABLE_NAME)) { + List deletes = new ArrayList(); + try (ResultScanner resScanner = metaTable.getScanner(tableScan)) { + for (Result result : resScanner) { + deletes.add(new Delete(result.getRow())); + } + } + if (!deletes.isEmpty()) { + LOG.warn("Deleting some vestigal " + deletes.size() + " rows of " + this.tableName + + " from " + TableName.META_TABLE_NAME); + metaTable.delete(deletes); + } + } + } + /** * Removes the table from hbase:meta and archives the HDFS files. */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java index 327c11e19ae..817ed5b8316 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java @@ -23,6 +23,8 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -44,6 +46,13 @@ import org.junit.experimental.categories.Category; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import java.io.IOException; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; + @Category({ MediumTests.class }) public 
class TestEnableTableHandler { private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -98,4 +107,47 @@ public class TestEnableTableHandler { assertEquals(tableName, onlineRegions.get(1).getTable()); } + /** + * We were only clearing rows that had a hregioninfo column in hbase:meta. Mangled rows that + * were missing the hregioninfo because of error were being left behind messing up any + * subsequent table made with the same name. HBASE-12980 + * @throws IOException + * @throws InterruptedException + */ + @Test(timeout=60000) + public void testDeleteForSureClearsAllTableRowsFromMeta() + throws IOException, InterruptedException { + final TableName tableName = TableName.valueOf("testDeleteForSureClearsAllTableRowsFromMeta"); + final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); + final HMaster m = cluster.getMaster(); + final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor(FAMILYNAME)); + admin.createTable(desc, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); + // Now I have a nice table, mangle it by removing the HConstants.REGIONINFO_QUALIFIER_STR + // content from a few of the rows. + Scan metaScannerForMyTable = MetaTableAccessor.getScanForTableName(tableName); + try (Table metaTable = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (ResultScanner scanner = metaTable.getScanner(metaScannerForMyTable)) { + for (Result result : scanner) { + // Just delete one row. + Delete d = new Delete(result.getRow()); + d.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + metaTable.delete(d); + break; + } + } + admin.disableTable(tableName); + TEST_UTIL.waitTableDisabled(tableName.getName()); + // Presume this call is synchronous. 
+ admin.deleteTable(tableName); + int rowCount = 0; + try (ResultScanner scanner = metaTable.getScanner(metaScannerForMyTable)) { + for (Result result : scanner) { + rowCount++; + } + } + assertEquals(0, rowCount); + } + } }