HBASE-12980 Delete of a table may not clean all rows from hbase:meta

Conflicts:
	hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java
stack 2015-02-06 14:06:42 -08:00
parent 073badfd7f
commit 9293bf26ea
2 changed files with 92 additions and 7 deletions

hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java

@@ -18,31 +18,37 @@
*/
package org.apache.hadoop.hbase.master.handler;
import java.io.InterruptedIOException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.regionserver.HRegion;
@InterfaceAudience.Private
public class DeleteTableHandler extends TableEventHandler {
@@ -119,6 +125,9 @@ public class DeleteTableHandler extends TableEventHandler {
// 5. If entry for this table in zk, and up in AssignmentManager, remove it.
LOG.debug("Marking '" + tableName + "' as deleted.");
am.getTableStateManager().setDeletedTable(tableName);
// 6. Clean any remaining rows for this table.
cleanAnyRemainingRows();
}
if (cpHost != null) {
@@ -126,6 +135,30 @@ public class DeleteTableHandler extends TableEventHandler {
}
}
/**
* There may be items for this table still up in hbase:meta in the case where the
* info:regioninfo column was empty because of some write error. Remove ALL rows from hbase:meta
* that have to do with this table. See HBASE-12980.
* @throws IOException
*/
private void cleanAnyRemainingRows() throws IOException {
Scan tableScan = MetaTableAccessor.getScanForTableName(tableName);
try (Table metaTable =
this.masterServices.getConnection().getTable(TableName.META_TABLE_NAME)) {
List<Delete> deletes = new ArrayList<Delete>();
try (ResultScanner resScanner = metaTable.getScanner(tableScan)) {
for (Result result : resScanner) {
deletes.add(new Delete(result.getRow()));
}
}
if (!deletes.isEmpty()) {
LOG.warn("Deleting some vestigal " + deletes.size() + " rows of " + this.tableName +
" from " + TableName.META_TABLE_NAME);
metaTable.delete(deletes);
}
}
}
/**
* Removes the table from hbase:meta and archives the HDFS files.
*/
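
For context, the new cleanAnyRemainingRows() leans on MetaTableAccessor.getScanForTableName(tableName) to confine the hbase:meta scan to this table's rows. The sketch below shows one way such a scan can be bounded by row-key prefix; the key layout and the class and method names in it are illustrative assumptions for this note, not the accessor's actual source.

// Sketch only (assumption): hbase:meta row keys for a table begin with "<tableName>,",
// so bounding the scan by that prefix returns only this table's catalog rows.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaScanSketch {
  static Scan scanForTable(TableName tableName) {
    // All meta rows for a table share the "<tableName>," prefix.
    byte[] startRow = Bytes.toBytes(tableName.getNameAsString() + ",");
    // '-' is the byte right after ',', so this exclusive stop row closes the prefix range
    // without picking up rows of other tables whose names extend this one.
    byte[] stopRow = Bytes.toBytes(tableName.getNameAsString() + "-");
    Scan scan = new Scan();
    scan.setStartRow(startRow);
    scan.setStopRow(stopRow);
    return scan;
  }
}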

hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java

@@ -23,6 +23,8 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -44,6 +46,13 @@ import org.junit.experimental.categories.Category;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
@Category({ MediumTests.class })
public class TestEnableTableHandler {
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@@ -98,4 +107,47 @@ public class TestEnableTableHandler {
assertEquals(tableName, onlineRegions.get(1).getTable());
}
/**
* We were only clearing rows that had an info:regioninfo column in hbase:meta. Mangled rows that
* were missing the info:regioninfo cell because of a write error were being left behind, messing
* up any subsequent table created with the same name. See HBASE-12980.
* @throws IOException
* @throws InterruptedException
*/
@Test(timeout=60000)
public void testDeleteForSureClearsAllTableRowsFromMeta()
throws IOException, InterruptedException {
final TableName tableName = TableName.valueOf("testDeleteForSureClearsAllTableRowsFromMeta");
final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
final HMaster m = cluster.getMaster();
final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
final HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor(FAMILYNAME));
admin.createTable(desc, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
// Now I have a nice table, mangle it by removing the HConstants.REGIONINFO_QUALIFIER_STR
// content from one of the rows.
Scan metaScannerForMyTable = MetaTableAccessor.getScanForTableName(tableName);
try (Table metaTable = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
try (ResultScanner scanner = metaTable.getScanner(metaScannerForMyTable)) {
for (Result result : scanner) {
// Just delete one row.
Delete d = new Delete(result.getRow());
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
metaTable.delete(d);
break;
}
}
admin.disableTable(tableName);
TEST_UTIL.waitTableDisabled(tableName.getName());
// Presume this call is synchronous.
admin.deleteTable(tableName);
int rowCount = 0;
try (ResultScanner scanner = metaTable.getScanner(metaScannerForMyTable)) {
for (Result result : scanner) {
rowCount++;
}
}
assertEquals(0, rowCount);
}
}
}
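
The "mangled" state the test manufactures is simply a catalog row whose info:regioninfo cell has been removed. A small helper like the following, an illustrative sketch rather than part of the patch, shows the condition the pre-fix cleanup implicitly keyed on: rows where that cell is missing or empty carried no parseable region info and so were skipped and left behind.

// Sketch only: detects the kind of row the test above creates by deleting the
// info:regioninfo cell. The claim that such rows were skipped before this patch
// is taken from the commit description and the javadoc of cleanAnyRemainingRows().
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Result;

public class VestigialRowCheck {
  static boolean isVestigial(Result metaRow) {
    byte[] cell = metaRow.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    // Missing or empty info:regioninfo is the "mangled" state HBASE-12980 addresses.
    return cell == null || cell.length == 0;
  }
}

One way to use this in a check of catalog health would be to iterate the table-scoped meta scan and assert that no returned row is vestigial after a create completes.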