HBASE-22806 Recreating a deleted column family brings back the deleted cells (#530)

Signed-off-by: stack <stack@apache.org>
Authored by Pankaj on 2019-08-25 06:16:16 +05:30; committed by Michael Stack
parent a12545380b
commit ae107bdb96
2 changed files with 65 additions and 8 deletions
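
For orientation, here is a hypothetical, self-contained client-side sketch of the scenario the JIRA title describes: a column family with both flushed and unflushed data is dropped and then recreated, and the "deleted" cells reappear. It assumes a running cluster and the standard Admin/Table client API; the class, table, family, and row names are invented for illustration and are not part of this commit (the commit's own regression test is in the second file below).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical reproduction sketch for HBASE-22806; names below are made up.
public class DeletedFamilyResurrectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("repro");
    byte[] cf1 = Bytes.toBytes("cf1");
    byte[] cf2 = Bytes.toBytes("cf2");
    byte[] row = Bytes.toBytes("r1");
    byte[] q = Bytes.toBytes("q");
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf2)).build());
      try (Table t = conn.getTable(tn)) {
        t.put(new Put(row).addColumn(cf2, q, Bytes.toBytes("v1")));
        admin.flush(tn);                                             // v1 now sits in an HFile under cf2
        t.put(new Put(row).addColumn(cf2, q, Bytes.toBytes("v2"))); // v2 stays in the memstore
        admin.deleteColumnFamily(tn, cf2);                           // drop cf2 while memstore data exists
        admin.addColumnFamily(tn, ColumnFamilyDescriptorBuilder.of(cf2)); // recreate cf2
        Result r = t.get(new Get(row).addFamily(cf2));
        // Before this fix, the flush triggered by the region reopen could rewrite cf2's
        // directory after it had been deleted, so the old cells came back at this point.
        System.out.println("cells in recreated cf2: " + r.size());
      }
    }
  }
}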

ModifyTableProcedure.java

@@ -113,14 +113,6 @@ public class ModifyTableProcedure
         break;
       case MODIFY_TABLE_REMOVE_REPLICA_COLUMN:
         updateReplicaColumnsIfNeeded(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
-        if (deleteColumnFamilyInModify) {
-          setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT);
-        } else {
-          setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
-        }
-        break;
-      case MODIFY_TABLE_DELETE_FS_LAYOUT:
-        deleteFromFs(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
         setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
         break;
       case MODIFY_TABLE_POST_OPERATION:
@@ -131,6 +123,14 @@ public class ModifyTableProcedure
         if (env.getAssignmentManager().isTableEnabled(getTableName())) {
           addChildProcedure(new ReopenTableRegionsProcedure(getTableName()));
         }
+        if (deleteColumnFamilyInModify) {
+          setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT);
+        } else {
+          return Flow.NO_MORE_STATE;
+        }
+        break;
+      case MODIFY_TABLE_DELETE_FS_LAYOUT:
+        deleteFromFs(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
         return Flow.NO_MORE_STATE;
       default:
         throw new UnsupportedOperationException("unhandled state=" + state);
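
Net effect of the two hunks above: the MODIFY_TABLE_DELETE_FS_LAYOUT step moves from immediately after the replica-column update to after the regions have been reopened, so a dropped family's directory is removed only once no region can still flush memstore data back into it. Below is a minimal, runnable sketch of the resulting ordering; the enum and driver loop are invented for illustration, the state names merely mirror ModifyTableState, and the post-operation/reopen case labels are inferred rather than shown in the hunks.

// Toy illustration of the post-fix state ordering in ModifyTableProcedure.
// Not HBase code: the enum and the driver loop below exist only for this sketch.
public class ModifyTableStateOrderSketch {
  enum State { REMOVE_REPLICA_COLUMN, POST_OPERATION, REOPEN_ALL_REGIONS, DELETE_FS_LAYOUT, DONE }

  static State next(State s, boolean deleteColumnFamilyInModify) {
    switch (s) {
      case REMOVE_REPLICA_COLUMN: return State.POST_OPERATION;
      case POST_OPERATION:        return State.REOPEN_ALL_REGIONS;
      case REOPEN_ALL_REGIONS:
        // The dropped family's FS layout is deleted only after the reopen, once the
        // regions no longer hold memstore data that a flush could write back into it.
        return deleteColumnFamilyInModify ? State.DELETE_FS_LAYOUT : State.DONE;
      case DELETE_FS_LAYOUT:      return State.DONE;
      default:                    return State.DONE;
    }
  }

  public static void main(String[] args) {
    // Walk the states for a table modification that drops a column family.
    for (State s = State.REMOVE_REPLICA_COLUMN; s != State.DONE; s = next(s, true)) {
      System.out.println(s);
    }
  }
}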

TestFromClientSide.java

@@ -46,6 +46,7 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicReference;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
@@ -101,6 +102,7 @@ import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.NonRepeatedEnvironmentEdge;
 import org.apache.hadoop.hbase.util.TableDescriptorChecker;
 import org.junit.AfterClass;
@@ -6759,4 +6761,59 @@ public class TestFromClientSide {
     TEST_UTIL.getAdmin().modifyTable(newDesc);
   }
 
+  @Test(timeout = 60000)
+  public void testModifyTableWithMemstoreData() throws Exception {
+    TableName tableName = TableName.valueOf(name.getMethodName());
+    createTableAndValidateTableSchemaModification(tableName, true);
+  }
+
+  @Test(timeout = 60000)
+  public void testDeleteCFWithMemstoreData() throws Exception {
+    TableName tableName = TableName.valueOf(name.getMethodName());
+    createTableAndValidateTableSchemaModification(tableName, false);
+  }
+
+  /**
+   * Create table and validate online schema modification
+   * @param tableName Table name
+   * @param modifyTable Modify table if true otherwise delete column family
+   * @throws IOException in case of failures
+   */
+  private void createTableAndValidateTableSchemaModification(TableName tableName,
+      boolean modifyTable) throws Exception {
+    Admin admin = TEST_UTIL.getAdmin();
+    // Create table with two Cfs
+    byte[] cf1 = Bytes.toBytes("cf1");
+    byte[] cf2 = Bytes.toBytes("cf2");
+    TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1))
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf2)).build();
+    admin.createTable(tableDesc);
+
+    Table t = TEST_UTIL.getConnection().getTable(tableName);
+    // Insert few records and flush the table
+    t.put(new Put(ROW).addColumn(cf1, QUALIFIER, Bytes.toBytes("val1")));
+    t.put(new Put(ROW).addColumn(cf2, QUALIFIER, Bytes.toBytes("val2")));
+    admin.flush(tableName);
+
+    Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), tableName);
+    List<Path> regionDirs = FSUtils.getRegionDirs(TEST_UTIL.getTestFileSystem(), tableDir);
+    assertTrue(regionDirs.size() == 1);
+    List<Path> familyDirs = FSUtils.getFamilyDirs(TEST_UTIL.getTestFileSystem(), regionDirs.get(0));
+    assertTrue(familyDirs.size() == 2);
+
+    // Insert record but dont flush the table
+    t.put(new Put(ROW).addColumn(cf1, QUALIFIER, Bytes.toBytes("val2")));
+    t.put(new Put(ROW).addColumn(cf2, QUALIFIER, Bytes.toBytes("val2")));
+
+    if (modifyTable) {
+      tableDesc = TableDescriptorBuilder.newBuilder(tableDesc).removeColumnFamily(cf2).build();
+      admin.modifyTable(tableDesc);
+    } else {
+      admin.deleteColumnFamily(tableName, cf2);
+    }
+    // After table modification or delete family there should be only one CF in FS
+    familyDirs = FSUtils.getFamilyDirs(TEST_UTIL.getTestFileSystem(), regionDirs.get(0));
+    assertTrue("CF dir count should be 1, but was " + familyDirs.size(), familyDirs.size() == 1);
+  }
 }