HBASE-22806 Recreating a deleted column family brings back the deleted cells (#530)

Signed-off-by: stack <stack@apache.org>
Authored by Pankaj on 2019-08-25 06:16:16 +05:30; committed by stack
parent b4bb551704
commit 902d4c244f
2 changed files with 66 additions and 9 deletions
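
For context (not part of the commit), a minimal client-side sketch of the scenario this change addresses, assuming a running cluster reachable with default configuration and hypothetical names (table t1, row r1, qualifier q): a column family holding only unflushed memstore data is removed and then re-added; before this fix the old cells could reappear, because the flush triggered when the regions were reopened recreated the removed family's directory on disk.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class RecreateDeletedFamilySketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("t1"); // hypothetical table name
    byte[] cf1 = Bytes.toBytes("cf1");
    byte[] cf2 = Bytes.toBytes("cf2");
    byte[] row = Bytes.toBytes("r1");
    byte[] qual = Bytes.toBytes("q");

    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf2)).build());

      try (Table table = conn.getTable(tn)) {
        // Write to cf2 but do not flush, so the data lives only in the memstore.
        table.put(new Put(row).addColumn(cf2, qual, Bytes.toBytes("v2")));

        // Remove cf2 while it still holds unflushed data, then add it back.
        admin.deleteColumnFamily(tn, cf2);
        admin.addColumnFamily(tn, ColumnFamilyDescriptorBuilder.of(cf2));

        // Before this fix the old cell could reappear here, because the flush that
        // happens while regions reopen recreated the removed family's directory.
        Result r = table.get(new Get(row).addFamily(cf2));
        System.out.println("cf2 cell resurrected: " + !r.isEmpty());
      }
    }
  }
}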

ModifyTableProcedure.java

@@ -101,14 +101,6 @@ public class ModifyTableProcedure
        break;
      case MODIFY_TABLE_REMOVE_REPLICA_COLUMN:
        updateReplicaColumnsIfNeeded(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
        if (deleteColumnFamilyInModify) {
          setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT);
        } else {
          setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
        }
        break;
      case MODIFY_TABLE_DELETE_FS_LAYOUT:
        deleteFromFs(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
        setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
        break;
      case MODIFY_TABLE_POST_OPERATION:
@@ -119,6 +111,14 @@ public class ModifyTableProcedure
        if (env.getAssignmentManager().isTableEnabled(getTableName())) {
          addChildProcedure(new ReopenTableRegionsProcedure(getTableName()));
        }
        if (deleteColumnFamilyInModify) {
          setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT);
        } else {
          return Flow.NO_MORE_STATE;
        }
        break;
      case MODIFY_TABLE_DELETE_FS_LAYOUT:
        deleteFromFs(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
        return Flow.NO_MORE_STATE;
      default:
        throw new UnsupportedOperationException("unhandled state=" + state);
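
For context (not part of the commit), a simplified, self-contained sketch of the reordered flow; the class and enum names below (ModifyFlowSketch, State.REOPEN_REGIONS, etc.) are stand-ins that collapse the intermediate procedure states rather than the real ModifyTableState values. The point of the patch is that when a column family is removed, the delete-FS-layout step is reached only after the regions have been reopened, so the memstore flush performed while regions close can no longer recreate the removed family's directory with stale cells.

public class ModifyFlowSketch {
  // Stand-in states only; not the real ModifyTableState enum.
  enum State { REMOVE_REPLICA_COLUMN, REOPEN_REGIONS, DELETE_FS_LAYOUT, DONE }

  public static void main(String[] args) {
    boolean deleteColumnFamilyInModify = true; // assume the modification removes a column family
    State state = State.REMOVE_REPLICA_COLUMN;
    while (state != State.DONE) {
      System.out.println("state=" + state);
      switch (state) {
        case REMOVE_REPLICA_COLUMN:
          // Per the first hunk above, the old code could branch to DELETE_FS_LAYOUT from here,
          // i.e. before the regions were reopened (and their memstores flushed).
          state = State.REOPEN_REGIONS;
          break;
        case REOPEN_REGIONS:
          // Regions are reopened here; closing them flushes any pending memstore data.
          state = deleteColumnFamilyInModify ? State.DELETE_FS_LAYOUT : State.DONE;
          break;
        case DELETE_FS_LAYOUT:
          // Only now is the removed family's directory deleted, so the flush during region
          // close can no longer recreate it and resurrect old cells.
          state = State.DONE;
          break;
        default:
          throw new IllegalStateException("unhandled state=" + state);
      }
    }
  }
}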

TestFromClientSide.java

@@ -43,6 +43,7 @@ import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
@@ -100,6 +101,7 @@ import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.NonRepeatedEnvironmentEdge;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.AfterClass;
@@ -6901,4 +6903,59 @@ public class TestFromClientSide {
    TEST_UTIL.getAdmin().modifyTable(newDesc);
  }

  @Test(timeout = 60000)
  public void testModifyTableWithMemstoreData() throws Exception {
    TableName tableName = TableName.valueOf(name.getMethodName());
    createTableAndValidateTableSchemaModification(tableName, true);
  }

  @Test(timeout = 60000)
  public void testDeleteCFWithMemstoreData() throws Exception {
    TableName tableName = TableName.valueOf(name.getMethodName());
    createTableAndValidateTableSchemaModification(tableName, false);
  }

  /**
   * Create the table and validate online schema modification.
   * @param tableName Table name
   * @param modifyTable Modify the table (removing a column family) if true, otherwise delete the
   *          column family directly
   * @throws Exception in case of failures
   */
  private void createTableAndValidateTableSchemaModification(TableName tableName,
      boolean modifyTable) throws Exception {
    Admin admin = TEST_UTIL.getAdmin();
    // Create a table with two CFs
    byte[] cf1 = Bytes.toBytes("cf1");
    byte[] cf2 = Bytes.toBytes("cf2");
    TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf2)).build();
    admin.createTable(tableDesc);

    Table t = TEST_UTIL.getConnection().getTable(tableName);
    // Insert a few records and flush the table
    t.put(new Put(ROW).addColumn(cf1, QUALIFIER, Bytes.toBytes("val1")));
    t.put(new Put(ROW).addColumn(cf2, QUALIFIER, Bytes.toBytes("val2")));
    admin.flush(tableName);

    Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), tableName);
    List<Path> regionDirs = FSUtils.getRegionDirs(TEST_UTIL.getTestFileSystem(), tableDir);
    assertTrue(regionDirs.size() == 1);

    List<Path> familyDirs = FSUtils.getFamilyDirs(TEST_UTIL.getTestFileSystem(), regionDirs.get(0));
    assertTrue(familyDirs.size() == 2);

    // Insert a record but don't flush the table
    t.put(new Put(ROW).addColumn(cf1, QUALIFIER, Bytes.toBytes("val2")));
    t.put(new Put(ROW).addColumn(cf2, QUALIFIER, Bytes.toBytes("val2")));

    if (modifyTable) {
      tableDesc = TableDescriptorBuilder.newBuilder(tableDesc).removeColumnFamily(cf2).build();
      admin.modifyTable(tableDesc);
    } else {
      admin.deleteColumnFamily(tableName, cf2);
    }

    // After the table modification or column family delete there should be only one CF dir in FS
    familyDirs = FSUtils.getFamilyDirs(TEST_UTIL.getTestFileSystem(), regionDirs.get(0));
    assertTrue("CF dir count should be 1, but was " + familyDirs.size(), familyDirs.size() == 1);
  }
}