HBASE-26263 [Rolling Upgrading] Persist the StoreFileTracker configurations to TableDescriptor for existing tables (#3700)
Signed-off-by: Duo Zhang <zhangduo@apache.org>
Reviewed-by: Wellington Ramos Chevreuil <wchevreuil@apache.org>

Parent: 80b04229a2
Commit: c4325ff088
@@ -440,6 +440,11 @@ public class TableDescriptorBuilder {
    return this;
  }

  public TableDescriptorBuilder removeValue(final String key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(Bytes key) {
    desc.removeValue(key);
    return this;

@@ -788,6 +793,17 @@ public class TableDescriptorBuilder {
      }
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
     * @param key Key whose key and value we're to remove from TableDescriptor
     *          parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(final String key) {
      return setValue(key, (String) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
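For context on how the new removeValue API is meant to be used, here is a minimal client-side sketch; the Admin handle and helper class are illustrative, and StoreFileTrackerFactory.TRACKER_IMPL is the descriptor key this commit migrates:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;

public class RemoveTrackerImplExample {
  // Minimal sketch: drop the per-table StoreFileTracker setting so that the
  // cluster-wide default applies again. The Admin handle is assumed to exist.
  static void removeTrackerImpl(Admin admin, TableName table) throws IOException {
    TableDescriptor current = admin.getDescriptor(table);
    TableDescriptor cleaned = TableDescriptorBuilder.newBuilder(current)
      .removeValue(StoreFileTrackerFactory.TRACKER_IMPL)
      .build();
    admin.modifyTable(cleaned);
  }
}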
@@ -584,3 +584,14 @@ enum ClaimReplicationQueuesState {
  CLAIM_REPLICATION_QUEUES_DISPATCH = 1;
  CLAIM_REPLICATION_QUEUES_FINISH = 2;
}


enum ModifyTableDescriptorState {
  MODIFY_TABLE_DESCRIPTOR_PREPARE = 1;
  MODIFY_TABLE_DESCRIPTOR_UPDATE = 2;
}

message ModifyTableDescriptorStateData {
  required TableSchema unmodified_table_schema = 1;
  optional TableSchema modified_table_schema = 2;
}
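These enum values are what the new procedure (further below) persists across master restarts; a minimal round-trip sketch, assuming the generated MasterProcedureProtos classes are on the classpath:

import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyTableDescriptorState;

public class StateRoundTrip {
  public static void main(String[] args) {
    // The procedure stores states by numeric id (getStateId) and restores them
    // with forNumber (getState), so this mapping must round-trip exactly.
    ModifyTableDescriptorState s = ModifyTableDescriptorState.MODIFY_TABLE_DESCRIPTOR_PREPARE;
    assert ModifyTableDescriptorState.forNumber(s.getNumber()) == s;
    System.out.println(s.getNumber()); // prints 1
  }
}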
@@ -130,6 +130,7 @@ import org.apache.hadoop.hbase.master.http.MasterRedirectServlet;
import org.apache.hadoop.hbase.master.http.MasterStatusServlet;
import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.migrate.RollingUpgradeChore;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;

@@ -355,6 +356,7 @@ public class HMaster extends HRegionServer implements MasterServices {
  private ExpiredMobFileCleanerChore expiredMobFileCleanerChore;
  private MobCompactionChore mobCompactChore;
  private MasterMobCompactionThread mobCompactThread;
  private RollingUpgradeChore rollingUpgradeChore;
  // used to synchronize the mobCompactionStates
  private final IdLock mobCompactionLock = new IdLock();
  // save the information of mob compactions in tables.

@@ -1212,6 +1214,9 @@ public class HMaster extends HRegionServer implements MasterServices {
      LOG.debug("Balancer post startup initialization complete, took " + (
        (EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds");
    }

    this.rollingUpgradeChore = new RollingUpgradeChore(this);
    getChoreService().scheduleChore(rollingUpgradeChore);
  }

  private void createMissingCFsInMetaDuringUpgrade(

@@ -1695,6 +1700,7 @@ public class HMaster extends HRegionServer implements MasterServices {
      shutdownChore(snapshotCleanerChore);
      shutdownChore(hbckChore);
      shutdownChore(regionsRecoveryChore);
      shutdownChore(rollingUpgradeChore);
    }
  }
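The chore's period and initial delay are read from configuration when the master constructs it; a minimal sketch of overriding the defaults (the keys and default values are defined in RollingUpgradeChore below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RollingUpgradeChoreConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Check for unmigrated tables every second after a 1s delay, instead of
    // the defaults of a 10s period and a 30s initial delay.
    conf.setInt("hbase.master.rolling.upgrade.chore.period.secs", 1);
    conf.setLong("hbase.master.rolling.upgrade.chore.delay.secs", 1);
  }
}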
@@ -0,0 +1,130 @@ org/apache/hadoop/hbase/master/migrate/RollingUpgradeChore.java (new file)
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master.migrate;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.regionserver.storefiletracker.MigrateStoreFileTrackerProcedure;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * To avoid submitting too many migrating/upgrade threads at the same time during master
 * initialization, RollingUpgradeChore handles all rolling-upgrade tasks.
 */
@InterfaceAudience.Private
public class RollingUpgradeChore extends ScheduledChore {

  static final String ROLLING_UPGRADE_CHORE_PERIOD_SECONDS_KEY =
    "hbase.master.rolling.upgrade.chore.period.secs";
  static final int DEFAULT_ROLLING_UPGRADE_CHORE_PERIOD_SECONDS = 10; // 10 seconds by default

  static final String ROLLING_UPGRADE_CHORE_DELAY_SECONDS_KEY =
    "hbase.master.rolling.upgrade.chore.delay.secs";
  static final long DEFAULT_ROLLING_UPGRADE_CHORE_DELAY_SECONDS = 30; // 30 seconds

  static final int CONCURRENT_PROCEDURES_COUNT = 5;

  private final static Logger LOG = LoggerFactory.getLogger(RollingUpgradeChore.class);
  ProcedureExecutor<MasterProcedureEnv> procedureExecutor;
  private TableDescriptors tableDescriptors;
  private List<MigrateStoreFileTrackerProcedure> processingProcs = new ArrayList<>();

  public RollingUpgradeChore(MasterServices masterServices) {
    this(masterServices.getConfiguration(), masterServices.getMasterProcedureExecutor(),
      masterServices.getTableDescriptors(), masterServices);
  }

  private RollingUpgradeChore(Configuration conf,
    ProcedureExecutor<MasterProcedureEnv> procedureExecutor, TableDescriptors tableDescriptors,
    Stoppable stopper) {
    super(RollingUpgradeChore.class.getSimpleName(), stopper,
      conf.getInt(ROLLING_UPGRADE_CHORE_PERIOD_SECONDS_KEY,
        DEFAULT_ROLLING_UPGRADE_CHORE_PERIOD_SECONDS),
      conf.getLong(ROLLING_UPGRADE_CHORE_DELAY_SECONDS_KEY,
        DEFAULT_ROLLING_UPGRADE_CHORE_DELAY_SECONDS),
      TimeUnit.SECONDS);
    this.procedureExecutor = procedureExecutor;
    this.tableDescriptors = tableDescriptors;
  }

  @Override
  protected void chore() {
    if (isCompletelyMigrateSFT(CONCURRENT_PROCEDURES_COUNT)) {
      LOG.info("All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!");
      shutdown();
    }
  }

  private boolean isCompletelyMigrateSFT(int concurrentCount) {
    Iterator<MigrateStoreFileTrackerProcedure> iter = processingProcs.iterator();
    while (iter.hasNext()) {
      MigrateStoreFileTrackerProcedure proc = iter.next();
      if (procedureExecutor.isFinished(proc.getProcId())) {
        iter.remove();
      }
    }
    // No new migration procedures will be submitted until
    // all procedures executed last time are completed.
    if (!processingProcs.isEmpty()) {
      return false;
    }

    Map<String, TableDescriptor> migrateSFTTables;
    try {
      migrateSFTTables = tableDescriptors.getAll().entrySet().stream().filter(entry -> {
        TableDescriptor td = entry.getValue();
        return StringUtils.isEmpty(td.getValue(StoreFileTrackerFactory.TRACKER_IMPL));
      }).limit(concurrentCount).collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
    } catch (IOException e) {
      LOG.warn("Failed to migrate StoreFileTracker", e);
      return false;
    }

    if (migrateSFTTables.isEmpty()) {
      LOG.info("There is no table to migrate StoreFileTracker!");
      return true;
    }

    for (Map.Entry<String, TableDescriptor> entry : migrateSFTTables.entrySet()) {
      TableDescriptor tableDescriptor = entry.getValue();
      MigrateStoreFileTrackerProcedure proc =
        new MigrateStoreFileTrackerProcedure(procedureExecutor.getEnvironment(), tableDescriptor);
      procedureExecutor.submitProcedure(proc);
      processingProcs.add(proc);
    }
    return false;
  }
}
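The chore caps each pass at CONCURRENT_PROCEDURES_COUNT (5) outstanding migrations and only submits a new batch once the previous batch has finished. To check which tables are still pending, using the same predicate the chore applies, a client-side sketch (the Admin handle is assumed to exist):

import java.io.IOException;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;

public class PendingSFTMigrations {
  // Mirrors the chore's filter: a table is pending while its descriptor has
  // no explicit StoreFileTracker implementation recorded.
  static void printPending(Admin admin) throws IOException {
    for (TableDescriptor td : admin.listTableDescriptors()) {
      if (StringUtils.isEmpty(td.getValue(StoreFileTrackerFactory.TRACKER_IMPL))) {
        System.out.println(td.getTableName());
      }
    }
  }
}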
@@ -0,0 +1,161 @@ org/apache/hadoop/hbase/master/procedure/ModifyTableDescriptorProcedure.java (new file)
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.procedure;

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyTableDescriptorState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyTableDescriptorStateData;

/**
 * The procedure will only update the table descriptor without reopening all the regions.
 * <p/>
 * It is usually used for migrating when upgrading, where we need to add something into the table
 * descriptor, such as the rs group information.
 */
@InterfaceAudience.Private
public abstract class ModifyTableDescriptorProcedure
  extends AbstractStateMachineTableProcedure<ModifyTableDescriptorState> {

  private static final Logger LOG = LoggerFactory.getLogger(ModifyTableDescriptorProcedure.class);

  private TableDescriptor unmodifiedTableDescriptor;
  private TableDescriptor modifiedTableDescriptor;

  protected ModifyTableDescriptorProcedure() {
  }

  protected ModifyTableDescriptorProcedure(MasterProcedureEnv env, TableDescriptor unmodified) {
    super(env);
    this.unmodifiedTableDescriptor = unmodified;
  }

  @Override
  public TableName getTableName() {
    return unmodifiedTableDescriptor.getTableName();
  }

  @Override
  public TableOperationType getTableOperationType() {
    return TableOperationType.EDIT;
  }

  /**
   * Subclasses should implement this method to modify the table descriptor, such as storing the
   * rs group information.
   * <p/>
   * Since the migration is asynchronous, it is possible that users have already changed the rs
   * group for a table; in that case we do not need to modify the table descriptor any more, and
   * you can just return {@link Optional#empty()}.
   */
  protected abstract Optional<TableDescriptor> modify(MasterProcedureEnv env,
    TableDescriptor current) throws IOException;

  @Override
  protected Flow executeFromState(MasterProcedureEnv env, ModifyTableDescriptorState state)
    throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
    try {
      switch (state) {
        case MODIFY_TABLE_DESCRIPTOR_PREPARE:
          Optional<TableDescriptor> modified = modify(env, unmodifiedTableDescriptor);
          if (modified.isPresent()) {
            modifiedTableDescriptor = modified.get();
            setNextState(ModifyTableDescriptorState.MODIFY_TABLE_DESCRIPTOR_UPDATE);
            return Flow.HAS_MORE_STATE;
          } else {
            // do not need to modify
            return Flow.NO_MORE_STATE;
          }
        case MODIFY_TABLE_DESCRIPTOR_UPDATE:
          env.getMasterServices().getTableDescriptors().update(modifiedTableDescriptor);
          return Flow.NO_MORE_STATE;
        default:
          throw new UnsupportedOperationException("unhandled state=" + state);
      }
    } catch (IOException e) {
      if (isRollbackSupported(state)) {
        setFailure("master-modify-table-descriptor", e);
      } else {
        LOG.warn("Retriable error trying to modify table descriptor={} (in state={})",
          getTableName(), state, e);
      }
    }
    return Flow.HAS_MORE_STATE;
  }

  @Override
  protected void rollbackState(MasterProcedureEnv env, ModifyTableDescriptorState state)
    throws IOException, InterruptedException {
    if (state == ModifyTableDescriptorState.MODIFY_TABLE_DESCRIPTOR_PREPARE) {
      return;
    }
    throw new UnsupportedOperationException("unhandled state=" + state);
  }

  @Override
  protected boolean isRollbackSupported(ModifyTableDescriptorState state) {
    return state == ModifyTableDescriptorState.MODIFY_TABLE_DESCRIPTOR_PREPARE;
  }

  @Override
  protected ModifyTableDescriptorState getState(int stateId) {
    return ModifyTableDescriptorState.forNumber(stateId);
  }

  @Override
  protected int getStateId(ModifyTableDescriptorState state) {
    return state.getNumber();
  }

  @Override
  protected ModifyTableDescriptorState getInitialState() {
    return ModifyTableDescriptorState.MODIFY_TABLE_DESCRIPTOR_PREPARE;
  }

  @Override
  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
    super.serializeStateData(serializer);
    ModifyTableDescriptorStateData.Builder builder = ModifyTableDescriptorStateData.newBuilder()
      .setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedTableDescriptor));
    if (modifiedTableDescriptor != null) {
      builder.setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor));
    }
    serializer.serialize(builder.build());
  }

  @Override
  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
    super.deserializeStateData(serializer);
    ModifyTableDescriptorStateData data =
      serializer.deserialize(ModifyTableDescriptorStateData.class);
    unmodifiedTableDescriptor = ProtobufUtil.toTableDescriptor(data.getUnmodifiedTableSchema());
    if (data.hasModifiedTableSchema()) {
      modifiedTableDescriptor = ProtobufUtil.toTableDescriptor(data.getModifiedTableSchema());
    }
  }
}
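A minimal sketch of a concrete subclass; the marker property key is hypothetical and only illustrates the modify() contract (return Optional.empty() when no update is needed):

import java.util.Optional;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.ModifyTableDescriptorProcedure;

public class AddMarkerProcedure extends ModifyTableDescriptorProcedure {
  public AddMarkerProcedure() {} // no-arg constructor required for deserialization

  public AddMarkerProcedure(MasterProcedureEnv env, TableDescriptor unmodified) {
    super(env, unmodified);
  }

  @Override
  protected Optional<TableDescriptor> modify(MasterProcedureEnv env, TableDescriptor current) {
    // "hbase.example.migrated" is a hypothetical marker key; skip the update
    // entirely when the descriptor already carries it.
    if (current.getValue("hbase.example.migrated") != null) {
      return Optional.empty();
    }
    return Optional.of(TableDescriptorBuilder.newBuilder(current)
      .setValue("hbase.example.migrated", "true").build());
  }
}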
@@ -0,0 +1,48 @@ org/apache/hadoop/hbase/regionserver/storefiletracker/MigrateStoreFileTrackerProcedure.java (new file)
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.storefiletracker;

import java.util.Optional;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.ModifyTableDescriptorProcedure;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Procedure for migrating StoreFileTracker information to table descriptor.
 */
@InterfaceAudience.Private
public class MigrateStoreFileTrackerProcedure extends ModifyTableDescriptorProcedure {

  public MigrateStoreFileTrackerProcedure() {
  }

  public MigrateStoreFileTrackerProcedure(MasterProcedureEnv env, TableDescriptor unmodified) {
    super(env, unmodified);
  }

  @Override
  protected Optional<TableDescriptor> modify(MasterProcedureEnv env, TableDescriptor current) {
    if (StringUtils.isEmpty(current.getValue(StoreFileTrackerFactory.TRACKER_IMPL))) {
      TableDescriptor td =
        StoreFileTrackerFactory.updateWithTrackerConfigs(env.getMasterConfiguration(), current);
      return Optional.of(td);
    }
    return Optional.empty();
  }
}
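Normally only RollingUpgradeChore submits this procedure; for a one-off submission (e.g. from a test that holds an HMaster reference, as the test below does via the mini cluster), a sketch:

import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.regionserver.storefiletracker.MigrateStoreFileTrackerProcedure;

public class ManualMigrationExample {
  // A sketch under the assumption that we already hold an HMaster reference.
  static long submit(HMaster master, TableDescriptor td) {
    ProcedureExecutor<MasterProcedureEnv> exec = master.getMasterProcedureExecutor();
    MigrateStoreFileTrackerProcedure proc =
      new MigrateStoreFileTrackerProcedure(exec.getEnvironment(), td);
    return exec.submitProcedure(proc); // returns the procedure id for tracking
  }
}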
@@ -0,0 +1,108 @@ org/apache/hadoop/hbase/master/migrate/TestMigrateStoreFileTracker.java (new file)
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.migrate;

import java.io.IOException;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class TestMigrateStoreFileTracker {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestMigrateStoreFileTracker.class);
  private final static String[] tables = new String[] { "t1", "t2", "t3", "t4", "t5", "t6" };
  private final static String famStr = "f1";
  private final static byte[] fam = Bytes.toBytes(famStr);

  private HBaseTestingUtility HTU;
  private Configuration conf;
  private HTableDescriptor tableDescriptor;

  @Before
  public void setUp() throws Exception {
    conf = HBaseConfiguration.create();
    // Speed up the launch of RollingUpgradeChore
    conf.setInt(RollingUpgradeChore.ROLLING_UPGRADE_CHORE_PERIOD_SECONDS_KEY, 1);
    conf.setLong(RollingUpgradeChore.ROLLING_UPGRADE_CHORE_DELAY_SECONDS_KEY, 1);
    HTU = new HBaseTestingUtility(conf);
    HTU.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    HTU.shutdownMiniCluster();
  }

  @Test
  public void testMigrateStoreFileTracker() throws IOException, InterruptedException {
    // Create tables to test
    for (int i = 0; i < tables.length; i++) {
      tableDescriptor = HTU.createTableDescriptor(tables[i]);
      tableDescriptor.addFamily(new HColumnDescriptor(fam));
      HTU.createTable(tableDescriptor, null);
    }
    TableDescriptors tableDescriptors = HTU.getMiniHBaseCluster().getMaster().getTableDescriptors();
    for (int i = 0; i < tables.length; i++) {
      TableDescriptor tdAfterCreated = tableDescriptors.get(TableName.valueOf(tables[i]));
      // Make sure that TRACKER_IMPL was set by default after the tables were created.
      Assert.assertNotNull(tdAfterCreated.getValue(StoreFileTrackerFactory.TRACKER_IMPL));
      // Remove the StoreFileTracker impl from the table descriptor
      TableDescriptor tdRemovedSFT = TableDescriptorBuilder.newBuilder(tdAfterCreated)
        .removeValue(StoreFileTrackerFactory.TRACKER_IMPL).build();
      tableDescriptors.update(tdRemovedSFT);
    }
    HTU.getMiniHBaseCluster().stopMaster(0).join();
    HTU.getMiniHBaseCluster().startMaster();
    HTU.getMiniHBaseCluster().waitForActiveAndReadyMaster(30000);
    // Wait until all tables have been migrated
    TableDescriptors tds = HTU.getMiniHBaseCluster().getMaster().getTableDescriptors();
    HTU.waitFor(30000, () -> {
      try {
        for (int i = 0; i < tables.length; i++) {
          TableDescriptor td = tds.get(TableName.valueOf(tables[i]));
          if (StringUtils.isEmpty(td.getValue(StoreFileTrackerFactory.TRACKER_IMPL))) {
            return false;
          }
        }
        return true;
      } catch (IOException e) {
        return false;
      }
    });
  }
}