HBASE-19433 ChangeSplitPolicyAction modifies an immutable HTableDescriptor

tedyu 2017-12-07 19:44:14 -08:00
parent 033e64a8b1
commit 5034411438
4 changed files with 86 additions and 12 deletions

hbase-it/pom.xml

@@ -294,6 +294,11 @@
       <artifactId>junit</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-core</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>com.fasterxml.jackson.core</groupId>
       <artifactId>jackson-databind</artifactId>
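(Note: the new mockito-core dependency carries no <version> element; presumably the version is pinned in the parent pom's dependencyManagement section, per the usual HBase build convention.)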

hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeSplitPolicyAction.java

@@ -18,9 +18,10 @@
 package org.apache.hadoop.hbase.chaos.actions;

 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy;
@@ -49,10 +50,11 @@ public class ChangeSplitPolicyAction extends Action {
     Admin admin = util.getAdmin();
     LOG.info("Performing action: Change split policy of table " + tableName);
-    HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
+    TableDescriptor tableDescriptor = admin.getDescriptor(tableName);
+    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor);
     String chosenPolicy = possiblePolicies[random.nextInt(possiblePolicies.length)];
-    tableDescriptor.setRegionSplitPolicyClassName(chosenPolicy);
+    builder.setRegionSplitPolicyClassName(chosenPolicy);
     LOG.info("Changing " + tableName + " split policy to " + chosenPolicy);
-    admin.modifyTable(tableName, tableDescriptor);
+    admin.modifyTable(builder.build());
   }
 }
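Note on the API pattern: in the HBase 2.0 client, the descriptor returned by Admin.getDescriptor() is immutable, so an attribute change always goes copy -> modify -> build -> modifyTable. A minimal self-contained sketch of that idiom (the table name and helper class below are illustrative, not from the patch):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;

public class SplitPolicySketch {
  // Fetch the current (immutable) descriptor, copy it into a builder,
  // change one attribute, and submit the rebuilt descriptor to the master.
  static void disableSplits(Admin admin) throws IOException {
    TableName tableName = TableName.valueOf("someTable"); // illustrative name
    TableDescriptor current = admin.getDescriptor(tableName);
    TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
        .setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName())
        .build();
    admin.modifyTable(updated);
  }
}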

hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/TestChangeSplitPolicyAction.java

@@ -0,0 +1,64 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.chaos.actions;

import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;

@Category({MediumTests.class})
public class TestChangeSplitPolicyAction extends Action {
private final static IntegrationTestingUtility TEST_UTIL = new IntegrationTestingUtility();
private static ChangeSplitPolicyAction action;
private Admin admin;
private TableName tableName = TableName.valueOf("ChangeSplitPolicyAction");
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster(2);
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Before
public void setUp() throws Exception {
this.admin = TEST_UTIL.getAdmin();
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
admin.createTable(builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of("fam")).build());
}
@Test
public void testChangeSplitPolicyAction() throws Exception {
ActionContext ctx = Mockito.mock(ActionContext.class);
Mockito.when(ctx.getHBaseIntegrationTestingUtility()).thenReturn(TEST_UTIL);
Mockito.when(ctx.getHBaseCluster()).thenReturn(TEST_UTIL.getHBaseCluster());
action = new ChangeSplitPolicyAction(tableName);
action.init(ctx);
action.perform();
}
}
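For context on what the new test protects against: with the pre-patch code, Admin.getTableDescriptor() on a 2.0 cluster returns an immutable view of the descriptor, so the action's setRegionSplitPolicyClassName() call fails at runtime instead of changing the policy. A rough sketch of that failure mode (hedged: based on the HBase 2.0 client API, where the returned HTableDescriptor is an ImmutableHTableDescriptor; the wrapper class here is illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;

public class PreFixFailureSketch {
  // Pre-fix behavior, sketched: getTableDescriptor() is deprecated in 2.0
  // and hands back an immutable view of the table's descriptor.
  static void demonstrate(Admin admin) throws IOException {
    TableName tableName = TableName.valueOf("ChangeSplitPolicyAction"); // name from the test above
    HTableDescriptor desc = admin.getTableDescriptor(tableName);
    // Any mutator on the immutable view is rejected at runtime, so the old
    // chaos action threw here instead of rotating the split policy:
    desc.setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName());
    // -> throws UnsupportedOperationException on a 2.0 cluster
  }
}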

hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java

@@ -34,8 +34,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.NamespaceExistException;
@@ -50,12 +48,15 @@ import org.apache.hadoop.hbase.chaos.actions.RestartRsHoldingMetaAction;
 import org.apache.hadoop.hbase.chaos.actions.RestartRsHoldingTableAction;
 import org.apache.hadoop.hbase.chaos.factories.MonkeyConstants;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
 import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
 import org.apache.hadoop.hbase.ipc.FatalConnectionException;
@@ -232,15 +233,17 @@ public class IntegrationTestMTTR {
     }
     // Create the table. If this fails then fail everything.
-    HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
+    TableDescriptor tableDescriptor = util.getAdmin().getDescriptor(tableName);
+    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor);
     // Make the max file size huge so that splits don't happen during the test.
-    tableDescriptor.setMaxFileSize(Long.MAX_VALUE);
-    HColumnDescriptor descriptor = new HColumnDescriptor(FAMILY);
-    descriptor.setMaxVersions(1);
-    tableDescriptor.addFamily(descriptor);
-    util.getAdmin().createTable(tableDescriptor);
+    builder.setMaxFileSize(Long.MAX_VALUE);
+    ColumnFamilyDescriptorBuilder colDescriptorBldr =
+        ColumnFamilyDescriptorBuilder.newBuilder(FAMILY);
+    colDescriptorBldr.setMaxVersions(1);
+    builder.addColumnFamily(colDescriptorBldr.build());
+    util.getAdmin().createTable(builder.build());
     // Setup the table for LoadTestTool
     int ret = loadTool.run(new String[]{"-tn", loadTableName.getNameAsString(), "-init_only"});
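The same builder discipline applies to column families: ColumnFamilyDescriptor is immutable as well, so per-family settings (here, max versions) are staged on a ColumnFamilyDescriptorBuilder and attached to the table builder before createTable(). A condensed sketch of that builder idiom in isolation, with illustrative table and family names:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  // Build table and column-family descriptors fluently, then create the table.
  static void createMttrStyleTable(Admin admin) throws IOException {
    TableName tableName = TableName.valueOf("mttrTable"); // illustrative name
    admin.createTable(TableDescriptorBuilder.newBuilder(tableName)
        // A huge max file size keeps region splits from firing mid-test.
        .setMaxFileSize(Long.MAX_VALUE)
        .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam"))
            .setMaxVersions(1)
            .build())
        .build());
  }
}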