HBASE-11333 Remove deprecated class MetaMigrationConvertingToPB

stack 2014-08-07 13:08:17 -07:00
parent b2cc061460
commit 8372d9694d
4 changed files with 0 additions and 639 deletions


@@ -1,176 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;
/**
* A tool to migrate the data stored in the hbase:meta table to protobuf serialization.
* Supports migrating from 0.92.x and 0.94.x to 0.96.x for the catalog table.
* @deprecated will be removed for the major release after 0.96.
*/
@Deprecated
public class MetaMigrationConvertingToPB {
private static final Log LOG = LogFactory.getLog(MetaMigrationConvertingToPB.class);
private static class ConvertToPBMetaVisitor implements Visitor {
private final MasterServices services;
private long numMigratedRows;
public ConvertToPBMetaVisitor(MasterServices services) {
this.services = services;
numMigratedRows = 0;
}
@Override
public boolean visit(Result r) throws IOException {
if (r == null || r.isEmpty()) return true;
// Check info:regioninfo, info:splitA, and info:splitB. Make sure all
// have migrated HRegionInfos.
byte [] hriBytes = getBytes(r, HConstants.REGIONINFO_QUALIFIER);
// Presumes that an edit updating all three cells either succeeds or
// doesn't -- that we don't have case of info:regioninfo migrated but not
// info:splitA.
if (isMigrated(hriBytes)) return true;
// OK. Need to migrate this row in meta.
// This will 'migrate' the HRI from 0.92.x and 0.94.x to 0.96+ by reading the
// writable serialization
HRegionInfo hri = parseFrom(hriBytes);
// Now make a put to write back to meta.
Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
// Now migrate info:splitA and info:splitB if they are not null
migrateSplitIfNecessary(r, p, HConstants.SPLITA_QUALIFIER);
migrateSplitIfNecessary(r, p, HConstants.SPLITB_QUALIFIER);
MetaTableAccessor.putToMetaTable(this.services.getShortCircuitConnection(), p);
if (LOG.isDebugEnabled()) {
LOG.debug("Migrated " + Bytes.toString(p.getRow()));
}
numMigratedRows++;
return true;
}
}
static void migrateSplitIfNecessary(final Result r, final Put p, final byte [] which)
throws IOException {
byte [] hriSplitBytes = getBytes(r, which);
if (!isMigrated(hriSplitBytes)) {
// This will 'migrate' the HRI from 0.92.x and 0.94.x to 0.96+ by reading the
// writable serialization
HRegionInfo hri = parseFrom(hriSplitBytes);
p.addImmutable(HConstants.CATALOG_FAMILY, which, hri.toByteArray());
}
}
static HRegionInfo parseFrom(byte[] hriBytes) throws IOException {
try {
return HRegionInfo.parseFrom(hriBytes);
} catch (DeserializationException ex) {
throw new IOException(ex);
}
}
/**
* @param r Result to dig in.
* @param qualifier Qualifier to look at in the passed <code>r</code>.
* @return Bytes for an HRegionInfo or null if no bytes or empty bytes found.
*/
static byte [] getBytes(final Result r, final byte [] qualifier) {
byte [] hriBytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier);
if (hriBytes == null || hriBytes.length <= 0) return null;
return hriBytes;
}
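// Note: a missing or empty value is treated as already migrated; otherwise the
// value counts as migrated only if it carries the protobuf magic prefix.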
static boolean isMigrated(final byte [] hriBytes) {
if (hriBytes == null || hriBytes.length <= 0) return true;
return ProtobufUtil.isPBMagicPrefix(hriBytes);
}
/**
* Converting writable serialization to PB, if it is needed.
* @param services MasterServices to get a handle on master
* @return num migrated rows
* @throws IOException or RuntimeException if something goes wrong
*/
public static long updateMetaIfNecessary(final MasterServices services)
throws IOException {
if (isMetaTableUpdated(services.getShortCircuitConnection())) {
LOG.info("META already up-to date with PB serialization");
return 0;
}
LOG.info("META has Writable serializations, migrating hbase:meta to PB serialization");
try {
long rows = updateMeta(services);
LOG.info("META updated with PB serialization. Total rows updated: " + rows);
return rows;
} catch (IOException e) {
LOG.warn("Update of hbase:meta with PB serialization failed. Master startup aborted.");
throw e;
}
}
/**
* Update hbase:meta rows, converting writable serialization to PB
* @return num migrated rows
*/
static long updateMeta(final MasterServices masterServices) throws IOException {
LOG.info("Starting update of META");
ConvertToPBMetaVisitor v = new ConvertToPBMetaVisitor(masterServices);
MetaTableAccessor.fullScan(masterServices.getShortCircuitConnection(), v);
LOG.info("Finished update of META. Total rows updated:" + v.numMigratedRows);
return v.numMigratedRows;
}
/**
* @param hConnection connection to be used
* @return True if the meta table has been migrated.
* @throws IOException
*/
static boolean isMetaTableUpdated(final HConnection hConnection) throws IOException {
List<Result> results = MetaTableAccessor.fullScanOfMeta(hConnection);
if (results == null || results.isEmpty()) {
LOG.info("hbase:meta doesn't have any entries to update.");
return true;
}
for (Result r : results) {
byte[] value = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
if (!isMigrated(value)) {
return false;
}
}
return true;
}
}
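A note on the isMigrated() check above: it delegates to ProtobufUtil.isPBMagicPrefix, which tests for the magic prefix HBase prepends to protobuf-serialized values. Below is a minimal self-contained sketch of that check, assuming the four-byte "PBUF" prefix; the class and method names are illustrative, not HBase API.

class PBMagicSketch {
  // The magic bytes HBase writes in front of protobuf-serialized cell values.
  private static final byte[] PB_MAGIC = {'P', 'B', 'U', 'F'};

  // True if bytes begin with the protobuf magic prefix, i.e. the value has
  // already been migrated away from Writable serialization.
  static boolean hasPBMagicPrefix(byte[] bytes) {
    if (bytes == null || bytes.length < PB_MAGIC.length) return false;
    for (int i = 0; i < PB_MAGIC.length; i++) {
      if (bytes[i] != PB_MAGIC[i]) return false;
    }
    return true;
  }

  public static void main(String[] args) {
    // A Writable-serialized HRegionInfo does not start with "PBUF"...
    System.out.println(hasPBMagicPrefix(new byte[] {0x00, 0x01}));             // false
    // ...while a PB-serialized value does.
    System.out.println(hasPBMagicPrefix(new byte[] {'P', 'B', 'U', 'F', 10})); // true
  }
}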


@@ -64,7 +64,6 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.MetaMigrationConvertingToPB;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.MetaScanner;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
@@ -572,11 +571,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
this.serverManager.processDeadServer(tmpServer, true);
}
// Update meta with new PB serialization if required, i.e. migrate all HRI in meta to PB
// serialization. This must happen before we assign all user regions or else the assignment
// will fail.
MetaMigrationConvertingToPB.updateMetaIfNecessary(this);
// Fix up assignment manager status
status.setStatus("Starting assignment manager");
this.assignmentManager.joinCluster();


@@ -1,25 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
TestMetaMigrationConvertToPB uses the file TestMetaMigrationConvertToPB.tgz for testing
the upgrade to 0.96 from 0.92/0.94 cluster data. The files are untarred to the local
filesystem and copied over to a minidfscluster. However, since the directory
name hbase:meta causes problems on Windows, it has been renamed to -META- inside
the .tgz file. After untarring and copying the contents to minidfs,
TestMetaMigrationConvertToPB.setUpBeforeClass() renames the directory back to hbase:meta.
See https://issues.apache.org/jira/browse/HBASE-6821.
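For reference, a minimal sketch of that rename step using Hadoop's FsShell, mirroring what the test's setUpBeforeClass() does. The root dir path is hypothetical; note the test actually renames -META- to the pre-namespace name .META. and relies on a later NamespaceUpgrade pass to arrive at the hbase:meta layout.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;

public class RenameMetaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical root dir; the test derives it from HBaseTestingUtility.
    Path rootDir = new Path("/user/test/hbase");
    FsShell shell = new FsShell(conf);
    // Rename the Windows-safe "-META-" directory back to its real name.
    int errcode = shell.run(new String[] {"-mv",
        new Path(rootDir, "-META-").toString(),
        new Path(rootDir, ".META.").toString()});
    if (errcode != 0) {
      throw new java.io.IOException("Failed rename; errcode=" + errcode);
    }
  }
}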


@@ -1,432 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import junit.framework.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.migration.NamespaceUpgrade;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Test migration that changes HRI serialization into PB. Tests by bringing up a cluster from
* actual 0.92 cluster data, as well as by manually downgrading and then upgrading the hbase:meta info.
* @deprecated Remove after 0.96
*/
@Category(MediumTests.class)
@Deprecated
public class TestMetaMigrationConvertingToPB {
static final Log LOG = LogFactory.getLog(TestMetaMigrationConvertingToPB.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final static String TESTTABLE = "TestTable";
private final static int ROW_COUNT = 100;
private final static int REGION_COUNT = 9; //initial number of regions of the TestTable
private static final int META_VERSION_092 = 0;
/*
* This test uses a tgz file named "TestMetaMigrationConvertToPB.tgz" under
* hbase-server/src/test/data which contains file data from a 0.92 cluster.
* The cluster has a table named "TestTable", which has 100 rows. 0.94 has the
* same hbase:meta structure, so the same data covers that version as well.
*
* hbase(main):001:0> create 'TestTable', 'f1'
* hbase(main):002:0> for i in 1..100
* hbase(main):003:1> put 'TestTable', "row#{i}", "f1:c1", i
* hbase(main):004:1> end
*
* There are 9 regions in the table
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// Start up our mini cluster on top of a 0.92 root.dir that has data from
// a 0.92 hbase run -- it has a table with 100 rows in it -- and see if
// we can migrate from 0.92
TEST_UTIL.startMiniZKCluster();
TEST_UTIL.startMiniDFSCluster(1);
Path testdir = TEST_UTIL.getDataTestDir("TestMetaMigrationConvertToPB");
// Untar our test dir.
File untar = untar(new File(testdir.toString()));
// Now copy the untarred data up into hdfs so when we start hbase, we'll run from it.
Configuration conf = TEST_UTIL.getConfiguration();
FsShell shell = new FsShell(conf);
FileSystem fs = FileSystem.get(conf);
// find where hbase will root itself, so we can copy filesystem there
Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
if (!fs.isDirectory(hbaseRootDir.getParent())) {
// mkdir at first
fs.mkdirs(hbaseRootDir.getParent());
}
doFsCommand(shell,
new String [] {"-put", untar.toURI().toString(), hbaseRootDir.toString()});
// Windows fix: the tgz file has the hbase:meta directory renamed to -META- since the
// original is an illegal name under Windows, so we rename it back.
// See src/test/data/TestMetaMigrationConvertingToPB.README and
// https://issues.apache.org/jira/browse/HBASE-6821
doFsCommand(shell, new String [] {"-mv", new Path(hbaseRootDir, "-META-").toString(),
new Path(hbaseRootDir, ".META.").toString()});
// See what's in the mini HDFS.
doFsCommand(shell, new String [] {"-lsr", "/"});
// Upgrade to namespaces as well
Configuration toolConf = TEST_UTIL.getConfiguration();
toolConf.set(HConstants.HBASE_DIR, TEST_UTIL.getDefaultRootDirPath().toString());
ToolRunner.run(toolConf, new NamespaceUpgrade(), new String[]{"--upgrade"});
TEST_UTIL.startMiniHBaseCluster(1, 1);
// Assert we are running against the copied-up filesystem. The copied-up
// rootdir should have had a table named 'TestTable' in it. Assert it is
// present.
HTable t = new HTable(TEST_UTIL.getConfiguration(), TESTTABLE);
ResultScanner scanner = t.getScanner(new Scan());
int count = 0;
while (scanner.next() != null) {
count++;
}
// Assert that we find all 100 rows that are in the data we loaded. If
// so then we must have migrated it from 0.92/0.94 to 0.96.
Assert.assertEquals(ROW_COUNT, count);
scanner.close();
t.close();
}
private static File untar(final File testdir) throws IOException {
// Find the src data under src/test/data
final String datafile = "TestMetaMigrationConvertToPB";
String srcTarFile =
System.getProperty("project.build.testSourceDirectory", "src/test") +
File.separator + "data" + File.separator + datafile + ".tgz";
File homedir = new File(testdir.toString());
File tgtUntarDir = new File(homedir, datafile);
if (tgtUntarDir.exists()) {
if (!FileUtil.fullyDelete(tgtUntarDir)) {
throw new IOException("Failed delete of " + tgtUntarDir.toString());
}
}
LOG.info("Untarring " + srcTarFile + " into " + homedir.toString());
FileUtil.unTar(new File(srcTarFile), homedir);
Assert.assertTrue(tgtUntarDir.exists());
return tgtUntarDir;
}
private static void doFsCommand(final FsShell shell, final String [] args)
throws Exception {
// Run the given FsShell command.
int errcode = shell.run(args);
if (errcode != 0) throw new IOException("Failed fs command; errcode=" + errcode);
}
/**
* @throws java.lang.Exception
*/
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Test
public void testMetaUpdatedFlagInROOT() throws Exception {
HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
boolean metaUpdated = MetaMigrationConvertingToPB.
isMetaTableUpdated(master.getShortCircuitConnection());
assertTrue(metaUpdated);
verifyMetaRowsAreUpdated(master.getShortCircuitConnection());
}
@Test
public void testMetaMigration() throws Exception {
LOG.info("Starting testMetaMigration");
final byte [] FAMILY = Bytes.toBytes("family");
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testMetaMigration"));
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
htd.addFamily(hcd);
Configuration conf = TEST_UTIL.getConfiguration();
byte[][] regionNames = new byte[][]{
HConstants.EMPTY_START_ROW,
Bytes.toBytes("region_a"),
Bytes.toBytes("region_b")};
createMultiRegionsWithWritableSerialization(conf,
htd.getTableName().getName(),
regionNames);
HConnection masterHConnection =
TEST_UTIL.getMiniHBaseCluster().getMaster().getShortCircuitConnection();
// Erase the current version of root meta for this test.
undoVersionInRoot();
MetaTableAccessor.fullScanMetaAndPrint(masterHConnection);
LOG.info("Meta Print completed.testMetaMigration");
long numMigratedRows = MetaMigrationConvertingToPB.updateMeta(
TEST_UTIL.getHBaseCluster().getMaster());
MetaTableAccessor.fullScanMetaAndPrint(masterHConnection);
// Should be three entries, one for each of the regions we just added.
assertEquals(regionNames.length, numMigratedRows);
// Assert that the flag in ROOT is updated to reflect the correct status
boolean metaUpdated = MetaMigrationConvertingToPB.isMetaTableUpdated(masterHConnection);
assertTrue(metaUpdated);
verifyMetaRowsAreUpdated(masterHConnection);
}
/**
* This test simulates a master crash/failure during the meta migration process
* and checks that the migration can be continued when a new master takes over.
* When a master dies during the meta migration, some rows of the hbase:meta
* catalog family will have been updated to PB serialization while others still
* carry the legacy writable serialization. When the backup master, or a fresh
* restart of the master, re-attempts the migration, it encounters a mix of
* already-updated rows and legacy rows. This test simulates that scenario and
* validates that the migration process safely skips the updated rows and
* migrates any pending rows at startup.
* @throws Exception
*/
@Test
public void testMasterCrashDuringMetaMigration() throws Exception {
final byte[] FAMILY = Bytes.toBytes("family");
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf
("testMasterCrashDuringMetaMigration"));
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
htd.addFamily(hcd);
Configuration conf = TEST_UTIL.getConfiguration();
// Create 10 New regions.
createMultiRegionsWithPBSerialization(conf, htd.getTableName().getName(), 10);
// Create 10 Legacy regions.
createMultiRegionsWithWritableSerialization(conf,
htd.getTableName().getName(), 10);
HConnection masterHConnection =
TEST_UTIL.getMiniHBaseCluster().getMaster().getShortCircuitConnection();
// Erase the current version of root meta for this test.
undoVersionInRoot();
MetaTableAccessor.fullScanMetaAndPrint(masterHConnection);
LOG.info("Meta Print completed.testUpdatesOnMetaWithLegacyHRI");
long numMigratedRows =
MetaMigrationConvertingToPB.updateMetaIfNecessary(
TEST_UTIL.getHBaseCluster().getMaster());
assertEquals(10, numMigratedRows);
// Assert that the flag in ROOT is updated to reflect the correct status
boolean metaUpdated = MetaMigrationConvertingToPB.isMetaTableUpdated(masterHConnection);
assertTrue(metaUpdated);
verifyMetaRowsAreUpdated(masterHConnection);
LOG.info("END testMasterCrashDuringMetaMigration");
}
/**
* Verify that every hbase:meta row is updated
*/
void verifyMetaRowsAreUpdated(HConnection hConnection)
throws IOException {
List<Result> results = MetaTableAccessor.fullScan(hConnection);
assertTrue(results.size() >= REGION_COUNT);
for (Result result : results) {
byte[] hriBytes = result.getValue(HConstants.CATALOG_FAMILY,
HConstants.REGIONINFO_QUALIFIER);
assertTrue(hriBytes != null && hriBytes.length > 0);
assertTrue(MetaMigrationConvertingToPB.isMigrated(hriBytes));
byte[] splitA = result.getValue(HConstants.CATALOG_FAMILY,
HConstants.SPLITA_QUALIFIER);
if (splitA != null && splitA.length > 0) {
assertTrue(MetaMigrationConvertingToPB.isMigrated(splitA));
}
byte[] splitB = result.getValue(HConstants.CATALOG_FAMILY,
HConstants.SPLITB_QUALIFIER);
if (splitB != null && splitB.length > 0) {
assertTrue(MetaMigrationConvertingToPB.isMigrated(splitB));
}
}
}
/** Changes the version of hbase:meta to 0 to simulate 0.92 and 0.94 clusters. */
private void undoVersionInRoot() throws IOException {
Put p = new Put(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
p.add(HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER,
Bytes.toBytes(META_VERSION_092));
// TODO wire this MetaEditor.putToRootTable(ct, p);
LOG.info("Downgraded -ROOT- meta version=" + META_VERSION_092);
}
/**
* Inserts multiple regions into hbase:meta using Writable serialization instead of PB
*/
public int createMultiRegionsWithWritableSerialization(final Configuration c,
final byte[] tableName, int numRegions) throws IOException {
if (numRegions < 3) throw new IOException("Must create at least 3 regions");
byte [] startKey = Bytes.toBytes("aaaaa");
byte [] endKey = Bytes.toBytes("zzzzz");
byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
byte [][] regionStartKeys = new byte[splitKeys.length+1][];
for (int i=0;i<splitKeys.length;i++) {
regionStartKeys[i+1] = splitKeys[i];
}
regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
return createMultiRegionsWithWritableSerialization(c, tableName, regionStartKeys);
}
public int createMultiRegionsWithWritableSerialization(final Configuration c,
final byte[] tableName, byte [][] startKeys)
throws IOException {
return createMultiRegionsWithWritableSerialization(c,
TableName.valueOf(tableName), startKeys);
}
/**
* Inserts multiple regions into hbase:meta using Writable serialization instead of PB
*/
public int createMultiRegionsWithWritableSerialization(final Configuration c,
final TableName tableName, byte [][] startKeys)
throws IOException {
Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
HTable meta = new HTable(c, TableName.META_TABLE_NAME);
List<HRegionInfo> newRegions
= new ArrayList<HRegionInfo>(startKeys.length);
int count = 0;
for (int i = 0; i < startKeys.length; i++) {
int j = (i + 1) % startKeys.length;
HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
Put put = new Put(hri.getRegionName());
put.setDurability(Durability.SKIP_WAL);
put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
getBytes(hri)); //this is the old Writable serialization
// also add the region as its own daughters
put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER,
getBytes(hri)); //this is the old Writable serialization
put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
getBytes(hri)); //this is the old Writable serialization
meta.put(put);
LOG.info("createMultiRegionsWithWritableSerialization: PUT inserted " + hri.toString());
newRegions.add(hri);
count++;
}
meta.close();
return count;
}
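/** Writable-serializes an HRegionInfo the pre-0.96 way; used only to create legacy test data. */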
@Deprecated
private byte[] getBytes(HRegionInfo hri) throws IOException {
DataOutputBuffer out = new DataOutputBuffer();
try {
hri.write(out);
return out.getData();
} finally {
if (out != null) {
out.close();
}
}
}
/**
* Inserts multiple regions into hbase:meta using PB serialization
*/
int createMultiRegionsWithPBSerialization(final Configuration c,
final byte[] tableName, int numRegions)
throws IOException {
if (numRegions < 3) throw new IOException("Must create at least 3 regions");
byte [] startKey = Bytes.toBytes("aaaaa");
byte [] endKey = Bytes.toBytes("zzzzz");
byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
byte [][] regionStartKeys = new byte[splitKeys.length+1][];
for (int i=0;i<splitKeys.length;i++) {
regionStartKeys[i+1] = splitKeys[i];
}
regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
return createMultiRegionsWithPBSerialization(c, tableName, regionStartKeys);
}
/**
* Inserts multiple regions into hbase:meta using PB serialization
*/
int createMultiRegionsWithPBSerialization(final Configuration c, final byte[] tableName,
byte [][] startKeys) throws IOException {
return createMultiRegionsWithPBSerialization(c,
TableName.valueOf(tableName), startKeys);
}
int createMultiRegionsWithPBSerialization(final Configuration c,
final TableName tableName,
byte [][] startKeys) throws IOException {
Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
HTable meta = new HTable(c, TableName.META_TABLE_NAME);
List<HRegionInfo> newRegions
= new ArrayList<HRegionInfo>(startKeys.length);
int count = 0;
for (int i = 0; i < startKeys.length; i++) {
int j = (i + 1) % startKeys.length;
HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
put.setDurability(Durability.SKIP_WAL);
meta.put(put);
LOG.info("createMultiRegionsWithPBSerialization: PUT inserted " + hri.toString());
newRegions.add(hri);
count++;
}
meta.close();
return count;
}
}
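As a closing illustration of the two formats this migration converts between, here is a minimal round-trip sketch using only APIs already exercised above (HRegionInfo.write for the legacy Writable bytes, toByteArray and parseFrom for PB); the class name is hypothetical.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.DataOutputBuffer;

public class SerializationContrastSketch {
  public static void main(String[] args) throws Exception {
    HRegionInfo hri = new HRegionInfo(TableName.valueOf("TestTable"),
        Bytes.toBytes("region_a"), Bytes.toBytes("region_b"));
    // Legacy Writable serialization, as written by 0.92/0.94: no magic prefix.
    DataOutputBuffer out = new DataOutputBuffer();
    hri.write(out);
    byte[] writableBytes = out.getData();
    // PB serialization, as written by 0.96: prefixed with the "PBUF" magic.
    byte[] pbBytes = hri.toByteArray();
    // PB bytes round-trip through parseFrom; the Writable bytes would not.
    HRegionInfo roundTripped = HRegionInfo.parseFrom(pbBytes);
    System.out.println(roundTripped.equals(hri)); // expected: true
    System.out.println(writableBytes.length + " vs " + pbBytes.length);
  }
}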