HBASE-4388 Second start after migration from 90 to trunk crashes

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1190027 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2011-10-27 21:05:55 +00:00
parent 3f2ddc2b17
commit 421ae8504c
14 changed files with 647 additions and 457 deletions

View File

@ -409,6 +409,7 @@ Release 0.92.0 - Unreleased
HBASE-4645 Edits Log recovery losing data across column families
HBASE-4634 "test.build.data" property overused leading to write data at the
wrong place (nkeywal)
HBASE-4388 Second start after migration from 90 to trunk crashes
TESTS
HBASE-4450 test for number of blocks read: to serve as baseline for expected

View File

@ -220,11 +220,6 @@ public final class HConstants {
// be the first to be reassigned if the server(s) they are being served by
// should go down.
//
// New stuff. Making a slow transition.
//
/** The root table's name.*/
public static final byte [] ROOT_TABLE_NAME = Bytes.toBytes("-ROOT-");
@ -255,6 +250,22 @@ public final class HConstants {
/** The upper-half split region column qualifier */
public static final byte [] SPLITB_QUALIFIER = Bytes.toBytes("splitB");
/**
* The meta table version column qualifier.
* We keep the current version of the meta table in this column of the
* <code>-ROOT-</code> table: i.e. in the 'info:v' column.
*/
public static final byte [] META_VERSION_QUALIFIER = Bytes.toBytes("v");
/**
* The current version of the meta table.
* Before this, meta had the HTableDescriptor serialized into each HRegionInfo;
* i.e. pre-HBase 0.92. There was no META_VERSION column in the root table
* in that case. The presence of a version and its value being zero indicates
* meta is up-to-date.
*/
public static final short META_VERSION = 0;
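A minimal sketch of the new cell's encoding, using only the constants above: the 'info:v' value is nothing more than a serialized short, so a round trip is two Bytes calls.

// Hedged sketch: encode/decode of the -ROOT- 'info:v' cell.
byte [] cell = Bytes.toBytes(HConstants.META_VERSION); // write side
short version = Bytes.toShort(cell);                   // read side
assert version == HConstants.META_VERSION;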
// Other constants
/**

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.hbase;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.IOException;
import java.util.Arrays;
@ -43,8 +44,11 @@ import org.apache.hadoop.io.WritableComparable;
* Contains HRegion id, start and end keys, a reference to this
* HRegion's table descriptor, etc.
*/
public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>{
private static final byte VERSION = 0;
public class HRegionInfo extends VersionedWritable
implements WritableComparable<HRegionInfo> {
// VERSION == 0 when HRegionInfo had an HTableDescriptor inside it.
public static final byte VERSION_PRE_092 = 0;
public static final byte VERSION = 1;
private static final Log LOG = LogFactory.getLog(HRegionInfo.class);
/**
@ -159,7 +163,6 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
// Current TableName
private byte[] tableName = null;
private String tableNameAsString = null;
private void setHashCode() {
int result = Arrays.hashCode(this.regionName);
@ -710,7 +713,29 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
// Read the single version byte ourselves. We don't ask the super class to
// do it because it freaks out if it's not the current class's version.
// This method can deserialize version 0 and version 1 of HRI.
byte version = in.readByte();
if (version == VERSION_PRE_092) {
// This is the old HRI that carried an HTD. Migrate it. The below
// was copied from the old 0.90 HRI readFields.
this.endKey = Bytes.readByteArray(in);
this.offLine = in.readBoolean();
this.regionId = in.readLong();
this.regionName = Bytes.readByteArray(in);
this.regionNameStr = Bytes.toStringBinary(this.regionName);
this.split = in.readBoolean();
this.startKey = Bytes.readByteArray(in);
try {
HTableDescriptor htd = new HTableDescriptor();
htd.readFields(in);
this.tableName = htd.getName();
} catch(EOFException eofe) {
throw new IOException("HTD not found in input buffer", eofe);
}
this.hashCode = in.readInt();
} else if (version == VERSION) {
this.endKey = Bytes.readByteArray(in);
this.offLine = in.readBoolean();
this.regionId = in.readLong();
@ -720,6 +745,9 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
this.startKey = Bytes.readByteArray(in);
this.tableName = Bytes.readByteArray(in);
this.hashCode = in.readInt();
} else {
throw new IOException("Non-migratable/unknown version=" + getVersion());
}
}
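The same branch-on-version-byte pattern, reduced to a self-contained sketch (a hypothetical record, not the real HRI wire format): a version-1 reader stays able to parse version-0 payloads and rejects anything newer.

import java.io.*;

// Hedged sketch of the dual-version readFields pattern above.
class VersionedRecord {
  static final byte VERSION_PRE = 0, VERSION = 1;
  String name;

  void write(DataOutput out) throws IOException {
    out.writeByte(VERSION);
    out.writeUTF(name);
  }

  void readFields(DataInput in) throws IOException {
    byte version = in.readByte();
    if (version == VERSION_PRE) {
      name = in.readUTF();
      in.readUTF(); // legacy trailing payload we discard, like the old HTD
    } else if (version == VERSION) {
      name = in.readUTF();
    } else {
      throw new IOException("Non-migratable/unknown version=" + version);
    }
  }
}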
//

View File

@ -37,6 +37,8 @@ import org.apache.hadoop.hbase.util.Writables;
/**
* Writes region and assignment information to <code>.META.</code>.
* TODO: Put MetaReader and MetaEditor together; doesn't make sense having
* them distinct.
*/
public class MetaEditor {
// TODO: Strip CatalogTracker from this class. Its all over and in the end
@ -77,14 +79,12 @@ public class MetaEditor {
/**
* Put the passed <code>p</code> to a catalog table.
* @param ct CatalogTracker on whose back we will ride the edit.
* @param regionName Name of the catalog table to put too.
* @param p Put to add
* @throws IOException
*/
static void putToCatalogTable(final CatalogTracker ct,
final byte [] regionName, final Put p)
static void putToCatalogTable(final CatalogTracker ct, final Put p)
throws IOException {
HTable t = MetaReader.getCatalogHTable(ct, regionName);
HTable t = MetaReader.getCatalogHTable(ct, p.getRow());
put(t, p);
}
@ -254,10 +254,9 @@ public class MetaEditor {
private static void updateLocation(final CatalogTracker catalogTracker,
HRegionInfo regionInfo, ServerName sn)
throws IOException {
final byte [] regionName = regionInfo.getRegionName();
Put put = new Put(regionInfo.getRegionName());
addLocation(put, sn);
putToCatalogTable(catalogTracker, regionName, put);
putToCatalogTable(catalogTracker, put);
LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
" with server=" + sn);
}
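With the regionName parameter gone, the Put's own row selects the catalog table. A hedged sketch of a package-internal caller (putToCatalogTable is package-private, not public API):

// Rows naming .META. regions route to -ROOT-; all others to .META.
Put p = new Put(regionInfo.getRegionName());
p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
  Writables.getBytes(regionInfo));
putToCatalogTable(catalogTracker, p); // destination chosen from p.getRow()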

View File

@ -18,8 +18,9 @@
package org.apache.hadoop.hbase.catalog;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -43,36 +44,19 @@ import org.apache.hadoop.hbase.util.Writables;
public class MetaMigrationRemovingHTD {
private static final Log LOG = LogFactory.getLog(MetaMigrationRemovingHTD.class);
/** The metaupdated column qualifier */
public static final byte [] META_MIGRATION_QUALIFIER =
Bytes.toBytes("metamigrated");
/**
* Update legacy META rows, removing HTD from HRI.
* @param masterServices
* @return Set of table descriptors.
* @throws IOException
*/
public static List<HTableDescriptor> updateMetaWithNewRegionInfo(
public static Set<HTableDescriptor> updateMetaWithNewRegionInfo(
final MasterServices masterServices)
throws IOException {
final List<HTableDescriptor> htds = new ArrayList<HTableDescriptor>();
Visitor v = new Visitor() {
@Override
public boolean visit(Result r) throws IOException {
if (r == null || r.isEmpty()) return true;
HRegionInfo090x hrfm = MetaMigrationRemovingHTD.getHRegionInfoForMigration(r);
if (hrfm == null) return true;
htds.add(hrfm.getTableDesc());
masterServices.getMasterFileSystem()
.createTableDescriptor(hrfm.getTableDesc());
updateHRI(masterServices.getCatalogTracker(), false, hrfm);
return true;
}
};
MigratingVisitor v = new MigratingVisitor(masterServices);
MetaReader.fullScan(masterServices.getCatalogTracker(), v);
MetaMigrationRemovingHTD.updateRootWithMetaMigrationStatus(masterServices.getCatalogTracker(), true);
return htds;
updateRootWithMetaMigrationStatus(masterServices.getCatalogTracker());
return v.htds;
}
/**
@ -81,25 +65,114 @@ public class MetaMigrationRemovingHTD {
* @return Set of table descriptors
* @throws IOException
*/
public static List<HTableDescriptor> updateRootWithNewRegionInfo(
static Set<HTableDescriptor> updateRootWithNewRegionInfo(
final MasterServices masterServices)
throws IOException {
final List<HTableDescriptor> htds = new ArrayList<HTableDescriptor>();
Visitor v = new Visitor() {
MigratingVisitor v = new MigratingVisitor(masterServices);
MetaReader.fullScan(masterServices.getCatalogTracker(), v, null, true);
return v.htds;
}
/**
* Meta visitor that migrates the info:regioninfo as it visits.
*/
static class MigratingVisitor implements Visitor {
private final MasterServices services;
final Set<HTableDescriptor> htds = new HashSet<HTableDescriptor>();
MigratingVisitor(final MasterServices services) {
this.services = services;
}
@Override
public boolean visit(Result r) throws IOException {
if (r == null || r.isEmpty()) return true;
HRegionInfo090x hrfm = MetaMigrationRemovingHTD.getHRegionInfoForMigration(r);
if (hrfm == null) return true;
htds.add(hrfm.getTableDesc());
masterServices.getMasterFileSystem().createTableDescriptor(
hrfm.getTableDesc());
updateHRI(masterServices.getCatalogTracker(), true, hrfm);
// Check info:regioninfo, info:splitA, and info:splitB. Make sure all
// have migrated HRegionInfos... that there are no leftover 090 version
// HRegionInfos.
byte [] hriBytes = getBytes(r, HConstants.REGIONINFO_QUALIFIER);
// Presumes that an edit updating all three cells either succeeds in full or
// not at all -- that we don't have the case of info:regioninfo migrated but
// not info:splitA.
if (isMigrated(hriBytes)) return true;
// OK. Need to migrate this row in meta.
HRegionInfo090x hri090 = getHRegionInfo090x(hriBytes);
HTableDescriptor htd = hri090.getTableDesc();
if (htd == null) {
LOG.warn("A 090 HRI has null HTD? Continuing; " + hri090.toString());
return true;
}
};
MetaReader.fullScan(masterServices.getCatalogTracker(), v, null, true);
return htds;
if (!this.htds.contains(htd)) {
// If first time we are adding a table, then write it out to fs.
// Presumes that first region in table has THE table's schema which
// might not be too bad of a presumption since it'll be first region
// 'altered'
this.services.getMasterFileSystem().createTableDescriptor(htd);
this.htds.add(htd);
}
// This will 'migrate' the hregioninfo from 090 version to 092.
HRegionInfo hri = new HRegionInfo(hri090);
// Now make a put to write back to meta.
Put p = new Put(hri.getRegionName());
p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
Writables.getBytes(hri));
// Now check info:splitA and info:splitB if present. Migrate these too.
checkSplit(r, p, HConstants.SPLITA_QUALIFIER);
checkSplit(r, p, HConstants.SPLITB_QUALIFIER);
// Below we fake out putToCatalogTable
MetaEditor.putToCatalogTable(this.services.getCatalogTracker(), p);
LOG.info("Migrated " + Bytes.toString(p.getRow()));
return true;
}
}
static void checkSplit(final Result r, final Put p, final byte [] which)
throws IOException {
byte [] hriSplitBytes = getBytes(r, which);
if (!isMigrated(hriSplitBytes)) {
// This will convert the HRI from 090 to 092 HRI.
HRegionInfo hri = Writables.getHRegionInfo(hriSplitBytes);
p.add(HConstants.CATALOG_FAMILY, which, Writables.getBytes(hri));
}
}
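The conversion itself is implicit: Writables.getHRegionInfo hands the bytes to the new readFields, which accepts the version-0 layout and drops the embedded HTD; re-serializing yields a version-1 cell. A sketch, mirroring TestMigrationFrom090To092 further below:

// 090 bytes in, 092 bytes out; the leading version byte flips from 0 to 1.
HRegionInfo090x ninety =
  new HRegionInfo090x(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
byte [] oldBytes = Writables.getBytes(ninety);   // leading byte == 0
HRegionInfo hri = Writables.getHRegionInfo(oldBytes);
byte [] newBytes = Writables.getBytes(hri);      // leading byte == 1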
/**
* @param r Result to dig in.
* @param qualifier Qualifier to look at in the passed <code>r</code>.
* @return Bytes for an HRegionInfo or null if no bytes or empty bytes found.
*/
static byte [] getBytes(final Result r, final byte [] qualifier) {
byte [] hriBytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier);
if (hriBytes == null || hriBytes.length <= 0) return null;
return hriBytes;
}
/**
* @param r Result to look in.
* @param qualifier What to look at in the passed result.
* @return Either a 090 vintage HRegionInfo OR null if no HRegionInfo or
* the HRegionInfo is up to date and not in need of migration.
* @throws IOException
*/
static HRegionInfo090x get090HRI(final Result r, final byte [] qualifier)
throws IOException {
byte [] hriBytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier);
if (hriBytes == null || hriBytes.length <= 0) return null;
if (isMigrated(hriBytes)) return null;
return getHRegionInfo090x(hriBytes);
}
static boolean isMigrated(final byte [] hriBytes) {
if (hriBytes == null || hriBytes.length <= 0) return true;
// Else, check what version this HRegionInfo instance is at. The first byte
// is the version byte in a serialized HRegionInfo. If it's the same as our
// current HRI, then there is nothing to do.
if (hriBytes[0] == HRegionInfo.VERSION) return true;
if (hriBytes[0] == HRegionInfo.VERSION_PRE_092) return false;
// Unknown version. Treat it as 'migrated' but assert, since this should
// 'never' happen.
assert false: "Unexpected version; bytes=" + Bytes.toStringBinary(hriBytes);
return true;
}
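This works because the first byte of any serialized VersionedWritable is its version, so a cell can be classified without deserializing the whole Writable; a minimal sketch under that assumption:

// Hedged sketch: peek at the leading version byte of a catalog cell.
byte [] b = r.getValue(HConstants.CATALOG_FAMILY,
  HConstants.REGIONINFO_QUALIFIER);
boolean needsMigration =
  b != null && b.length > 0 && b[0] == HRegionInfo.VERSION_PRE_092;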
/**
@ -115,82 +188,20 @@ public class MetaMigrationRemovingHTD {
}
/**
* Update the metamigrated flag in -ROOT-.
* Update the version flag in -ROOT-.
* @param catalogTracker
* @param metaUpdated
* @throws IOException
*/
public static void updateRootWithMetaMigrationStatus(
CatalogTracker catalogTracker, boolean metaUpdated)
public static void updateRootWithMetaMigrationStatus(final CatalogTracker catalogTracker)
throws IOException {
Put p = new Put(HRegionInfo.ROOT_REGIONINFO.getRegionName());
MetaMigrationRemovingHTD.addMetaUpdateStatus(p, metaUpdated);
MetaEditor.putToRootTable(catalogTracker, p);
LOG.info("Updated -ROOT- row with metaMigrated status = " + metaUpdated);
Put p = new Put(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
MetaEditor.putToRootTable(catalogTracker, setMetaVersion(p));
LOG.info("Updated -ROOT- meta version=" + HConstants.META_VERSION);
}
static void updateHRI(final CatalogTracker ct, final boolean rootTable,
final HRegionInfo090x hRegionInfo090x)
throws IOException {
HRegionInfo regionInfo = new HRegionInfo(hRegionInfo090x);
Put p = new Put(regionInfo.getRegionName());
p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
Writables.getBytes(regionInfo));
if (rootTable) {
MetaEditor.putToRootTable(ct, p);
} else {
MetaEditor.putToMetaTable(ct, p);
}
LOG.info("Updated region " + regionInfo + " to " +
(rootTable? "-ROOT-": ".META."));
}
/**
* @deprecated Going away in 0.94; used for migrating to 0.92 only.
*/
public static HRegionInfo090x getHRegionInfoForMigration(
Result data) throws IOException {
HRegionInfo090x info = null;
byte [] bytes =
data.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
if (bytes == null) return null;
try {
info = Writables.getHRegionInfoForMigration(bytes);
} catch(IOException ioe) {
if (ioe.getMessage().equalsIgnoreCase("HTD not found in input buffer")) {
return null;
} else {
throw ioe;
}
}
LOG.info("Current INFO from scan results = " + info);
return info;
}
public static List<HRegionInfo090x> fullScanMetaAndPrintHRIM(
CatalogTracker catalogTracker)
throws IOException {
final List<HRegionInfo090x> regions =
new ArrayList<HRegionInfo090x>();
Visitor v = new Visitor() {
@Override
public boolean visit(Result r) throws IOException {
if (r == null || r.isEmpty()) return true;
LOG.info("fullScanMetaAndPrint1.Current Meta Result: " + r);
HRegionInfo090x hrim = getHRegionInfoForMigration(r);
LOG.info("fullScanMetaAndPrint.HRIM Print= " + hrim);
regions.add(hrim);
return true;
}
};
MetaReader.fullScan(catalogTracker, v);
return regions;
}
static Put addMetaUpdateStatus(final Put p, final boolean metaUpdated) {
p.add(HConstants.CATALOG_FAMILY,
MetaMigrationRemovingHTD.META_MIGRATION_QUALIFIER,
Bytes.toBytes(metaUpdated));
static Put setMetaVersion(final Put p) {
p.add(HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER,
Bytes.toBytes(HConstants.META_VERSION));
return p;
}
@ -201,22 +212,27 @@ public class MetaMigrationRemovingHTD {
// Public because used in tests
public static boolean isMetaHRIUpdated(final MasterServices services)
throws IOException {
boolean metaUpdated = false;
List<Result> results =
MetaReader.fullScanOfRoot(services.getCatalogTracker());
List<Result> results = MetaReader.fullScanOfRoot(services.getCatalogTracker());
if (results == null || results.isEmpty()) {
LOG.info("metaUpdated = NULL.");
return metaUpdated;
LOG.info("Not migrated");
return false;
}
// Presume only the one result.
// Presume only the one result because we only support one meta region.
Result r = results.get(0);
byte [] metaMigrated = r.getValue(HConstants.CATALOG_FAMILY,
MetaMigrationRemovingHTD.META_MIGRATION_QUALIFIER);
if (metaMigrated != null && metaMigrated.length > 0) {
metaUpdated = Bytes.toBoolean(metaMigrated);
short version = getMetaVersion(r);
boolean migrated = version >= HConstants.META_VERSION;
LOG.info("Meta version=" + version + "; migrated=" + migrated);
return migrated;
}
LOG.info("Meta updated status = " + metaUpdated);
return metaUpdated;
/**
* @param r Result to look at
* @return Current meta table version or -1 if no version found.
*/
static short getMetaVersion(final Result r) {
byte [] value = r.getValue(HConstants.CATALOG_FAMILY,
HConstants.META_VERSION_QUALIFIER);
return value == null || value.length <= 0? -1: Bytes.toShort(value);
}
/**
@ -239,4 +255,21 @@ public class MetaMigrationRemovingHTD {
"Master startup aborted.");
}
}
/**
* Get a 090 HRegionInfo deserialized from bytes.
* @param bytes serialized bytes
* @return An instance of a 090 HRI or null if we failed to deserialize
*/
public static HRegionInfo090x getHRegionInfo090x(final byte [] bytes) {
if (bytes == null || bytes.length == 0) return null;
HRegionInfo090x hri = null;
try {
hri = (HRegionInfo090x)Writables.getWritable(bytes, new HRegionInfo090x());
} catch (IOException ioe) {
LOG.warn("Failed deserialize as a 090 HRegionInfo); bytes=" +
Bytes.toStringBinary(bytes), ioe);
}
return hri;
}
}

View File

@ -63,17 +63,17 @@ public class MetaReader {
}
/**
* @param regionName
* @return True if <code>regionName</code> is from <code>.META.</code> table.
* @param row
* @return True if <code>row</code> is row of <code>-ROOT-</code> table.
*/
private static boolean isMetaRegion(final byte [] regionName) {
if (regionName.length < META_REGION_PREFIX.length + 2 /* ',', + '1' */) {
private static boolean isRootTableRow(final byte [] row) {
if (row.length < META_REGION_PREFIX.length + 2 /* ',', + '1' */) {
// Too short to be a .META. region name, so not a -ROOT- row.
return false;
}
// Compare the prefix of regionName. If it matches META_REGION_PREFIX prefix,
// then this is region from .META. table.
return Bytes.equals(regionName, 0, META_REGION_PREFIX.length,
// Compare the prefix of row. If it matches the META_REGION_PREFIX prefix,
// then this is a row from the -ROOT- table.
return Bytes.equals(row, 0, META_REGION_PREFIX.length,
META_REGION_PREFIX, 0, META_REGION_PREFIX.length);
}
@ -199,14 +199,14 @@ public class MetaReader {
/**
* Callers should call close on the returned {@link HTable} instance.
* @param catalogTracker
* @param regionName
* @param row Row we are putting
* @return The catalog HTable hosting the passed <code>row</code>
* @throws IOException
*/
static HTable getCatalogHTable(final CatalogTracker catalogTracker,
final byte [] regionName)
final byte [] row)
throws IOException {
return isMetaRegion(regionName)?
return isRootTableRow(row)?
getRootHTable(catalogTracker):
getMetaHTable(catalogTracker);
}
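isRootTableRow above reduces to a prefix compare; assuming META_REGION_PREFIX holds the serialized bytes of ".META.,", a row such as ".META.,,1" tests true and is routed to -ROOT-:

// Hedged sketch of the routing test, using hypothetical local names.
byte [] row = Bytes.toBytes(".META.,,1"); // a .META. region name
boolean inRoot = row.length >= META_REGION_PREFIX.length + 2
  && Bytes.equals(row, 0, META_REGION_PREFIX.length,
       META_REGION_PREFIX, 0, META_REGION_PREFIX.length);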

View File

@ -409,17 +409,6 @@ public class MasterFileSystem {
}
}
/**
* Get table info path for a table.
* @param tableName
* @return Table info path
*/
private Path getTableInfoPath(byte[] tableName) {
Path tablePath = new Path(this.rootdir, Bytes.toString(tableName));
Path tableInfoPath = new Path(tablePath, HConstants.TABLEINFO_NAME);
return tableInfoPath;
}
/**
* Create new HTableDescriptor in HDFS.
*

View File

@ -3125,16 +3125,15 @@ public class HRegion implements HeapSize { // , Writable{
byte[] row = r.getRegionName();
Integer lid = meta.obtainRowLock(row);
try {
final List<KeyValue> edits = new ArrayList<KeyValue>(1);
final long now = EnvironmentEdgeManager.currentTimeMillis();
final List<KeyValue> edits = new ArrayList<KeyValue>(2);
edits.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
HConstants.REGIONINFO_QUALIFIER,
EnvironmentEdgeManager.currentTimeMillis(),
HConstants.REGIONINFO_QUALIFIER, now,
Writables.getBytes(r.getRegionInfo())));
// Set the meta table's version into the root table.
edits.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
org.apache.hadoop.hbase.catalog.MetaMigrationRemovingHTD.META_MIGRATION_QUALIFIER,
EnvironmentEdgeManager.currentTimeMillis(),
Bytes.toBytes(true)));
HConstants.META_VERSION_QUALIFIER, now,
Bytes.toBytes(HConstants.META_VERSION)));
meta.put(HConstants.CATALOG_FAMILY, edits);
} finally {
meta.releaseRowLock(lid);

View File

@ -216,16 +216,4 @@ public class Writables {
}
return tgt;
}
/**
* Get HRegionInfo090x deserialized from bytes.
* @param bytes serialized bytes
* @return HRegionInfo090x
* @throws IOException
*/
public static HRegionInfo090x getHRegionInfoForMigration(final byte [] bytes)
throws IOException {
return (HRegionInfo090x)getWritable(bytes, new HRegionInfo090x());
}
}

Binary file not shown.

View File

@ -53,14 +53,13 @@ import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.migration.HRegionInfo090x;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.ReadWriteConsistencyControl;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
@ -1038,97 +1037,6 @@ public class HBaseTestingUtility {
return count;
}
public int createMultiRegionsWithLegacyHRI(final Configuration c,
final HTableDescriptor htd,
final byte [] family, int numRegions)
throws IOException {
if (numRegions < 3) throw new IOException("Must create at least 3 regions");
byte [] startKey = Bytes.toBytes("aaaaa");
byte [] endKey = Bytes.toBytes("zzzzz");
byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
byte [][] regionStartKeys = new byte[splitKeys.length+1][];
for (int i=0;i<splitKeys.length;i++) {
regionStartKeys[i+1] = splitKeys[i];
}
regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
return createMultiRegionsWithLegacyHRI(c, htd, family, regionStartKeys);
}
public int createMultiRegionsWithLegacyHRI(final Configuration c,
final HTableDescriptor htd,
final byte[] columnFamily, byte [][] startKeys)
throws IOException {
Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
if(!htd.hasFamily(columnFamily)) {
HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
htd.addFamily(hcd);
}
List<HRegionInfo090x> newRegions
= new ArrayList<HRegionInfo090x>(startKeys.length);
int count = 0;
for (int i = 0; i < startKeys.length; i++) {
int j = (i + 1) % startKeys.length;
HRegionInfo090x hri = new HRegionInfo090x(htd,
startKeys[i], startKeys[j]);
Put put = new Put(hri.getRegionName());
put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
Writables.getBytes(hri));
meta.put(put);
LOG.info("createMultiRegions: PUT inserted " + hri.toString());
newRegions.add(hri);
count++;
}
return count;
}
public int createMultiRegionsWithNewHRI(final Configuration c,
final HTableDescriptor htd,
final byte [] family, int numRegions)
throws IOException {
if (numRegions < 3) throw new IOException("Must create at least 3 regions");
byte [] startKey = Bytes.toBytes("aaaaa");
byte [] endKey = Bytes.toBytes("zzzzz");
byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
byte [][] regionStartKeys = new byte[splitKeys.length+1][];
for (int i=0;i<splitKeys.length;i++) {
regionStartKeys[i+1] = splitKeys[i];
}
regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
return createMultiRegionsWithNewHRI(c, htd, family, regionStartKeys);
}
public int createMultiRegionsWithNewHRI(final Configuration c, final HTableDescriptor htd,
final byte[] columnFamily, byte [][] startKeys)
throws IOException {
Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
if(!htd.hasFamily(columnFamily)) {
HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
htd.addFamily(hcd);
}
List<HRegionInfo> newRegions
= new ArrayList<HRegionInfo>(startKeys.length);
int count = 0;
for (int i = 0; i < startKeys.length; i++) {
int j = (i + 1) % startKeys.length;
HRegionInfo hri = new HRegionInfo(htd.getName(),
startKeys[i], startKeys[j]);
Put put = new Put(hri.getRegionName());
put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
Writables.getBytes(hri));
meta.put(put);
LOG.info("createMultiRegions: PUT inserted " + hri.toString());
newRegions.add(hri);
count++;
}
return count;
}
/**
* Create rows in META for regions of the specified table with the specified
* start keys. The first startKey should be a 0 length byte array if you

View File

@ -1,178 +0,0 @@
/**
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import junit.framework.AssertionFailedError;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.migration.HRegionInfo090x;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.catalog.MetaMigrationRemovingHTD;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.apache.hadoop.hbase.util.Writables;
import java.util.List;
public class TestMetaMigration {
final Log LOG = LogFactory.getLog(getClass());
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static MiniHBaseCluster miniHBaseCluster = null;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
miniHBaseCluster = TEST_UTIL.startMiniCluster(1);
}
/**
* @throws java.lang.Exception
*/
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Test
public void testHRegionInfoForMigration() throws Exception {
LOG.info("Starting testHRegionInfoForMigration");
HTableDescriptor htd = new HTableDescriptor("testMetaMigration");
htd.addFamily(new HColumnDescriptor("family"));
HRegionInfo090x hrim = new HRegionInfo090x(htd, HConstants.EMPTY_START_ROW,
HConstants.EMPTY_END_ROW);
LOG.info("INFO 1 = " + hrim);
byte[] bytes = Writables.getBytes(hrim);
LOG.info(" BYtes.toString = " + Bytes.toString(bytes));
LOG.info(" HTD bytes = " + Bytes.toString(Writables.getBytes(hrim.getTableDesc())));
HRegionInfo090x info = Writables.getHRegionInfoForMigration(bytes);
LOG.info("info = " + info);
LOG.info("END testHRegionInfoForMigration");
}
@Test
public void testMetaUpdatedFlagInROOT() throws Exception {
LOG.info("Starting testMetaUpdatedFlagInROOT");
boolean metaUpdated =
MetaMigrationRemovingHTD.isMetaHRIUpdated(miniHBaseCluster.getMaster());
assertEquals(true, metaUpdated);
LOG.info("END testMetaUpdatedFlagInROOT");
}
@Test
public void testMetaMigration() throws Exception {
LOG.info("Starting testMetaWithLegacyHRI");
final byte[] FAMILY = Bytes.toBytes("family");
HTableDescriptor htd = new HTableDescriptor("testMetaMigration");
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
htd.addFamily(hcd);
Configuration conf = TEST_UTIL.getConfiguration();
TEST_UTIL.createMultiRegionsWithLegacyHRI(conf, htd, FAMILY,
new byte[][]{
HConstants.EMPTY_START_ROW,
Bytes.toBytes("region_a"),
Bytes.toBytes("region_b")});
CatalogTracker ct = miniHBaseCluster.getMaster().getCatalogTracker();
// just for this test set it to false.
MetaMigrationRemovingHTD.updateRootWithMetaMigrationStatus(ct, false);
MetaReader.fullScanMetaAndPrint(ct);
LOG.info("Meta Print completed.testUpdatesOnMetaWithLegacyHRI");
List<HTableDescriptor> htds = MetaMigrationRemovingHTD.updateMetaWithNewRegionInfo(
TEST_UTIL.getHBaseCluster().getMaster());
MetaReader.fullScanMetaAndPrint(ct);
assertEquals(3, htds.size());
// Assert that the flag in ROOT is updated to reflect the correct status
boolean metaUpdated =
MetaMigrationRemovingHTD.isMetaHRIUpdated(miniHBaseCluster.getMaster());
assertEquals(true, metaUpdated);
LOG.info("END testMetaWithLegacyHRI");
}
/**
* This test assumes a master crash/failure during the meta migration process
* and attempts to continue the meta migration process when a new master takes over.
* When a master dies during the meta migration we will have some rows of
* META.CatalogFamily updated with the new HRI (i.e. HRI without HTD) and some
* still carrying the legacy HRI (i.e. HRI with HTD). When the backup master or a
* fresh start of the master attempts the migration, it will encounter some rows
* of META already updated with the new HRI and some still legacy. This test
* simulates this scenario and validates that the migration process can safely
* skip the updated rows and migrate any pending rows at startup.
* @throws Exception
*/
@Test
public void testMasterCrashDuringMetaMigration() throws Exception {
LOG.info("Starting testMasterCrashDuringMetaMigration");
final byte[] FAMILY = Bytes.toBytes("family");
HTableDescriptor htd = new HTableDescriptor("testMasterCrashDuringMetaMigration");
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
htd.addFamily(hcd);
Configuration conf = TEST_UTIL.getConfiguration();
// Create 10 New regions.
TEST_UTIL.createMultiRegionsWithNewHRI(conf, htd, FAMILY, 10);
// Create 10 Legacy regions.
TEST_UTIL.createMultiRegionsWithLegacyHRI(conf, htd, FAMILY, 10);
CatalogTracker ct = miniHBaseCluster.getMaster().getCatalogTracker();
// just for this test set it to false.
MetaMigrationRemovingHTD.updateRootWithMetaMigrationStatus(ct, false);
//MetaReader.fullScanMetaAndPrint(ct);
LOG.info("MEta Print completed.testUpdatesOnMetaWithLegacyHRI");
List<HTableDescriptor> htds = MetaMigrationRemovingHTD.updateMetaWithNewRegionInfo(
TEST_UTIL.getHBaseCluster().getMaster());
assertEquals(10, htds.size());
// Assert that the flag in ROOT is updated to reflect the correct status
boolean metaUpdated =
MetaMigrationRemovingHTD.isMetaHRIUpdated(miniHBaseCluster.getMaster());
assertEquals(true, metaUpdated);
LOG.info("END testMetaWithLegacyHRI");
}
public static void assertEquals(int expected,
int actual) {
if (expected != actual) {
throw new AssertionFailedError("expected:<" +
expected + "> but was:<" +
actual + ">");
}
}
public static void assertEquals(boolean expected,
boolean actual) {
if (expected != actual) {
throw new AssertionFailedError("expected:<" +
expected + "> but was:<" +
actual + ">");
}
}
}

View File

@ -0,0 +1,354 @@
/**
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import junit.framework.Assert;
import junit.framework.AssertionFailedError;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaMigrationRemovingHTD;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.migration.HRegionInfo090x;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Test the migration that removes HTableDescriptor from HRegionInfo, moving the
* meta version from no version to {@link HConstants#META_VERSION}.
*/
public class TestMetaMigrationRemovingHTD {
static final Log LOG = LogFactory.getLog(TestMetaMigrationRemovingHTD.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final static String TESTTABLE = "TestTable";
private final static int ROWCOUNT = 100;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// Start up our mini cluster on top of a 0.90 root.dir that has data from
// a 0.90 hbase run -- it has a table with 100 rows in it -- and see if
// we can migrate from 0.90.
TEST_UTIL.startMiniZKCluster();
TEST_UTIL.startMiniDFSCluster(1);
Path testdir = TEST_UTIL.getDataTestDir("TestMetaMigrationRemovingHTD");
// Untar our test dir.
File untar = untar(new File(testdir.toString()));
// Now copy the untar up into hdfs so when we start hbase, we'll run from it.
Configuration conf = TEST_UTIL.getConfiguration();
FsShell shell = new FsShell(conf);
FileSystem fs = FileSystem.get(conf);
// Minihbase roots itself in user home directory up in minidfs.
Path homedir = fs.getHomeDirectory();
doFsCommand(shell,
new String [] {"-put", untar.toURI().toString(), homedir.toString()});
// See what's in minihdfs.
doFsCommand(shell, new String [] {"-lsr", "/"});
TEST_UTIL.startMiniHBaseCluster(1, 1);
// Assert we are running against the copied-up filesystem. The copied-up
// rootdir should have had a table named 'TestTable' in it. Assert it is
// present.
HTable t = new HTable(TEST_UTIL.getConfiguration(), TESTTABLE);
ResultScanner scanner = t.getScanner(new Scan());
int count = 0;
while (scanner.next() != null) {
count++;
}
// Assert that we find all 100 rows that are in the data we loaded. If
// so then we must have migrated it from 0.90 to 0.92.
Assert.assertEquals(ROWCOUNT, count);
}
private static File untar(final File testdir) throws IOException {
// Find the src data under src/test/data
final String datafile = "hbase-4388-root.dir";
String srcTarFile =
System.getProperty("project.build.testSourceDirectory", "src/test") +
File.separator + "data" + File.separator + datafile + ".tgz";
File homedir = new File(testdir.toString());
File tgtUntarDir = new File(homedir, datafile);
if (tgtUntarDir.exists()) {
if (!FileUtil.fullyDelete(tgtUntarDir)) {
throw new IOException("Failed delete of " + tgtUntarDir.toString());
}
}
LOG.info("Untarring " + srcTarFile + " into " + homedir.toString());
FileUtil.unTar(new File(srcTarFile), homedir);
Assert.assertTrue(tgtUntarDir.exists());
return tgtUntarDir;
}
private static void doFsCommand(final FsShell shell, final String [] args)
throws Exception {
// Run the given fs shell command.
int errcode = shell.run(args);
if (errcode != 0) throw new IOException("Failed put; errcode=" + errcode);
}
/**
* @throws java.lang.Exception
*/
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Test
public void testMetaUpdatedFlagInROOT() throws Exception {
boolean metaUpdated = MetaMigrationRemovingHTD.
isMetaHRIUpdated(TEST_UTIL.getMiniHBaseCluster().getMaster());
assertEquals(true, metaUpdated);
}
@Test
public void testMetaMigration() throws Exception {
LOG.info("Starting testMetaWithLegacyHRI");
final byte [] FAMILY = Bytes.toBytes("family");
HTableDescriptor htd = new HTableDescriptor("testMetaMigration");
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
htd.addFamily(hcd);
Configuration conf = TEST_UTIL.getConfiguration();
createMultiRegionsWithLegacyHRI(conf, htd, FAMILY,
new byte[][]{
HConstants.EMPTY_START_ROW,
Bytes.toBytes("region_a"),
Bytes.toBytes("region_b")});
CatalogTracker ct =
TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker();
// Erase the current version of root meta for this test.
undoVersionInMeta();
MetaReader.fullScanMetaAndPrint(ct);
LOG.info("Meta Print completed.testUpdatesOnMetaWithLegacyHRI");
Set<HTableDescriptor> htds =
MetaMigrationRemovingHTD.updateMetaWithNewRegionInfo(
TEST_UTIL.getHBaseCluster().getMaster());
MetaReader.fullScanMetaAndPrint(ct);
// Should be one entry only and it should be for the table we just added.
assertEquals(1, htds.size());
assertTrue(htds.contains(htd));
// Assert that the flag in ROOT is updated to reflect the correct status
boolean metaUpdated =
MetaMigrationRemovingHTD.isMetaHRIUpdated(
TEST_UTIL.getMiniHBaseCluster().getMaster());
assertEquals(true, metaUpdated);
}
/**
* This test assumes a master crash/failure during the meta migration process
* and attempts to continue the meta migration process when a new master takes over.
* When a master dies during the meta migration we will have some rows of
* META.CatalogFamily updated with the new HRI (i.e. HRI without HTD) and some
* still carrying the legacy HRI (i.e. HRI with HTD). When the backup master or a
* fresh start of the master attempts the migration, it will encounter some rows
* of META already updated with the new HRI and some still legacy. This test
* simulates this scenario and validates that the migration process can safely
* skip the updated rows and migrate any pending rows at startup.
* @throws Exception
*/
@Test
public void testMasterCrashDuringMetaMigration() throws Exception {
final byte[] FAMILY = Bytes.toBytes("family");
HTableDescriptor htd = new HTableDescriptor("testMasterCrashDuringMetaMigration");
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
htd.addFamily(hcd);
Configuration conf = TEST_UTIL.getConfiguration();
// Create 10 New regions.
createMultiRegionsWithNewHRI(conf, htd, FAMILY, 10);
// Create 10 Legacy regions.
createMultiRegionsWithLegacyHRI(conf, htd, FAMILY, 10);
CatalogTracker ct =
TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker();
// Erase the current version of root meta for this test.
undoVersionInMeta();
MetaMigrationRemovingHTD.updateRootWithMetaMigrationStatus(ct);
//MetaReader.fullScanMetaAndPrint(ct);
LOG.info("Meta Print completed.testUpdatesOnMetaWithLegacyHRI");
Set<HTableDescriptor> htds =
MetaMigrationRemovingHTD.updateMetaWithNewRegionInfo(
TEST_UTIL.getHBaseCluster().getMaster());
assertEquals(1, htds.size());
assertTrue(htds.contains(htd));
// Assert that the flag in ROOT is updated to reflect the correct status
boolean metaUpdated = MetaMigrationRemovingHTD.
isMetaHRIUpdated(TEST_UTIL.getMiniHBaseCluster().getMaster());
assertEquals(true, metaUpdated);
LOG.info("END testMetaWithLegacyHRI");
}
private void undoVersionInMeta() throws IOException {
Delete d = new Delete(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
// Erase the current version of root meta for this test.
d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER);
HTable rootTable =
new HTable(TEST_UTIL.getConfiguration(), HConstants.ROOT_TABLE_NAME);
try {
rootTable.delete(d);
} finally {
rootTable.close();
}
}
public static void assertEquals(int expected, int actual) {
if (expected != actual) {
throw new AssertionFailedError("expected:<" +
expected + "> but was:<" +
actual + ">");
}
}
public static void assertEquals(boolean expected, boolean actual) {
if (expected != actual) {
throw new AssertionFailedError("expected:<" +
expected + "> but was:<" +
actual + ">");
}
}
/**
* @param c
* @param htd
* @param family
* @param numRegions
* @return
* @throws IOException
* @deprecated Just for testing migration of meta from 0.90 to 0.92... will be
* removed thereafter
*/
public int createMultiRegionsWithLegacyHRI(final Configuration c,
final HTableDescriptor htd, final byte [] family, int numRegions)
throws IOException {
if (numRegions < 3) throw new IOException("Must create at least 3 regions");
byte [] startKey = Bytes.toBytes("aaaaa");
byte [] endKey = Bytes.toBytes("zzzzz");
byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
byte [][] regionStartKeys = new byte[splitKeys.length+1][];
for (int i=0;i<splitKeys.length;i++) {
regionStartKeys[i+1] = splitKeys[i];
}
regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
return createMultiRegionsWithLegacyHRI(c, htd, family, regionStartKeys);
}
/**
* @param c
* @param htd
* @param columnFamily
* @param startKeys
* @return
* @throws IOException
* @deprecated Just for testing migration of meta from 0.90 to 0.92... will be
* removed thereafter
*/
public int createMultiRegionsWithLegacyHRI(final Configuration c,
final HTableDescriptor htd, final byte[] columnFamily, byte [][] startKeys)
throws IOException {
Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
if(!htd.hasFamily(columnFamily)) {
HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
htd.addFamily(hcd);
}
List<HRegionInfo090x> newRegions
= new ArrayList<HRegionInfo090x>(startKeys.length);
int count = 0;
for (int i = 0; i < startKeys.length; i++) {
int j = (i + 1) % startKeys.length;
HRegionInfo090x hri = new HRegionInfo090x(htd,
startKeys[i], startKeys[j]);
Put put = new Put(hri.getRegionName());
put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
Writables.getBytes(hri));
meta.put(put);
LOG.info("createMultiRegions: PUT inserted " + hri.toString());
newRegions.add(hri);
count++;
}
return count;
}
int createMultiRegionsWithNewHRI(final Configuration c,
final HTableDescriptor htd, final byte [] family, int numRegions)
throws IOException {
if (numRegions < 3) throw new IOException("Must create at least 3 regions");
byte [] startKey = Bytes.toBytes("aaaaa");
byte [] endKey = Bytes.toBytes("zzzzz");
byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
byte [][] regionStartKeys = new byte[splitKeys.length+1][];
for (int i=0;i<splitKeys.length;i++) {
regionStartKeys[i+1] = splitKeys[i];
}
regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
return createMultiRegionsWithNewHRI(c, htd, family, regionStartKeys);
}
int createMultiRegionsWithNewHRI(final Configuration c, final HTableDescriptor htd,
final byte[] columnFamily, byte [][] startKeys)
throws IOException {
Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
if(!htd.hasFamily(columnFamily)) {
HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
htd.addFamily(hcd);
}
List<HRegionInfo> newRegions
= new ArrayList<HRegionInfo>(startKeys.length);
int count = 0;
for (int i = 0; i < startKeys.length; i++) {
int j = (i + 1) % startKeys.length;
HRegionInfo hri = new HRegionInfo(htd.getName(),
startKeys[i], startKeys[j]);
Put put = new Put(hri.getRegionName());
put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
Writables.getBytes(hri));
meta.put(put);
LOG.info("createMultiRegions: PUT inserted " + hri.toString());
newRegions.add(hri);
count++;
}
return count;
}
}

View File

@ -0,0 +1,58 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.migration;
import java.io.IOException;
import junit.framework.Assert;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.catalog.MetaMigrationRemovingHTD;
import org.apache.hadoop.hbase.util.Writables;
import org.junit.Test;
/**
* Migration tests that do not need to spin up a cluster.
* @deprecated Remove after we release 0.92
*/
public class TestMigrationFrom090To092 {
@Test
public void testMigrateHRegionInfoFromVersion0toVersion1()
throws IOException {
HTableDescriptor htd =
getHTableDescriptor("testMigrateHRegionInfoFromVersion0toVersion1");
HRegionInfo090x ninety =
new HRegionInfo090x(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
byte [] bytes = Writables.getBytes(ninety);
// Now deserialize into an HRegionInfo
HRegionInfo hri = Writables.getHRegionInfo(bytes);
Assert.assertEquals(hri.getTableNameAsString(),
ninety.getTableDesc().getNameAsString());
Assert.assertEquals(HRegionInfo.VERSION, hri.getVersion());
}
private HTableDescriptor getHTableDescriptor(final String name) {
HTableDescriptor htd = new HTableDescriptor(name);
htd.addFamily(new HColumnDescriptor("family"));
return htd;
}
}