HBASE-1215 part 5 of migration; adds a migration test to the unit suite with verification that the migration worked

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@796542 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2009-07-21 21:18:52 +00:00
parent c66bdea9bb
commit 2b98e7e256
4 changed files with 89 additions and 47 deletions

HColumnDescriptor.java

@@ -391,7 +391,8 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HColumnDescriptor>
/** @return compression type being used for the column family */
@TOJSON
public Compression.Algorithm getCompression() {
return Compression.Algorithm.valueOf(getValue(COMPRESSION));
String n = getValue(COMPRESSION);
return Compression.Algorithm.valueOf(n.toUpperCase());
}
/** @return maximum number of versions */
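The getCompression() fix above matters because Java's Enum.valueOf is case-sensitive, and pre-0.20 descriptors could carry lower-case compression names. A minimal standalone sketch of the failure mode (the enum here is a stand-in, not the real Compression.Algorithm):

public class EnumValueOfDemo {
  // Stand-in for org.apache.hadoop.hbase.io.hfile.Compression.Algorithm.
  enum Algorithm { NONE, GZ, LZO }

  public static void main(String[] args) {
    String stored = "none"; // older descriptors persisted lower-case names
    // Algorithm.valueOf(stored) would throw IllegalArgumentException,
    // because Enum.valueOf matches constant names exactly.
    Algorithm a = Algorithm.valueOf(stored.toUpperCase());
    System.out.println(a); // prints NONE
  }
}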

HConstants.java

@@ -49,8 +49,8 @@ public interface HConstants {
* Version 6 enables blockcaching on catalog tables.
* Version 7 introduces hfile -- hbase 0.19 to 0.20.
*/
public static final String FILE_SYSTEM_VERSION = "6";
// public static final String FILE_SYSTEM_VERSION = "7";
// public static final String FILE_SYSTEM_VERSION = "6";
public static final String FILE_SYSTEM_VERSION = "7";
// Configuration parameters
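Flipping FILE_SYSTEM_VERSION from "6" to "7" is what pushes existing 0.19 deployments onto the migration path: the version persisted in the filesystem no longer matches the constant, so startup refuses to proceed until the Migrate tool has run. A hedged sketch of that style of check (the method and message are illustrative, not the actual FSUtils code):

import java.io.IOException;

public class VersionCheckSketch {
  static final String FILE_SYSTEM_VERSION = "7"; // mirrors HConstants

  // Illustrative only: compare a persisted version string to the constant
  // and refuse to start on a mismatch.
  static void checkVersion(String persisted) throws IOException {
    if (!FILE_SYSTEM_VERSION.equals(persisted)) {
      throw new IOException("File system version " + persisted
        + " does not match software version " + FILE_SYSTEM_VERSION
        + "; run the Migrate tool");
    }
  }

  public static void main(String[] args) throws IOException {
    checkVersion("7"); // passes on a migrated layout
    checkVersion("6"); // throws: pre-hfile layout, migration required
  }
}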

Migrate.java

@@ -21,6 +21,8 @@
package org.apache.hadoop.hbase.util;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.cli.Options;
import org.apache.commons.logging.Log;
@@ -29,6 +31,7 @@ import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@@ -40,6 +43,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
import org.apache.hadoop.hbase.migration.nineteen.io.BloomFilterMapFile;
import org.apache.hadoop.hbase.migration.nineteen.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -249,6 +253,7 @@ public class Migrate extends Configured implements Tool {
}
// TODO: Verify all has been brought over from old to new layout.
final MetaUtils utils = new MetaUtils(this.conf);
final List<HRegionInfo> metas = new ArrayList<HRegionInfo>();
try {
rewriteHRegionInfo(utils.getRootRegion().getRegionInfo());
// Scan the root region
@@ -259,29 +264,53 @@
migrationNeeded = true;
return false;
}
metas.add(info);
rewriteHRegionInfo(utils.getRootRegion(), info);
return true;
}
});
LOG.info("TODO: Note on make sure not using old hbase-default.xml");
/*
* hbase.master / hbase.master.hostname are obsolete, that's replaced by
hbase.cluster.distributed. This config must be set to "true" to have a
fully-distributed cluster and the server lines in zoo.cfg must not
point to "localhost".
The clients must have a valid zoo.cfg in their classpath since we
don't provide the master address.
hbase.master.dns.interface and hbase.master.dns.nameserver should be
set to control the master's address (not mandatory).
*/
LOG.info("TODO: Note on zookeeper config. before starting:");
// Scan meta.
for (HRegionInfo hri: metas) {
final HRegionInfo metahri = hri;
utils.scanMetaRegion(hri, new MetaUtils.ScannerListener() {
public boolean processRow(HRegionInfo info) throws IOException {
if (readOnly && !migrationNeeded) {
migrationNeeded = true;
return false;
}
rewriteHRegionInfo(utils.getMetaRegion(metahri), info);
return true;
}
});
}
cleanOldLogFiles(hbaseRootDir);
} finally {
utils.shutdown();
}
}
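// Illustrative fragment, not from the patch: the scans above use
// MetaUtils.ScannerListener as a visitor, and returning false from
// processRow halts the scan early -- that is how a read-only run bails
// out the moment it learns migration is needed. Assuming the same
// scanRootRegion entry point used above:
//
//   utils.scanRootRegion(new MetaUtils.ScannerListener() {
//     public boolean processRow(HRegionInfo info) throws IOException {
//       LOG.info("Visiting " + info.getRegionNameAsString());
//       return true; // false stops the visitor early
//     }
//   });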
/*
 * Remove old log files.
 * @param hbaseRootDir
 * @throws IOException
 */
private void cleanOldLogFiles(final Path hbaseRootDir)
throws IOException {
FileStatus [] oldlogfiles = fs.listStatus(hbaseRootDir, new PathFilter() {
public boolean accept(Path p) {
return p.getName().startsWith("log_");
}
});
// Return if nothing to do.
if (oldlogfiles.length <= 0) return;
LOG.info("Removing " + oldlogfiles.length + " old logs file clutter");
for (int i = 0; i < oldlogfiles.length; i++) {
fs.delete(oldlogfiles[i].getPath(), true);
LOG.info("Deleted: " + oldlogfiles[i].getPath());
}
}
/*
* Rewrite all under hbase root dir.
* Presumes that {@link FSUtils#isMajorCompactedPre020(FileSystem, Path)}
@@ -390,7 +419,6 @@ set to control the master's address (not mandatory).
put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
Writables.getBytes(oldHri));
mr.put(put);
LOG.info("Enabled blockcache on " + oldHri.getRegionNameAsString());
}
/*
@@ -399,26 +427,25 @@ set to control the master's address (not mandatory).
*/
private boolean rewriteHRegionInfo(final HRegionInfo hri) {
boolean result = false;
// Set flush size at 32k if a catalog table.
int catalogMemStoreFlushSize = 32 * 1024;
if (hri.isMetaRegion() &&
hri.getTableDesc().getMemStoreFlushSize() != catalogMemStoreFlushSize) {
hri.getTableDesc().setMemStoreFlushSize(catalogMemStoreFlushSize);
result = true;
}
HColumnDescriptor hcd =
hri.getTableDesc().getFamily(HConstants.CATALOG_FAMILY);
if (hcd == null) {
LOG.info("No info family in: " + hri.getRegionNameAsString());
return result;
}
// Set blockcache enabled.
// Set block cache on all tables.
hcd.setBlockCacheEnabled(true);
// Set compression to NONE: the enum name must be upper-case, where the
// old default was lower-case 'none'. Any other compression is turned off
// here; the user must re-enable it after migration.
hcd.setCompressionType(Algorithm.NONE);
return true;
// TODO: Rewrite MEMCACHE_FLUSHSIZE as MEMSTORE_FLUSHSIZE name has changed.
// TODO: Remove tableindexer 'index' attribute index from TableDescriptor (See HBASE-1586)
// TODO: Move the in-memory parameter from table to column family (from HTD to HCD).
// TODO: Purge isInMemory, etc., methods from HTD as part of migration.
// TODO: Set the .META. and -ROOT- to flush at 16k? 32k?
// TODO: Enable block cache on all tables
// TODO: Clean up old region log files (HBASE-698)
}
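Since Migrate extends Configured and implements Tool, it can be driven programmatically the same way the new test drives it with the "upgrade" argument. A hedged sketch (the rootdir value is hypothetical, and the no-arg constructor is assumed from the test's usage):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.Migrate;
import org.apache.hadoop.util.ToolRunner;

public class RunMigrateSketch {
  public static void main(String[] args) throws Exception {
    HBaseConfiguration conf = new HBaseConfiguration();
    conf.set("hbase.rootdir", "hdfs://localhost:9000/hbase"); // hypothetical
    // "upgrade" rewrites the layout; the readOnly/migrationNeeded logic
    // above implies a check-only mode as well.
    int exitCode = ToolRunner.run(conf, new Migrate(), new String[] {"upgrade"});
    System.exit(exitCode);
  }
}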

TestMigration.java (renamed from MigrationTest.java)

@@ -36,18 +36,21 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
/**
* Runs migration of filesystem from hbase 0.19 to hbase 0.20.
* Not part of the general test suite because it takes time.
*/
public class MigrationTest extends HBaseTestCase {
private static final Log LOG = LogFactory.getLog(MigrationTest.class);
public class TestMigration extends HBaseTestCase {
private static final Log LOG = LogFactory.getLog(TestMigration.class);
// Expected count of rows in migrated table.
private static final int EXPECTED_COUNT = 3;
@@ -55,8 +58,9 @@ public class MigrationTest extends HBaseTestCase {
/**
* Test migration.
* @throws IOException
* @throws InterruptedException
*/
public void testMigration() throws IOException {
public void testMigration() throws IOException, InterruptedException {
Path rootdir = getUnitTestdir(getName());
Path hbasedir = loadTestData(fs, rootdir);
assertTrue(fs.exists(hbasedir));
@@ -66,6 +70,7 @@ public class MigrationTest extends HBaseTestCase {
this.conf.set("hbase.rootdir", uri);
int result = migrator.run(new String [] {"upgrade"});
assertEquals(0, result);
verify();
}
/*
@@ -92,42 +97,50 @@ public class MigrationTest extends HBaseTestCase {
* Verify can read the migrated table.
* @throws IOException
*/
private void verify() throws IOException {
private void verify() throws IOException, InterruptedException {
// Delete any cached connections. Need to do this because connection was
// created earlier when no master was around. The fact that there was no
// master gets cached. Need to delete so we go get master afresh.
HConnectionManager.deleteConnectionInfo(conf, false);
LOG.info("Start a cluster against migrated FS");
// Up the number of retries. Needed while the cluster starts up. It's been set to 1
// above.
final int retries = 5;
this.conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER_KEY, retries);
// Note that this is done before we create the MiniHBaseCluster because we
// need to edit the config to add the ZooKeeper servers.
MiniZooKeeperCluster zooKeeperCluster = new MiniZooKeeperCluster();
int clientPort =
zooKeeperCluster.startup(new java.io.File(this.testDir.toString()));
conf.set("hbase.zookeeper.property.clientPort",
Integer.toString(clientPort));
MiniHBaseCluster cluster = new MiniHBaseCluster(this.conf, 1);
try {
HBaseAdmin hb = new HBaseAdmin(this.conf);
assertTrue(hb.isMasterRunning());
HTableDescriptor [] tables = hb.listTables();
assertEquals(2, tables.length);
boolean foundTable = false;
/*
// Just look at table 'a'.
final String tablenameStr = "a";
final byte [] tablename = Bytes.toBytes(tablenameStr);
for (int i = 0; i < tables.length; i++) {
if (Bytes.equals(Bytes.toBytes(TABLENAME), tables[i].getName())) {
byte [] tableName = tables[i].getName();
if (Bytes.equals(tablename, tables[i].getName())) {
foundTable = true;
break;
}
}
assertTrue(foundTable);
LOG.info(TABLENAME + " exists. Now waiting till startcode " +
LOG.info(tablenameStr + " exists. Now waiting till startcode " +
"changes before opening a scanner");
waitOnStartCodeChange(retries);
// Delete again so we go get it all fresh.
HConnectionManager.deleteConnectionInfo(conf, false);
HTable t = new HTable(this.conf, TABLENAME);
HTable t = new HTable(this.conf, tablename);
int count = 0;
LOG.info("OPENING SCANNER");
Scan scan = new Scan();
scan.addColumns(TABLENAME_COLUMNS);
ResultScanner s = t.getScanner(scan);
try {
for (Result r: s) {
@@ -135,19 +148,19 @@ public class MigrationTest extends HBaseTestCase {
break;
}
count++;
if (count % 1000 == 0 && count > 0) {
LOG.info("Iterated over " + count + " rows.");
}
}
assertEquals(EXPECTED_COUNT, count);
} finally {
s.close();
}
*/
} finally {
HConnectionManager.deleteConnectionInfo(conf, false);
cluster.shutdown();
try {
zooKeeperCluster.shutdown();
} catch (IOException e) {
LOG.warn("Shutting down ZooKeeper cluster", e);
}
}
}
@@ -220,6 +233,7 @@
}
}
@SuppressWarnings("unused")
private void listPaths(FileSystem filesystem, Path dir, int rootdirlength)
throws IOException {
FileStatus[] stats = filesystem.listStatus(dir);
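The startup ordering in verify() is the part worth reusing: bring up the ZooKeeper mini cluster and publish its client port into the configuration before constructing the MiniHBaseCluster, then shut down in reverse order. A condensed sketch of just that sequence (the directory path is hypothetical):

import java.io.File;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.MiniZooKeeperCluster;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseConfiguration conf = new HBaseConfiguration();
    MiniZooKeeperCluster zk = new MiniZooKeeperCluster();
    // Start ZooKeeper first and record its port so servers and clients
    // built from this conf can find it.
    int clientPort = zk.startup(new File("/tmp/zk-test")); // hypothetical dir
    conf.set("hbase.zookeeper.property.clientPort",
      Integer.toString(clientPort));
    MiniHBaseCluster cluster = new MiniHBaseCluster(conf, 1); // 1 region server
    try {
      // ... assertions against the running cluster go here ...
    } finally {
      cluster.shutdown();
      zk.shutdown();
    }
  }
}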