HBASE-1215 [migration] 0.19.0 -> 0.20.0 migration (hfile, HCD changes, HSK changes) -- part 1

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@794140 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2009-07-15 05:38:40 +00:00
parent 3e6fa5f0b8
commit 10c33cc48d
6 changed files with 130 additions and 41 deletions

src/java/org/apache/hadoop/hbase/HConstants.java View File

@@ -47,8 +47,9 @@ public interface HConstants {
* Version 4 supports only one kind of bloom filter.
* Version 5 changes versions in catalog table regions.
* Version 6 enables blockcaching on catalog tables.
* Version 7 introduces hfile -- hbase 0.19 to 0.20.
*/
public static final String FILE_SYSTEM_VERSION = "6";
public static final String FILE_SYSTEM_VERSION = "7";
// Configuration parameters
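
The bump from "6" to "7" above is what forces the upgrade: an install whose hbase.version file under the rootdir still reads "6" must be run through the Migrate tool before a version-7 hbase will start. A minimal sketch of that gate, assuming FSUtils.getVersion is the helper that reads the hbase.version file (the wrapper class is illustrative, not part of this commit):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.FSUtils;

public class VersionGateSketch {
  public static void main(String [] args) throws IOException {
    HBaseConfiguration conf = new HBaseConfiguration();
    FileSystem fs = FileSystem.get(conf);
    Path rootdir = new Path(conf.get(HConstants.HBASE_DIR));
    // Assumed helper: reads the hbase.version file under rootdir.
    String onDisk = FSUtils.getVersion(fs, rootdir);
    if (!HConstants.FILE_SYSTEM_VERSION.equals(onDisk)) {
      throw new IOException("Filesystem is at version " + onDisk +
        ", expected " + HConstants.FILE_SYSTEM_VERSION +
        "; run the Migrate tool first");
    }
  }
}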

src/java/org/apache/hadoop/hbase/util/FSUtils.java View File

@@ -29,8 +29,10 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -265,4 +267,35 @@ public class FSUtils {
HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
return fs.exists(rootRegionDir);
}
/**
 * Runs through the hbase rootdir and checks all stores have only
 * one file in them -- that is, they've been major compacted.  Looks
 * at root and meta tables too.
 * @param fs filesystem to check
 * @param c configuration to read hbase.rootdir from
 * @return True if this hbase install is major compacted.
 * @throws IOException
 */
public static boolean isMajorCompacted(final FileSystem fs,
    final HBaseConfiguration c)
throws IOException {
  // Presumes any directory under hbase.rootdir is a table.
  final PathFilter dirFilter = new PathFilter() {
    public boolean accept(Path p) {
      boolean isdir = false;
      try {
        isdir = fs.getFileStatus(p).isDir();
      } catch (IOException e) {
        e.printStackTrace();
      }
      return isdir;
    }
  };
  FileStatus [] directories =
    fs.listStatus(new Path(c.get(HConstants.HBASE_DIR)), dirFilter);
  for (int i = 0; i < directories.length; i++) {
    // Table dirs hold region dirs, region dirs hold family (store) dirs,
    // and a major compacted store holds a single file.  The loop body is
    // an assumed completion -- it was empty in this part-1 commit.
    FileStatus [] regionDirs =
      fs.listStatus(directories[i].getPath(), dirFilter);
    for (int j = 0; j < regionDirs.length; j++) {
      FileStatus [] familyDirs =
        fs.listStatus(regionDirs[j].getPath(), dirFilter);
      for (int k = 0; k < familyDirs.length; k++) {
        if (fs.listStatus(familyDirs[k].getPath()).length > 1) {
          return false;
        }
      }
    }
  }
  return true;
}
}
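
A usage sketch for the new helper -- the pre-flight check a migration driver would make before rewriting stores as hfile (the wrapper class is illustrative):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.FSUtils;

public class CompactionCheckSketch {
  public static void main(String [] args) throws IOException {
    HBaseConfiguration c = new HBaseConfiguration();
    FileSystem fs = FileSystem.get(c);
    if (!FSUtils.isMajorCompacted(fs, c)) {
      // Every store, catalog tables included, must hold a single file
      // before the hfile rewrite can begin.
      System.err.println("Major compact all tables, then rerun the migration");
      System.exit(1);
    }
  }
}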

src/java/org/apache/hadoop/hbase/util/Migrate.java View File

@@ -26,8 +26,10 @@ import org.apache.commons.cli.Options;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@@ -86,7 +88,7 @@ public class Migrate extends Configured implements Tool {
private static final float HBASE_0_1_VERSION = 0.1f;
// Filesystem version we can migrate from
private static final int PREVIOUS_VERSION = 4;
private static final int PREVIOUS_VERSION = 6;
private static final String MIGRATION_LINK =
" See http://wiki.apache.org/hadoop/Hbase/HowToMigrate for more information.";
@@ -185,14 +187,14 @@ public class Migrate extends Configured implements Tool {
if (version == HBASE_0_1_VERSION ||
Integer.valueOf(versionStr).intValue() < PREVIOUS_VERSION) {
String msg = "Cannot upgrade from " + versionStr + " to " +
HConstants.FILE_SYSTEM_VERSION + " you must install hbase-0.2.x, run " +
HConstants.FILE_SYSTEM_VERSION + ". You must install an earlier hbase, run " +
"the upgrade tool, reinstall this version and run this utility again." +
MIGRATION_LINK;
System.out.println(msg);
throw new IOException(msg);
}
migrate4To6();
migrate6to7();
if (!readOnly) {
// Set file system version
@@ -209,17 +211,31 @@ public class Migrate extends Configured implements Tool {
}
}
// Move the filesystem version from 4 to 6.
// In here we rewrite the catalog table regions so they keep 10 versions
// instead of 1.
private void migrate4To6() throws IOException {
// Move the filesystem version from 6 to 7.
private void migrate6to7() throws IOException {
if (this.readOnly && this.migrationNeeded) {
return;
}
// Before we start, make sure all is major compacted.
if (!allMajorCompacted()) {
String msg = "All tables must be major compacted before the migration can begin." +
MIGRATION_LINK;
System.out.println(msg);
throw new IOException(msg);
}
final MetaUtils utils = new MetaUtils(this.conf);
try {
// These two operations are effectively useless. -ROOT- is hardcoded,
// at least until hbase 0.20.0 when we store it out in ZK.
// Preparation
// TODO: Fail if not all major compacted first
// TODO: Set the .META. and -ROOT- to flush at 16k? 32k?
// TODO: Enable block cache on all tables
// TODO: Rewrite MEMCACHE_FLUSHSIZE as MEMSTORE_FLUSHSIZE; the name has changed.
// TODO: Remove the tableindexer 'index' attribute from TableDescriptor (See HBASE-1586)
// TODO: Move the in-memory parameter from table to column family (from HTD to HCD).
// TODO: Purge isInMemory, etc., methods from HTD as part of migration.
// TODO: Clean up old region log files (HBASE-698)
updateVersions(utils.getRootRegion().getRegionInfo());
enableBlockCache(utils.getRootRegion().getRegionInfo());
// Scan the root region
@@ -235,11 +251,52 @@ public class Migrate extends Configured implements Tool {
return true;
}
});
LOG.info("TODO: Note on making sure you are not using an old hbase-default.xml");
/*
 * hbase.master / hbase.master.hostname are obsolete; they are replaced by
 * hbase.cluster.distributed.  This config must be set to "true" to have a
 * fully-distributed cluster, and the server lines in zoo.cfg must not
 * point to "localhost".
 * The clients must have a valid zoo.cfg in their classpath since we
 * don't provide the master address.
 * hbase.master.dns.interface and hbase.master.dns.nameserver should be
 * set to control the master's address (not mandatory).
 */
LOG.info("TODO: Note on zookeeper config before starting.");
} finally {
utils.shutdown();
}
}
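
Of the TODOs above, the MEMCACHE_FLUSHSIZE to MEMSTORE_FLUSHSIZE rewrite could look roughly like the following once implemented -- a sketch only: the attribute keys and the helper name are assumptions, and this part-1 commit does not ship it. The method would sit alongside migrate6to7 and be applied to each HTableDescriptor during the catalog rewrite.

// Sketch of the flush-size rename flagged in the TODOs; not in this commit.
private void renameFlushSizeAttribute(final HTableDescriptor htd) {
  byte [] oldKey = Bytes.toBytes("MEMCACHE_FLUSHSIZE");
  byte [] value = htd.getValue(oldKey);
  if (value != null) {
    htd.setValue(Bytes.toBytes("MEMSTORE_FLUSHSIZE"), value);
    htd.remove(oldKey);
  }
}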
/**
 * Checks that every store in this hbase install has been major
 * compacted -- each store holding one file only -- before the hfile
 * rewrite starts.
 * @return True if this hbase install is major compacted.
 * @throws IOException
 */
private boolean allMajorCompacted() throws IOException {
  // Delegates to FSUtils.isMajorCompacted (added in this commit)
  // rather than duplicating its directory walk here.
  return FSUtils.isMajorCompacted(FileSystem.get(this.conf), this.conf);
}
/*
* Enable blockcaching on catalog tables.
* @param mr

Binary file not shown.

src/test/org/apache/hadoop/hbase/util/MigrationTest.java View File

@@ -20,14 +20,13 @@
package org.apache.hadoop.hbase.util;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -42,30 +41,32 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.ResultScanner;
/**
* Runs migration of filesystem from hbase 0.x to 0.x
* Runs migration of filesystem from hbase 0.19 to hbase 0.20.
* Not part of general test suite because takes time.
*/
public class MigrationTest extends HBaseTestCase {
private static final Log LOG = LogFactory.getLog(MigrationTest.class);
// This is the name of the table that is in the data file.
private static final String TABLENAME = "TestUpgrade";
// The table has two columns
private static final byte [][] TABLENAME_COLUMNS =
{Bytes.toBytes("column_a:"), Bytes.toBytes("column_b:")};
// Expected count of rows in migrated table.
private static final int EXPECTED_COUNT = 17576;
private static final int EXPECTED_COUNT = 3;
/**
* Test migration. To be used in future migrations
* Test migration.
* @throws IOException
*/
public void testUpgrade() throws IOException {
public void testMigration() throws IOException {
Path rootdir = getUnitTestdir(getName());
FileSystem fs = FileSystem.get(this.conf);
Path hbasedir = loadTestData(fs, rootdir);
assertTrue(fs.exists(hbasedir));
listPaths(fs, hbasedir, -1);
Migrate migrator = new Migrate(this.conf);
Path qualified = fs.makeQualified(hbasedir);
String uri = qualified.toString();
this.conf.set("hbase.rootdir", uri);
migrator.run(new String [] {"upgrade"});
}
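
Outside the test, the same run would presumably be driven through Hadoop's ToolRunner, since Migrate extends Configured and implements Tool -- an invocation sketch mirroring the in-process call above (the wrapper class is illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.Migrate;
import org.apache.hadoop.util.ToolRunner;

public class RunMigrationSketch {
  public static void main(String [] args) throws Exception {
    HBaseConfiguration conf = new HBaseConfiguration();
    // "upgrade" is the same argument the test passes to Migrate.run.
    int exitCode = ToolRunner.run(new Migrate(conf), new String [] {"upgrade"});
    System.exit(exitCode);
  }
}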
/*
@@ -74,24 +75,18 @@ public class MigrationTest extends HBaseTestCase {
* @param rootDir
* @throws IOException
*/
private void loadTestData(final FileSystem dfs, final Path rootDir)
private Path loadTestData(final FileSystem dfs, final Path rootDir)
throws IOException {
FileSystem localfs = FileSystem.getLocal(conf);
// Get path for zip file. If running this test in eclipse, define
// the system property src.testdata for your test run.
String srcTestdata = System.getProperty("src.testdata");
if (srcTestdata == null) {
throw new NullPointerException("Define src.test system property");
String hbasedir = "hbase-0.19-two-small-tables";
InputStream is = this.getClass().getClassLoader().
getResourceAsStream("data/" + hbasedir + ".zip");
ZipInputStream zip = new ZipInputStream(is);
try {
unzip(zip, dfs, rootDir);
} finally {
zip.close();
}
Path data = new Path(srcTestdata, "HADOOP-2478-testdata-v0.1.zip");
if (!localfs.exists(data)) {
throw new FileNotFoundException(data.toString());
}
FSDataInputStream hs = localfs.open(data);
ZipInputStream zip = new ZipInputStream(hs);
unzip(zip, dfs, rootDir);
zip.close();
hs.close();
return new Path(rootDir, hbasedir);
}
/*
@@ -116,6 +111,7 @@ public class MigrationTest extends HBaseTestCase {
assertTrue(hb.isMasterRunning());
HTableDescriptor [] tables = hb.listTables();
boolean foundTable = false;
/*
for (int i = 0; i < tables.length; i++) {
if (Bytes.equals(Bytes.toBytes(TABLENAME), tables[i].getName())) {
foundTable = true;
@@ -148,6 +144,8 @@ public class MigrationTest extends HBaseTestCase {
} finally {
s.close();
}
*/
} finally {
HConnectionManager.deleteConnectionInfo(conf, false);
cluster.shutdown();

Binary file not shown.