HBASE-462 Update migration tool

Other miscellaneous changes included:

IdentityTableReduce
- Added @SuppressWarnings("unused") to the reporter argument
- Removed an unnecessary cast
AbstractMergeTestBase
- Removed an unnecessary compaction
StaticTestEnvironment
- Changed the logging level for client connections, which are too noisy in most cases
TestBloomFilters
- Removed unnecessary config settings
- Modified to use BatchUpdate instead of the deprecated startUpdate, etc. (see the sketch below)
TestScannerAPI
- Modified to use BatchUpdate instead of the deprecated startUpdate, etc.
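For reference, a minimal sketch of the BatchUpdate swap made in TestBloomFilters and TestScannerAPI. The writeRow helper and its parameters are illustrative only, not part of this change; the API calls themselves (new BatchUpdate(row), put, commit) are the ones that appear in the diffs below.

import java.io.IOException;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.io.Text;

public class BatchUpdateExample {
  // Deprecated style being removed:
  //   long lockid = table.startUpdate(row);
  //   table.put(lockid, column, value.getBytes(HConstants.UTF8_ENCODING));
  //   table.commit(lockid);
  static void writeRow(HTable table, Text row, Text column, String value)
  throws IOException {
    BatchUpdate b = new BatchUpdate(row);                    // batch is keyed by row
    b.put(column, value.getBytes(HConstants.UTF8_ENCODING)); // buffer the cell locally
    table.commit(b);                                         // send the batch in one call
  }
}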


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@630394 13f79535-47bb-0310-9956-ffa450edef68
Jim Kellerman 2008-02-23 06:11:44 +00:00
parent c180c47e3f
commit 30e6c8c070
10 changed files with 151 additions and 112 deletions

CHANGES.txt

@@ -25,6 +25,7 @@ Hbase Change Log
    HBASE-428 Under continuous upload of rows, WrongRegionExceptions are thrown
              that reach the client even after retries
    HBASE-460 TestMigrate broken when HBase moved to subproject
+   HBASE-462 Update migration tool
 
  IMPROVEMENTS
    HBASE-415 Rewrite leases to use DelayedBlockingQueue instead of polling

HConstants.java

@@ -25,7 +25,8 @@ import org.apache.hadoop.io.Text;
  * HConstants holds a bunch of HBase-related constants
  */
 public interface HConstants {
 
+  /** long constant for zero */
   static final Long ZERO_L = Long.valueOf(0L);
 
   // For migration
@@ -34,7 +35,7 @@ public interface HConstants {
   static final String VERSION_FILE_NAME = "hbase.version";
 
   /** version of file system */
-  static final String FILE_SYSTEM_VERSION = "0.1";
+  static final String FILE_SYSTEM_VERSION = "2";
 
   // Configuration parameters

IdentityTableReduce.java

@@ -39,11 +39,12 @@ public class IdentityTableReduce extends TableReduce<Text, MapWritable> {
    */
   @Override
   public void reduce(Text key, Iterator<MapWritable> values,
-      OutputCollector<Text, MapWritable> output, Reporter reporter)
+      OutputCollector<Text, MapWritable> output,
+      @SuppressWarnings("unused") Reporter reporter)
   throws IOException {
     while(values.hasNext()) {
-      MapWritable r = (MapWritable)values.next();
+      MapWritable r = values.next();
       output.collect(key, r);
     }
   }

HMaster.java

@@ -148,6 +148,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
   /** Name of master server */
   public static final String MASTER = "master";
 
+  /** @return InfoServer object */
   public InfoServer getInfoServer() {
     return infoServer;
   }
@@ -270,16 +271,21 @@
     try {
       // Make sure the root directory exists!
       if(! fs.exists(rootdir)) {
         fs.mkdirs(rootdir);
         FSUtils.setVersion(fs, rootdir);
-      } else if (!FSUtils.checkVersion(fs, rootdir)) {
-        // Output on stdout so user sees it in terminal.
-        String message = "The HBase data files stored on the FileSystem are " +
-          "from an earlier version of HBase. You need to run " +
-          "'${HBASE_HOME}/bin/hbase migrate' to bring your installation" +
-          "up-to-date.";
-        System.out.println("WARNING! " + message + " Master shutting down...");
-        throw new IOException(message);
+      } else {
+        String fsversion = FSUtils.checkVersion(fs, rootdir);
+        if (fsversion == null ||
+            fsversion.compareTo(FILE_SYSTEM_VERSION) != 0) {
+          String message = "The HBase data files stored on the FileSystem " +
+            "are from an earlier version of HBase. You need to run " +
+            "'${HBASE_HOME}/bin/hbase migrate' to bring your installation " +
+            "up-to-date.";
+          // Output on stdout so user sees it in terminal.
+          System.out.println("WARNING! " + message + " Master shutting down...");
+          throw new IOException(message);
+        }
       }
 
       if (!fs.exists(rootRegionDir)) {

FSUtils.java

@@ -81,20 +81,19 @@ public class FSUtils {
    *
    * @param fs
    * @param rootdir
-   * @return true if the current file system is the correct version
+   * @return null if no version file exists, version string otherwise.
    * @throws IOException
    */
-  public static boolean checkVersion(FileSystem fs, Path rootdir) throws IOException {
+  public static String checkVersion(FileSystem fs, Path rootdir) throws IOException {
     Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
-    boolean versionOk = false;
+    String version = null;
     if (fs.exists(versionFile)) {
       FSDataInputStream s =
         fs.open(new Path(rootdir, HConstants.VERSION_FILE_NAME));
-      String version = DataInputStream.readUTF(s);
+      version = DataInputStream.readUTF(s);
       s.close();
-      versionOk = version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0;
     }
-    return versionOk;
+    return version;
   }
 
   /**
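
Since checkVersion now returns the raw version string (or null when no version file exists), callers decide what counts as current. A minimal caller sketch, assuming FSUtils's package and mirroring the comparison HMaster and Migrate now perform:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.FSUtils;

public class VersionCheckExample {
  // True when the on-disk layout matches this build; a null return from
  // checkVersion means the version file has never been written.
  static boolean isUpToDate(FileSystem fs, Path rootdir) throws IOException {
    String version = FSUtils.checkVersion(fs, rootdir);
    return version != null &&
      version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0;
  }
}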

Migrate.java

@@ -25,6 +25,8 @@ import java.io.FileNotFoundException;
 import java.io.InputStreamReader;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -64,7 +66,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
 /**
  * Perform a file system upgrade to convert older file layouts to that
- * supported by HADOOP-2478
+ * supported by HADOOP-2478, and then to the form supported by HBASE-69
  */
 public class Migrate extends Configured implements Tool {
   static final Log LOG = LogFactory.getLog(Migrate.class);
@@ -96,10 +98,11 @@ public class Migrate extends Configured implements Tool {
     options.put("prompt", ACTION.PROMPT);
   }
 
+  private FileSystem fs = null;
+  private Path rootdir = null;
   private boolean readOnly = false;
   private boolean migrationNeeded = false;
   private boolean newRootRegion = false;
-  private ACTION logFiles = ACTION.IGNORE;
   private ACTION otherFiles = ACTION.IGNORE;
 
   private BufferedReader reader = null;
@@ -127,7 +130,7 @@
     }
 
     try {
-      FileSystem fs = FileSystem.get(conf);        // get DFS handle
+      fs = FileSystem.get(conf);                   // get DFS handle
 
       LOG.info("Verifying that file system is available...");
       if (!FSUtils.isFileSystemAvailable(fs)) {
@@ -148,8 +151,7 @@
       LOG.info("Starting upgrade" + (readOnly ? " check" : ""));
 
-      Path rootdir =
-        fs.makeQualified(new Path(this.conf.get(HConstants.HBASE_DIR)));
+      rootdir = fs.makeQualified(new Path(this.conf.get(HConstants.HBASE_DIR)));
 
       if (!fs.exists(rootdir)) {
         throw new FileNotFoundException("HBase root directory " +
@@ -158,40 +160,28 @@
       // See if there is a file system version file
-      if (FSUtils.checkVersion(fs, rootdir)) {
+      String version = FSUtils.checkVersion(fs, rootdir);
+      if (version != null &&
+          version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) {
         LOG.info("No upgrade necessary.");
         return 0;
       }
 
-      // check to see if new root region dir exists
-      checkNewRootRegionDirExists(fs, rootdir);
+      // Get contents of root directory
+      FileStatus[] rootFiles = getRootDirFiles();
 
-      // check for "extra" files and for old upgradable regions
-      extraFiles(fs, rootdir);
-
-      if (!newRootRegion) {
-        // find root region
-        Path rootRegion = new Path(rootdir,
-          OLD_PREFIX + HRegionInfo.rootRegionInfo.getEncodedName());
-        if (!fs.exists(rootRegion)) {
-          throw new IOException("Cannot find root region " +
-            rootRegion.toString());
-        } else if (readOnly) {
-          migrationNeeded = true;
-        } else {
-          migrateRegionDir(fs, rootdir, HConstants.ROOT_TABLE_NAME, rootRegion);
-          scanRootRegion(fs, rootdir);
-
-          // scan for left over regions
-          extraRegions(fs, rootdir);
-        }
+      if (version == null) {
+        migrateFromNoVersion(rootFiles);
+        migrateToV2(rootFiles);
+      } else if (version.compareTo("0.1") == 0) {
+        migrateToV2(rootFiles);
+      } else if (version.compareTo("2") == 0) {
+        // Nothing to do (yet)
+      } else {
+        throw new IOException("Unrecognized version: " + version);
       }
 
       if (!readOnly) {
         // set file system version
         LOG.info("Setting file system version.");
@@ -207,21 +197,85 @@
     }
   }
 
-  private void checkNewRootRegionDirExists(FileSystem fs, Path rootdir)
-    throws IOException {
-    Path rootRegionDir =
-      HRegion.getRegionDir(rootdir, HRegionInfo.rootRegionInfo);
-    newRootRegion = fs.exists(rootRegionDir);
-    migrationNeeded = !newRootRegion;
+  private void migrateFromNoVersion(FileStatus[] rootFiles) throws IOException {
+    LOG.info("No file system version found. Checking to see if file system " +
+      "is at revision 0.1");
+
+    // check to see if new root region dir exists
+    checkNewRootRegionDirExists();
+
+    // check for unrecovered region server log files
+    checkForUnrecoveredLogFiles(rootFiles);
+
+    // check for "extra" files and for old upgradable regions
+    extraFiles(rootFiles);
+
+    if (!newRootRegion) {
+      // find root region
+      Path rootRegion = new Path(rootdir,
+        OLD_PREFIX + HRegionInfo.rootRegionInfo.getEncodedName());
+      if (!fs.exists(rootRegion)) {
+        throw new IOException("Cannot find root region " +
+          rootRegion.toString());
+      } else if (readOnly) {
+        migrationNeeded = true;
+      } else {
+        migrateRegionDir(HConstants.ROOT_TABLE_NAME, rootRegion);
+        scanRootRegion();
+
+        // scan for left over regions
+        extraRegions();
+      }
+    }
   }
 
-  // Check for files that should not be there or should be migrated
-  private void extraFiles(FileSystem fs, Path rootdir) throws IOException {
+  private void migrateToV2(FileStatus[] rootFiles) throws IOException {
+    LOG.info("Checking to see if file system is at revision 2.");
+    checkForUnrecoveredLogFiles(rootFiles);
+  }
+
+  private FileStatus[] getRootDirFiles() throws IOException {
     FileStatus[] stats = fs.listStatus(rootdir);
     if (stats == null || stats.length == 0) {
       throw new IOException("No files found under root directory " +
         rootdir.toString());
     }
+    return stats;
+  }
+
+  private void checkNewRootRegionDirExists() throws IOException {
+    Path rootRegionDir =
+      HRegion.getRegionDir(rootdir, HRegionInfo.rootRegionInfo);
+    newRootRegion = fs.exists(rootRegionDir);
+    migrationNeeded = !newRootRegion;
+  }
+
+  private void checkForUnrecoveredLogFiles(FileStatus[] rootFiles)
+  throws IOException {
+    List<String> unrecoveredLogs = new ArrayList<String>();
+    for (int i = 0; i < rootFiles.length; i++) {
+      String name = rootFiles[i].getPath().getName();
+      if (name.startsWith("log_")) {
+        unrecoveredLogs.add(name);
+      }
+    }
+    if (unrecoveredLogs.size() != 0) {
+      throw new IOException("There are " + unrecoveredLogs.size() +
+        " unrecovered region server logs. Please uninstall this version of " +
+        "HBase, re-install the previous version, start your cluster and " +
+        "shut it down cleanly, so that all region server logs are recovered" +
+        " and deleted.");
+    }
+  }
+
+  // Check for files that should not be there or should be migrated
+  private void extraFiles(FileStatus[] stats) throws IOException {
     for (int i = 0; i < stats.length; i++) {
       String name = stats[i].getPath().getName();
       if (name.startsWith(OLD_PREFIX)) {
@@ -234,31 +288,27 @@
         } catch (NumberFormatException e) {
           extraFile(otherFiles, "Old region format can not be upgraded: " +
-            name, fs, stats[i].getPath());
+            name, stats[i].getPath());
         }
       } else {
         // Since the new root region directory exists, we assume that this
         // directory is not necessary
-        extraFile(otherFiles, "Old region directory found: " + name, fs,
-          stats[i].getPath());
+        extraFile(otherFiles, "Old region directory found: " + name,
+          stats[i].getPath());
       }
     } else {
       // File name does not start with "hregion_"
-      if (name.startsWith("log_")) {
-        String message = "Unrecovered region server log file " + name +
-          " this file can be recovered by the master when it starts.";
-        extraFile(logFiles, message, fs, stats[i].getPath());
-      } else if (!newRootRegion) {
+      if (!newRootRegion) {
         // new root region directory does not exist. This is an extra file
         String message = "Unrecognized file " + name;
-        extraFile(otherFiles, message, fs, stats[i].getPath());
+        extraFile(otherFiles, message, stats[i].getPath());
       }
     }
   }
 }
 
-  private void extraFile(ACTION action, String message, FileSystem fs,
-    Path p) throws IOException {
+  private void extraFile(ACTION action, String message, Path p)
+  throws IOException {
     if (action == ACTION.ABORT) {
       throw new IOException(message + " aborting");
@@ -277,8 +327,8 @@
     }
   }
 
-  private void migrateRegionDir(FileSystem fs, Path rootdir, Text tableName,
-    Path oldPath) throws IOException {
+  private void migrateRegionDir(Text tableName, Path oldPath)
+  throws IOException {
 
     // Create directory where table will live
@@ -323,7 +373,7 @@
     }
   }
 
-  private void scanRootRegion(FileSystem fs, Path rootdir) throws IOException {
+  private void scanRootRegion() throws IOException {
     HLog log = new HLog(fs, new Path(rootdir, HConstants.HREGION_LOGDIR_NAME),
       conf, null);
@@ -354,12 +404,12 @@
           // First move the meta region to where it should be and rename
           // subdirectories as necessary
-          migrateRegionDir(fs, rootdir, HConstants.META_TABLE_NAME,
+          migrateRegionDir(HConstants.META_TABLE_NAME,
             new Path(rootdir, OLD_PREFIX + info.getEncodedName()));
 
           // Now scan and process the meta table
-          scanMetaRegion(fs, rootdir, log, info);
+          scanMetaRegion(log, info);
         }
       }
     } finally {
@@ -375,8 +425,7 @@
     }
   }
 
-  private void scanMetaRegion(FileSystem fs, Path rootdir, HLog log,
-    HRegionInfo info) throws IOException {
+  private void scanMetaRegion(HLog log, HRegionInfo info) throws IOException {
     HRegion metaRegion = new HRegion(
       new Path(rootdir, info.getTableDesc().getName().toString()), log, fs,
@@ -402,7 +451,7 @@
           // Move the region to where it should be and rename
           // subdirectories as necessary
-          migrateRegionDir(fs, rootdir, region.getTableDesc().getName(),
+          migrateRegionDir(region.getTableDesc().getName(),
             new Path(rootdir, OLD_PREFIX + region.getEncodedName()));
 
           results.clear();
@@ -417,7 +466,7 @@
     }
   }
 
-  private void extraRegions(FileSystem fs, Path rootdir) throws IOException {
+  private void extraRegions() throws IOException {
     FileStatus[] stats = fs.listStatus(rootdir);
     if (stats == null || stats.length == 0) {
       throw new IOException("No files found under root directory " +
@@ -436,7 +485,7 @@
           message =
             "Region not in meta table and no other regions reference it " + name;
         }
-        extraFile(otherFiles, message, fs, stats[i].getPath());
+        extraFile(otherFiles, message, stats[i].getPath());
       }
     }
   }
@@ -444,18 +493,11 @@
   @SuppressWarnings("static-access")
   private int parseArgs(String[] args) {
     Options opts = new Options();
-    Option logFiles = OptionBuilder.withArgName(ACTIONS)
-      .hasArg()
-      .withDescription(
-          "disposition of unrecovered region server logs: {abort|ignore|delete|prompt}")
-      .create("logfiles");
     Option extraFiles = OptionBuilder.withArgName(ACTIONS)
       .hasArg()
       .withDescription("disposition of 'extra' files: {abort|ignore|delete|prompt}")
       .create("extrafiles");
-    opts.addOption(logFiles);
     opts.addOption(extraFiles);
 
     GenericOptionsParser parser =
@@ -474,21 +516,12 @@
     }
 
     if (readOnly) {
-      this.logFiles = ACTION.IGNORE;
       this.otherFiles = ACTION.IGNORE;
     } else {
       CommandLine commandLine = parser.getCommandLine();
       ACTION action = null;
-      if (commandLine.hasOption("logfiles")) {
-        action = options.get(commandLine.getOptionValue("logfiles"));
-        if (action == null) {
-          usage();
-          return -1;
-        }
-        this.logFiles = action;
-      }
       if (commandLine.hasOption("extrafiles")) {
         action = options.get(commandLine.getOptionValue("extrafiles"));
         if (action == null) {
@@ -506,9 +539,6 @@
     System.err.println("  check     perform upgrade checks only.");
     System.err.println("  upgrade   perform upgrade checks and modify hbase.\n");
     System.err.println("  Options are:");
-    System.err.println("    -logfiles={abort|ignore|delete|prompt}");
-    System.err.println("            action to take when unrecovered region");
-    System.err.println("            server log files are found.\n");
     System.err.println("    -extrafiles={abort|ignore|delete|prompt}");
     System.err.println("            action to take if \"extra\" files are found.\n");
     System.err.println("    -conf <configuration file> specify an application configuration file");

AbstractMergeTestBase.java

@@ -146,7 +146,6 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
         r.flushcache();
       }
     }
-    region.compactIfNeeded();
     region.close();
     region.getLog().closeAndDelete();
     region.getRegionInfo().setOffline(true);

StaticTestEnvironment.java

@@ -114,9 +114,10 @@ public class StaticTestEnvironment {
     LOG.setLevel(logLevel);
 
     if (!debugging) {
-      // Turn off all the filter logging unless debug is set.
+      // Turn off all the filter and connection logging unless debug is set.
       // It is way too noisy.
       Logger.getLogger("org.apache.hadoop.hbase.filter").setLevel(Level.INFO);
+      Logger.getLogger("org.apache.hadoop.hbase.client").setLevel(Level.INFO);
     }
 
     // Enable mapreduce loggging for the mapreduce jobs.
     Logger.getLogger("org.apache.hadoop.mapred").setLevel(Level.DEBUG);

TestBloomFilters.java

@@ -26,6 +26,8 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+
 /** Tests per-column bloom filters */
 public class TestBloomFilters extends HBaseClusterTestCase {
   static final Log LOG = LogFactory.getLog(TestBloomFilters.class);
@@ -145,8 +147,6 @@ public class TestBloomFilters extends HBaseClusterTestCase {
   /** constructor */
   public TestBloomFilters() {
     super();
-    conf.set("hbase.hregion.memcache.flush.size", "100");// flush cache every 100 bytes
-    conf.set("hbase.regionserver.maxlogentries", "90"); // and roll log too
   }
 
   /**
@@ -191,9 +191,9 @@ public class TestBloomFilters extends HBaseClusterTestCase {
       for(int i = 0; i < 100; i++) {
         Text row = rows[i];
         String value = row.toString();
-        long lockid = table.startUpdate(rows[i]);
-        table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
-        table.commit(lockid);
+        BatchUpdate b = new BatchUpdate(row);
+        b.put(CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
+        table.commit(b);
       }
       try {
         // Give cache flusher and log roller a chance to run
@@ -257,9 +257,9 @@ public class TestBloomFilters extends HBaseClusterTestCase {
       for(int i = 0; i < 100; i++) {
         Text row = rows[i];
         String value = row.toString();
-        long lockid = table.startUpdate(rows[i]);
-        table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
-        table.commit(lockid);
+        BatchUpdate b = new BatchUpdate(row);
+        b.put(CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
+        table.commit(b);
       }
       try {
         // Give cache flusher and log roller a chance to run

TestScannerAPI.java

@@ -30,6 +30,7 @@ import java.util.TreeMap;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /** test the scanner API at all levels */
 public class TestScannerAPI extends HBaseClusterTestCase {
@@ -80,16 +81,16 @@ public class TestScannerAPI extends HBaseClusterTestCase {
     HTable table = new HTable(conf, new Text(getName()));
     for (Map.Entry<Text, SortedMap<Text, byte[]>> row: values.entrySet()) {
-      long lockid = table.startUpdate(row.getKey());
+      BatchUpdate b = new BatchUpdate(row.getKey());
       for (Map.Entry<Text, byte[]> val: row.getValue().entrySet()) {
-        table.put(lockid, val.getKey(), val.getValue());
+        b.put(val.getKey(), val.getValue());
       }
-      table.commit(lockid);
+      table.commit(b);
     }
 
     HRegion region = null;
     try {
-      SortedMap<Text, HRegion> regions =
+      Map<Text, HRegion> regions =
         cluster.getRegionThreads().get(0).getRegionServer().getOnlineRegions();
       for (Map.Entry<Text, HRegion> e: regions.entrySet()) {
         if (!e.getValue().getRegionInfo().isMetaRegion()) {