HBASE-4010 HMaster.createTable could be heavily optimized

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1138318 13f79535-47bb-0310-9956-ffa450edef68
Author: Zhihong Yu
Date: 2011-06-22 06:48:28 +00:00
parent 6dc0b59d44
commit 7f2792b11b
3 changed files with 87 additions and 25 deletions
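In outline, the patch replaces one META round trip (and one throwaway HLog) per new region with batched META puts and a single HLog shared by every region created for the table. Below is a minimal sketch of the batching idea; the class and method names are made up for the sketch, while HTable.put(List<Put>), Put.add and Writables.getBytes are real HBase client APIs of that era.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Writables;

// Illustrative sketch: build one Put per new region, then send them to .META.
// in a single batched call instead of one RPC per region.
public class BatchedMetaInsertSketch {
  public static void addAllRegions(HTable metaTable, List<HRegionInfo> newRegions)
      throws IOException {
    List<Put> puts = new ArrayList<Put>();
    for (HRegionInfo hri : newRegions) {
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
          Writables.getBytes(hri));
      puts.add(put);
    }
    metaTable.put(puts);  // one batched write covering every new region
  }
}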

File: MetaEditor.java (org.apache.hadoop.hbase.catalog)

@@ -47,6 +47,13 @@ import org.apache.hadoop.hbase.catalog.MetaReader.Visitor;
 public class MetaEditor {
   private static final Log LOG = LogFactory.getLog(MetaEditor.class);
 
+  private static Put makePutFromRegionInfo(HRegionInfo regionInfo)
+  throws IOException {
+    Put put = new Put(regionInfo.getRegionName());
+    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+      Writables.getBytes(regionInfo));
+    return put;
+  }
   /**
    * Adds a META row for the specified new region.
    * @param regionInfo region information
@@ -55,14 +62,30 @@
   public static void addRegionToMeta(CatalogTracker catalogTracker,
       HRegionInfo regionInfo)
   throws IOException {
-    Put put = new Put(regionInfo.getRegionName());
-    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-      Writables.getBytes(regionInfo));
     catalogTracker.waitForMetaServerConnectionDefault().put(
-      CatalogTracker.META_REGION, put);
+      CatalogTracker.META_REGION, makePutFromRegionInfo(regionInfo));
     LOG.info("Added region " + regionInfo.getRegionNameAsString() + " to META");
   }
 
+  /**
+   * Adds a META row for each of the specified new regions.
+   * @param catalogTracker CatalogTracker
+   * @param regionInfos region information list
+   * @throws IOException if problem connecting or updating meta
+   */
+  public static void addRegionsToMeta(CatalogTracker catalogTracker,
+      List<HRegionInfo> regionInfos)
+  throws IOException {
+    List<Put> puts = new ArrayList<Put>();
+    for (HRegionInfo regionInfo : regionInfos) {
+      puts.add(makePutFromRegionInfo(regionInfo));
+      LOG.debug("Added region " + regionInfo.getRegionNameAsString() + " to META");
+    }
+    catalogTracker.waitForMetaServerConnectionDefault().put(
+      CatalogTracker.META_REGION, puts);
+    LOG.info("Added " + puts.size() + " regions to META");
+  }
+
   /**
    * Offline parent in meta.
    * Used when splitting.
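A short, hedged usage sketch of the new helper: the wrapper class and method below are made up for illustration, but the two MetaEditor calls are the ones shown in this diff.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaEditor;

public class MetaEditorUsageSketch {
  public static void register(CatalogTracker tracker, List<HRegionInfo> hris)
      throws IOException {
    // Old pattern: one META round trip per region.
    // for (HRegionInfo hri : hris) {
    //   MetaEditor.addRegionToMeta(tracker, hri);
    // }

    // New pattern: a single batched call for the whole list.
    MetaEditor.addRegionsToMeta(tracker, hris);
  }
}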

File: HMaster.java (org.apache.hadoop.hbase.master)

@@ -1,5 +1,5 @@
 /**
- * Copyright 2010 The Apache Software Foundation
+ * Copyright 2011 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerLoad;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
@@ -55,8 +54,8 @@ import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.MetaScanner;
+import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
 import org.apache.hadoop.hbase.ipc.HBaseRPC;
@@ -76,16 +75,17 @@ import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.InfoServer;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Sleeper;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.VersionInfo;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.ClusterId;
 import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
 import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
@@ -975,25 +975,39 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
     // do rename to move it into place?
     FSUtils.createTableDescriptor(hTableDescriptor, conf);
-    for (HRegionInfo newRegion : newRegions) {
-      // 1. Set table enabling flag up in zk.
-      try {
-        assignmentManager.getZKTable().setEnabledTable(tableName);
-      } catch (KeeperException e) {
-        throw new IOException("Unable to ensure that the table will be" +
+    // 1. Set table enabling flag up in zk.
+    try {
+      assignmentManager.getZKTable().setEnabledTable(tableName);
+    } catch (KeeperException e) {
+      throw new IOException("Unable to ensure that the table will be" +
         " enabled because of a ZooKeeper issue", e);
     }
+    List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
+    final int batchSize = this.conf.getInt("hbase.master.createtable.batchsize", 100);
+    HLog hlog = null;
+    for (int regionIdx = 0; regionIdx < newRegions.length; regionIdx++) {
+      HRegionInfo newRegion = newRegions[regionIdx];
       // 2. Create HRegion
       HRegion region = HRegion.createHRegion(newRegion,
-        fileSystemManager.getRootDir(), conf, hTableDescriptor);
-      // 3. Insert into META
-      MetaEditor.addRegionToMeta(catalogTracker, region.getRegionInfo());
+        fileSystemManager.getRootDir(), conf, hTableDescriptor, hlog);
+      if (hlog == null) {
+        hlog = region.getLog();
+      }
+      regionInfos.add(region.getRegionInfo());
+      if (regionIdx % batchSize == 0) {
+        // 3. Insert into META
+        MetaEditor.addRegionsToMeta(catalogTracker, regionInfos);
+        regionInfos.clear();
+      }
       // 4. Close the new region to flush to disk. Close log file too.
       region.close();
-      region.getLog().closeAndDelete();
     }
+    hlog.closeAndDelete();
+    if (regionInfos.size() > 0) {
+      MetaEditor.addRegionsToMeta(catalogTracker, regionInfos);
+    }
 
     // 5. Trigger immediate assignment of the regions in round-robin fashion
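Stripped of the HBase specifics, the loop above follows a flush-every-N pattern: accumulate items, flush each full batch, then flush whatever remains after the loop. A generic, runnable sketch of that control flow follows; all names and the item type are placeholders, not HBase classes.

import java.util.ArrayList;
import java.util.List;

public class BatchFlushSketch {
  static void flush(List<String> batch) {
    System.out.println("flushing " + batch.size() + " items");
  }

  public static void main(String[] args) {
    final int batchSize = 100;
    List<String> pending = new ArrayList<String>();
    for (int i = 0; i < 250; i++) {
      pending.add("item-" + i);
      if (pending.size() >= batchSize) {
        flush(pending);   // periodic flush, like the batched META insert above
        pending.clear();
      }
    }
    if (!pending.isEmpty()) {
      flush(pending);     // final flush for the remainder
    }
  }
}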

File: HRegion.java (org.apache.hadoop.hbase.regionserver)

@@ -2778,19 +2778,44 @@ public class HRegion implements HeapSize { // , Writable{
   public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
       final Configuration conf, final HTableDescriptor hTableDescriptor)
   throws IOException {
+    return createHRegion(info, rootDir, conf, hTableDescriptor, null);
+  }
+
+  /**
+   * Convenience method creating new HRegions. Used by createTable.
+   * The {@link HLog} for the created region needs to be closed explicitly.
+   * Use {@link HRegion#getLog()} to get access.
+   *
+   * @param info Info for region to create.
+   * @param rootDir Root directory for HBase instance
+   * @param conf
+   * @param hTableDescriptor
+   * @param hlog shared HLog
+   * @return new HRegion
+   *
+   * @throws IOException
+   */
+  public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
+      final Configuration conf,
+      final HTableDescriptor hTableDescriptor,
+      final HLog hlog)
+  throws IOException {
     LOG.info("creating HRegion " + info.getTableNameAsString()
       + " HTD == " + hTableDescriptor + " RootDir = " + rootDir +
       " Table name == " + info.getTableNameAsString());
     Path tableDir =
       HTableDescriptor.getTableDir(rootDir, info.getTableName());
     Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName());
     FileSystem fs = FileSystem.get(conf);
     fs.mkdirs(regionDir);
+    HLog effectiveHLog = hlog;
+    if (hlog == null) {
+      effectiveHLog = new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME),
+        new Path(regionDir, HConstants.HREGION_OLDLOGDIR_NAME), conf);
+    }
     HRegion region = HRegion.newHRegion(tableDir,
-      new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME),
-        new Path(regionDir, HConstants.HREGION_OLDLOGDIR_NAME), conf),
-      fs, conf, info, hTableDescriptor, null);
+      effectiveHLog, fs, conf, info, hTableDescriptor, null);
     region.initialize();
     return region;
   }
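A hedged sketch of how a caller uses the new overload: pass null on the first call so createHRegion builds the HLog, reuse it for the remaining regions, and close it once at the end, which is what the createTable loop above does. The class and method names here are made up for illustration; the HRegion and HLog calls are the ones introduced or shown in this commit.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog;

public class SharedHLogSketch {
  public static void createAll(List<HRegionInfo> infos, Path rootDir,
      Configuration conf, HTableDescriptor htd) throws IOException {
    HLog shared = null;
    for (HRegionInfo info : infos) {
      // First call passes null, so createHRegion makes the HLog; later calls
      // reuse it instead of creating one log per region.
      HRegion region = HRegion.createHRegion(info, rootDir, conf, htd, shared);
      if (shared == null) {
        shared = region.getLog();
      }
      region.close();
    }
    if (shared != null) {
      shared.closeAndDelete();  // close the shared log once, at the end
    }
  }
}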