HBASE-4010 HMaster.createTable could be heavily optimized
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1138318 13f79535-47bb-0310-9956-ffa450edef68
parent 6dc0b59d44
commit 7f2792b11b
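The change in a nutshell, as a rough illustrative sketch rather than the patch itself (metaTable, newRegions and the class name below are hypothetical placeholders): instead of one round trip to .META. per new region, build one Put per region and ship the whole list in a single batched call.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Writables;

// Illustrative only; the patch itself goes through CatalogTracker rather than HTable.
public class BatchedMetaPutSketch {
  static void addAll(HTable metaTable, List<HRegionInfo> newRegions) throws IOException {
    List<Put> puts = new ArrayList<Put>();
    for (HRegionInfo hri : newRegions) {
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
          Writables.getBytes(hri));
      puts.add(put);
    }
    metaTable.put(puts); // one batched call instead of newRegions.size() separate calls
  }
}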
MetaEditor.java

@@ -47,6 +47,13 @@ import org.apache.hadoop.hbase.catalog.MetaReader.Visitor;
 public class MetaEditor {
   private static final Log LOG = LogFactory.getLog(MetaEditor.class);

+  private static Put makePutFromRegionInfo(HRegionInfo regionInfo) throws IOException {
+    Put put = new Put(regionInfo.getRegionName());
+    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+      Writables.getBytes(regionInfo));
+    return put;
+  }
+
   /**
    * Adds a META row for the specified new region.
    * @param regionInfo region information
@@ -55,14 +62,30 @@ public class MetaEditor {
   public static void addRegionToMeta(CatalogTracker catalogTracker,
       HRegionInfo regionInfo)
   throws IOException {
-    Put put = new Put(regionInfo.getRegionName());
-    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-      Writables.getBytes(regionInfo));
     catalogTracker.waitForMetaServerConnectionDefault().put(
-      CatalogTracker.META_REGION, put);
+      CatalogTracker.META_REGION, makePutFromRegionInfo(regionInfo));
     LOG.info("Added region " + regionInfo.getRegionNameAsString() + " to META");
   }

+  /**
+   * Adds a META row for each of the specified new regions.
+   * @param catalogTracker CatalogTracker
+   * @param regionInfos region information list
+   * @throws IOException if problem connecting or updating meta
+   */
+  public static void addRegionsToMeta(CatalogTracker catalogTracker,
+      List<HRegionInfo> regionInfos)
+  throws IOException {
+    List<Put> puts = new ArrayList<Put>();
+    for (HRegionInfo regionInfo : regionInfos) {
+      puts.add(makePutFromRegionInfo(regionInfo));
+      LOG.debug("Added region " + regionInfo.getRegionNameAsString() + " to META");
+    }
+    catalogTracker.waitForMetaServerConnectionDefault().put(
+      CatalogTracker.META_REGION, puts);
+    LOG.info("Added " + puts.size() + " regions to META");
+  }
+
   /**
    * Offline parent in meta.
    * Used when splitting.
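For contrast, a minimal sketch of the caller-side difference before and after this change (tracker, newRegions and the class name are placeholders; only addRegionsToMeta is new in this commit):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaEditor;

// Sketch of how a caller's write pattern changes; tracker and newRegions come from the caller.
public class MetaEditorCallSketch {
  static void oldStyle(CatalogTracker tracker, List<HRegionInfo> newRegions)
  throws IOException {
    for (HRegionInfo hri : newRegions) {
      MetaEditor.addRegionToMeta(tracker, hri); // one META write per region
    }
  }

  static void newStyle(CatalogTracker tracker, List<HRegionInfo> newRegions)
  throws IOException {
    MetaEditor.addRegionsToMeta(tracker, newRegions); // one batched META write
  }
}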
HMaster.java

@@ -1,5 +1,5 @@
 /**
- * Copyright 2010 The Apache Software Foundation
+ * Copyright 2011 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerLoad;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
@@ -55,8 +54,8 @@ import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.MetaScanner;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
 import org.apache.hadoop.hbase.ipc.HBaseRPC;
@@ -76,16 +75,17 @@ import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.InfoServer;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Sleeper;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.ClusterId;
 import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
 import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
@@ -975,7 +975,6 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
     // do rename to move it into place?
     FSUtils.createTableDescriptor(hTableDescriptor, conf);

-    for (HRegionInfo newRegion : newRegions) {
       // 1. Set table enabling flag up in zk.
       try {
         assignmentManager.getZKTable().setEnabledTable(tableName);
@@ -984,16 +983,31 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
           " enabled because of a ZooKeeper issue", e);
       }

+    List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
+    final int batchSize = this.conf.getInt("hbase.master.createtable.batchsize", 100);
+    HLog hlog = null;
+    for (int regionIdx = 0; regionIdx < newRegions.length; regionIdx++) {
+      HRegionInfo newRegion = newRegions[regionIdx];
       // 2. Create HRegion
       HRegion region = HRegion.createHRegion(newRegion,
-        fileSystemManager.getRootDir(), conf, hTableDescriptor);
+        fileSystemManager.getRootDir(), conf, hTableDescriptor, hlog);
+      if (hlog == null) {
+        hlog = region.getLog();
+      }

+      regionInfos.add(region.getRegionInfo());
+      if (regionIdx % batchSize == 0) {
       // 3. Insert into META
-      MetaEditor.addRegionToMeta(catalogTracker, region.getRegionInfo());
+        MetaEditor.addRegionsToMeta(catalogTracker, regionInfos);
+        regionInfos.clear();
+      }

       // 4. Close the new region to flush to disk.  Close log file too.
       region.close();
-      region.getLog().closeAndDelete();
     }
+    hlog.closeAndDelete();
+    if (regionInfos.size() > 0) {
+      MetaEditor.addRegionsToMeta(catalogTracker, regionInfos);
+    }

     // 5. Trigger immediate assignment of the regions in round-robin fashion
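The loop above reduces to a familiar accumulate-and-flush shape, sketched generically below (Sink, process and batchSize are illustrative names, not HBase API). Note that the patch keys its flush on regionIdx % batchSize == 0 while this sketch keys it on the pending count; both need the trailing flush for any remainder, and the patch additionally shares one HLog across all regions instead of opening and closing a log per region.

import java.util.ArrayList;
import java.util.List;

// Generic shape of the batching used in createTable above; all names are made up.
public class BatchFlushSketch {
  interface Sink<T> {
    void flush(List<T> batch);
  }

  static <T> void process(T[] items, int batchSize, Sink<T> sink) {
    List<T> pending = new ArrayList<T>();
    for (int i = 0; i < items.length; i++) {
      pending.add(items[i]);
      if (pending.size() >= batchSize) { // flush a full batch
        sink.flush(pending);
        pending.clear();
      }
    }
    if (!pending.isEmpty()) {            // flush whatever is left over
      sink.flush(pending);
    }
  }
}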
HRegion.java

@@ -2778,6 +2778,28 @@ public class HRegion implements HeapSize { // , Writable{
   public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
       final Configuration conf, final HTableDescriptor hTableDescriptor)
   throws IOException {
+    return createHRegion(info, rootDir, conf, hTableDescriptor, null);
+  }
+
+  /**
+   * Convenience method creating new HRegions. Used by createTable.
+   * The {@link HLog} for the created region needs to be closed explicitly.
+   * Use {@link HRegion#getLog()} to get access.
+   *
+   * @param info Info for region to create.
+   * @param rootDir Root directory for HBase instance
+   * @param conf
+   * @param hTableDescriptor
+   * @param hlog shared HLog
+   * @return new HRegion
+   *
+   * @throws IOException
+   */
+  public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
+      final Configuration conf,
+      final HTableDescriptor hTableDescriptor,
+      final HLog hlog)
+  throws IOException {
     LOG.info("creating HRegion " + info.getTableNameAsString()
       + " HTD == " + hTableDescriptor + " RootDir = " + rootDir +
       " Table name == " + info.getTableNameAsString());
@@ -2787,10 +2809,13 @@ public class HRegion implements HeapSize { // , Writable{
     Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName());
     FileSystem fs = FileSystem.get(conf);
     fs.mkdirs(regionDir);
+    HLog effectiveHLog = hlog;
+    if (hlog == null) {
+      effectiveHLog = new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME),
+          new Path(regionDir, HConstants.HREGION_OLDLOGDIR_NAME), conf);
+    }
     HRegion region = HRegion.newHRegion(tableDir,
-      new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME),
-        new Path(regionDir, HConstants.HREGION_OLDLOGDIR_NAME), conf),
-      fs, conf, info, hTableDescriptor, null);
+      effectiveHLog, fs, conf, info, hTableDescriptor, null);
     region.initialize();
     return region;
   }
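Per the javadoc on the new overload, a caller that obtains the shared HLog via HRegion#getLog() owns its lifecycle and must close it itself. A sketch of that contract, assuming a hypothetical caller creating several regions of one table (all parameters and the class name below are placeholders supplied for illustration):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog;

// Sketch of the shared-HLog lifecycle described in the javadoc above.
public class SharedHLogSketch {
  static void createRegions(HRegionInfo[] regions, Path rootDir, Configuration conf,
      HTableDescriptor htd) throws IOException {
    HLog shared = null;
    for (HRegionInfo hri : regions) {
      // First call passes null, so createHRegion opens a log; later calls reuse it.
      HRegion region = HRegion.createHRegion(hri, rootDir, conf, htd, shared);
      if (shared == null) {
        shared = region.getLog();
      }
      region.close(); // flush the new region to disk
    }
    if (shared != null) {
      shared.closeAndDelete(); // the caller owns the shared log
    }
  }
}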