HBASE-2451 .META. by-passes cache; BLOCKCACHE=>'false'
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@934654 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent b9aeb3e125
commit 2a8ed43ac1
@@ -18,8 +18,7 @@
# limitations under the License.
#
# Script adds a table back to a running hbase.
-# Currently only works on a copied aside table.
-# You cannot parse arbitrary table name.
+# Currently only works if the table data is in place.
#
# To see usage for this script, run:
#

@@ -0,0 +1,82 @@
# Set in_memory=true and blockcache=true on catalog tables.
# The .META. and -ROOT- tables can be created with caching and
# in_memory set to false. You want them set to true so that
# these hot tables make it into cache. To see if the
# .META. table has BLOCKCACHE set, in the shell do the following:
#
# hbase> scan '-ROOT-'
#
# Look for the 'info' column family. See if BLOCKCACHE => 'true'?
# If not, run this script and it will set the value to true.
# Setting cache to 'true' will only take effect on region restart,
# or if you close the .META. region -- *disruptive* -- and have
# it deploy elsewhere. This script runs against an up and running
# hbase instance.
#
# To see usage for this script, run:
#
#  ${HBASE_HOME}/bin/hbase org.jruby.Main set_meta_block_caching.rb
#
include Java
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.HConstants
import org.apache.hadoop.hbase.HRegionInfo
import org.apache.hadoop.hbase.client.HTable
import org.apache.hadoop.hbase.client.Delete
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.HTableDescriptor
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.util.FSUtils
import org.apache.hadoop.hbase.util.Writables
import org.apache.hadoop.fs.Path
import org.apache.hadoop.fs.FileSystem
import org.apache.commons.logging.LogFactory

# Name of this script
NAME = "set_meta_block_caching.rb"


# Print usage for this script
def usage
  puts 'Usage: %s' % NAME
  exit!
end

# Get configuration to use.
c = HBaseConfiguration.new()

# Set hadoop filesystem configuration using the hbase.rootdir.
# Otherwise, we'll always use localhost though the hbase.rootdir
# might be pointing at an hdfs location.
c.set("fs.default.name", c.get(HConstants::HBASE_DIR))
fs = FileSystem.get(c)

# Get a logger instance.
LOG = LogFactory.getLog(NAME)

# Check arguments
if ARGV.size > 0
  usage
end

# Scan the region info entries in -ROOT- and, for each .META. region found,
# enable BLOCKCACHE and IN_MEMORY on the catalog 'info' column family.
metaTable = HTable.new(c, HConstants::ROOT_TABLE_NAME)
scan = Scan.new()
scan.addColumn(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER);
scanner = metaTable.getScanner(scan)
while (result = scanner.next())
  rowid = Bytes.toString(result.getRow())
  rowidStr = java.lang.String.new(rowid)
  LOG.info("Setting BLOCKCACHE and IN_MEMORY on: " + rowidStr);
  hriValue = result.getValue(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER)
  hri = Writables.getHRegionInfo(hriValue)
  family = hri.getTableDesc().getFamily(HConstants::CATALOG_FAMILY)
  family.setBlockCacheEnabled(true)
  family.setInMemory(true)
  p = Put.new(result.getRow())
  p.add(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER, Writables.getBytes(hri));
  metaTable.put(p)
end
scanner.close()
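For readers working from the Java client API, the loop above is a read-modify-write against -ROOT-: scan the info:regioninfo column, deserialize the HRegionInfo, flip the flags on the embedded 'info' HColumnDescriptor, and write the bytes back. The following is a sketch only, not part of the patch; it assumes the same 2010-era classes the script imports and omits error handling:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Writables;

    public class SetMetaBlockCachingSketch {
      public static void main(String[] args) throws Exception {
        HBaseConfiguration conf = new HBaseConfiguration();
        HTable root = new HTable(conf, HConstants.ROOT_TABLE_NAME);
        Scan scan = new Scan();
        scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
        ResultScanner scanner = root.getScanner(scan);
        try {
          for (Result result : scanner) {
            // Deserialize the .META. region's HRegionInfo as stored in -ROOT-.
            byte[] bytes =
                result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
            HRegionInfo hri = Writables.getHRegionInfo(bytes);
            // Flip the caching flags on the 'info' family of the embedded descriptor.
            HColumnDescriptor info =
                hri.getTableDesc().getFamily(HConstants.CATALOG_FAMILY);
            info.setBlockCacheEnabled(true);
            info.setInMemory(true);
            // Write the updated region info back to the same catalog row.
            Put put = new Put(result.getRow());
            put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
                Writables.getBytes(hri));
            root.put(put);
          }
        } finally {
          scanner.close();
        }
      }
    }

As the script header notes, the rewritten descriptor only takes effect once the .META. region is redeployed, for example after a region restart.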
@@ -666,7 +666,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
      HConstants.ROOT_TABLE_NAME,
      new HColumnDescriptor[] { new HColumnDescriptor(HConstants.CATALOG_FAMILY,
          10, // Ten is an arbitrary number. Keep versions to help debugging.
-         Compression.Algorithm.NONE.getName(), false, true, 8 * 1024,
+         Compression.Algorithm.NONE.getName(), true, true, 8 * 1024,
          HConstants.FOREVER, false, HConstants.REPLICATION_SCOPE_LOCAL) });

  /** Table descriptor for <code>.META.</code> catalog table */

@@ -674,7 +674,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
      HConstants.META_TABLE_NAME, new HColumnDescriptor[] {
        new HColumnDescriptor(HConstants.CATALOG_FAMILY,
          10, // Ten is an arbitrary number. Keep versions to help debugging.
-         Compression.Algorithm.NONE.getName(), false, true, 8 * 1024,
+         Compression.Algorithm.NONE.getName(), true, true, 8 * 1024,
          HConstants.FOREVER, false, HConstants.REPLICATION_SCOPE_LOCAL),
        new HColumnDescriptor(HConstants.CATALOG_HISTORIAN_FAMILY,
          HConstants.ALL_VERSIONS, Compression.Algorithm.NONE.getName(),

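Both descriptor definitions above flip one constructor boolean from false to true so that freshly created -ROOT- and .META. tables carry cache-friendly settings for the catalog family. Spelled out with the HColumnDescriptor setters rather than the positional constructor, the intended end state of the 'info' family looks roughly like the sketch below (an illustration only; the exact positional parameter order should be checked against HColumnDescriptor itself, and the helper name here is made up):

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HConstants;

    public class CatalogInfoFamilySketch {
      // Target state of the catalog 'info' family after this patch, expressed
      // with setters instead of the positional constructor used above.
      static HColumnDescriptor patchedInfoFamily() {
        HColumnDescriptor info = new HColumnDescriptor(HConstants.CATALOG_FAMILY);
        info.setMaxVersions(10);            // ten versions, kept to help debugging
        info.setInMemory(true);             // IN_MEMORY  => 'true'
        info.setBlockCacheEnabled(true);    // BLOCKCACHE => 'true'
        info.setBlocksize(8 * 1024);        // 8 KB blocks, as in the descriptor above
        info.setTimeToLive(HConstants.FOREVER);
        return info;
      }
    }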
@@ -249,19 +249,20 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
      // created here in bootstrap and it'll need to be cleaned up. Better to
      // not make it in the first place. Turn off block caching for bootstrap.
      // Enable after.
-     setBlockCaching(HRegionInfo.ROOT_REGIONINFO, false);
-     setBlockCaching(HRegionInfo.FIRST_META_REGIONINFO, false);
-     HRegion root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, rd, c);
-     HRegion meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO,
-       rd, c);
+     HRegionInfo rootHRI = new HRegionInfo(HRegionInfo.ROOT_REGIONINFO);
+     setInfoFamilyCaching(rootHRI, false);
+     HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
+     setInfoFamilyCaching(metaHRI, false);
+     HRegion root = HRegion.createHRegion(rootHRI, rd, c);
+     HRegion meta = HRegion.createHRegion(metaHRI, rd, c);
+     setInfoFamilyCaching(rootHRI, true);
+     setInfoFamilyCaching(metaHRI, true);
      // Add first region from the META table to the ROOT region.
      HRegion.addRegionToMETA(root, meta);
      root.close();
      root.getLog().closeAndDelete();
      meta.close();
      meta.getLog().closeAndDelete();
-     setBlockCaching(HRegionInfo.ROOT_REGIONINFO, true);
-     setBlockCaching(HRegionInfo.FIRST_META_REGIONINFO, true);
    } catch (IOException e) {
      e = RemoteExceptionHandler.checkIOException(e);
      LOG.error("bootstrap", e);

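The reordered bootstrap sequence is the crux of the fix: the info-family flags are switched off only while the throwaway -ROOT-/.META. bootstrap regions are written, and switched back on before .META.'s region info is published into -ROOT-, so the descriptor that gets persisted reads as cache-enabled. An annotated restatement of the hunk's sequence (same identifiers as above; rd and c are the root directory and configuration already in scope there, so this is commentary rather than standalone code):

    // Work on copies so the shared HRegionInfo constants are not handed out
    // with caching disabled while bootstrap is in flight.
    HRegionInfo rootHRI = new HRegionInfo(HRegionInfo.ROOT_REGIONINFO);
    HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);

    // 1. Caching off: the bootstrap regions are written once and closed,
    //    so pulling their blocks into the LRU cache would only pollute it.
    setInfoFamilyCaching(rootHRI, false);
    setInfoFamilyCaching(metaHRI, false);

    // 2. Write the bootstrap regions.
    HRegion root = HRegion.createHRegion(rootHRI, rd, c);
    HRegion meta = HRegion.createHRegion(metaHRI, rd, c);

    // 3. Caching back on before .META.'s HRegionInfo is serialized into -ROOT-,
    //    so the stored descriptor ends up with BLOCKCACHE/IN_MEMORY => 'true'.
    setInfoFamilyCaching(rootHRI, true);
    setInfoFamilyCaching(metaHRI, true);
    HRegion.addRegionToMETA(root, meta);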
@@ -273,9 +274,12 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
   * @param hri Region whose catalog-family caching flags are set to <code>b</code>
   * @param b
   */
- private static void setBlockCaching(final HRegionInfo hri, final boolean b) {
+ private static void setInfoFamilyCaching(final HRegionInfo hri, final boolean b) {
    for (HColumnDescriptor hcd: hri.getTableDesc().families.values()) {
-     hcd.setBlockCacheEnabled(b);
+     if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
+       hcd.setBlockCacheEnabled(b);
+       hcd.setInMemory(b);
+     }
    }
  }

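The renamed helper now restricts itself to the catalog 'info' family and sets both flags together, so .META.'s 'historian' family is left alone. A small standalone sketch of that behaviour follows; the class name and main method are illustrative, and it assumes the era's public HTableDescriptor.META_TABLEDESC constant and copy constructor:

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InfoFamilyCachingDemo {
      // Same filter the patched helper applies: only the catalog family is touched.
      static void setInfoFamilyCaching(HTableDescriptor htd, boolean b) {
        for (HColumnDescriptor hcd : htd.getFamilies()) {
          if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
            hcd.setBlockCacheEnabled(b);
            hcd.setInMemory(b);
          }
        }
      }

      public static void main(String[] args) {
        // Copy the compiled-in descriptor, mirroring how bootstrap() copies region info.
        HTableDescriptor meta = new HTableDescriptor(HTableDescriptor.META_TABLEDESC);
        setInfoFamilyCaching(meta, false);
        for (HColumnDescriptor hcd : meta.getFamilies()) {
          // Expect 'info' to print false/false here while 'historian' keeps its defaults.
          System.out.println(Bytes.toString(hcd.getName())
              + " BLOCKCACHE=" + hcd.isBlockCacheEnabled()
              + " IN_MEMORY=" + hcd.isInMemory());
        }
      }
    }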