HBASE-745 scaling of one regionserver, improving memory and cpu usage
HBASE-759 TestMetaUtils failing on hudson

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@678650 13f79535-47bb-0310-9956-ffa450edef68
commit d02ab894e9
parent 27f37b6365
CHANGES.txt (20 changes)

@@ -198,6 +198,14 @@ Trunk (unreleased changes)
    HBASE-679 Regionserver addresses are still not right in the new tables page
    HBASE-758 Throwing IOE read-only when should be throwing NSRE
    HBASE-743 bin/hbase migrate upgrade fails when redo logs exists
+   HBASE-754 The JRuby shell documentation is wrong in "get" and "put"
+             (Jean-Daniel Cryans via Stack)
+   HBASE-756 In HBase shell, the put command doesn't process the timestamp
+             (Jean-Daniel Cryans via Stack)
+   HBASE-757 REST mangles table names (Sishen via Stack)
+   HBASE-706 On OOME, regionserver sticks around and doesn't go down with cluster
+             (Jean-Daniel Cryans via Stack)
+   HBASE-759 TestMetaUtils failing on hudson

 IMPROVEMENTS
    HBASE-559 MR example job to count table rows
@@ -294,18 +302,12 @@ Trunk (unreleased changes)
              (Jean-Daniel Cryans via Stack)
    HBASE-731 Add a meta refresh tag to the Web ui for master and region server
              (Jean-Daniel Cryans via Stack)
-   HBASE-706 On OOME, regionserver sticks around and doesn't go down with cluster
-             (Jean-Daniel Cryans via Stack)
-   HBASE-735 hbase shell doesn't trap CTRL-C signal
-             (Jean-Daniel Cryans via Stack)
+   HBASE-735 hbase shell doesn't trap CTRL-C signal (Jean-Daniel Cryans via Stack)
    HBASE-730 On startup, rinse STARTCODE and SERVER from .META.
              (Jean-Daniel Cryans via Stack)
    HBASE-738 overview.html in need of updating (Izaak Rubin via Stack)
-   HBASE-754 The JRuby shell documentation is wrong in "get" and "put"
-             (Jean-Daniel Cryans via Stack)
-   HBASE-756 In HBase shell, the put command doesn't process the timestamp
-             (Jean-Daniel Cryans via Stack)
-   HBASE-757 REST mangles table names (Sishen via Stack)
+   HBASE-745 scaling of one regionserver, improving memory and cpu usage (partial)
+             (LN via Stack)

 NEW FEATURES
    HBASE-47 Option to set TTL for columns in hbase

CompactSplitThread.java
@@ -153,7 +153,6 @@ class CompactSplitThread extends Thread implements HConstants {
       }
       t = meta;
     }
-    LOG.info("Updating " + Bytes.toString(t.getTableName()) + " with region split info");

     // Mark old region as offline and split in META.
     // NOTE: there is no need for retry logic here. HTable does it for us.
@@ -177,9 +176,6 @@ class CompactSplitThread extends Thread implements HConstants {
     }

     // Now tell the master about the new regions
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Reporting region split to master");
-    }
     server.reportSplit(oldRegionInfo, newRegions[0].getRegionInfo(),
       newRegions[1].getRegionInfo());
     LOG.info("region split, META updated, and report to master all" +

HRegion.java
@@ -1008,18 +1008,19 @@ public class HRegion implements HConstants {
    */
   private boolean internalFlushcache() throws IOException {
     final long startTime = System.currentTimeMillis();

     // Clear flush flag.
     this.flushRequested = false;

     // Record latest flush time
     this.lastFlushTime = startTime;
+    // If nothing to flush, return and avoid logging start/stop flush.
+    if (this.memcacheSize.get() <= 0) {
+      return false;
+    }
     if (LOG.isDebugEnabled()) {
       LOG.debug("Started memcache flush for region " + this +
         ". Current region memcache size " +
         StringUtils.humanReadableInt(this.memcacheSize.get()));
     }

     // Stop updates while we snapshot the memcache of all stores. We only have
     // to do this for a moment. Its quick. The subsequent sequence id that
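A minimal standalone sketch of the guard this hunk adds: the flush bails out before any logging when nothing is buffered. All names here (FlushGuardSketch and its members) are illustrative, not HBase code.

    import java.util.concurrent.atomic.AtomicLong;

    class FlushGuardSketch {
      // Tracks bytes buffered in memory, as HRegion does with memcacheSize.
      private final AtomicLong memcacheSize = new AtomicLong(0);

      // Returns true only when a flush actually ran, mirroring the
      // internalFlushcache contract in the hunk above.
      boolean flushcache() {
        if (memcacheSize.get() <= 0) {
          return false; // nothing buffered: skip the flush and its log lines
        }
        // ... snapshot and write out the buffered data here ...
        memcacheSize.set(0);
        return true;
      }
    }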
HStore.java
@@ -23,6 +23,7 @@ import java.io.EOFException;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -699,6 +700,21 @@ public class HStore implements HConstants {
   // Compaction
   //////////////////////////////////////////////////////////////////////////////

+  /*
+   * @param files
+   * @return True if any of the files in <code>files</code> are References.
+   */
+  private boolean hasReferences(Collection<HStoreFile> files) {
+    if (files != null && files.size() > 0) {
+      for (HStoreFile hsf: files) {
+        if (hsf.isReference()) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
   /**
    * Compact the back-HStores. This method may take some time, so the calling
    * thread must be able to block for long periods.
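A note on the helper above: reference files are the half-store pointers a region split leaves in the daughter regions, and only a real compaction rewrites them away. That appears to be why the next hunk bypasses the incremental file selection whenever hasReferences(filesToCompact) is true (or when force is set): in that case every file, references included, must be compacted.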
@@ -742,6 +758,40 @@ public class HStore implements HConstants {
       LOG.warn("Mkdir on " + compactionDir.toString() + " failed");
       return checkSplit();
     }
+
+    // HBASE-745, preparing all store file size for incremental compacting selection.
+    int countOfFiles = filesToCompact.size();
+    long totalSize = 0;
+    long[] fileSizes = new long[countOfFiles];
+    long skipped = 0;
+    int point = 0;
+    for (int i = 0; i < countOfFiles; i++) {
+      HStoreFile file = filesToCompact.get(i);
+      Path path = file.getMapFilePath();
+      int len = 0;
+      for (FileStatus fstatus:fs.listStatus(path)) {
+        len += fstatus.getLen();
+      }
+      fileSizes[i] = len;
+      totalSize += len;
+    }
+    if (!force && !hasReferences(filesToCompact)) {
+      // Here we select files for incremental compaction.
+      // The rule is: if the largest(oldest) one is more than twice the
+      // size of the second, skip the largest, and continue to next...,
+      // until we meet the compactionThreshold limit.
+      for (point = 0; point < compactionThreshold - 1; point++) {
+        if (fileSizes[point] < fileSizes[point + 1] * 2) {
+          break;
+        }
+        skipped += fileSizes[point];
+      }
+      filesToCompact = new ArrayList<HStoreFile>(filesToCompact.subList(point,
+        countOfFiles));
+      LOG.info("Compaction size " + totalSize + ", skipped " + point +
+        ", " + skipped);
+    }
+
     /*
      * We create a new list of MapFile.Reader objects so we don't screw up
      * the caching associated with the currently-loaded ones. Our iteration-
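To make the selection rule above concrete, here is a self-contained sketch that runs the same skip loop on made-up inputs. The sizes and threshold are illustrative only, not values from the patch; files are ordered largest (oldest) first, as in filesToCompact.

    // Runs the HBASE-745 skip loop on example data.
    public class CompactionSelectionSketch {
      public static void main(String[] args) {
        long[] fileSizes = {1000, 400, 210, 100, 90}; // illustrative sizes
        int compactionThreshold = 3;                  // hypothetical config value
        int point = 0;
        long skipped = 0;
        for (point = 0; point < compactionThreshold - 1; point++) {
          // Keep skipping while the current file is at least twice the next one.
          if (fileSizes[point] < fileSizes[point + 1] * 2) {
            break;
          }
          skipped += fileSizes[point];
        }
        // Here: 1000 >= 2 * 400, so index 0 is skipped; 400 < 2 * 210 stops
        // the loop. Files 1..4 would be compacted; 1000 bytes are left alone.
        System.out.println("skipping " + point + " file(s), " + skipped + " bytes");
      }
    }

One reading of the compactionThreshold - 1 cap: after merging the unskipped files into a single output, the store holds at most compactionThreshold store files again.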
@@ -794,10 +844,9 @@ public class HStore implements HConstants {

     // Move the compaction into place.
     completeCompaction(filesToCompact, compactedOutputFile);
-
     if (LOG.isDebugEnabled()) {
       LOG.debug("Completed compaction of " + this.storeNameStr +
         " store size is " + StringUtils.humanReadableInt(storeSize));
     }
   }
   return checkSplit();

TestCompaction.java
@@ -105,6 +105,7 @@ public class TestCompaction extends HBaseTestCase {
     assertTrue(cellValues.length == 3);
     r.flushcache();
     r.compactStores();
+    assertEquals(r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size(), 1);
     // Now assert that there are 4 versions of a record only: thats the
     // 3 versions that should be in the compacted store and then the one more
     // we added when we flushed. But could be 3 only if the flush happened
@@ -132,6 +133,7 @@ public class TestCompaction extends HBaseTestCase {
     // Assert all delted.
     assertNull(r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
     r.flushcache();
+    assertEquals(r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size(), 2);
     assertNull(r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
     // Add a bit of data and flush it so we for sure have the compaction limit
     // for store files. Usually by this time we will have but if compaction
@@ -140,7 +142,9 @@ public class TestCompaction extends HBaseTestCase {
     // content to be certain.
     createSmallerStoreFile(this.r);
     r.flushcache();
+    assertEquals(r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size(), 3);
     r.compactStores();
+    assertEquals(r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size(), 2);
     // Assert that the first row is still deleted.
     cellValues = r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
     assertNull(cellValues);
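The assertEquals calls added across these hunks pin the store-file count at each step: one file after the first full compaction, two once a flush adds another, three after the next flush, and two again after compacting. The final count of two is consistent with the HBASE-745 rule skipping the largest file instead of merging everything back into one.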
TestMetaUtils.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HConnectionManager;


 public class TestMetaUtils extends HBaseClusterTestCase {
@@ -46,12 +47,15 @@ public class TestMetaUtils extends HBaseClusterTestCase {
     utils.addColumn(editTable, new HColumnDescriptor(newColumn));
     utils.deleteColumn(editTable, Bytes.toBytes(oldColumn));
     utils.shutdown();
+    // Delete again so we go get it all fresh.
+    HConnectionManager.deleteConnectionInfo();
     // Now assert columns were added and deleted.
     this.cluster = new MiniHBaseCluster(this.conf, 1);
+    // Now assert columns were added and deleted.
     HTable t = new HTable(conf, editTable);
     HTableDescriptor htd = t.getTableDescriptor();
     HColumnDescriptor hcd = htd.getFamily(newColumn);
     assertTrue(hcd != null);
     assertNull(htd.getFamily(Bytes.toBytes(oldColumn)));
   }
 }
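Why the test now calls HConnectionManager.deleteConnectionInfo(): client connections, and the table metadata fetched through them, are cached per configuration, so without dropping the cached entry the reopened HTable could report the pre-edit schema. A toy model of that failure mode, using a plain map in place of the connection cache (none of these names are HBase API):

    import java.util.HashMap;
    import java.util.Map;

    public class StaleConnectionSketch {
      // Connection cache keyed by configuration, as in HConnectionManager.
      static final Map<String, String> cache = new HashMap<>();

      static String meta = "oldColumn";          // state of .META. on disk
      static String readFromMeta() { return meta; }

      static String getTableDescriptor(String confKey) {
        // Reuse the cached entry if one exists for this configuration.
        return cache.computeIfAbsent(confKey, k -> readFromMeta());
      }

      public static void main(String[] args) {
        System.out.println(getTableDescriptor("conf")); // caches "oldColumn"
        meta = "newColumn";                             // schema edited out-of-band
        System.out.println(getTableDescriptor("conf")); // stale: still oldColumn
        cache.remove("conf");                           // deleteConnectionInfo()
        System.out.println(getTableDescriptor("conf")); // fresh: newColumn
      }
    }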