diff --git a/CHANGES.txt b/CHANGES.txt
index 1ae637091bd..8fdaf51a8b4 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -198,6 +198,14 @@ Trunk (unreleased changes)
HBASE-679 Regionserver addresses are still not right in the new tables page
HBASE-758 Throwing IOE read-only when should be throwing NSRE
HBASE-743 bin/hbase migrate upgrade fails when redo logs exists
+ HBASE-754 The JRuby shell documentation is wrong in "get" and "put"
+ (Jean-Daniel Cryans via Stack)
+ HBASE-756 In HBase shell, the put command doesn't process the timestamp
+ (Jean-Daniel Cryans via Stack)
+ HBASE-757 REST mangles table names (Sishen via Stack)
+ HBASE-706 On OOME, regionserver sticks around and doesn't go down with cluster
+ (Jean-Daniel Cryans via Stack)
+ HBASE-759 TestMetaUtils failing on hudson
IMPROVEMENTS
HBASE-559 MR example job to count table rows
@@ -294,18 +302,12 @@ Trunk (unreleased changes)
(Jean-Daniel Cryans via Stack)
HBASE-731 Add a meta refresh tag to the Web ui for master and region server
(Jean-Daniel Cryans via Stack)
- HBASE-706 On OOME, regionserver sticks around and doesn't go down with cluster
- (Jean-Daniel Cryans via Stack)
- HBASE-735 hbase shell doesn't trap CTRL-C signal
- (Jean-Daniel Cryans via Stack)
+ HBASE-735 hbase shell doesn't trap CTRL-C signal (Jean-Daniel Cryans via Stack)
HBASE-730 On startup, rinse STARTCODE and SERVER from .META.
(Jean-Daniel Cryans via Stack)
HBASE-738 overview.html in need of updating (Izaak Rubin via Stack)
- HBASE-754 The JRuby shell documentation is wrong in "get" and "put"
- (Jean-Daniel Cryans via Stack)
- HBASE-756 In HBase shell, the put command doesn't process the timestamp
- (Jean-Daniel Cryans via Stack)
- HBASE-757 REST mangles table names (Sishen via Stack)
+ HBASE-745 scaling of one regionserver, improving memory and cpu usage (partial)
+ (LN via Stack)
NEW FEATURES
HBASE-47 Option to set TTL for columns in hbase
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index 270fbdba1be..7807fc3ff89 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -153,7 +153,6 @@ class CompactSplitThread extends Thread implements HConstants {
}
t = meta;
}
- LOG.info("Updating " + Bytes.toString(t.getTableName()) + " with region split info");
// Mark old region as offline and split in META.
// NOTE: there is no need for retry logic here. HTable does it for us.
@@ -177,9 +176,6 @@ class CompactSplitThread extends Thread implements HConstants {
}
// Now tell the master about the new regions
- if (LOG.isDebugEnabled()) {
- LOG.debug("Reporting region split to master");
- }
server.reportSplit(oldRegionInfo, newRegions[0].getRegionInfo(),
newRegions[1].getRegionInfo());
LOG.info("region split, META updated, and report to master all" +
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index c67cdeb8d37..1417e7c99a8 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1008,18 +1008,19 @@ public class HRegion implements HConstants {
*/
private boolean internalFlushcache() throws IOException {
final long startTime = System.currentTimeMillis();
-
// Clear flush flag.
this.flushRequested = false;
-
// Record latest flush time
this.lastFlushTime = startTime;
-
+ // If nothing to flush, return and avoid logging start/stop flush.
+ if (this.memcacheSize.get() <= 0) {
+ return false;
+ }
if (LOG.isDebugEnabled()) {
LOG.debug("Started memcache flush for region " + this +
". Current region memcache size " +
StringUtils.humanReadableInt(this.memcacheSize.get()));
- }
+ }
// Stop updates while we snapshot the memcache of all stores. We only have
// to do this for a moment. Its quick. The subsequent sequence id that
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HStore.java b/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
index b8304ea4f7e..df2fcebe8fa 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -23,6 +23,7 @@ import java.io.EOFException;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
@@ -699,6 +700,21 @@ public class HStore implements HConstants {
// Compaction
//////////////////////////////////////////////////////////////////////////////
+  /*
+   * @param files
+   * @return True if any of the files in <code>files</code> are References.
+   */
+  private boolean hasReferences(Collection<HStoreFile> files) {
+ if (files != null && files.size() > 0) {
+ for (HStoreFile hsf: files) {
+ if (hsf.isReference()) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
/**
* Compact the back-HStores. This method may take some time, so the calling
* thread must be able to block for long periods.
@@ -742,6 +758,40 @@ public class HStore implements HConstants {
LOG.warn("Mkdir on " + compactionDir.toString() + " failed");
return checkSplit();
}
+
+    // HBASE-745: gather the size of every store file so we can select
+    // files for incremental compaction below.
+ int countOfFiles = filesToCompact.size();
+ long totalSize = 0;
+ long[] fileSizes = new long[countOfFiles];
+ long skipped = 0;
+ int point = 0;
+ for (int i = 0; i < countOfFiles; i++) {
+ HStoreFile file = filesToCompact.get(i);
+ Path path = file.getMapFilePath();
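+      // A MapFile is a directory containing separate data and index files;
+      // sum their lengths to get the total size of this store file.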
+      long len = 0;
+ for (FileStatus fstatus:fs.listStatus(path)) {
+ len += fstatus.getLen();
+ }
+ fileSizes[i] = len;
+ totalSize += len;
+ }
+ if (!force && !hasReferences(filesToCompact)) {
+      // Here we select files for incremental compaction.
+      // The rule: if the largest (oldest) file is more than twice the size
+      // of the next one, skip it and compare the next pair, skipping at
+      // most compactionThreshold - 1 files in total.
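+      // For example, given sizes [10, 4, 3, 2] and compactionThreshold 3:
+      // 10 >= 2 * 4, so the first file is skipped; then 4 < 2 * 3 stops
+      // the scan, leaving [4, 3, 2] to be compacted together.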
+ for (point = 0; point < compactionThreshold - 1; point++) {
+ if (fileSizes[point] < fileSizes[point + 1] * 2) {
+ break;
+ }
+ skipped += fileSizes[point];
+ }
+      filesToCompact = new ArrayList<HStoreFile>(filesToCompact.subList(point,
+        countOfFiles));
+      LOG.info("Compaction size: " + totalSize + ", files skipped: " + point +
+        ", size skipped: " + skipped);
+ }
+
/*
* We create a new list of MapFile.Reader objects so we don't screw up
* the caching associated with the currently-loaded ones. Our iteration-
@@ -794,10 +844,9 @@ public class HStore implements HConstants {
// Move the compaction into place.
completeCompaction(filesToCompact, compactedOutputFile);
-
if (LOG.isDebugEnabled()) {
LOG.debug("Completed compaction of " + this.storeNameStr +
- " store size is " + StringUtils.humanReadableInt(storeSize));
+ " store size is " + StringUtils.humanReadableInt(storeSize));
}
}
return checkSplit();
diff --git a/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index 164337ca96b..78ab7ec237d 100644
--- a/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -105,6 +105,7 @@ public class TestCompaction extends HBaseTestCase {
assertTrue(cellValues.length == 3);
r.flushcache();
r.compactStores();
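+    // Everything should now have been merged into a single store file.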
+    assertEquals(1, r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size());
// Now assert that there are 4 versions of a record only: thats the
// 3 versions that should be in the compacted store and then the one more
// we added when we flushed. But could be 3 only if the flush happened
@@ -132,6 +133,7 @@ public class TestCompaction extends HBaseTestCase {
     // Assert all deleted.
assertNull(r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
r.flushcache();
+    assertEquals(2, r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size());
assertNull(r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
// Add a bit of data and flush it so we for sure have the compaction limit
// for store files. Usually by this time we will have but if compaction
@@ -140,7 +142,9 @@ public class TestCompaction extends HBaseTestCase {
// content to be certain.
createSmallerStoreFile(this.r);
r.flushcache();
+    assertEquals(3, r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size());
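+    // Incremental selection should skip the oldest file (more than twice
+    // the size of the next) and merge only the two smaller ones, leaving
+    // two store files.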
r.compactStores();
+    assertEquals(2, r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size());
// Assert that the first row is still deleted.
cellValues = r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
assertNull(cellValues);
diff --git a/src/test/org/apache/hadoop/hbase/util/TestMetaUtils.java b/src/test/org/apache/hadoop/hbase/util/TestMetaUtils.java
index ba15837ea3f..43e62ac82f5 100644
--- a/src/test/org/apache/hadoop/hbase/util/TestMetaUtils.java
+++ b/src/test/org/apache/hadoop/hbase/util/TestMetaUtils.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HConnectionManager;
public class TestMetaUtils extends HBaseClusterTestCase {
@@ -46,12 +47,15 @@ public class TestMetaUtils extends HBaseClusterTestCase {
utils.addColumn(editTable, new HColumnDescriptor(newColumn));
utils.deleteColumn(editTable, Bytes.toBytes(oldColumn));
utils.shutdown();
+    // Delete cached connection information so we go get it all fresh.
+ HConnectionManager.deleteConnectionInfo();
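+    // Otherwise the connection cached for this configuration could still
+    // reference the mini-cluster we just shut down.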
// Now assert columns were added and deleted.
this.cluster = new MiniHBaseCluster(this.conf, 1);
HTable t = new HTable(conf, editTable);
HTableDescriptor htd = t.getTableDescriptor();
HColumnDescriptor hcd = htd.getFamily(newColumn);
assertTrue(hcd != null);
assertNull(htd.getFamily(Bytes.toBytes(oldColumn)));
}
-}
\ No newline at end of file
+}