HADOOP-2377 Holding open MapFile.Readers is expensive, so use less of them
git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@602199 13f79535-47bb-0310-9956-ffa450edef68
commit bccf1dc26f
parent 11df017a67
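In outline, per the diff below: the default memcache flush size rises from 16MB to 64MB and the default maximum region file size from 64MB to 256MB, so stores accumulate fewer, larger files and thus need fewer open MapFile.Readers; HStoreFile.readSplitInfo now closes its FSDataInputStream in a finally block rather than leaking it when a read fails; and PerformanceEvaluation drops its automatic sequentialWrite pre-population and end-of-run table deletion.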
CHANGES.txt
@@ -94,6 +94,7 @@ Trunk (unreleased changes)
    HADOOP-2299 Support inclusive scans (Bryan Duxbury via Stack)
    HADOOP-2333 Client side retries happen at the wrong level
    HADOOP-2357 Compaction cleanup; less deleting + prevent possible file leaks
+   HADOOP-2377 Holding open MapFile.Readers is expensive, so use less of them
 
 
 Release 0.15.1
hbase-default.xml
@@ -153,7 +153,7 @@
 </property>
 <property>
   <name>hbase.hregion.memcache.flush.size</name>
-  <value>16777216</value>
+  <value>67108864</value>
   <description>
   A HRegion memcache will be flushed to disk if size of the memcache
   exceeds this number of bytes. Value is checked by a thread that runs
@@ -174,11 +174,10 @@
 </property>
 <property>
   <name>hbase.hregion.max.filesize</name>
-  <value>67108864</value>
+  <value>268435456</value>
   <description>
   Maximum desired file size for an HRegion. If filesize exceeds
-  value + (value / 2), the HRegion is split in two. Default: 64M.
-  If too large, splits will take so long, clients timeout.
+  value + (value / 2), the HRegion is split in two. Default: 256M.
   </description>
 </property>
 <property>
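Note: with the description's split rule of value + (value / 2), the new default of 268435456 bytes means a region splits once a file passes 268435456 + 134217728 = 402653184 bytes (384MB), up from the previous trigger of roughly 96MB.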
HConstants.java
@@ -88,7 +88,7 @@ public interface HConstants {
   static final String HREGION_OLDLOGFILE_NAME = "oldlogfile.log";
 
   /** Default maximum file size */
-  static final long DEFAULT_MAX_FILE_SIZE = 64 * 1024 * 1024; // 64MB
+  static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024;
 
   // Always store the location of the root table's HRegion.
   // This HRegion is never split.
HRegion.java
@@ -310,9 +310,9 @@ public class HRegion implements HConstants {
       fs.delete(merges);
     }
 
-    // By default, we flush the cache when 16M.
+    // By default, we flush the cache when 64M.
     this.memcacheFlushSize = conf.getInt("hbase.hregion.memcache.flush.size",
-      1024*1024*16);
+      1024*1024*64);
     this.flushListener = listener;
     this.blockingMemcacheSize = this.memcacheFlushSize *
       conf.getInt("hbase.hregion.memcache.block.multiplier", 2);
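Note: because blockingMemcacheSize is derived from memcacheFlushSize, raising the flush default to 64MB also moves the size at which the memcache blocks updates to 64MB * 2 = 128MB under the default hbase.hregion.memcache.block.multiplier of 2.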
HStoreFile.java
@@ -504,14 +504,19 @@ public class HStoreFile implements HConstants, WritableComparable {
    */
   static Reference readSplitInfo(final Path p, final FileSystem fs)
   throws IOException {
+    Reference r = null;
     FSDataInputStream in = fs.open(p);
-    String rn = in.readUTF();
-    HStoreKey midkey = new HStoreKey();
-    midkey.readFields(in);
-    long fid = in.readLong();
-    boolean tmp = in.readBoolean();
-    return new Reference(rn, fid, midkey, tmp? Range.top: Range.bottom);
+    try {
+      String rn = in.readUTF();
+      HStoreKey midkey = new HStoreKey();
+      midkey.readFields(in);
+      long fid = in.readLong();
+      boolean tmp = in.readBoolean();
+      r = new Reference(rn, fid, midkey, tmp? Range.top: Range.bottom);
+    } finally {
+      in.close();
+    }
+    return r;
   }
 
   private void createOrFail(final FileSystem fs, final Path p)
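The rewrite above is the usual read-then-close idiom: every read happens inside the try, and the finally guarantees close() runs even when a read throws, so a malformed split-info file can no longer leak an open handle. Below is a minimal self-contained sketch of the same idiom in plain java.io; the class name ReadThenCloseSketch and its two-field record are illustrative stand-ins, not HBase code:

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

public class ReadThenCloseSketch {
  /** Reads a UTF name then a long id, guaranteeing the stream is closed. */
  static long readFileId(final String path) throws IOException {
    DataInputStream in = new DataInputStream(new FileInputStream(path));
    try {
      in.readUTF();         // name field, skipped in this sketch
      return in.readLong(); // id field
    } finally {
      in.close();           // runs whether or not a read above threw
    }
  }
}

The patch follows the same shape, assigning the Reference inside the try and returning it after the finally so the close always runs before the method exits.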
PerformanceEvaluation.java
@@ -64,15 +64,6 @@ import org.apache.log4j.Logger;
  *
  * <p>If number of clients > 1, we start up a MapReduce job. Each map task
  * runs an individual client. Each client does about 1GB of data.
- *
- * <p>If client == 1, the test table is created and deleted at end of each run
- * and the <code>sequentialWrite</code> test is run first if a test requires
- * a populated test table: e.g. if you are running the
- * <code>sequentialRead</code> test, the test table must hold data for it to
- * read. If client > 1, and we are running clients in a map task, the table
- * is not deleted at the end-of-run. Also, if running the
- * <code>sequentialRead</code> or </code>randomRead</code> tests, the
- * <code>sequentialWrite</code> test is not automatically run first.
  */
 public class PerformanceEvaluation implements HConstants {
   static final Logger LOG =
@@ -553,23 +544,10 @@ public class PerformanceEvaluation implements HConstants {
     try {
       admin = new HBaseAdmin(this.conf);
       checkTable(admin);
-
-      if (cmd.equals(RANDOM_READ) || cmd.equals(RANDOM_READ_MEM) ||
-          cmd.equals(SCAN) || cmd.equals(SEQUENTIAL_READ)) {
-        status.setStatus("Running " + SEQUENTIAL_WRITE + " first so " +
-          cmd + " has data to work against");
-        runOneClient(SEQUENTIAL_WRITE, 0, this.R, this.R, status);
-      }
-
       runOneClient(cmd, 0, this.R, this.R, status);
     } catch (Exception e) {
       LOG.error("Failed", e);
-    } finally {
-      LOG.info("Deleting table " + tableDescriptor.getName());
-      if (admin != null) {
-        admin.deleteTable(tableDescriptor.getName());
-      }
-    }
+    }
   }
 
   private void runTest(final String cmd) throws IOException {
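Note: together with the javadoc trimmed above, this means a single-client run no longer seeds the table via sequentialWrite before a read test and no longer deletes the table when the run ends; sequentialRead and randomRead now assume an already-populated table.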