diff --git a/src/java/org/apache/hadoop/hbase/HConstants.java b/src/java/org/apache/hadoop/hbase/HConstants.java
index ce1399ca0d2..f409252a875 100644
--- a/src/java/org/apache/hadoop/hbase/HConstants.java
+++ b/src/java/org/apache/hadoop/hbase/HConstants.java
@@ -88,7 +88,7 @@ public interface HConstants {
static final String HREGION_OLDLOGFILE_NAME = "oldlogfile.log";
/** Default maximum file size */
- static final long DEFAULT_MAX_FILE_SIZE = 64 * 1024 * 1024; // 64MB
+ static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024; // 256MB
// Always store the location of the root table's HRegion.
// This HRegion is never split.
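
Reviewer note: the compiled-in split threshold quadruples from 64MB to 256MB, so regions split a quarter as often under the same write load. A minimal sketch of how this default is typically consumed, assuming "hbase.hregion.max.filesize" is the site property it backs (the property name and the class below are illustrative, not part of this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HConstants;

    public class MaxFileSizeCheck {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "hbase.hregion.max.filesize" is an assumed property name here;
        // the compiled-in default (now 256MB) applies only when no
        // site-level override is configured.
        long maxFileSize = conf.getLong("hbase.hregion.max.filesize",
            HConstants.DEFAULT_MAX_FILE_SIZE);
        System.out.println("Region split threshold: " + maxFileSize + " bytes");
      }
    }
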
diff --git a/src/java/org/apache/hadoop/hbase/HRegion.java b/src/java/org/apache/hadoop/hbase/HRegion.java
index 62c1f9babed..a483b570ec6 100644
--- a/src/java/org/apache/hadoop/hbase/HRegion.java
+++ b/src/java/org/apache/hadoop/hbase/HRegion.java
@@ -310,9 +310,9 @@ public class HRegion implements HConstants {
fs.delete(merges);
}
- // By default, we flush the cache when 16M.
+ // By default, we flush the cache when it reaches 64M.
this.memcacheFlushSize = conf.getInt("hbase.hregion.memcache.flush.size",
- 1024*1024*16);
+ 1024*1024*64);
this.flushListener = listener;
this.blockingMemcacheSize = this.memcacheFlushSize *
conf.getInt("hbase.hregion.memcache.block.multiplier", 2);
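
Reviewer note: the flush threshold and the blocking watermark are coupled, so this change also doubles the point at which updates block. A standalone sketch of the arithmetic above, using the two property names already visible in this hunk (the demo class itself is illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class MemcacheFlushMath {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Same computation as the HRegion constructor: flush at 64MB by
        // default (was 16MB), and block incoming updates once the
        // memcache reaches flushSize * multiplier (128MB by default).
        int flushSize = conf.getInt("hbase.hregion.memcache.flush.size",
            1024 * 1024 * 64);
        long blockingSize = (long) flushSize *
            conf.getInt("hbase.hregion.memcache.block.multiplier", 2);
        System.out.println("flush at " + flushSize
            + " bytes, block at " + blockingSize + " bytes");
      }
    }
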
diff --git a/src/java/org/apache/hadoop/hbase/HStoreFile.java b/src/java/org/apache/hadoop/hbase/HStoreFile.java
index 323a2bdb972..137a611ce1b 100644
--- a/src/java/org/apache/hadoop/hbase/HStoreFile.java
+++ b/src/java/org/apache/hadoop/hbase/HStoreFile.java
@@ -504,14 +504,19 @@ public class HStoreFile implements HConstants, WritableComparable {
*/
static Reference readSplitInfo(final Path p, final FileSystem fs)
throws IOException {
+ Reference r = null;
FSDataInputStream in = fs.open(p);
- String rn = in.readUTF();
- HStoreKey midkey = new HStoreKey();
- midkey.readFields(in);
- long fid = in.readLong();
- boolean tmp = in.readBoolean();
- return new Reference(rn, fid, midkey, tmp? Range.top: Range.bottom);
-
+ try {
+ String rn = in.readUTF();
+ HStoreKey midkey = new HStoreKey();
+ midkey.readFields(in);
+ long fid = in.readLong();
+ boolean tmp = in.readBoolean();
+ r = new Reference(rn, fid, midkey, tmp? Range.top: Range.bottom);
+ } finally {
+ in.close();
+ }
+ return r;
}
private void createOrFail(final FileSystem fs, final Path p)
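
Reviewer note: the rewrite guarantees the FSDataInputStream is closed even when one of the reads throws; previously a truncated or corrupt split-info file leaked the open handle. A minimal sketch of the same guarded-close shape on plain java.io, so it runs standalone (the class and its simplified field layout are illustrative, not readSplitInfo itself):

    import java.io.DataInputStream;
    import java.io.FileInputStream;
    import java.io.IOException;

    public class GuardedClose {
      // Same pattern as the patched readSplitInfo: perform every read
      // inside try, close unconditionally in finally, and only then
      // return the value assembled from the stream.
      static String readName(String path) throws IOException {
        DataInputStream in = new DataInputStream(new FileInputStream(path));
        String name = null;
        try {
          name = in.readUTF(); // throws on a truncated file...
        } finally {
          in.close();          // ...but the handle is released regardless
        }
        return name;
      }
    }

On Java 7+ the same guarantee is spelled try-with-resources; this tree predates it, hence the explicit finally.
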
diff --git a/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java b/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 71fce068f9b..90c4afa8731 100644
--- a/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -64,15 +64,6 @@ import org.apache.log4j.Logger;
*
* If number of clients > 1, we start up a MapReduce job. Each map task
* runs an individual client. Each client does about 1GB of data.
- *
- * If client == 1, the test table is created and deleted at end of each run
- * and the sequentialWrite test is run first if a test requires
- * a populated test table: e.g. if you are running the
- * sequentialRead test, the test table must hold data for it to
- * read. If client > 1, and we are running clients in a map task, the table
- * is not deleted at the end-of-run. Also, if running the
- * sequentialRead or randomRead tests, the
- * sequentialWrite test is not automatically run first.
*/
public class PerformanceEvaluation implements HConstants {
static final Logger LOG =
@@ -553,23 +544,10 @@ public class PerformanceEvaluation implements HConstants {
try {
admin = new HBaseAdmin(this.conf);
checkTable(admin);
-
- if (cmd.equals(RANDOM_READ) || cmd.equals(RANDOM_READ_MEM) ||
- cmd.equals(SCAN) || cmd.equals(SEQUENTIAL_READ)) {
- status.setStatus("Running " + SEQUENTIAL_WRITE + " first so " +
- cmd + " has data to work against");
- runOneClient(SEQUENTIAL_WRITE, 0, this.R, this.R, status);
- }
-
runOneClient(cmd, 0, this.R, this.R, status);
} catch (Exception e) {
LOG.error("Failed", e);
- } finally {
- LOG.info("Deleting table " + tableDescriptor.getName());
- if (admin != null) {
- admin.deleteTable(tableDescriptor.getName());
- }
- }
+ }
}
private void runTest(final String cmd) throws IOException {
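
Reviewer note: with the auto-populate and auto-delete logic removed, a read benchmark no longer seeds its own table and the table survives the run. Callers must now run sequentialWrite themselves before any read test. A sketch of that ordering; the "<command> <nclients>" argument shape is an assumption about this tool's CLI, and the driver class is illustrative:

    import org.apache.hadoop.hbase.PerformanceEvaluation;

    public class PopulateThenRead {
      public static void main(String[] args) throws Exception {
        // Seed the test table explicitly now that read commands no
        // longer trigger a sequentialWrite pass on their own.
        PerformanceEvaluation.main(new String[] {"sequentialWrite", "1"});
        // Run the read pass as a second, separate invocation in case
        // the tool's main() terminates the JVM when it finishes:
        //   PerformanceEvaluation.main(new String[] {"sequentialRead", "1"});
      }
    }
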