HBASE-629 Split reports incorrect elapsed time
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@657226 13f79535-47bb-0310-9956-ffa450edef68
parent da3f56a058
commit db289cb669

CHANGES.txt
@@ -14,6 +14,7 @@ Hbase Change Log
    HBASE-622   Remove StaticTestEnvironment and put a log4j.properties in src/test
    HBASE-624   Master will shut down if number of active region servers is zero
                even if shutdown was not requested
+   HBASE-629   Split reports incorrect elapsed time
 
  IMPROVEMENTS
    HBASE-559   MR example job to count table rows

CompactSplitThread.java
@@ -41,13 +41,11 @@ import org.apache.hadoop.hbase.util.Writables;
 /**
  * Compact region on request and then run split if appropriate
  */
-class CompactSplitThread extends Thread
-implements RegionUnavailableListener, HConstants {
+class CompactSplitThread extends Thread implements HConstants {
   static final Log LOG = LogFactory.getLog(CompactSplitThread.class);
 
   private HTable root = null;
   private HTable meta = null;
-  private volatile long startTime;
   private final long frequency;
   private final ReentrantLock lock = new ReentrantLock();
 
@@ -132,7 +130,8 @@ implements RegionUnavailableListener, HConstants {
   private void split(final HRegion region, final byte [] midKey)
   throws IOException {
     final HRegionInfo oldRegionInfo = region.getRegionInfo();
-    final HRegion[] newRegions = region.splitRegion(this, midKey);
+    final long startTime = System.currentTimeMillis();
+    final HRegion[] newRegions = region.splitRegion(midKey);
     if (newRegions == null) {
       // Didn't need to be split
       return;
@@ -190,16 +189,6 @@ implements RegionUnavailableListener, HConstants {
     // Do not serve the new regions. Let the Master assign them.
   }
 
-  /** {@inheritDoc} */
-  public void closing(@SuppressWarnings("unused") final byte [] regionName) {
-    // continue
-  }
-
-  /** {@inheritDoc} */
-  public void closed(@SuppressWarnings("unused") final byte [] regionName) {
-    // continue
-  }
-
   /**
    * Only interrupt once it's done with a run through the work loop.
   */
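
Note: the CompactSplitThread hunks above read as the core of the fix. The shared
volatile startTime field is dropped, and split() now takes its own timestamp
immediately before calling splitRegion, so the logged elapsed time covers only
the split rather than whatever work last set the shared field. Below is a
minimal, self-contained sketch of that timing pattern; it is not HBase code,
and the class name, methods, and sleep durations are invented for illustration.

    import java.util.concurrent.TimeUnit;

    // Sketch of the timing change: a start timestamp shared across operations can
    // charge earlier work (the "compaction" sleep here) to the split report, while
    // a local timestamp taken right before the timed call measures only the split.
    public class SplitTimingSketch {

      private volatile long sharedStartTime; // old shape: one field reused by several steps

      void oldReport() throws InterruptedException {
        sharedStartTime = System.currentTimeMillis();
        TimeUnit.MILLISECONDS.sleep(200);   // stands in for the compaction
        TimeUnit.MILLISECONDS.sleep(50);    // stands in for the split
        System.out.println("old split elapsed: "
            + (System.currentTimeMillis() - sharedStartTime) / 1000.0 + "s"); // ~0.25s, too high
      }

      void newReport() throws InterruptedException {
        TimeUnit.MILLISECONDS.sleep(200);   // compaction no longer counted
        final long startTime = System.currentTimeMillis(); // local, as in split()
        TimeUnit.MILLISECONDS.sleep(50);    // split
        System.out.println("new split elapsed: "
            + (System.currentTimeMillis() - startTime) / 1000.0 + "s");        // ~0.05s
      }

      public static void main(String[] args) throws InterruptedException {
        SplitTimingSketch s = new SplitTimingSketch();
        s.oldReport();
        s.newReport();
      }
    }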

HRegion.java
@@ -357,7 +357,7 @@ public class HRegion implements HConstants {
     new ReentrantReadWriteLock();
   private final Integer splitLock = new Integer(0);
   private final long minSequenceId;
-  private final AtomicInteger activeScannerCount = new AtomicInteger(0);
+  final AtomicInteger activeScannerCount = new AtomicInteger(0);
 
   //////////////////////////////////////////////////////////////////////////////
   // Constructor
@@ -525,7 +525,7 @@ public class HRegion implements HConstants {
    * @throws IOException
    */
   public List<HStoreFile> close() throws IOException {
-    return close(false, null);
+    return close(false);
   }
 
   /**
@@ -536,15 +536,13 @@ public class HRegion implements HConstants {
    * time-sensitive thread.
    *
    * @param abort true if server is aborting (only during testing)
-   * @param listener call back to alert caller on close status
    * @return Vector of all the storage files that the HRegion's component
    * HStores make use of. It's a list of HStoreFile objects. Can be null if
    * we are not to close at this time or we are already closed.
    *
    * @throws IOException
    */
-  List<HStoreFile> close(boolean abort,
-    final RegionUnavailableListener listener) throws IOException {
+  List<HStoreFile> close(boolean abort) throws IOException {
     if (isClosed()) {
       LOG.warn("region " + this + " already closed");
       return null;
@@ -592,13 +590,6 @@ public class HRegion implements HConstants {
       waitOnRowLocks();
       LOG.debug("No more row locks outstanding on region " + this);
 
-      if (listener != null) {
-        // If there is a listener, let them know that we have now
-        // acquired all the necessary locks and are starting to
-        // do the close
-        listener.closing(getRegionName());
-      }
-
       // Don't flush the cache if we are aborting
       if (!abort) {
         internalFlushcache();
@@ -610,12 +601,6 @@ public class HRegion implements HConstants {
       }
       this.closed.set(true);
 
-      if (listener != null) {
-        // If there is a listener, tell them that the region is now
-        // closed.
-        listener.closed(getRegionName());
-      }
-
       LOG.info("closed " + this);
       return result;
     } finally {
@@ -707,13 +692,11 @@ public class HRegion implements HConstants {
    * current HRegion. Split should be fast since we don't rewrite store files
    * but instead create new 'reference' store files that read off the top and
    * bottom ranges of parent store files.
-   * @param listener May be null.
    * @param midKey key on which to split region
    * @return two brand-new (and open) HRegions or null if a split is not needed
   * @throws IOException
    */
-  HRegion[] splitRegion(final RegionUnavailableListener listener,
-      final byte [] midKey) throws IOException {
+  HRegion[] splitRegion(final byte [] midKey) throws IOException {
     synchronized (splitLock) {
       if (closed.get()) {
         return null;
@@ -753,18 +736,12 @@ public class HRegion implements HConstants {
     // Now close the HRegion. Close returns all store files or null if not
     // supposed to close (? What to do in this case? Implement abort of close?)
     // Close also does wait on outstanding rows and calls a flush just-in-case.
-    List<HStoreFile> hstoreFilesToSplit = close(false, listener);
+    List<HStoreFile> hstoreFilesToSplit = close(false);
     if (hstoreFilesToSplit == null) {
       LOG.warn("Close came back null (Implement abort of close?)");
       throw new RuntimeException("close returned empty vector of HStoreFiles");
     }
 
-    // Tell listener that region is now closed and that they can therefore
-    // clean up any outstanding references.
-    if (listener != null) {
-      listener.closed(this.getRegionName());
-    }
-
     // Split each store file.
     for(HStoreFile h: hstoreFilesToSplit) {
       // A reference to the bottom half of the hsf store file.
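
With the RegionUnavailableListener callback gone, close and splitRegion narrow
to close(boolean abort) and splitRegion(byte[] midKey), and callers such as
HRegionServer simply drop the trailing listener/null argument (next hunk). The
following sketch only mirrors those narrowed shapes; "Region" and the String
store-file placeholder are stand-ins, not the real HRegion/HStoreFile types.

    import java.io.IOException;
    import java.util.List;

    // Illustrative mirror of the method shapes after this change; only the
    // signatures and the null-return convention come from the diff above.
    public class RegionApiSketch {

      interface Region {
        // was: List<HStoreFile> close(boolean abort, RegionUnavailableListener listener)
        List<String> close(boolean abort) throws IOException;

        // was: HRegion[] splitRegion(RegionUnavailableListener listener, byte[] midKey)
        Region[] splitRegion(byte[] midKey) throws IOException;
      }

      // HRegionServer-style caller: no trailing null listener any more.
      static void closeRegion(Region region, boolean abortRequested) throws IOException {
        region.close(abortRequested);
      }

      // CompactSplitThread-style caller: splitRegion may return null when no split is needed.
      static void splitIfNeeded(Region region, byte[] midKey) throws IOException {
        Region[] halves = region.splitRegion(midKey);
        if (halves == null) {
          return; // did not need to be split
        }
        // hand the new halves to the master for assignment (outside this sketch)
      }
    }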

HRegionServer.java
@@ -912,7 +912,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
         LOG.debug("closing region " + Bytes.toString(region.getRegionName()));
       }
       try {
-        region.close(abortRequested, null);
+        region.close(abortRequested);
       } catch (IOException e) {
         LOG.error("error closing region " +
           Bytes.toString(region.getRegionName()),

RegionUnavailableListener.java (deleted)
@@ -1,43 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-
-/**
- * Used as a callback mechanism so that an HRegion can notify the HRegionServer
- * of the different stages making an HRegion unavailable. Regions are made
- * unavailable during region split operations.
- */
-public interface RegionUnavailableListener {
-  /**
-   * <code>regionName</code> is closing.
-   * Listener should stop accepting new writes but can continue to service
-   * outstanding transactions.
-   * @param regionName
-   */
-  public void closing(final byte [] regionName);
-
-  /**
-   * <code>regionName</code> is closed and no longer available.
-   * Listener should clean up any references to <code>regionName</code>
-   * @param regionName
-   */
-  public void closed(final byte [] regionName);
-}

TestHRegion.java
@@ -42,10 +42,8 @@ import org.apache.hadoop.hbase.util.Bytes;
  * A lot of the meta information for an HRegion now lives inside other
  * HRegions or in the HBaseMaster, so only basic testing is possible.
  */
-public class TestHRegion extends HBaseTestCase
-implements RegionUnavailableListener {
-  static final Logger LOG =
-    Logger.getLogger(TestHRegion.class.getName());
+public class TestHRegion extends HBaseTestCase {
+  static final Logger LOG = Logger.getLogger(TestHRegion.class);
 
   /**
    * Since all the "tests" depend on the results of the previous test, they are
@@ -60,7 +58,6 @@ implements RegionUnavailableListener {
       badPuts();
       basic();
       scan();
-      // batchWrite();
       splitAndMerge();
       read();
     } finally {
@@ -70,18 +67,15 @@ implements RegionUnavailableListener {
 
 
   private static final int FIRST_ROW = 1;
-  private static final int N_ROWS = 1000000;
   private static final int NUM_VALS = 1000;
   private static final byte [] CONTENTS_BASIC = Bytes.toBytes("contents:basic");
   private static final String CONTENTSTR = "contentstr";
   private static final String ANCHORNUM = "anchor:anchornum-";
   private static final String ANCHORSTR = "anchorstr";
-  private static final byte [] CONTENTS_BODY = Bytes.toBytes("contents:body");
   private static final byte [] CONTENTS_FIRSTCOL = Bytes.toBytes("contents:firstcol");
   private static final byte [] ANCHOR_SECONDCOL = Bytes.toBytes("anchor:secondcol");
 
   private MiniDFSCluster cluster = null;
-  private HLog log = null;
   private HTableDescriptor desc = null;
   HRegion r = null;
   HRegionIncommon region = null;
@@ -112,7 +106,6 @@ implements RegionUnavailableListener {
     desc.addFamily(new HColumnDescriptor("contents:"));
     desc.addFamily(new HColumnDescriptor("anchor:"));
     r = createNewHRegion(desc, null, null);
-    log = r.getLog();
     region = new HRegionIncommon(r);
     LOG.info("setup completed.");
   }
@@ -518,80 +511,13 @@ implements RegionUnavailableListener {
     LOG.info("scan completed.");
   }
 
-  // Do a large number of writes. Disabled if not debugging because it takes a
-  // long time to run.
-  // Creates contents:body
-
-  private void batchWrite() throws IOException {
-    long totalFlush = 0;
-    long totalCompact = 0;
-    long totalLog = 0;
-    long startTime = System.currentTimeMillis();
-
-    // 1M writes
-
-    int valsize = 1000;
-    for (int k = FIRST_ROW; k <= N_ROWS; k++) {
-      // Come up with a random 1000-byte string
-      String randstr1 = "" + System.currentTimeMillis();
-      StringBuffer buf1 = new StringBuffer("val_" + k + "__");
-      while (buf1.length() < valsize) {
-        buf1.append(randstr1);
-      }
-
-      // Write to the HRegion
-      BatchUpdate batchUpdate =
-        new BatchUpdate(Bytes.toBytes("row_" + k), System.currentTimeMillis());
-      batchUpdate.put(CONTENTS_BODY,
-        buf1.toString().getBytes(HConstants.UTF8_ENCODING));
-      region.commit(batchUpdate);
-      if (k > 0 && k % (N_ROWS / 100) == 0) {
-        LOG.info("Flushing write #" + k);
-
-        long flushStart = System.currentTimeMillis();
-        region.flushcache();
-        long flushEnd = System.currentTimeMillis();
-        totalFlush += (flushEnd - flushStart);
-
-        if (k % (N_ROWS / 10) == 0) {
-          System.err.print("Rolling log...");
-          long logStart = System.currentTimeMillis();
-          log.rollWriter();
-          long logEnd = System.currentTimeMillis();
-          totalLog += (logEnd - logStart);
-          LOG.info("  elapsed time: " + ((logEnd - logStart) / 1000.0));
-        }
-      }
-    }
-    long startCompact = System.currentTimeMillis();
-    r.compactStores();
-    totalCompact = System.currentTimeMillis() - startCompact;
-    LOG.info("Region compacted - elapsedTime: " + (totalCompact / 1000.0));
-    long endTime = System.currentTimeMillis();
-
-    long totalElapsed = (endTime - startTime);
-    LOG.info("");
-    LOG.info("Batch-write complete.");
-    LOG.info("Wrote " + N_ROWS + " rows, each of ~" + valsize + " bytes");
-    LOG.info("Total flush-time: " + (totalFlush / 1000.0));
-    LOG.info("Total compact-time: " + (totalCompact / 1000.0));
-    LOG.info("Total log-time: " + (totalLog / 1000.0));
-    LOG.info("Total time elapsed: " + (totalElapsed / 1000.0));
-    LOG.info("Total time, rows/second: " + (N_ROWS / (totalElapsed / 1000.0)));
-    LOG.info("Adjusted time (not including flush, compact, or log): " + ((totalElapsed - totalFlush - totalCompact - totalLog) / 1000.0));
-    LOG.info("Adjusted time, rows/second: " + (N_ROWS / ((totalElapsed - totalFlush - totalCompact - totalLog) / 1000.0)));
-    LOG.info("");
-
-    LOG.info("batchWrite completed.");
-  }
-
   // NOTE: This test depends on testBatchWrite succeeding
   private void splitAndMerge() throws IOException {
     Path oldRegionPath = r.getRegionDir();
     byte [] midKey = r.compactStores();
     assertNotNull(midKey);
     long startTime = System.currentTimeMillis();
-    HRegion subregions[] = r.splitRegion(this, midKey);
+    HRegion subregions[] = r.splitRegion(midKey);
     if (subregions != null) {
       LOG.info("Split region elapsed time: "
         + ((System.currentTimeMillis() - startTime) / 1000.0));
@@ -619,20 +545,6 @@ implements RegionUnavailableListener {
     LOG.info("splitAndMerge completed.");
   }
 
-  /**
-   * {@inheritDoc}
-   */
-  public void closing(@SuppressWarnings("unused") final byte [] regionName) {
-    // We don't use this here. It is only for the HRegionServer
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  public void closed(@SuppressWarnings("unused") final byte [] regionName) {
-    // We don't use this here. It is only for the HRegionServer
-  }
-
   // This test verifies that everything is still there after splitting and merging
 
   private void read() throws IOException {

TestSplit.java
@@ -207,7 +207,7 @@ public class TestSplit extends HBaseClusterTestCase {
   throws IOException {
     // Assert can get mid key from passed region.
     assertGet(r, COLFAMILY_NAME3, midKey);
-    HRegion [] regions = r.splitRegion(null, midKey);
+    HRegion [] regions = r.splitRegion(midKey);
     assertEquals(regions.length, 2);
     return regions;
   }