Pre-trunk-merging sync of trunk into snapshots branch

git-svn-id: https://svn.apache.org/repos/asf/hbase/branches/hbase-7290v2@1448497 13f79535-47bb-0310-9956-ffa450edef68
Jonathan Hsieh 2013-02-21 03:06:32 +00:00
commit ca65f47aee
14 changed files with 516 additions and 170 deletions

BaseRegionObserver.java

@@ -41,6 +41,8 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Pair;
@@ -134,10 +136,23 @@ public abstract class BaseRegionObserver implements RegionObserver {
   public void preCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
       final Store store, final List<StoreFile> candidates) throws IOException { }
 
+  @Override
+  public void preCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
+      final Store store, final List<StoreFile> candidates, final CompactionRequest request)
+      throws IOException {
+    preCompactSelection(c, store, candidates);
+  }
+
   @Override
   public void postCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
       final Store store, final ImmutableList<StoreFile> selected) { }
 
+  @Override
+  public void postCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
+      final Store store, final ImmutableList<StoreFile> selected, CompactionRequest request) {
+    postCompactSelection(c, store, selected);
+  }
+
   @Override
   public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
       final Store store, final InternalScanner scanner, final ScanType scanType)
@@ -146,17 +161,39 @@ public abstract class BaseRegionObserver implements RegionObserver {
   }
 
   @Override
-  public InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-      final Store store, List<? extends KeyValueScanner> scanners, final ScanType scanType,
-      final long earliestPutTs, final InternalScanner s) throws IOException {
+  public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
+      final Store store, final InternalScanner scanner, final ScanType scanType,
+      CompactionRequest request) throws IOException {
+    return preCompact(e, store, scanner, scanType);
+  }
+
+  @Override
+  public InternalScanner preCompactScannerOpen(
+      final ObserverContext<RegionCoprocessorEnvironment> c, final Store store,
+      List<? extends KeyValueScanner> scanners, final ScanType scanType, final long earliestPutTs,
+      final InternalScanner s) throws IOException {
     return null;
   }
 
+  @Override
+  public InternalScanner preCompactScannerOpen(
+      final ObserverContext<RegionCoprocessorEnvironment> c, final Store store,
+      List<? extends KeyValueScanner> scanners, final ScanType scanType, final long earliestPutTs,
+      final InternalScanner s, CompactionRequest request) throws IOException {
+    return preCompactScannerOpen(c, store, scanners, scanType, earliestPutTs, s);
+  }
+
   @Override
   public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e, final Store store,
       final StoreFile resultFile) throws IOException {
   }
 
+  @Override
+  public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e, final Store store,
+      final StoreFile resultFile, CompactionRequest request) throws IOException {
+    postCompact(e, store, resultFile);
+  }
+
   @Override
   public void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> e,
       final byte [] row, final byte [] family, final Result result)

RegionObserver.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 
@@ -126,87 +127,184 @@ public interface RegionObserver extends Coprocessor {
       final StoreFile resultFile) throws IOException;
 
   /**
-   * Called prior to selecting the {@link StoreFile}s to compact from the list
-   * of available candidates. To alter the files used for compaction, you may
-   * mutate the passed in list of candidates.
+   * Called prior to selecting the {@link StoreFile StoreFiles} to compact from the list of
+   * available candidates. To alter the files used for compaction, you may mutate the passed in list
+   * of candidates.
+   * @param c the environment provided by the region server
+   * @param store the store where compaction is being requested
+   * @param candidates the store files currently available for compaction
+   * @param request custom compaction request
+   * @throws IOException if an error occurred on the coprocessor
+   */
+  void preCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
+      final Store store, final List<StoreFile> candidates, final CompactionRequest request)
+      throws IOException;
+
+  /**
+   * Called prior to selecting the {@link StoreFile}s to compact from the list of available
+   * candidates. To alter the files used for compaction, you may mutate the passed in list of
+   * candidates.
    * @param c the environment provided by the region server
    * @param store the store where compaction is being requested
    * @param candidates the store files currently available for compaction
    * @throws IOException if an error occurred on the coprocessor
+   * @deprecated Use {@link #preCompactSelection(ObserverContext, Store, List, Object)} instead
    */
   void preCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
       final Store store, final List<StoreFile> candidates) throws IOException;
 
   /**
-   * Called after the {@link StoreFile}s to compact have been selected from the
-   * available candidates.
+   * Called after the {@link StoreFile}s to compact have been selected from the available
+   * candidates.
    * @param c the environment provided by the region server
    * @param store the store being compacted
    * @param selected the store files selected to compact
+   * @param request custom compaction request
+   */
+  void postCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
+      final Store store, final ImmutableList<StoreFile> selected, CompactionRequest request);
+
+  /**
+   * Called after the {@link StoreFile}s to compact have been selected from the available
+   * candidates.
+   * @param c the environment provided by the region server
+   * @param store the store being compacted
+   * @param selected the store files selected to compact
+   * @param compactionAttributes custom attributes for the compaction
+   * @deprecated use {@link #postCompactSelection(ObserverContext, Store, ImmutableList, Object)}
+   *             instead.
    */
+  @Deprecated
   void postCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
       final Store store, final ImmutableList<StoreFile> selected);
 
   /**
-   * Called prior to writing the {@link StoreFile}s selected for compaction into
-   * a new {@code StoreFile}. To override or modify the compaction process,
-   * implementing classes have two options:
+   * Called prior to writing the {@link StoreFile}s selected for compaction into a new
+   * {@code StoreFile}. To override or modify the compaction process, implementing classes have two
+   * options:
    * <ul>
-   * <li>Wrap the provided {@link InternalScanner} with a custom
-   * implementation that is returned from this method. The custom scanner
-   * can then inspect {@link KeyValue}s from the wrapped scanner, applying
-   * its own policy to what gets written.</li>
-   * <li>Call {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()}
-   * and provide a custom implementation for writing of new
-   * {@link StoreFile}s. <strong>Note: any implementations bypassing
-   * core compaction using this approach must write out new store files
-   * themselves or the existing data will no longer be available after
-   * compaction.</strong></li>
+   * <li>Wrap the provided {@link InternalScanner} with a custom implementation that is returned
+   * from this method. The custom scanner can then inspect {@link KeyValue}s from the wrapped
+   * scanner, applying its own policy to what gets written.</li>
+   * <li>Call {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} and provide a
+   * custom implementation for writing of new {@link StoreFile}s. <strong>Note: any implementations
+   * bypassing core compaction using this approach must write out new store files themselves or the
+   * existing data will no longer be available after compaction.</strong></li>
    * </ul>
    * @param c the environment provided by the region server
    * @param store the store being compacted
-   * @param scanner the scanner over existing data used in the store file
-   * rewriting
+   * @param scanner the scanner over existing data used in the store file rewriting
    * @param scanType type of Scan
-   * @return the scanner to use during compaction. Should not be {@code null}
-   * unless the implementation is writing new store files on its own.
+   * @param request the requested compaction
+   * @return the scanner to use during compaction. Should not be {@code null} unless the
+   * implementation is writing new store files on its own.
    * @throws IOException if an error occurred on the coprocessor
    */
   InternalScanner preCompact(final ObserverContext<RegionCoprocessorEnvironment> c,
-      final Store store, final InternalScanner scanner,
-      final ScanType scanType) throws IOException;
+      final Store store, final InternalScanner scanner, final ScanType scanType,
+      CompactionRequest request) throws IOException;
 
   /**
-   * Called prior to writing the {@link StoreFile}s selected for compaction into
-   * a new {@code StoreFile} and prior to creating the scanner used to read the
-   * input files. To override or modify the compaction process,
-   * implementing classes can return a new scanner to provide the KeyValues to be
-   * stored into the new {@code StoreFile} or null to perform the default processing.
-   * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no
+   * Called prior to writing the {@link StoreFile}s selected for compaction into a new
+   * {@code StoreFile}. To override or modify the compaction process, implementing classes have two
+   * options:
+   * <ul>
+   * <li>Wrap the provided {@link InternalScanner} with a custom implementation that is returned
+   * from this method. The custom scanner can then inspect {@link KeyValue}s from the wrapped
+   * scanner, applying its own policy to what gets written.</li>
+   * <li>Call {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} and provide a
+   * custom implementation for writing of new {@link StoreFile}s. <strong>Note: any implementations
+   * bypassing core compaction using this approach must write out new store files themselves or the
+   * existing data will no longer be available after compaction.</strong></li>
+   * </ul>
+   * @param c the environment provided by the region server
+   * @param store the store being compacted
+   * @param scanner the scanner over existing data used in the store file rewriting
+   * @param scanType type of Scan
+   * @param request the requested compaction
+   * @return the scanner to use during compaction. Should not be {@code null} unless the
+   * implementation is writing new store files on its own.
+   * @throws IOException if an error occurred on the coprocessor
+   * @deprecated use
+   * {@link #preCompact(ObserverContext, Store, InternalScanner, ScanType, CompactionRequest)}
+   * instead
+   */
+  @Deprecated
+  InternalScanner preCompact(final ObserverContext<RegionCoprocessorEnvironment> c,
+      final Store store, final InternalScanner scanner, final ScanType scanType) throws IOException;
+
+  /**
+   * Called prior to writing the {@link StoreFile}s selected for compaction into a new
+   * {@code StoreFile} and prior to creating the scanner used to read the input files. To override
+   * or modify the compaction process, implementing classes can return a new scanner to provide the
+   * KeyValues to be stored into the new {@code StoreFile} or null to perform the default
+   * processing. Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no
    * effect in this hook.
    * @param c the environment provided by the region server
    * @param store the store being compacted
    * @param scanners the list {@link StoreFileScanner}s to be read from
    * @param scanType the {@link ScanType} indicating whether this is a major or minor compaction
-   * @param earliestPutTs timestamp of the earliest put that was found in any of the involved
-   * store files
+   * @param earliestPutTs timestamp of the earliest put that was found in any of the involved store
+   * files
    * @param s the base scanner, if not {@code null}, from previous RegionObserver in the chain
-   * @return the scanner to use during compaction. {@code null} if the default implementation
-   * is to be used.
+   * @param request the requested compaction
+   * @return the scanner to use during compaction. {@code null} if the default implementation is to
+   * be used.
    * @throws IOException if an error occurred on the coprocessor
    */
+  InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+      final Store store, List<? extends KeyValueScanner> scanners, final ScanType scanType,
+      final long earliestPutTs, final InternalScanner s, CompactionRequest request)
+      throws IOException;
+
+  /**
+   * Called prior to writing the {@link StoreFile}s selected for compaction into a new
+   * {@code StoreFile} and prior to creating the scanner used to read the input files. To override
+   * or modify the compaction process, implementing classes can return a new scanner to provide the
+   * KeyValues to be stored into the new {@code StoreFile} or null to perform the default
+   * processing. Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no
+   * effect in this hook.
+   * @param c the environment provided by the region server
+   * @param store the store being compacted
+   * @param scanners the list {@link StoreFileScanner}s to be read from
+   * @param scanType the {@link ScanType} indicating whether this is a major or minor compaction
+   * @param earliestPutTs timestamp of the earliest put that was found in any of the involved store
+   * files
+   * @param s the base scanner, if not {@code null}, from previous RegionObserver in the chain
+   * @param request the requested compaction
+   * @return the scanner to use during compaction. {@code null} if the default implementation is to
+   * be used.
+   * @throws IOException if an error occurred on the coprocessor
+   * @deprecated Use
+   * {@link #preCompactScannerOpen(ObserverContext, Store, List, ScanType, long, InternalScanner, CompactionRequest)}
+   * instead.
+   */
+  @Deprecated
   InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
       final Store store, List<? extends KeyValueScanner> scanners, final ScanType scanType,
       final long earliestPutTs, final InternalScanner s) throws IOException;
 
   /**
-   * Called after compaction has completed and the new store file has been
-   * moved in to place.
+   * Called after compaction has completed and the new store file has been moved in to place.
+   * @param c the environment provided by the region server
+   * @param store the store being compacted
+   * @param resultFile the new store file written out during compaction
+   * @param request the requested compaction
+   * @throws IOException if an error occurred on the coprocessor
+   */
+  void postCompact(final ObserverContext<RegionCoprocessorEnvironment> c, final Store store,
+      StoreFile resultFile, CompactionRequest request) throws IOException;
+
+  /**
+   * Called after compaction has completed and the new store file has been moved in to place.
    * @param c the environment provided by the region server
    * @param store the store being compacted
    * @param resultFile the new store file written out during compaction
    * @throws IOException if an error occurred on the coprocessor
+   * @deprecated Use {@link #postCompact(ObserverContext, Store, StoreFile, CompactionRequest)}
+   * instead
    */
+  @Deprecated
   void postCompact(final ObserverContext<RegionCoprocessorEnvironment> c, final Store store,
       StoreFile resultFile) throws IOException;

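For illustration, a minimal observer sketch (not part of this commit; the class name and trimming rule are hypothetical). Only the new CompactionRequest-aware overload is overridden; BaseRegionObserver's default implementation forwards it to the deprecated variant, so existing observers keep working unchanged:

package org.apache.hadoop.hbase.coprocessor.example; // hypothetical package

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

public class SelectionTrimmingObserver extends BaseRegionObserver {

  @Override
  public void preCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
      final Store store, final List<StoreFile> candidates, final CompactionRequest request)
      throws IOException {
    // Mutating 'candidates' changes which files the selection policy may pick;
    // 'request' identifies the (possibly custom) compaction being scheduled.
    if (candidates.size() > 10) {
      candidates.subList(10, candidates.size()).clear();
    }
  }
}
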
CompactSplitThread.java

@@ -19,7 +19,9 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.List;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Executors;
 import java.util.concurrent.PriorityBlockingQueue;
@@ -183,23 +185,41 @@ public class CompactSplitThread implements CompactionRequestor {
     }
   }
 
-  public synchronized void requestCompaction(final HRegion r,
-      final String why) throws IOException {
-    for (Store s : r.getStores().values()) {
-      requestCompaction(r, s, why, Store.NO_PRIORITY);
-    }
+  @Override
+  public synchronized List<CompactionRequest> requestCompaction(final HRegion r, final String why)
+      throws IOException {
+    return requestCompaction(r, why, null);
   }
 
-  public synchronized void requestCompaction(final HRegion r, final Store s,
-      final String why) throws IOException {
-    requestCompaction(r, s, why, Store.NO_PRIORITY);
+  @Override
+  public synchronized List<CompactionRequest> requestCompaction(final HRegion r, final String why,
+      List<CompactionRequest> requests) throws IOException {
+    return requestCompaction(r, why, Store.NO_PRIORITY, requests);
   }
 
-  public synchronized void requestCompaction(final HRegion r, final String why,
-      int p) throws IOException {
-    for (Store s : r.getStores().values()) {
-      requestCompaction(r, s, why, p);
-    }
+  @Override
+  public synchronized CompactionRequest requestCompaction(final HRegion r, final Store s,
+      final String why, CompactionRequest request) throws IOException {
+    return requestCompaction(r, s, why, Store.NO_PRIORITY, request);
+  }
+
+  @Override
+  public synchronized List<CompactionRequest> requestCompaction(final HRegion r, final String why,
+      int p, List<CompactionRequest> requests) throws IOException {
+    // not a special compaction request, so make our own list
+    List<CompactionRequest> ret;
+    if (requests == null) {
+      ret = new ArrayList<CompactionRequest>(r.getStores().size());
+      for (Store s : r.getStores().values()) {
+        ret.add(requestCompaction(r, s, why, p, null));
+      }
+    } else {
+      ret = new ArrayList<CompactionRequest>(requests.size());
+      for (CompactionRequest request : requests) {
+        requests.add(requestCompaction(r, request.getStore(), why, p, request));
+      }
+    }
+    return ret;
   }
 
   /**
@@ -207,13 +227,15 @@ public class CompactSplitThread implements CompactionRequestor {
    * @param s Store to request compaction on
    * @param why Why compaction requested -- used in debug messages
    * @param priority override the default priority (NO_PRIORITY == decide)
+   * @param request custom compaction request. Can be <tt>null</tt> in which case a simple
+   *          compaction will be used.
    */
-  public synchronized void requestCompaction(final HRegion r, final Store s,
-      final String why, int priority) throws IOException {
+  public synchronized CompactionRequest requestCompaction(final HRegion r, final Store s,
+      final String why, int priority, CompactionRequest request) throws IOException {
     if (this.server.isStopped()) {
-      return;
+      return null;
     }
-    CompactionRequest cr = s.requestCompaction(priority);
+    CompactionRequest cr = s.requestCompaction(priority, request);
     if (cr != null) {
       cr.setServer(server);
       if (priority != Store.NO_PRIORITY) {
@@ -234,6 +256,7 @@ public class CompactSplitThread implements CompactionRequestor {
             " because compaction request was cancelled");
       }
     }
+    return cr;
   }
 
   /**

CompactionRequestor.java

@@ -19,42 +19,73 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 
 @InterfaceAudience.Private
 public interface CompactionRequestor {
   /**
    * @param r Region to compact
    * @param why Why compaction was requested -- used in debug messages
+   * @return The created {@link CompactionRequest CompactionRequests} or an empty list if no
+   *         compactions were started
    * @throws IOException
    */
-  public void requestCompaction(final HRegion r, final String why) throws IOException;
-
-  /**
-   * @param r Region to compact
-   * @param s Store within region to compact
-   * @param why Why compaction was requested -- used in debug messages
-   * @throws IOException
-   */
-  public void requestCompaction(final HRegion r, final Store s, final String why)
+  public List<CompactionRequest> requestCompaction(final HRegion r, final String why)
       throws IOException;
 
   /**
    * @param r Region to compact
    * @param why Why compaction was requested -- used in debug messages
-   * @param pri Priority of this compaction. minHeap. <=0 is critical
+   * @param requests custom compaction requests. Each compaction must specify the store on which it
+   *          is acting. Can be <tt>null</tt> in which case a compaction will be attempted on all
+   *          stores for the region.
+   * @return The created {@link CompactionRequest CompactionRequests} or an empty list if no
+   *         compactions were started
    * @throws IOException
    */
-  public void requestCompaction(final HRegion r, final String why, int pri) throws IOException;
+  public List<CompactionRequest> requestCompaction(final HRegion r, final String why,
+      List<CompactionRequest> requests)
+      throws IOException;
+
+  /**
+   * @param r Region to compact
+   * @param s Store within region to compact
+   * @param why Why compaction was requested -- used in debug messages
+   * @param request custom compaction request for the {@link HRegion} and {@link Store}. Custom
+   *          request must be <tt>null</tt> or be constructed with matching region and store.
+   * @return The created {@link CompactionRequest} or <tt>null</tt> if no compaction was started.
+   * @throws IOException
+   */
+  public CompactionRequest requestCompaction(final HRegion r, final Store s, final String why,
+      CompactionRequest request) throws IOException;
+
+  /**
+   * @param r Region to compact
+   * @param why Why compaction was requested -- used in debug messages
+   * @param pri Priority of this compaction. minHeap. <=0 is critical
+   * @param requests custom compaction requests. Each compaction must specify the store on which it
+   *          is acting. Can be <tt>null</tt> in which case a compaction will be attempted on all
+   *          stores for the region.
+   * @return The created {@link CompactionRequest CompactionRequests} or an empty list if no
+   *         compactions were started.
+   * @throws IOException
+   */
+  public List<CompactionRequest> requestCompaction(final HRegion r, final String why, int pri,
+      List<CompactionRequest> requests) throws IOException;
 
   /**
    * @param r Region to compact
    * @param s Store within region to compact
    * @param why Why compaction was requested -- used in debug messages
    * @param pri Priority of this compaction. minHeap. <=0 is critical
+   * @param request custom compaction request to run. {@link Store} and {@link HRegion} for the
+   *          request must match the region and store specified here.
+   * @return The created {@link CompactionRequest} or <tt>null</tt> if no compaction was started
    * @throws IOException
    */
-  public void requestCompaction(final HRegion r, final Store s,
-      final String why, int pri) throws IOException;
+  public CompactionRequest requestCompaction(final HRegion r, final Store s, final String why,
+      int pri, CompactionRequest request) throws IOException;
 }

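A short caller sketch of the new interface (assumed code, not part of this commit; the helper class and the "why" string are hypothetical). It builds a custom request with the CompactionRequest(HRegion, HStore, int) constructor added below and hands it to the matching-store overload:

import java.io.IOException;

import org.apache.hadoop.hbase.regionserver.CompactionRequestor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

public class CustomCompactionExample {
  /** Ask the requestor to run a user-priority compaction built by the caller. */
  public static CompactionRequest requestCustom(CompactionRequestor requestor,
      HRegion region, HStore store) throws IOException {
    // Custom request: its region/store must match the ones passed to requestCompaction.
    CompactionRequest custom = new CompactionRequest(region, store, Store.PRIORITY_USER);
    // Returns the scheduled request, or null if nothing could be started
    // (e.g. writes disabled, server stopping, or no eligible files).
    return requestor.requestCompaction(region, store, "custom compaction example", custom);
  }
}
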
HRegionServer.java

@@ -1375,17 +1375,17 @@ public class HRegionServer implements ClientProtocol,
         try {
           if (s.needsCompaction()) {
             // Queue a compaction. Will recognize if major is needed.
-            this.instance.compactSplitThread.requestCompaction(r, s,
-              getName() + " requests compaction");
+            this.instance.compactSplitThread.requestCompaction(r, s, getName()
+                + " requests compaction", null);
           } else if (s.isMajorCompaction()) {
-            if (majorCompactPriority == DEFAULT_PRIORITY ||
-                majorCompactPriority > r.getCompactPriority()) {
-              this.instance.compactSplitThread.requestCompaction(r, s,
-                getName() + " requests major compaction; use default priority");
+            if (majorCompactPriority == DEFAULT_PRIORITY
+                || majorCompactPriority > r.getCompactPriority()) {
+              this.instance.compactSplitThread.requestCompaction(r, s, getName()
+                  + " requests major compaction; use default priority", null);
             } else {
-              this.instance.compactSplitThread.requestCompaction(r, s,
-                getName() + " requests major compaction; use configured priority",
-                this.majorCompactPriority);
+              this.instance.compactSplitThread.requestCompaction(r, s, getName()
+                  + " requests major compaction; use configured priority",
+                this.majorCompactPriority, null);
             }
           }
         } catch (IOException e) {
@@ -1692,7 +1692,7 @@ public class HRegionServer implements ClientProtocol,
       // Do checks to see if we need to compact (references or too many files)
       for (Store s : r.getStores().values()) {
        if (s.hasReferences() || s.needsCompaction()) {
-          getCompactionRequester().requestCompaction(r, s, "Opening Region");
+          getCompactionRequester().requestCompaction(r, s, "Opening Region", null);
        }
      }
      long openSeqNum = r.getOpenSeqNum();
@@ -3657,10 +3657,10 @@ public class HRegionServer implements ClientProtocol,
       String log = "User-triggered " + (major ? "major " : "") + "compaction" + familyLogMsg;
       if(family != null) {
         compactSplitThread.requestCompaction(region, store, log,
-          Store.PRIORITY_USER);
+          Store.PRIORITY_USER, null);
       } else {
         compactSplitThread.requestCompaction(region, log,
-          Store.PRIORITY_USER);
+          Store.PRIORITY_USER, null);
       }
       return CompactRegionResponse.newBuilder().build();
     } catch (IOException ie) {
@@ -4062,4 +4062,11 @@ public class HRegionServer implements ClientProtocol,
     String healthScriptLocation = this.conf.get(HConstants.HEALTH_SCRIPT_LOC);
     return org.apache.commons.lang.StringUtils.isNotBlank(healthScriptLocation);
   }
+
+  /**
+   * @return the underlying {@link CompactSplitThread} for the servers
+   */
+  public CompactSplitThread getCompactSplitThread() {
+    return this.compactSplitThread;
+  }
 }

HStore.java

@@ -1090,14 +1090,13 @@ public class HStore implements Store {
     List<StoreFile> sfs = new ArrayList<StoreFile>();
     long compactionStartTime = EnvironmentEdgeManager.currentTimeMillis();
     try {
-      List<Path> newFiles =
-        this.compactor.compact(filesToCompact, cr.isMajor());
+      List<Path> newFiles = this.compactor.compact(cr);
       // Move the compaction into place.
       if (this.conf.getBoolean("hbase.hstore.compaction.complete", true)) {
         for (Path newFile: newFiles) {
           StoreFile sf = completeCompaction(filesToCompact, newFile);
           if (region.getCoprocessorHost() != null) {
-            region.getCoprocessorHost().postCompact(this, sf);
+            region.getCoprocessorHost().postCompact(this, sf, cr);
           }
           sfs.add(sf);
         }
@@ -1181,13 +1180,12 @@ public class HStore implements Store {
     try {
       // Ready to go. Have list of files to compact.
-      List<Path> newFiles =
-        this.compactor.compact(filesToCompact, isMajor);
+      List<Path> newFiles = this.compactor.compactForTesting(filesToCompact, isMajor);
       for (Path newFile: newFiles) {
         // Move the compaction into place.
         StoreFile sf = completeCompaction(filesToCompact, newFile);
         if (region.getCoprocessorHost() != null) {
-          region.getCoprocessorHost().postCompact(this, sf);
+          region.getCoprocessorHost().postCompact(this, sf, null);
         }
       }
     } finally {
@@ -1219,17 +1217,19 @@ public class HStore implements Store {
     return compactionPolicy.isMajorCompaction(this.storeFileManager.getStorefiles());
   }
 
+  @Override
   public CompactionRequest requestCompaction() throws IOException {
-    return requestCompaction(Store.NO_PRIORITY);
+    return requestCompaction(Store.NO_PRIORITY, null);
   }
 
-  public CompactionRequest requestCompaction(int priority) throws IOException {
+  @Override
+  public CompactionRequest requestCompaction(int priority, CompactionRequest request)
+      throws IOException {
     // don't even select for compaction if writes are disabled
     if (!this.region.areWritesEnabled()) {
       return null;
     }
 
-    CompactionRequest ret = null;
     this.lock.readLock().lock();
     try {
       List<StoreFile> candidates = Lists.newArrayList(storeFileManager.getStorefiles());
@@ -1238,7 +1238,7 @@ public class HStore implements Store {
       candidates = compactionPolicy.preSelectCompaction(candidates, filesCompacting);
       boolean override = false;
       if (region.getCoprocessorHost() != null) {
-        override = region.getCoprocessorHost().preCompactSelection(this, candidates);
+        override = region.getCoprocessorHost().preCompactSelection(this, candidates, request);
       }
       CompactSelection filesToCompact;
       if (override) {
@@ -1257,7 +1257,7 @@ public class HStore implements Store {
         if (region.getCoprocessorHost() != null) {
           region.getCoprocessorHost().postCompactSelection(this,
-            ImmutableList.copyOf(filesToCompact.getFilesToCompact()));
+            ImmutableList.copyOf(filesToCompact.getFilesToCompact()), request);
         }
 
         // no files to compact
@@ -1287,15 +1287,24 @@ public class HStore implements Store {
         // everything went better than expected. create a compaction request
         int pri = getCompactPriority(priority);
-        ret = new CompactionRequest(region, this, filesToCompact, isMajor, pri);
+        //not a special compaction request, so we need to make one
+        if(request == null){
+          request = new CompactionRequest(region, this, filesToCompact, isMajor, pri);
+        }else{
+          //update the request with what the system thinks the request should be
+          //its up to the request if it wants to listen
+          request.setSelection(filesToCompact);
+          request.setIsMajor(isMajor);
+          request.setPriority(pri);
+        }
       }
     } finally {
       this.lock.readLock().unlock();
     }
-    if (ret != null) {
-      this.region.reportCompactionRequestStart(ret.isMajor());
+    if (request != null) {
+      this.region.reportCompactionRequestStart(request.isMajor());
     }
-    return ret;
+    return request;
   }
 
   public void finishRequest(CompactionRequest cr) {

MemStore.java

@@ -586,7 +586,9 @@ public class MemStore implements HeapSize {
           // which means we can prove that no scanner will see this version
           // false means there was a change, so give us the size.
-          addedSize -= heapSizeChange(cur, true);
+          long delta = heapSizeChange(cur, true);
+          addedSize -= delta;
+          this.size.addAndGet(-delta);
           it.remove();
         } else {
           versionsVisible++;

RegionCoprocessorHost.java

@@ -55,6 +55,8 @@ import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -345,11 +347,10 @@ public class RegionCoprocessorHost
   /**
    * See
-   * {@link RegionObserver#preCompactScannerOpen(ObserverContext,
-   *   Store, List, ScanType, long, InternalScanner)}
+   * {@link RegionObserver#preCompactScannerOpen(ObserverContext, Store, List, ScanType, long, InternalScanner, CompactionRequest)}
    */
   public InternalScanner preCompactScannerOpen(Store store, List<StoreFileScanner> scanners,
-      ScanType scanType, long earliestPutTs) throws IOException {
+      ScanType scanType, long earliestPutTs, CompactionRequest request) throws IOException {
     ObserverContext<RegionCoprocessorEnvironment> ctx = null;
     InternalScanner s = null;
     for (RegionEnvironment env: coprocessors) {
@@ -357,7 +358,7 @@ public class RegionCoprocessorHost
         ctx = ObserverContext.createAndPrepare(env, ctx);
         try {
           s = ((RegionObserver) env.getInstance()).preCompactScannerOpen(ctx, store, scanners,
-            scanType, earliestPutTs, s);
+            scanType, earliestPutTs, s, request);
         } catch (Throwable e) {
           handleCoprocessorThrowable(env,e);
         }
@@ -370,22 +371,23 @@ public class RegionCoprocessorHost
   }
 
   /**
-   * Called prior to selecting the {@link StoreFile}s for compaction from
-   * the list of currently available candidates.
+   * Called prior to selecting the {@link StoreFile}s for compaction from the list of currently
+   * available candidates.
    * @param store The store where compaction is being requested
    * @param candidates The currently available store files
+   * @param request custom compaction request
    * @return If {@code true}, skip the normal selection process and use the current list
   * @throws IOException
   */
-  public boolean preCompactSelection(Store store, List<StoreFile> candidates) throws IOException {
+  public boolean preCompactSelection(Store store, List<StoreFile> candidates,
+      CompactionRequest request) throws IOException {
    ObserverContext<RegionCoprocessorEnvironment> ctx = null;
    boolean bypass = false;
    for (RegionEnvironment env: coprocessors) {
      if (env.getInstance() instanceof RegionObserver) {
        ctx = ObserverContext.createAndPrepare(env, ctx);
        try {
-          ((RegionObserver)env.getInstance()).preCompactSelection(
-            ctx, store, candidates);
+          ((RegionObserver) env.getInstance()).preCompactSelection(ctx, store, candidates, request);
        } catch (Throwable e) {
          handleCoprocessorThrowable(env,e);
@@ -400,20 +402,20 @@ public class RegionCoprocessorHost
   }
 
   /**
-   * Called after the {@link StoreFile}s to be compacted have been selected
-   * from the available candidates.
+   * Called after the {@link StoreFile}s to be compacted have been selected from the available
+   * candidates.
    * @param store The store where compaction is being requested
    * @param selected The store files selected to compact
+   * @param request custom compaction
   */
-  public void postCompactSelection(Store store,
-      ImmutableList<StoreFile> selected) {
+  public void postCompactSelection(Store store, ImmutableList<StoreFile> selected,
+      CompactionRequest request) {
    ObserverContext<RegionCoprocessorEnvironment> ctx = null;
    for (RegionEnvironment env: coprocessors) {
      if (env.getInstance() instanceof RegionObserver) {
        ctx = ObserverContext.createAndPrepare(env, ctx);
        try {
-          ((RegionObserver)env.getInstance()).postCompactSelection(
-            ctx, store, selected);
+          ((RegionObserver) env.getInstance()).postCompactSelection(ctx, store, selected, request);
        } catch (Throwable e) {
          handleCoprocessorThrowableNoRethrow(env,e);
        }
@@ -429,18 +431,19 @@ public class RegionCoprocessorHost
    * @param store the store being compacted
    * @param scanner the scanner used to read store data during compaction
    * @param scanType type of Scan
+   * @param request the compaction that will be executed
   * @throws IOException
   */
-  public InternalScanner preCompact(Store store, InternalScanner scanner,
-      ScanType scanType) throws IOException {
+  public InternalScanner preCompact(Store store, InternalScanner scanner, ScanType scanType,
+      CompactionRequest request) throws IOException {
    ObserverContext<RegionCoprocessorEnvironment> ctx = null;
    boolean bypass = false;
    for (RegionEnvironment env: coprocessors) {
      if (env.getInstance() instanceof RegionObserver) {
        ctx = ObserverContext.createAndPrepare(env, ctx);
        try {
-          scanner = ((RegionObserver)env.getInstance()).preCompact(
-            ctx, store, scanner, scanType);
+          scanner = ((RegionObserver) env.getInstance()).preCompact(ctx, store, scanner, scanType,
+            request);
        } catch (Throwable e) {
          handleCoprocessorThrowable(env,e);
        }
@@ -457,15 +460,17 @@ public class RegionCoprocessorHost
   * Called after the store compaction has completed.
   * @param store the store being compacted
   * @param resultFile the new store file written during compaction
+   * @param request the compaction that is being executed
   * @throws IOException
   */
-  public void postCompact(Store store, StoreFile resultFile) throws IOException {
+  public void postCompact(Store store, StoreFile resultFile, CompactionRequest request)
+      throws IOException {
    ObserverContext<RegionCoprocessorEnvironment> ctx = null;
    for (RegionEnvironment env: coprocessors) {
      if (env.getInstance() instanceof RegionObserver) {
        ctx = ObserverContext.createAndPrepare(env, ctx);
        try {
-          ((RegionObserver)env.getInstance()).postCompact(ctx, store, resultFile);
+          ((RegionObserver) env.getInstance()).postCompact(ctx, store, resultFile, request);
        } catch (Throwable e) {
          handleCoprocessorThrowable(env, e);
        }

Store.java

@@ -160,7 +160,8 @@ public interface Store extends HeapSize, StoreConfigInformation {
   public CompactionRequest requestCompaction() throws IOException;
 
-  public CompactionRequest requestCompaction(int priority) throws IOException;
+  public CompactionRequest requestCompaction(int priority, CompactionRequest request)
+      throws IOException;
 
   public void finishRequest(CompactionRequest cr);

CompactionRequest.java

@@ -19,20 +19,22 @@
 package org.apache.hadoop.hbase.regionserver.compactions;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.RejectedExecutionHandler;
 import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.StringUtils;
@@ -46,35 +48,51 @@ import com.google.common.collect.Collections2;
 /**
  * This class holds all details necessary to run a compaction.
  */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate({ "coprocessor" })
+@InterfaceStability.Evolving
 public class CompactionRequest implements Comparable<CompactionRequest>,
     Runnable {
   static final Log LOG = LogFactory.getLog(CompactionRequest.class);
   private final HRegion region;
   private final HStore store;
-  private final CompactSelection compactSelection;
-  private final long totalSize;
-  private final boolean isMajor;
+  private CompactSelection compactSelection;
+  private long totalSize;
+  private boolean isMajor;
   private int priority;
   private final Long timeInNanos;
   private HRegionServer server = null;
 
-  public CompactionRequest(HRegion region, HStore store,
-      CompactSelection files, boolean isMajor, int priority) {
-    Preconditions.checkNotNull(region);
-    Preconditions.checkNotNull(files);
+  public static CompactionRequest getRequestForTesting(Collection<StoreFile> selection,
+      boolean isMajor) {
+    return new CompactionRequest(null, null, new CompactSelection(new ArrayList<StoreFile>(
+        selection)), isMajor, 0, System.nanoTime());
+  }
+
+  /**
+   * Constructor for a custom compaction. Uses the setXXX methods to update the state of the
+   * compaction before being used.
+   */
+  public CompactionRequest(HRegion region, HStore store, int priority) {
+    this(region, store, null, false, priority, System
+        .nanoTime());
+  }
+
+  public CompactionRequest(HRegion r, HStore s, CompactSelection files, boolean isMajor, int p) {
+    // delegate to the internal constructor after checking basic preconditions
+    this(Preconditions.checkNotNull(r), s, Preconditions.checkNotNull(files), isMajor, p, System
+        .nanoTime());
+  }
+
+  private CompactionRequest(HRegion region, HStore store, CompactSelection files, boolean isMajor,
+      int priority, long startTime) {
     this.region = region;
     this.store = store;
-    this.compactSelection = files;
-    long sz = 0;
-    for (StoreFile sf : files.getFilesToCompact()) {
-      sz += sf.getReader().length();
-    }
-    this.totalSize = sz;
     this.isMajor = isMajor;
     this.priority = priority;
-    this.timeInNanos = System.nanoTime();
+    this.timeInNanos = startTime;
+    if (files != null) {
+      this.setSelection(files);
+    }
   }
 
   /**
@@ -162,6 +180,28 @@ public class CompactionRequest implements Comparable<CompactionRequest>,
     this.server = hrs;
   }
 
+  /**
+   * Set the files (and, implicitly, the size of the compaction based on those files)
+   * @param files files that should be included in the compaction
+   */
+  public void setSelection(CompactSelection files) {
+    long sz = 0;
+    for (StoreFile sf : files.getFilesToCompact()) {
+      sz += sf.getReader().length();
+    }
+    this.totalSize = sz;
+    this.compactSelection = files;
+  }
+
+  /**
+   * Specify if this compaction should be a major compaction based on the state of the store
+   * @param isMajor <tt>true</tt> if the system determines that this compaction should be a major
+   *          compaction
+   */
+  public void setIsMajor(boolean isMajor) {
+    this.isMajor = isMajor;
+  }
+
   @Override
   public String toString() {
     String fsList = Joiner.on(", ").join(
@@ -200,11 +240,10 @@ public class CompactionRequest implements Comparable<CompactionRequest>,
           if (completed) {
             // degenerate case: blocked regions require recursive enqueues
             if (store.getCompactPriority() <= 0) {
-              server.compactSplitThread
-                .requestCompaction(region, store, "Recursive enqueue");
+              server.compactSplitThread.requestCompaction(region, store, "Recursive enqueue", null);
             } else {
               // see if the compaction has caused us to exceed max region size
-              server.compactSplitThread.requestSplit(region);
+              server.getCompactSplitThread().requestSplit(region);
             }
           }
         } catch (IOException ex) {

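Because HStore.requestCompaction(int, CompactionRequest) now calls setSelection, setIsMajor, and setPriority on a caller-supplied request, a subclass can observe what the system actually decided. A minimal sketch (hypothetical class, not part of this commit):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

/** Hypothetical subclass: logs when the store installs its actual file selection. */
public class LoggingCompactionRequest extends CompactionRequest {
  private static final Log LOG = LogFactory.getLog(LoggingCompactionRequest.class);

  public LoggingCompactionRequest(HRegion region, HStore store) {
    // The new three-argument constructor defers selection; the store fills it in later.
    super(region, store, Store.PRIORITY_USER);
  }

  @Override
  public void setSelection(CompactSelection files) {
    // "it's up to the request if it wants to listen" -- here we just log and accept.
    LOG.info("System selected " + files.getFilesToCompact().size() + " files to compact");
    super.setSelection(files);
  }
}
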
Compactor.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.regionserver.compactions;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
@@ -41,15 +42,28 @@ public abstract class Compactor {
   /**
    * Do a minor/major compaction on an explicit set of storefiles from a Store.
-   *
-   * @param filesToCompact which files to compact
-   * @param majorCompaction true to major compact (prune all deletes, max versions, etc)
-   * @return Product of compaction or an empty list if all cells expired or deleted and
-   * nothing made it through the compaction.
+   * @param request the requested compaction
+   * @return Product of compaction or an empty list if all cells expired or deleted and nothing made
+   * it through the compaction.
    * @throws IOException
    */
-  public abstract List<Path> compact(final Collection<StoreFile> filesToCompact,
-    final boolean majorCompaction) throws IOException;
+  public abstract List<Path> compact(final CompactionRequest request) throws IOException;
+
+  /**
+   * Compact a list of files for testing. Creates a fake {@link CompactionRequest} to pass to
+   * {@link #compact(CompactionRequest)};
+   * @param filesToCompact the files to compact. These are used as the compactionSelection for the
+   *          generated {@link CompactionRequest}.
+   * @param isMajor true to major compact (prune all deletes, max versions, etc)
+   * @return Product of compaction or an empty list if all cells expired or deleted and nothing made
+   * it through the compaction.
+   * @throws IOException
+   */
+  public List<Path> compactForTesting(final Collection<StoreFile> filesToCompact, boolean isMajor)
+      throws IOException {
+    return compact(CompactionRequest.getRequestForTesting(filesToCompact, isMajor));
+  }
 
   public CompactionProgress getProgress() {
     return this.progress;

DefaultCompactor.java

@@ -59,16 +59,12 @@ public class DefaultCompactor extends Compactor {
   /**
    * Do a minor/major compaction on an explicit set of storefiles from a Store.
-   *
-   * @param filesToCompact which files to compact
-   * @param majorCompaction true to major compact (prune all deletes, max versions, etc)
-   * @return Product of compaction or an empty list if all cells expired or deleted and
-   * nothing made it through the compaction.
-   * @throws IOException
    */
   @SuppressWarnings("deprecation")
-  public List<Path> compact(final Collection<StoreFile> filesToCompact,
-    final boolean majorCompaction) throws IOException {
+  @Override
+  public List<Path> compact(final CompactionRequest request) throws IOException {
+    final Collection<StoreFile> filesToCompact = request.getFiles();
+    boolean majorCompaction = request.isMajor();
 
     // Max-sequenceID is the last key in the files we're compacting
     long maxId = StoreFile.getMaxSequenceIdInList(filesToCompact, true);
@@ -139,7 +135,8 @@ public class DefaultCompactor extends Compactor {
         scanner = store
             .getCoprocessorHost()
            .preCompactScannerOpen(store, scanners,
-                majorCompaction ? ScanType.MAJOR_COMPACT : ScanType.MINOR_COMPACT, earliestPutTs);
+                majorCompaction ? ScanType.MAJOR_COMPACT : ScanType.MINOR_COMPACT, earliestPutTs,
+                request);
       }
       ScanType scanType = majorCompaction? ScanType.MAJOR_COMPACT : ScanType.MINOR_COMPACT;
       if (scanner == null) {
@@ -150,8 +147,8 @@ public class DefaultCompactor extends Compactor {
             scanType, smallestReadPoint, earliestPutTs);
       }
       if (store.getCoprocessorHost() != null) {
-        InternalScanner cpScanner =
-          store.getCoprocessorHost().preCompact(store, scanner, scanType);
+        InternalScanner cpScanner = store.getCoprocessorHost().preCompact(store, scanner,
+          scanType, request);
         // NULL scanner returned from coprocessor hooks means skip normal processing
         if (cpScanner == null) {
           return newFiles; // an empty list

TestCompaction.java

@@ -28,6 +28,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.concurrent.CountDownLatch;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -47,13 +48,17 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
+import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.regionserver.compactions.*;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.regionserver.compactions.Compactor;
+import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactionPolicy;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -591,7 +596,7 @@ public class TestCompaction extends HBaseTestCase {
     Collection<StoreFile> storeFiles = store.getStorefiles();
     Compactor tool = store.compactor;
 
-    List<Path> newFiles = tool.compact(storeFiles, false);
+    List<Path> newFiles = tool.compactForTesting(storeFiles, false);
 
     // Now lets corrupt the compacted file.
     FileSystem fs = FileSystem.get(conf);
@@ -630,7 +635,7 @@ public class TestCompaction extends HBaseTestCase {
     }
     store.triggerMajorCompaction();
 
-    CompactionRequest request = store.requestCompaction(Store.NO_PRIORITY);
+    CompactionRequest request = store.requestCompaction(Store.NO_PRIORITY, null);
     assertNotNull("Expected to receive a compaction request", request);
     assertEquals(
       "System-requested major compaction should not occur if there are too many store files",
@@ -648,7 +653,7 @@ public class TestCompaction extends HBaseTestCase {
       createStoreFile(r);
     }
     store.triggerMajorCompaction();
-    CompactionRequest request = store.requestCompaction(Store.PRIORITY_USER);
+    CompactionRequest request = store.requestCompaction(Store.PRIORITY_USER, null);
     assertNotNull("Expected to receive a compaction request", request);
     assertEquals(
       "User-requested major compaction should always occur, even if there are too many store files",
@@ -656,5 +661,53 @@ public class TestCompaction extends HBaseTestCase {
         request.isMajor());
   }
 
+  /**
+   * Create a custom compaction request and be sure that we can track it through the queue, knowing
+   * when the compaction is completed.
+   */
+  public void testTrackingCompactionRequest() throws Exception {
+    // setup a compact/split thread on a mock server
+    HRegionServer mockServer = Mockito.mock(HRegionServer.class);
+    Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf());
+    CompactSplitThread thread = new CompactSplitThread(mockServer);
+    Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);
+
+    // setup a region/store with some files
+    Store store = r.getStore(COLUMN_FAMILY);
+    createStoreFile(r);
+    for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
+      createStoreFile(r);
+    }
+
+    CountDownLatch latch = new CountDownLatch(1);
+    TrackableCompactionRequest request = new TrackableCompactionRequest(r, (HStore) store, latch);
+    thread.requestCompaction(r, store, "test custom comapction", Store.PRIORITY_USER, request);
+    // wait for the latch to complete.
+    latch.await();
+
+    thread.interruptIfNecessary();
+  }
+
+  /**
+   * Simple {@link CompactionRequest} on which you can wait until the requested compaction finishes.
+   */
+  public static class TrackableCompactionRequest extends CompactionRequest {
+    private CountDownLatch done;
+
+    /**
+     * Constructor for a custom compaction. Uses the setXXX methods to update the state of the
+     * compaction before being used.
+     */
+    public TrackableCompactionRequest(HRegion region, HStore store, CountDownLatch finished) {
+      super(region, store, Store.PRIORITY_USER);
+      this.done = finished;
+    }
+
+    @Override
+    public void run() {
+      super.run();
+      this.done.countDown();
+    }
+  }
 }

TestMemStore.java

@@ -846,6 +846,36 @@ public class TestMemStore extends TestCase {
         Integer.toString(i2));
   }
 
+  /**
+   * Add keyvalues with a fixed memstoreTs, and checks that memstore size is decreased
+   * as older keyvalues are deleted from the memstore.
+   * @throws Exception
+   */
+  public void testUpsertMemstoreSize() throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    memstore = new MemStore(conf, KeyValue.COMPARATOR);
+    long oldSize = memstore.size.get();
+
+    List<KeyValue> l = new ArrayList<KeyValue>();
+    KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
+    KeyValue kv2 = KeyValueTestUtil.create("r", "f", "q", 101, "v");
+    KeyValue kv3 = KeyValueTestUtil.create("r", "f", "q", 102, "v");
+    kv1.setMvccVersion(1); kv2.setMvccVersion(1); kv3.setMvccVersion(1);
+    l.add(kv1); l.add(kv2); l.add(kv3);
+
+    this.memstore.upsert(l, 2); // readpoint is 2
+    long newSize = this.memstore.size.get();
+    assert (newSize > oldSize);
+
+    KeyValue kv4 = KeyValueTestUtil.create("r", "f", "q", 104, "v");
+    kv4.setMvccVersion(1);
+    l.clear(); l.add(kv4);
+    this.memstore.upsert(l, 3);
+    assertEquals(newSize, this.memstore.size.get());
+    //this.memstore = null;
+  }
+
   /**
    * Adds {@link #ROW_COUNT} rows and {@link #QUALIFIER_COUNT}
    * @param hmc Instance to add rows to.