From d3a817f212f5d012f3cc7c6277314b682a907b0b Mon Sep 17 00:00:00 2001
From: Peter Somogyi
Date: Fri, 29 Sep 2017 15:25:25 -0700
Subject: [PATCH] HBASE-18815 We need to pass something like CompactionRequest
 in CP to give user some information about the compaction

CompactionRequest was removed from CP in HBASE-18453. This change
reintroduces it as a read-only interface, still named CompactionRequest,
and renames the old CompactionRequest class to CompactionRequestImpl.
Additionally, this change removes selectionTimeInNanos from
CompactionRequest and uses selectionTime as a replacement; as a result,
CompactionRequestImpl's toString output and the compaction queue
comparator in CompactSplit now use selectionTime.

Signed-off-by: Michael Stack
---
 .../example/ZooKeeperScanPolicyObserver.java | 4 +-
 .../hbase/coprocessor/RegionObserver.java | 27 ++-
 .../hbase/mob/DefaultMobStoreCompactor.java | 9 +-
 .../hbase/regionserver/CompactSplit.java | 6 +-
 .../regionserver/DateTieredStoreEngine.java | 4 +-
 .../hadoop/hbase/regionserver/HStore.java | 28 +--
 .../regionserver/RegionCoprocessorHost.java | 37 ++--
 .../hbase/regionserver/StripeStoreEngine.java | 6 +-
 .../compactions/CompactionContext.java | 6 +-
 .../compactions/CompactionPolicy.java | 2 -
 .../compactions/CompactionRequest.java | 157 ++++-------------
 .../compactions/CompactionRequestImpl.java | 159 ++++++++++++++++++
 .../regionserver/compactions/Compactor.java | 18 +-
 .../DateTieredCompactionPolicy.java | 10 +-
 .../DateTieredCompactionRequest.java | 2 +-
 .../compactions/DateTieredCompactor.java | 6 +-
 .../compactions/DefaultCompactor.java | 14 +-
 .../compactions/FIFOCompactionPolicy.java | 8 +-
 .../RatioBasedCompactionPolicy.java | 4 +-
 .../compactions/SortedCompactionPolicy.java | 6 +-
 .../compactions/StripeCompactionPolicy.java | 18 +-
 .../compactions/StripeCompactor.java | 8 +-
 .../security/access/AccessController.java | 5 +-
 ...tAvoidCellReferencesIntoShippedBlocks.java | 4 +-
 .../coprocessor/SimpleRegionObserver.java | 17 +-
 .../coprocessor/TestCoprocessorInterface.java | 7 +-
 .../TestRegionObserverInterface.java | 6 +-
 .../TestRegionObserverScannerOpenHook.java | 4 +-
 .../mob/compactions/TestMobCompactor.java | 4 +-
 .../hbase/namespace/TestNamespaceAuditor.java | 4 +-
 .../regionserver/NoOpScanPolicyObserver.java | 4 +-
 .../hbase/regionserver/TestCompaction.java | 6 +-
 .../regionserver/TestCompactionPolicy.java | 4 +-
 .../TestDefaultCompactSelection.java | 4 +-
 .../TestHRegionServerBulkLoad.java | 4 +-
 .../regionserver/TestMajorCompaction.java | 6 +-
 .../regionserver/TestStripeStoreEngine.java | 6 +-
 .../PerfTestCompactionPolicies.java | 2 +-
 .../compactions/TestCompactor.java | 4 +-
 .../compactions/TestDateTieredCompactor.java | 4 +-
 .../TestStripeCompactionPolicy.java | 4 +-
 .../security/access/TestAccessController.java | 2 +-
 .../hbase/util/TestCoprocessorScanPolicy.java | 4 +-
 43 files changed, 382 insertions(+), 262 deletions(-)
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java

diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
index 80290dd4ca5..7f2a906be81 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
@@ -42,6 +42,7 @@ import
org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreScanner; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.zookeeper.KeeperException; @@ -228,7 +229,8 @@ public class ZooKeeperScanPolicyObserver implements RegionCoprocessor, RegionObs @Override public InternalScanner preCompactScannerOpen(ObserverContext c, Store store, List scanners, ScanType scanType, long earliestPutTs, - InternalScanner s, CompactionLifeCycleTracker tracker, long readPoint) throws IOException { + InternalScanner s, CompactionLifeCycleTracker tracker, CompactionRequest request, + long readPoint) throws IOException { ScanInfo scanInfo = getScanInfo(store, c.getEnvironment()); if (scanInfo == null) { // take default action diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 75c1da9fcd1..cbd6e4bd84c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileReader; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker; import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList; import org.apache.hadoop.hbase.util.Pair; @@ -187,9 +188,11 @@ public interface RegionObserver { * @param store the store where compaction is being requested * @param candidates the store files currently available for compaction * @param tracker tracker used to track the life cycle of a compaction + * @param request the requested compaction */ default void preCompactSelection(ObserverContext c, Store store, - List candidates, CompactionLifeCycleTracker tracker) throws IOException {} + List candidates, CompactionLifeCycleTracker tracker, + CompactionRequest request) throws IOException {} /** * Called after the {@link StoreFile}s to compact have been selected from the available @@ -198,9 +201,11 @@ public interface RegionObserver { * @param store the store being compacted * @param selected the store files selected to compact * @param tracker tracker used to track the life cycle of a compaction + * @param request the requested compaction */ default void postCompactSelection(ObserverContext c, Store store, - ImmutableList selected, CompactionLifeCycleTracker tracker) {} + ImmutableList selected, CompactionLifeCycleTracker tracker, + CompactionRequest request) {} /** * Called prior to writing the {@link StoreFile}s selected for compaction into a new @@ -221,11 +226,13 @@ public interface RegionObserver { * @param scanner the scanner over existing data used in the store file rewriting * @param scanType type of Scan * @param tracker tracker used to track the life cycle of a compaction + * @param request the requested compaction * @return the scanner to use during compaction. 
Should not be {@code null} unless the * implementation is writing new store files on its own. */ default InternalScanner preCompact(ObserverContext c, Store store, - InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker) + InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, + CompactionRequest request) throws IOException { return scanner; } @@ -245,13 +252,15 @@ public interface RegionObserver { * files * @param s the base scanner, if not {@code null}, from previous RegionObserver in the chain * @param tracker used to track the life cycle of a compaction + * @param request the requested compaction * @param readPoint the readpoint to create scanner * @return the scanner to use during compaction. {@code null} if the default implementation is to * be used. */ default InternalScanner preCompactScannerOpen(ObserverContext c, Store store, List scanners, ScanType scanType, long earliestPutTs, - InternalScanner s, CompactionLifeCycleTracker tracker, long readPoint) throws IOException { + InternalScanner s, CompactionLifeCycleTracker tracker, CompactionRequest request, + long readPoint) throws IOException { return s; } @@ -261,9 +270,11 @@ public interface RegionObserver { * @param store the store being compacted * @param resultFile the new store file written out during compaction * @param tracker used to track the life cycle of a compaction + * @param request the requested compaction */ default void postCompact(ObserverContext c, Store store, - StoreFile resultFile, CompactionLifeCycleTracker tracker) throws IOException {} + StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request) + throws IOException {} /** * Called before the region is reported as closed to the master. @@ -320,7 +331,7 @@ public interface RegionObserver { * coprocessors * @param c the environment provided by the region server * @param get the Get request - * @param exists + * @param exists the result returned by the region server * @return the value to return to the client if bypassing default processing */ default boolean preExists(ObserverContext c, Get get, @@ -799,8 +810,8 @@ public interface RegionObserver { *
<p>
* See {@link #preFlushScannerOpen(ObserverContext, Store, List, InternalScanner, long)} * and {@link #preCompactScannerOpen(ObserverContext, Store, List, ScanType, long, - * InternalScanner, CompactionLifeCycleTracker, long)} to override scanners created for flushes - * or compactions, resp. + * InternalScanner, CompactionLifeCycleTracker, CompactionRequest, long)} to override scanners + * created for flushes or compactions, resp. *
<p>
* Call CoprocessorEnvironment#complete to skip any subsequent chained coprocessors. * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java index e8ada97c72e..502a446b297 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.CellSink; import org.apache.hadoop.hbase.regionserver.HMobStore; import org.apache.hadoop.hbase.regionserver.HStore; @@ -41,17 +40,17 @@ import org.apache.hadoop.hbase.regionserver.KeyValueScanner; import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.hadoop.hbase.regionserver.ShipperListener; -import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFileScanner; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.regionserver.StoreScanner; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl; import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputControlUtil; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; /** * Compact passed set of files in the mob-enabled column family. @@ -66,7 +65,7 @@ public class DefaultMobStoreCompactor extends DefaultCompactor { private final InternalScannerFactory scannerFactory = new InternalScannerFactory() { @Override - public ScanType getScanType(CompactionRequest request) { + public ScanType getScanType(CompactionRequestImpl request) { // retain the delete markers until they are expired. 
return ScanType.COMPACT_RETAIN_DELETES; } @@ -105,7 +104,7 @@ public class DefaultMobStoreCompactor extends DefaultCompactor { } @Override - public List compact(CompactionRequest request, ThroughputController throughputController, + public List compact(CompactionRequestImpl request, ThroughputController throughputController, User user) throws IOException { return compact(request, scannerFactory, writerFactory, throughputController, user); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java index e193dcb95bf..f37e49e50d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl; import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.security.User; @@ -393,7 +393,7 @@ public class CompactSplit implements PropagatingConfigurationObserver { private static final Comparator COMPARATOR = new Comparator() { - private int compare(CompactionRequest r1, CompactionRequest r2) { + private int compare(CompactionRequestImpl r1, CompactionRequestImpl r2) { if (r1 == r2) { return 0; //they are the same request } @@ -402,7 +402,7 @@ public class CompactSplit implements PropagatingConfigurationObserver { if (cmp != 0) { return cmp; } - cmp = Long.compare(r1.getSelectionNanoTime(), r2.getSelectionNanoTime()); + cmp = Long.compare(r1.getSelectionTime(), r2.getSelectionTime()); if (cmp != 0) { return cmp; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java index f7c18f94719..4539ed625e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java @@ -23,9 +23,9 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy; import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionRequest; import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactor; @@ -81,7 +81,7 @@ public class DateTieredStoreEngine extends StoreEngine sfs = null; - CompactionRequest cr = compaction.getRequest(); + CompactionRequestImpl cr = compaction.getRequest(); try { - // Do all sanity checking in here if we have a valid 
CompactionRequest + // Do all sanity checking in here if we have a valid CompactionRequestImpl // because we need to clean up after it on the way out in a finally // block below long compactionStartTime = EnvironmentEdgeManager.currentTime(); @@ -1387,7 +1387,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat return sfs; } // Do the steps necessary to complete the compaction. - sfs = moveCompatedFilesIntoPlace(cr, newFiles, user); + sfs = moveCompactedFilesIntoPlace(cr, newFiles, user); writeCompactionWalRecord(filesToCompact, sfs); replaceStoreFiles(filesToCompact, sfs); if (cr.isMajor()) { @@ -1417,14 +1417,14 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat } } - private List moveCompatedFilesIntoPlace(CompactionRequest cr, List newFiles, + private List moveCompactedFilesIntoPlace(CompactionRequestImpl cr, List newFiles, User user) throws IOException { List sfs = new ArrayList<>(newFiles.size()); for (Path newFile : newFiles) { assert newFile != null; HStoreFile sf = moveFileIntoPlace(newFile); if (this.getCoprocessorHost() != null) { - getCoprocessorHost().postCompact(this, sf, cr.getTracker(), user); + getCoprocessorHost().postCompact(this, sf, cr.getTracker(), cr, user); } assert sf != null; sfs.add(sf); @@ -1483,7 +1483,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat * @param compactionStartTime Start time. */ private void logCompactionEndMessage( - CompactionRequest cr, List sfs, long now, long compactionStartTime) { + CompactionRequestImpl cr, List sfs, long now, long compactionStartTime) { StringBuilder message = new StringBuilder( "Completed" + (cr.isMajor() ? " major" : "") + " compaction of " + cr.getFiles().size() + (cr.isAllFiles() ? " (all)" : "") + " file(s) in " @@ -1625,7 +1625,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat // Move the compaction into place. HStoreFile sf = moveFileIntoPlace(newFile); if (this.getCoprocessorHost() != null) { - this.getCoprocessorHost().postCompact(this, sf, null, null); + this.getCoprocessorHost().postCompact(this, sf, null, null, null); } replaceStoreFiles(filesToCompact, Collections.singletonList(sf)); completeCompaction(filesToCompact); @@ -1674,7 +1674,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat removeUnneededFiles(); final CompactionContext compaction = storeEngine.createCompaction(); - CompactionRequest request = null; + CompactionRequestImpl request = null; this.lock.readLock().lock(); try { synchronized (filesCompacting) { @@ -1682,11 +1682,12 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat if (this.getCoprocessorHost() != null) { final List candidatesForCoproc = compaction.preSelect(this.filesCompacting); boolean override = false; + //TODO: is it correct way to get CompactionRequest? override = getCoprocessorHost().preCompactSelection(this, candidatesForCoproc, - tracker, user); + tracker, null, user); if (override) { // Coprocessor is overriding normal file selection. 
- compaction.forceSelect(new CompactionRequest(candidatesForCoproc)); + compaction.forceSelect(new CompactionRequestImpl(candidatesForCoproc)); } } @@ -1712,7 +1713,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat } if (this.getCoprocessorHost() != null) { this.getCoprocessorHost().postCompactSelection( - this, ImmutableList.copyOf(compaction.getRequest().getFiles()), tracker, user); + this, ImmutableList.copyOf(compaction.getRequest().getFiles()), tracker, + compaction.getRequest(), user); } // Finally, we have the resulting files list. Check if we have any files at all. request = compaction.getRequest(); @@ -1790,7 +1792,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat finishCompactionRequest(compaction.getRequest()); } - private void finishCompactionRequest(CompactionRequest cr) { + private void finishCompactionRequest(CompactionRequestImpl cr) { this.region.reportCompactionRequestEnd(cr.isMajor(), cr.getFiles().size(), cr.getSize()); if (cr.isOffPeak()) { offPeakCompactionTracker.set(false); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 1d9abca3dbd..b76980d2cd4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -74,6 +74,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList; @@ -544,11 +545,12 @@ public class RegionCoprocessorHost /** * See * {@link RegionObserver#preCompactScannerOpen(ObserverContext, Store, List, ScanType, long, - * InternalScanner, CompactionLifeCycleTracker, long)} + * InternalScanner, CompactionLifeCycleTracker, CompactionRequest, long)} */ public InternalScanner preCompactScannerOpen(final HStore store, final List scanners, final ScanType scanType, final long earliestPutTs, - final CompactionLifeCycleTracker tracker, final User user, final long readPoint) + final CompactionLifeCycleTracker tracker, final CompactionRequest request, final User user, + final long readPoint) throws IOException { return execOperationWithResult(null, coprocEnvironments.isEmpty() ? 
null : new ObserverOperationWithResult( @@ -556,7 +558,7 @@ public class RegionCoprocessorHost @Override public InternalScanner call(RegionObserver observer) throws IOException { return observer.preCompactScannerOpen(this, store, scanners, scanType, - earliestPutTs, getResult(), tracker, readPoint); + earliestPutTs, getResult(), tracker, request, readPoint); } }); } @@ -567,15 +569,18 @@ public class RegionCoprocessorHost * @param store The store where compaction is being requested * @param candidates The currently available store files * @param tracker used to track the life cycle of a compaction + * @param request the compaction request + * @param user the user * @return If {@code true}, skip the normal selection process and use the current list * @throws IOException */ public boolean preCompactSelection(final HStore store, final List candidates, - final CompactionLifeCycleTracker tracker, final User user) throws IOException { + final CompactionLifeCycleTracker tracker, final CompactionRequest request, + final User user) throws IOException { return execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation(user) { @Override public void call(RegionObserver observer) throws IOException { - observer.preCompactSelection(this, store, candidates, tracker); + observer.preCompactSelection(this, store, candidates, tracker, request); } }); } @@ -586,13 +591,16 @@ public class RegionCoprocessorHost * @param store The store where compaction is being requested * @param selected The store files selected to compact * @param tracker used to track the life cycle of a compaction + * @param request the compaction request + * @param user the user */ public void postCompactSelection(final HStore store, final ImmutableList selected, - final CompactionLifeCycleTracker tracker, final User user) throws IOException { + final CompactionLifeCycleTracker tracker, final CompactionRequest request, + final User user) throws IOException { execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation(user) { @Override public void call(RegionObserver observer) throws IOException { - observer.postCompactSelection(this, store, selected, tracker); + observer.postCompactSelection(this, store, selected, tracker, request); } }); } @@ -603,17 +611,19 @@ public class RegionCoprocessorHost * @param scanner the scanner used to read store data during compaction * @param scanType type of Scan * @param tracker used to track the life cycle of a compaction + * @param request the compaction request + * @param user the user * @throws IOException */ public InternalScanner preCompact(final HStore store, final InternalScanner scanner, - final ScanType scanType, final CompactionLifeCycleTracker tracker, final User user) - throws IOException { + final ScanType scanType, final CompactionLifeCycleTracker tracker, + final CompactionRequest request, final User user) throws IOException { return execOperationWithResult(false, scanner, coprocEnvironments.isEmpty() ? 
null : new ObserverOperationWithResult( regionObserverGetter, user) { @Override public InternalScanner call(RegionObserver observer) throws IOException { - return observer.preCompact(this, store, getResult(), scanType, tracker); + return observer.preCompact(this, store, getResult(), scanType, tracker, request); } }); } @@ -623,14 +633,17 @@ public class RegionCoprocessorHost * @param store the store being compacted * @param resultFile the new store file written during compaction * @param tracker used to track the life cycle of a compaction + * @param request the compaction request + * @param user the user * @throws IOException */ public void postCompact(final HStore store, final HStoreFile resultFile, - final CompactionLifeCycleTracker tracker, final User user) throws IOException { + final CompactionLifeCycleTracker tracker, final CompactionRequest request, final User user) + throws IOException { execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation(user) { @Override public void call(RegionObserver observer) throws IOException { - observer.postCompact(this, store, resultFile, tracker); + observer.postCompact(this, store, resultFile, tracker, request); } }); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java index 39f142f5ca7..8c2636355cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java @@ -27,9 +27,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy; import org.apache.hadoop.hbase.regionserver.compactions.StripeCompactor; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; @@ -84,12 +84,12 @@ public class StripeStoreEngine extends StoreEngine()) : this.stripeRequest.getRequest(); + ? 
new CompactionRequestImpl(new ArrayList<>()) : this.stripeRequest.getRequest(); return this.stripeRequest != null; } @Override - public void forceSelect(CompactionRequest request) { + public void forceSelect(CompactionRequestImpl request) { super.forceSelect(request); if (this.stripeRequest != null) { this.stripeRequest.setRequest(this.request); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java index d0b0731d77e..9aa383c4e66 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java @@ -36,7 +36,7 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Private public abstract class CompactionContext { - protected CompactionRequest request = null; + protected CompactionRequestImpl request = null; /** * Called before coprocessor preCompactSelection and should filter the candidates @@ -61,14 +61,14 @@ public abstract class CompactionContext { * Forces external selection to be applied for this compaction. * @param request The pre-cooked request with selection and other settings. */ - public void forceSelect(CompactionRequest request) { + public void forceSelect(CompactionRequestImpl request) { this.request = request; } public abstract List compact(ThroughputController throughputController, User user) throws IOException; - public CompactionRequest getRequest() { + public CompactionRequestImpl getRequest() { assert hasSelection(); return this.request; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java index a2778d34838..755b9d39cb2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java @@ -62,8 +62,6 @@ public abstract class CompactionPolicy { this.comConf = new CompactionConfiguration(conf, this.storeConfigInfo); } - - /** * @return The current compaction configuration settings. 
*/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java index 1e2f18a31b9..73f36837f9e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java @@ -18,142 +18,51 @@ */ package org.apache.hadoop.hbase.regionserver.compactions; -import static org.apache.hadoop.hbase.regionserver.Store.NO_PRIORITY; - -import java.util.Collection; -import java.util.stream.Collectors; - -import org.apache.hadoop.hbase.regionserver.HStoreFile; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions; +import java.util.Collection; /** - * This class holds all logical details necessary to run a compaction. + * Coprocessors use this interface to get details about compaction. */ -@InterfaceAudience.Private -public class CompactionRequest { - - // was this compaction promoted to an off-peak - private boolean isOffPeak = false; - private enum DisplayCompactionType { MINOR, ALL_FILES, MAJOR } - private DisplayCompactionType isMajor = DisplayCompactionType.MINOR; - private int priority = NO_PRIORITY; - private Collection filesToCompact; - - // CompactRequest object creation time. - private long selectionTime; - // System time used to compare objects in FIFO order. TODO: maybe use selectionTime? - private long timeInNanos; - private String regionName = ""; - private String storeName = ""; - private long totalSize = -1L; - private CompactionLifeCycleTracker tracker = CompactionLifeCycleTracker.DUMMY; - - public CompactionRequest(Collection files) { - this.selectionTime = EnvironmentEdgeManager.currentTime(); - this.timeInNanos = System.nanoTime(); - this.filesToCompact = Preconditions.checkNotNull(files, "files for compaction can not null"); - recalculateSize(); - } - - public void updateFiles(Collection files) { - this.filesToCompact = Preconditions.checkNotNull(files, "files for compaction can not null"); - recalculateSize(); - } - - public Collection getFiles() { - return this.filesToCompact; - } +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +public interface CompactionRequest { /** - * Sets the region/store name, for logging. 
+ * @return unmodifiable collection of StoreFiles in compaction */ - public void setDescription(String regionName, String storeName) { - this.regionName = regionName; - this.storeName = storeName; - } - - /** Gets the total size of all StoreFiles in compaction */ - public long getSize() { - return totalSize; - } - - public boolean isAllFiles() { - return this.isMajor == DisplayCompactionType.MAJOR - || this.isMajor == DisplayCompactionType.ALL_FILES; - } - - public boolean isMajor() { - return this.isMajor == DisplayCompactionType.MAJOR; - } - - /** Gets the priority for the request */ - public int getPriority() { - return priority; - } - - /** Sets the priority for the request */ - public void setPriority(int p) { - this.priority = p; - } - - public boolean isOffPeak() { - return this.isOffPeak; - } - - public void setOffPeak(boolean value) { - this.isOffPeak = value; - } - - public long getSelectionTime() { - return this.selectionTime; - } - - public long getSelectionNanoTime() { - return this.timeInNanos; - } + Collection getFiles(); /** - * Specify if this compaction should be a major compaction based on the state of the store - * @param isMajor true if the system determines that this compaction should be a major - * compaction + * @return total size of all StoreFiles in compaction */ - public void setIsMajor(boolean isMajor, boolean isAllFiles) { - assert isAllFiles || !isMajor; - this.isMajor = !isAllFiles ? DisplayCompactionType.MINOR - : (isMajor ? DisplayCompactionType.MAJOR : DisplayCompactionType.ALL_FILES); - } - - public void setTracker(CompactionLifeCycleTracker tracker) { - this.tracker = tracker; - } - - public CompactionLifeCycleTracker getTracker() { - return tracker; - } - - @Override - public String toString() { - String fsList = filesToCompact.stream().filter(f -> f.getReader() != null) - .map(f -> TraditionalBinaryPrefix.long2String(f.getReader().length(), "", 1)) - .collect(Collectors.joining(", ")); - - return "regionName=" + regionName + ", storeName=" + storeName + ", fileCount=" + - this.getFiles().size() + ", fileSize=" + - TraditionalBinaryPrefix.long2String(totalSize, "", 1) + - ((fsList.isEmpty()) ? "" : " (" + fsList + ")") + ", priority=" + priority + ", time=" + - timeInNanos; - } + long getSize(); /** - * Recalculate the size of the compaction based on current files. - * @param files files that should be included in the compaction + * @return true if major compaction or all files are compacted */ - private void recalculateSize() { - this.totalSize = filesToCompact.stream().map(HStoreFile::getReader) - .mapToLong(r -> r != null ? r.length() : 0L).sum(); - } + boolean isAllFiles(); + + /** + * @return true if major compaction + */ + boolean isMajor(); + + /** + * @return priority of compaction request + */ + int getPriority(); + + /** + * @return true if compaction is Off-peak + */ + boolean isOffPeak(); + + /** + * @return compaction request creation time in milliseconds + */ + long getSelectionTime(); + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java new file mode 100644 index 00000000000..0c32a17e8ed --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java @@ -0,0 +1,159 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver.compactions; + +import static org.apache.hadoop.hbase.regionserver.Store.NO_PRIORITY; + +import java.util.Collection; +import java.util.Collections; +import java.util.stream.Collectors; + +import org.apache.hadoop.hbase.regionserver.HStoreFile; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions; + +/** + * This class holds all logical details necessary to run a compaction. + */ +@InterfaceAudience.Private +public class CompactionRequestImpl implements CompactionRequest { + + // was this compaction promoted to an off-peak + private boolean isOffPeak = false; + private enum DisplayCompactionType { MINOR, ALL_FILES, MAJOR } + private DisplayCompactionType isMajor = DisplayCompactionType.MINOR; + private int priority = NO_PRIORITY; + private Collection filesToCompact; + + // CompactRequest object creation time. + private long selectionTime; + private String regionName = ""; + private String storeName = ""; + private long totalSize = -1L; + private CompactionLifeCycleTracker tracker = CompactionLifeCycleTracker.DUMMY; + + public CompactionRequestImpl(Collection files) { + this.selectionTime = EnvironmentEdgeManager.currentTime(); + this.filesToCompact = Preconditions.checkNotNull(files, "files for compaction can not null"); + recalculateSize(); + } + + public void updateFiles(Collection files) { + this.filesToCompact = Preconditions.checkNotNull(files, "files for compaction can not null"); + recalculateSize(); + } + + @Override + public Collection getFiles() { + return Collections.unmodifiableCollection(this.filesToCompact); + } + + /** + * Sets the region/store name, for logging. 
+ */ + public void setDescription(String regionName, String storeName) { + this.regionName = regionName; + this.storeName = storeName; + } + + /** Gets the total size of all StoreFiles in compaction */ + @Override + public long getSize() { + return totalSize; + } + + @Override + public boolean isAllFiles() { + return this.isMajor == DisplayCompactionType.MAJOR + || this.isMajor == DisplayCompactionType.ALL_FILES; + } + + @Override + public boolean isMajor() { + return this.isMajor == DisplayCompactionType.MAJOR; + } + + /** Gets the priority for the request */ + @Override + public int getPriority() { + return priority; + } + + /** Sets the priority for the request */ + public void setPriority(int p) { + this.priority = p; + } + + @Override + public boolean isOffPeak() { + return this.isOffPeak; + } + + public void setOffPeak(boolean value) { + this.isOffPeak = value; + } + + @Override + public long getSelectionTime() { + return this.selectionTime; + } + + /** + * Specify if this compaction should be a major compaction based on the state of the store + * @param isMajor true if the system determines that this compaction should be a major + * compaction + */ + public void setIsMajor(boolean isMajor, boolean isAllFiles) { + assert isAllFiles || !isMajor; + this.isMajor = !isAllFiles ? DisplayCompactionType.MINOR + : (isMajor ? DisplayCompactionType.MAJOR : DisplayCompactionType.ALL_FILES); + } + + public void setTracker(CompactionLifeCycleTracker tracker) { + this.tracker = tracker; + } + + public CompactionLifeCycleTracker getTracker() { + return tracker; + } + + @Override + public String toString() { + String fsList = filesToCompact.stream().filter(f -> f.getReader() != null) + .map(f -> TraditionalBinaryPrefix.long2String(f.getReader().length(), "", 1)) + .collect(Collectors.joining(", ")); + + return "regionName=" + regionName + ", storeName=" + storeName + ", fileCount=" + + this.getFiles().size() + ", fileSize=" + + TraditionalBinaryPrefix.long2String(totalSize, "", 1) + + ((fsList.isEmpty()) ? "" : " (" + fsList + ")") + ", priority=" + priority + ", time=" + + selectionTime; + } + + /** + * Recalculate the size of the compaction based on current files. + */ + private void recalculateSize() { + this.totalSize = filesToCompact.stream().map(HStoreFile::getReader) + .mapToLong(r -> r != null ? r.length() : 0L).sum(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index 5865ed56651..7ca3ab4a2bf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -229,7 +229,7 @@ public abstract class Compactor { protected interface InternalScannerFactory { - ScanType getScanType(CompactionRequest request); + ScanType getScanType(CompactionRequestImpl request); InternalScanner createScanner(List scanners, ScanType scanType, FileDetails fd, long smallestReadPoint) throws IOException; @@ -238,7 +238,7 @@ public abstract class Compactor { protected final InternalScannerFactory defaultScannerFactory = new InternalScannerFactory() { @Override - public ScanType getScanType(CompactionRequest request) { + public ScanType getScanType(CompactionRequestImpl request) { return request.isAllFiles() ? 
ScanType.COMPACT_DROP_DELETES : ScanType.COMPACT_RETAIN_DELETES; } @@ -267,7 +267,7 @@ public abstract class Compactor { /* includesTags = */fd.maxTagsLength > 0, shouldDropBehind); } - protected List compact(final CompactionRequest request, + protected List compact(final CompactionRequestImpl request, InternalScannerFactory scannerFactory, CellSinkFactory sinkFactory, ThroughputController throughputController, User user) throws IOException { FileDetails fd = getFileDetails(request.getFiles(), request.isAllFiles()); @@ -325,8 +325,8 @@ public abstract class Compactor { return commitWriter(writer, fd, request); } - protected abstract List commitWriter(T writer, FileDetails fd, CompactionRequest request) - throws IOException; + protected abstract List commitWriter(T writer, FileDetails fd, + CompactionRequestImpl request) throws IOException; protected abstract void abortWriter(T writer) throws IOException; @@ -340,14 +340,14 @@ public abstract class Compactor { * @param readPoint the read point to help create scanner by Coprocessor if required. * @return Scanner override by coprocessor; null if not overriding. */ - protected InternalScanner preCreateCoprocScanner(CompactionRequest request, ScanType scanType, + protected InternalScanner preCreateCoprocScanner(CompactionRequestImpl request, ScanType scanType, long earliestPutTs, List scanners, User user, long readPoint) throws IOException { if (store.getCoprocessorHost() == null) { return null; } return store.getCoprocessorHost().preCompactScannerOpen(store, scanners, scanType, - earliestPutTs, request.getTracker(), user, readPoint); + earliestPutTs, request.getTracker(), request, user, readPoint); } /** @@ -357,13 +357,13 @@ public abstract class Compactor { * @param scanner The default scanner created for compaction. * @return Scanner scanner to use (usually the default); null if compaction should not proceed. */ - protected InternalScanner postCreateCoprocScanner(CompactionRequest request, ScanType scanType, + protected InternalScanner postCreateCoprocScanner(CompactionRequestImpl request, ScanType scanType, InternalScanner scanner, User user) throws IOException { if (store.getCoprocessorHost() == null) { return scanner; } return store.getCoprocessorHost().preCompact(store, scanner, scanType, request.getTracker(), - user); + request, user); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java index a4cc65c3785..a0c3e309c98 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java @@ -186,9 +186,9 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { } @Override - protected CompactionRequest createCompactionRequest(ArrayList candidateSelection, + protected CompactionRequestImpl createCompactionRequest(ArrayList candidateSelection, boolean tryingMajor, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { - CompactionRequest result = tryingMajor ? selectMajorCompaction(candidateSelection) + CompactionRequestImpl result = tryingMajor ? 
selectMajorCompaction(candidateSelection) : selectMinorCompaction(candidateSelection, mayUseOffPeak, mayBeStuck); if (LOG.isDebugEnabled()) { LOG.debug("Generated compaction request: " + result); @@ -196,7 +196,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { return result; } - public CompactionRequest selectMajorCompaction(ArrayList candidateSelection) { + public CompactionRequestImpl selectMajorCompaction(ArrayList candidateSelection) { long now = EnvironmentEdgeManager.currentTime(); return new DateTieredCompactionRequest(candidateSelection, this.getCompactBoundariesForMajor(candidateSelection, now)); @@ -210,7 +210,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { * by seqId and maxTimestamp in descending order and build the time windows. All the out-of-order * data into the same compaction windows, guaranteeing contiguous compaction based on sequence id. */ - public CompactionRequest selectMinorCompaction(ArrayList candidateSelection, + public CompactionRequestImpl selectMinorCompaction(ArrayList candidateSelection, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { long now = EnvironmentEdgeManager.currentTime(); long oldestToCompact = getOldestToCompact(comConf.getDateTieredMaxStoreFileAgeMillis(), now); @@ -261,7 +261,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { } } // A non-null file list is expected by HStore - return new CompactionRequest(Collections.emptyList()); + return new CompactionRequestImpl(Collections.emptyList()); } private DateTieredCompactionRequest generateCompactionRequest(ArrayList storeFiles, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java index 5fddf33ebff..03571d51f0e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.regionserver.HStoreFile; @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_DOESNT_OVERRIDE_EQUALS", justification="It is intended to use the same equal method as superclass") -public class DateTieredCompactionRequest extends CompactionRequest { +public class DateTieredCompactionRequest extends CompactionRequestImpl { private List boundaries; public DateTieredCompactionRequest(Collection files, List boundaryList) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java index d0beed08e9e..09dda90d905 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java @@ -45,7 +45,7 @@ public class DateTieredCompactor extends AbstractMultiOutputCompactor compact(final CompactionRequest request, final List lowerBoundaries, + public List compact(final CompactionRequestImpl request, final List lowerBoundaries, ThroughputController throughputController, User user) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Executing compaction with " + lowerBoundaries.size() @@ -77,7 +77,7 @@ public class 
DateTieredCompactor extends AbstractMultiOutputCompactor commitWriter(DateTieredMultiFileWriter writer, FileDetails fd, - CompactionRequest request) throws IOException { + CompactionRequestImpl request) throws IOException { return writer.commitWriters(fd.maxSeqId, request.isAllFiles()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java index 08951b4d579..14539b0e2ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; /** * Compact passed set of files. Create an instance and then call - * {@link #compact(CompactionRequest, ThroughputController, User)} + * {@link #compact(CompactionRequestImpl, ThroughputController, User)} */ @InterfaceAudience.Private public class DefaultCompactor extends Compactor { @@ -61,16 +61,16 @@ public class DefaultCompactor extends Compactor { /** * Do a minor/major compaction on an explicit set of storefiles from a Store. */ - public List compact(final CompactionRequest request, + public List compact(final CompactionRequestImpl request, ThroughputController throughputController, User user) throws IOException { return compact(request, defaultScannerFactory, writerFactory, throughputController, user); } /** - * Compact a list of files for testing. Creates a fake {@link CompactionRequest} to pass to - * {@link #compact(CompactionRequest, ThroughputController, User)}; + * Compact a list of files for testing. Creates a fake {@link CompactionRequestImpl} to pass to + * {@link #compact(CompactionRequestImpl, ThroughputController, User)}; * @param filesToCompact the files to compact. These are used as the compactionSelection for the - * generated {@link CompactionRequest}. + * generated {@link CompactionRequestImpl}. * @param isMajor true to major compact (prune all deletes, max versions, etc) * @return Product of compaction or an empty list if all cells expired or deleted and nothing \ * made it through the compaction. 
@@ -78,14 +78,14 @@ public class DefaultCompactor extends Compactor { */ public List compactForTesting(Collection filesToCompact, boolean isMajor) throws IOException { - CompactionRequest cr = new CompactionRequest(filesToCompact); + CompactionRequestImpl cr = new CompactionRequestImpl(filesToCompact); cr.setIsMajor(isMajor, isMajor); return compact(cr, NoLimitThroughputController.INSTANCE, null); } @Override protected List commitWriter(StoreFileWriter writer, FileDetails fd, - CompactionRequest request) throws IOException { + CompactionRequestImpl request) throws IOException { List newFiles = Lists.newArrayList(writer.getPath()); writer.appendMetadata(fd.maxSeqId, request.isAllFiles()); writer.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java index a0609bc6930..032a9c614d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java @@ -55,7 +55,7 @@ public class FIFOCompactionPolicy extends ExploringCompactionPolicy { } @Override - public CompactionRequest selectCompaction(Collection candidateFiles, + public CompactionRequestImpl selectCompaction(Collection candidateFiles, List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, boolean forceMajor) throws IOException { if(forceMajor){ @@ -67,10 +67,10 @@ public class FIFOCompactionPolicy extends ExploringCompactionPolicy { return super.selectCompaction(candidateFiles, filesCompacting, isUserCompaction, mayUseOffPeak, forceMajor); } - + // Nothing to compact Collection toCompact = getExpiredStores(candidateFiles, filesCompacting); - CompactionRequest result = new CompactionRequest(toCompact); + CompactionRequestImpl result = new CompactionRequestImpl(toCompact); return result; } @@ -123,7 +123,7 @@ public class FIFOCompactionPolicy extends ExploringCompactionPolicy { if (maxTtl == Long.MAX_VALUE || (currentTime - maxTtl < maxTs)){ continue; - } else if(filesCompacting == null || filesCompacting.contains(sf) == false){ + } else if(filesCompacting == null || !filesCompacting.contains(sf)){ expiredStores.add(sf); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java index 172f17a2dfa..98f87e61a53 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java @@ -113,7 +113,7 @@ public class RatioBasedCompactionPolicy extends SortedCompactionPolicy { } @Override - protected CompactionRequest createCompactionRequest(ArrayList + protected CompactionRequestImpl createCompactionRequest(ArrayList candidateSelection, boolean tryingMajor, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { if (!tryingMajor) { @@ -122,7 +122,7 @@ public class RatioBasedCompactionPolicy extends SortedCompactionPolicy { candidateSelection = checkMinFilesCriteria(candidateSelection, comConf.getMinFilesToCompact()); } - return new CompactionRequest(candidateSelection); + return new CompactionRequestImpl(candidateSelection); } /** diff --git 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
index facc161f608..f284489eaaa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
@@ -53,7 +53,7 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
    * on seqId for data consistency.
    * @return subset copy of candidate list that meets compaction criteria
    */
-  public CompactionRequest selectCompaction(Collection candidateFiles,
+  public CompactionRequestImpl selectCompaction(Collection candidateFiles,
       List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak,
       boolean forceMajor) throws IOException {
     // Preliminary compaction subject to filters
@@ -85,7 +85,7 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
     // Or, if there are any references among the candidates.
     boolean isAfterSplit = StoreUtils.hasReferences(candidateSelection);

-    CompactionRequest result = createCompactionRequest(candidateSelection,
+    CompactionRequestImpl result = createCompactionRequest(candidateSelection,
       isTryingMajor || isAfterSplit, mayUseOffPeak, mayBeStuck);

     ArrayList filesToCompact = Lists.newArrayList(result.getFiles());
@@ -99,7 +99,7 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
     return result;
   }

-  protected abstract CompactionRequest createCompactionRequest(
+  protected abstract CompactionRequestImpl createCompactionRequest(
       ArrayList candidateSelection, boolean tryingMajor, boolean mayUseOffPeak,
       boolean mayBeStuck) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
index 9dc8246f261..b6de6783bcd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
@@ -74,7 +74,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
   }

   public StripeCompactionRequest createEmptyRequest(
-      StripeInformationProvider si, CompactionRequest request) {
+      StripeInformationProvider si, CompactionRequestImpl request) {
     // Treat as L0-ish compaction with fixed set of files, and hope for the best.
     if (si.getStripeCount() > 0) {
       return new BoundaryStripeCompactionRequest(request, si.getStripeBoundaries());
@@ -376,7 +376,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {

   /** Stripe compaction request wrapper. */
   public abstract static class StripeCompactionRequest {
-    protected CompactionRequest request;
+    protected CompactionRequestImpl request;
     protected byte[] majorRangeFromRow = null, majorRangeToRow = null;

     public List execute(StripeCompactor compactor,
@@ -392,7 +392,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
     public abstract List execute(StripeCompactor compactor,
       ThroughputController throughputController, User user) throws IOException;

-    public StripeCompactionRequest(CompactionRequest request) {
+    public StripeCompactionRequest(CompactionRequestImpl request) {
       this.request = request;
     }
@@ -407,11 +407,11 @@ public class StripeCompactionPolicy extends CompactionPolicy {
       this.majorRangeToRow = endRow;
     }

-    public CompactionRequest getRequest() {
+    public CompactionRequestImpl getRequest() {
       return this.request;
     }

-    public void setRequest(CompactionRequest request) {
+    public void setRequest(CompactionRequestImpl request) {
       assert request != null;
       this.request = request;
       this.majorRangeFromRow = this.majorRangeToRow = null;
@@ -429,7 +429,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
      * @param request Original request.
      * @param targetBoundaries New files should be written with these boundaries.
      */
-    public BoundaryStripeCompactionRequest(CompactionRequest request,
+    public BoundaryStripeCompactionRequest(CompactionRequestImpl request,
         List targetBoundaries) {
       super(request);
       this.targetBoundaries = targetBoundaries;
@@ -437,7 +437,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {

     public BoundaryStripeCompactionRequest(Collection files,
         List targetBoundaries) {
-      this(new CompactionRequest(files), targetBoundaries);
+      this(new CompactionRequestImpl(files), targetBoundaries);
     }

     @Override
@@ -467,7 +467,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
      * @param targetKvs The KV count of each segment. If targetKvs*targetCount is less than
      *                  total number of kvs, all the overflow data goes into the last stripe.
      */
-    public SplitStripeCompactionRequest(CompactionRequest request,
+    public SplitStripeCompactionRequest(CompactionRequestImpl request,
         byte[] startRow, byte[] endRow, int targetCount, long targetKvs) {
       super(request);
       this.startRow = startRow;
@@ -483,7 +483,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {

     public SplitStripeCompactionRequest(Collection files,
         byte[] startRow, byte[] endRow, int targetCount, long targetKvs) {
-      this(new CompactionRequest(files), startRow, endRow, targetCount, targetKvs);
+      this(new CompactionRequestImpl(files), startRow, endRow, targetCount, targetKvs);
     }

     @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java
index f552f9684e4..f4836a8d709 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java
@@ -58,7 +58,7 @@ public class StripeCompactor extends AbstractMultiOutputCompactor
-  public List compact(CompactionRequest request, final List targetBoundaries,
+  public List compact(CompactionRequestImpl request, final List targetBoundaries,
     final byte[] majorRangeFromRow, final byte[] majorRangeToRow,
     ThroughputController throughputController, User user) throws IOException {
     if (LOG.isDebugEnabled()) {
@@ -101,7 +101,7 @@ public class StripeCompactor extends AbstractMultiOutputCompactor
-  public List compact(CompactionRequest request, final int targetCount, final long targetSize,
+  public List compact(CompactionRequestImpl request, final int targetCount, final long targetSize,
     final byte[] left, final byte[] right, byte[] majorRangeFromRow, byte[] majorRangeToRow,
     ThroughputController throughputController, User user) throws IOException {
     if (LOG.isDebugEnabled()) {
@@ -125,7 +125,7 @@ public class StripeCompactor extends AbstractMultiOutputCompactor
   protected List commitWriter(StripeMultiFileWriter writer, FileDetails fd,
-      CompactionRequest request) throws IOException {
+      CompactionRequestImpl request) throws IOException {
     List newFiles = writer.commitWriters(fd.maxSeqId, request.isMajor());
     assert !newFiles.isEmpty() : "Should have produced an empty file to preserve metadata.";
     return newFiles;
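Illustration of the stripe-side plumbing above (not part of the patch): callers hand the wrapper an internal CompactionRequestImpl and can get it back via getRequest(). The nested class is assumed to be accessible as its public constructor suggests; 'files' and 'boundaries' are hypothetical arguments, and type parameters are omitted as in the hunks above.

    // Sketch only: build a boundary-based stripe request around an internal request.
    static StripeCompactionPolicy.StripeCompactionRequest wrap(Collection files, List boundaries) {
      CompactionRequestImpl base = new CompactionRequestImpl(files);
      StripeCompactionPolicy.BoundaryStripeCompactionRequest scr =
          new StripeCompactionPolicy.BoundaryStripeCompactionRequest(base, boundaries);
      // The wrapper keeps a handle on the internal request for later adjustment.
      assert scr.getRequest() == base;
      return scr;
    }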
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index c8a089b43a7..83f8b3b37da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -113,6 +113,7 @@ import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
@@ -1572,8 +1573,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
   @Override
   public InternalScanner preCompact(ObserverContext c, Store store,
-      InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker)
-      throws IOException {
+      InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
+      CompactionRequest request) throws IOException {
     requirePermission(getActiveUser(c), "compact", getTableName(c.getEnvironment()), null, null,
       Action.ADMIN, Action.CREATE);
     return scanner;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
index 2d99e00b5c8..5dee0e39cd8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -265,7 +266,8 @@ public class TestAvoidCellReferencesIntoShippedBlocks {
     @Override
     public InternalScanner preCompactScannerOpen(ObserverContext c, Store store,
         List scanners, ScanType scanType,
-        long earliestPutTs, InternalScanner s, CompactionLifeCycleTracker request, long readPoint)
+        long earliestPutTs, InternalScanner s, CompactionLifeCycleTracker tracker,
+        CompactionRequest request, long readPoint)
         throws IOException {
       return createCompactorScanner((HStore) store, scanners, scanType, earliestPutTs);
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
index 727fa398c08..71ea6ffdd44 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileReader;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WALEdit;
@@ -202,20 +203,22 @@ public class SimpleRegionObserver implements RegionCoprocessor, RegionObserver {
   @Override
   public void preCompactSelection(ObserverContext c, Store store,
-      List candidates, CompactionLifeCycleTracker tracker) throws IOException {
+      List candidates, CompactionLifeCycleTracker tracker,
+      CompactionRequest request) throws IOException {
     ctPreCompactSelect.incrementAndGet();
   }

   @Override
   public void postCompactSelection(ObserverContext c, Store store,
-      ImmutableList selected, CompactionLifeCycleTracker tracker) {
+      ImmutableList selected, CompactionLifeCycleTracker tracker,
+      CompactionRequest request) {
     ctPostCompactSelect.incrementAndGet();
   }

   @Override
   public InternalScanner preCompact(ObserverContext c, Store store,
-      InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker)
-      throws IOException {
+      InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
+      CompactionRequest request) throws IOException {
     ctPreCompact.incrementAndGet();
     return scanner;
   }
@@ -223,14 +226,16 @@ public class SimpleRegionObserver implements RegionCoprocessor, RegionObserver {
   @Override
   public InternalScanner preCompactScannerOpen(ObserverContext c, Store store,
       List scanners, ScanType scanType, long earliestPutTs,
-      InternalScanner s, CompactionLifeCycleTracker tracker, long readPoint) throws IOException {
+      InternalScanner s, CompactionLifeCycleTracker tracker, CompactionRequest request,
+      long readPoint) throws IOException {
     ctPreCompactScanner.incrementAndGet();
     return s;
   }

   @Override
   public void postCompact(ObserverContext c, Store store,
-      StoreFile resultFile, CompactionLifeCycleTracker tracker) throws IOException {
+      StoreFile resultFile, CompactionLifeCycleTracker tracker,
+      CompactionRequest request) throws IOException {
     ctPostCompact.incrementAndGet();
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
index 8c5dbae604e..ecb3f244555 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
@@ -64,6 +64,7 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.Rule;
@@ -201,13 +202,15 @@ public class TestCoprocessorInterface {
     }
     @Override
     public InternalScanner preCompact(ObserverContext e,
-        Store store, InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker) {
+        Store store, InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
+        CompactionRequest request) {
       preCompactCalled = true;
       return scanner;
     }
     @Override
     public void postCompact(ObserverContext e,
-        Store store, StoreFile resultFile, CompactionLifeCycleTracker tracker) {
+        Store store, StoreFile resultFile, CompactionLifeCycleTracker tracker,
+        CompactionRequest request) {
       postCompactCalled = true;
     }
     @Override
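For downstream coprocessor authors, the hunks above define the new hook shape. A minimal sketch of an observer that merely inspects the reintroduced read-only request; the class is hypothetical, and the isMajor()/getFiles() accessors are assumed from the interface's stated purpose rather than shown in this patch.

    import java.util.Optional;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;
    import org.apache.hadoop.hbase.regionserver.InternalScanner;
    import org.apache.hadoop.hbase.regionserver.ScanType;
    import org.apache.hadoop.hbase.regionserver.Store;
    import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
    import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

    // Hypothetical observer, for illustration only.
    public class CompactionLoggingObserver implements RegionCoprocessor, RegionObserver {

      @Override
      public Optional<RegionObserver> getRegionObserver() {
        return Optional.of(this);
      }

      @Override
      public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c,
          Store store, InternalScanner scanner, ScanType scanType,
          CompactionLifeCycleTracker tracker, CompactionRequest request) {
        // Assumed accessors on the read-only request: getFiles() and isMajor().
        System.out.println("Compacting " + request.getFiles().size()
            + " files, major=" + request.isMajor());
        return scanner; // observe only; do not replace the scanner
      }
    }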
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
index 88e548a1a45..e6c1da95f93 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
@@ -73,6 +73,7 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
@@ -423,7 +424,8 @@ public class TestRegionObserverInterface {
     @Override
     public InternalScanner preCompact(ObserverContext e, Store store,
-        InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker) {
+        InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
+        CompactionRequest request) {
       return new InternalScanner() {
         @Override
         public boolean next(List results) throws IOException {
@@ -462,7 +464,7 @@ public class TestRegionObserverInterface {
     @Override
     public void postCompact(ObserverContext e, Store store,
-        StoreFile resultFile, CompactionLifeCycleTracker tracker) {
+        StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request) {
       lastCompaction = EnvironmentEdgeManager.currentTime();
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
index 2c9ab201cc4..0a95f4161d7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
@@ -177,7 +178,8 @@ public class TestRegionObserverScannerOpenHook {
     @Override
     public InternalScanner preCompactScannerOpen(ObserverContext c, Store store,
         List scanners, ScanType scanType,
-        long earliestPutTs, InternalScanner s, CompactionLifeCycleTracker tracker, long readPoint)
+        long earliestPutTs, InternalScanner s, CompactionLifeCycleTracker tracker,
+        CompactionRequest request, long readPoint)
         throws IOException {
       scanners.forEach(KeyValueScanner::close);
       return NO_DATA;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
index e0d9fa24bbc..7add5d2d224 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
@@ -89,6 +89,7 @@ import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.security.EncryptionUtil;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -730,7 +731,8 @@ public class TestMobCompactor {
     @Override
     public void preCompactSelection(ObserverContext c, Store store,
-        List candidates, CompactionLifeCycleTracker tracker)
+        List candidates, CompactionLifeCycleTracker tracker,
+        CompactionRequest request)
         throws IOException {
       int count = candidates.size();
       if (count >= 2) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
index 47e37833057..2140cc1dc7c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
@@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.quotas.QuotaUtil;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -465,7 +466,8 @@ public class TestNamespaceAuditor {
     @Override
     public void postCompact(ObserverContext e, Store store,
-        StoreFile resultFile, CompactionLifeCycleTracker tracker) throws IOException {
+        StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request)
+        throws IOException {
       postCompact.countDown();
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
index bde28a2410f..2b98cf23630 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

 /**
  * RegionObserver that just reimplements the default behavior,
@@ -69,7 +70,8 @@ public class NoOpScanPolicyObserver implements RegionCoprocessor, RegionObserver
   public InternalScanner preCompactScannerOpen(
       final ObserverContext c, Store store,
       List scanners, ScanType scanType, long earliestPutTs,
-      InternalScanner s, CompactionLifeCycleTracker tracker, long readPoint) throws IOException {
+      InternalScanner s, CompactionLifeCycleTracker tracker, CompactionRequest request,
+      long readPoint) throws IOException {
     HStore hs = (HStore) store;
     // this demonstrates how to override the scanners default behavior
     ScanInfo oldSI = hs.getScanInfo();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index 349815f324e..a9f331e6f41 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -59,7 +59,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
 import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
 import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
 import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
@@ -415,7 +415,7 @@ public class TestCompaction {
     @Override
     public boolean select(List filesCompacting, boolean isUserCompaction,
         boolean mayUseOffPeak, boolean forceMajor) throws IOException {
-      this.request = new CompactionRequest(selectedFiles);
+      this.request = new CompactionRequestImpl(selectedFiles);
       this.request.setPriority(getPriority());
       return true;
     }
@@ -496,7 +496,7 @@ public class TestCompaction {
     @Override
     public boolean select(List f, boolean i, boolean m, boolean e)
         throws IOException {
-      this.request = new CompactionRequest(new ArrayList<>());
+      this.request = new CompactionRequestImpl(new ArrayList<>());
       return true;
     }
   }
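The test doubles above share one pattern on the selection side: a CompactionContext subclass fixes its own file list and materialises it as the internal request type. A sketch of just that hook, with the other members of the subclass elided; 'selectedFiles' is a hypothetical field and getPriority() stands in for whatever priority source the enclosing class provides, as in the hunk above.

    // Inside a hypothetical CompactionContext subclass (other members elided).
    @Override
    public boolean select(List filesCompacting, boolean isUserCompaction,
        boolean mayUseOffPeak, boolean forceMajor) throws IOException {
      // Selection now always materialises as the internal, mutable type.
      this.request = new CompactionRequestImpl(selectedFiles);
      this.request.setPriority(getPriority());
      return true;
    }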
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
index 6ae10ec33de..1e9fd409ce1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
 import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
 import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -192,7 +192,7 @@ public class TestCompactionPolicy {
       long... expected) throws IOException {
     store.forceMajor = forcemajor;
     // Test Default compactions
-    CompactionRequest result =
+    CompactionRequestImpl result =
         ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy()).selectCompaction(
           candidates, new ArrayList<>(), false, isOffPeak, forcemajor);
     List actual = new ArrayList<>(result.getFiles());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
index 3b4ce462ce2..c6c0bdc440c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
@@ -22,7 +22,7 @@ import java.util.ArrayList;
 import java.util.List;

 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
 import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -173,7 +173,7 @@ public class TestDefaultCompactSelection extends TestCompactionPolicy {
       }
     }
     // Test Default compactions
-    CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine
+    CompactionRequestImpl result = ((RatioBasedCompactionPolicy) store.storeEngine
         .getCompactionPolicy()).selectCompaction(candidates,
           new ArrayList<>(), false, false, false);
     Assert.assertTrue(result.getFiles().isEmpty());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
index 8c604bf4cdc..53d9741e96d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
@@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.TestWALActionsListener;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -262,7 +263,8 @@ public class TestHRegionServerBulkLoad {
     @Override
     public InternalScanner preCompact(ObserverContext e, Store store,
-        InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker)
+        InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
+        CompactionRequest request)
         throws IOException {
       try {
         Thread.sleep(sleepDuration);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
index 71f18c0427e..323564e3d1a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
 import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -417,7 +417,7 @@ public class TestMajorCompaction {
     }
     store.triggerMajorCompaction();
-    CompactionRequest request = store.requestCompaction().get().getRequest();
+    CompactionRequestImpl request = store.requestCompaction().get().getRequest();
     assertNotNull("Expected to receive a compaction request", request);
     assertEquals(
       "System-requested major compaction should not occur if there are too many store files",
@@ -436,7 +436,7 @@ public class TestMajorCompaction {
       createStoreFile(r);
     }
     store.triggerMajorCompaction();
-    CompactionRequest request =
+    CompactionRequestImpl request =
       store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null).get()
         .getRequest();
     assertNotNull("Expected to receive a compaction request", request);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java
index a562af86979..41f124d4444 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
 import org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy;
 import org.apache.hadoop.hbase.regionserver.compactions.StripeCompactor;
 import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
@@ -76,7 +76,7 @@ public class TestStripeStoreEngine {
     StripeCompactor mockCompactor = mock(StripeCompactor.class);
     se.setCompactorOverride(mockCompactor);
     when(
-      mockCompactor.compact(any(CompactionRequest.class), anyInt(), anyLong(), any(byte[].class),
+      mockCompactor.compact(any(CompactionRequestImpl.class), anyInt(), anyLong(), any(byte[].class),
         any(byte[].class), any(byte[].class), any(byte[].class),
         any(ThroughputController.class), any(User.class)))
       .thenReturn(new ArrayList<>());
@@ -92,7 +92,7 @@ public class TestStripeStoreEngine {
     // Override the file list. Granted, overriding this compaction in this manner will
     // break things in real world, but we only want to verify the override.
     compactUs.remove(sf);
-    CompactionRequest req = new CompactionRequest(compactUs);
+    CompactionRequestImpl req = new CompactionRequestImpl(compactUs);
     compaction.forceSelect(req);
     assertEquals(2, compaction.getRequest().getFiles().size());
     assertFalse(compaction.getRequest().getFiles().contains(sf));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java
index 55546833ca8..7cdd24d4982 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java
@@ -169,7 +169,7 @@ public class PerfTestCompactionPolicies extends MockStoreFileGenerator {
   private List runIteration(List startingStoreFiles) throws IOException {

     List storeFiles = new ArrayList<>(startingStoreFiles);
-    CompactionRequest req = cp.selectCompaction(
+    CompactionRequestImpl req = cp.selectCompaction(
         storeFiles, new ArrayList<>(), false, false, false);
     long newFileSize = 0;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java
index eb4801a8261..2acf1da757d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java
@@ -71,8 +71,8 @@ public class TestCompactor {
     return sf;
   }

-  public static CompactionRequest createDummyRequest() throws Exception {
-    return new CompactionRequest(Arrays.asList(createDummyStoreFile(1L)));
+  public static CompactionRequestImpl createDummyRequest() throws Exception {
+    return new CompactionRequestImpl(Arrays.asList(createDummyStoreFile(1L)));
   }

   // StoreFile.Writer has private ctor and is unwieldy, so this has to be convoluted.
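A focused test of the renamed class needs nothing beyond what this patch already shows: the constructor plus the flag accessors used by DefaultCompactor and StripeCompactor above. Sketch only; the setIsMajor(isMajor, isAllFiles) parameter order is assumed, since compactForTesting passes the same value for both.

    // Sketch of a unit test using only members already visible in this patch.
    @Test
    public void testRequestFlags() throws Exception {
      CompactionRequestImpl request =
          new CompactionRequestImpl(Arrays.asList(createDummyStoreFile(1L)));
      request.setIsMajor(true, true); // assumed order: (isMajor, isAllFiles)
      assertTrue(request.isMajor());
      assertTrue(request.isAllFiles());
      assertEquals(1, request.getFiles().size());
    }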
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
index 04b0ba9377b..c1a9c29fc38 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
@@ -130,7 +130,7 @@ public class TestDateTieredCompactor {
     HStoreFile sf1 = createDummyStoreFile(1L);
     HStoreFile sf2 = createDummyStoreFile(2L);
     DateTieredCompactor dtc = createCompactor(writers, input, Arrays.asList(sf1, sf2));
-    List paths = dtc.compact(new CompactionRequest(Arrays.asList(sf1)),
+    List paths = dtc.compact(new CompactionRequestImpl(Arrays.asList(sf1)),
       boundaries.subList(0, boundaries.size() - 1), NoLimitThroughputController.INSTANCE, null);
     writers.verifyKvs(output, allFiles, boundaries);
     if (allFiles) {
@@ -156,7 +156,7 @@ public class TestDateTieredCompactor {
   @Test
   public void testEmptyOutputFile() throws Exception {
     StoreFileWritersCapture writers = new StoreFileWritersCapture();
-    CompactionRequest request = createDummyRequest();
+    CompactionRequestImpl request = createDummyRequest();
     DateTieredCompactor dtc = createCompactor(writers, new KeyValue[0],
       new ArrayList<>(request.getFiles()));
     List paths = dtc.compact(request, Arrays.asList(Long.MIN_VALUE, Long.MAX_VALUE),
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
index 1249fee6ced..f3cb2938252 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
@@ -230,7 +230,9 @@ public class TestStripeCompactionPolicy {
     assertTrue(policy.needsCompactions(si, al()));
     StripeCompactionPolicy.StripeCompactionRequest scr = policy.selectCompaction(si, al(), false);
-    assertEquals(si.getStorefiles(), scr.getRequest().getFiles());
+    // UnmodifiableCollection does not implement equals so we need to change it here to a
+    // collection that implements it.
+    assertEquals(si.getStorefiles(), new ArrayList<>(scr.getRequest().getFiles()));
     scr.execute(sc, NoLimitThroughputController.INSTANCE, null);
     verify(sc, only()).compact(eq(scr.getRequest()), anyInt(), anyLong(), aryEq(OPEN_KEY),
       aryEq(OPEN_KEY), aryEq(OPEN_KEY), aryEq(OPEN_KEY),
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 81c11fd4891..0fe5b2e2f48 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -874,7 +874,7 @@ public class TestAccessController extends SecureTestUtil {
       @Override
       public Object run() throws Exception {
         ACCESS_CONTROLLER.preCompact(ObserverContext.createAndPrepare(RCP_ENV, null), null, null,
-          ScanType.COMPACT_RETAIN_DELETES, null);
+          ScanType.COMPACT_RETAIN_DELETES, null, null);
         return null;
       }
     };
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
index 67ee8edb382..ab9bfc59766 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.wal.WALEdit;
@@ -272,7 +273,8 @@ public class TestCoprocessorScanPolicy {
     public InternalScanner preCompactScannerOpen(
         final ObserverContext c, Store store,
         List scanners, ScanType scanType, long earliestPutTs,
-        InternalScanner s, CompactionLifeCycleTracker tracker, long readPoint) throws IOException {
+        InternalScanner s, CompactionLifeCycleTracker tracker, CompactionRequest request,
+        long readPoint) throws IOException {
       HStore hs = (HStore) store;
       Long newTtl = ttls.get(store.getTableName());
       Integer newVersions = versions.get(store.getTableName());
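The comment introduced in the TestStripeCompactionPolicy hunk above deserves a concrete illustration. This standalone JDK snippet (not part of the patch) shows why the copy into an ArrayList is needed before comparing:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collection;
    import java.util.Collections;
    import java.util.List;

    // Illustration of why the assertEquals above copies the files first.
    public class UnmodifiableEqualsDemo {
      public static void main(String[] args) {
        List<String> files = new ArrayList<>(Arrays.asList("hfile-1", "hfile-2"));
        Collection<String> view = Collections.unmodifiableCollection(files);
        // UnmodifiableCollection inherits Object.equals, so this compares identity:
        System.out.println(view.equals(files));                  // false
        // Copying into an ArrayList restores element-wise equality:
        System.out.println(new ArrayList<>(view).equals(files)); // true
      }
    }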