HBASE-22871 Move the DirScanPool out and do not use static field (#504)

Signed-off-by: Zheng Hu <openinx@gmail.com>
Signed-off-by: Reid Chan <reidchan@apache.org>

parent 8cbe20c382
commit c295fd7c6a
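At a glance: the change deletes the static ForkJoinPool that CleanerChore managed through initChorePool()/shutDownChorePool() and moves it into a new top-level DirScanPool class that HMaster owns, injects into every cleaner chore, and shuts down with the other service threads. A condensed before/after sketch of the wiring, assembled from the hunks below (names come from the diff; the surrounding master setup is elided):

// Before: hidden process-wide static state.
CleanerChore.initChorePool(conf);
this.logCleaner = new LogCleaner(cleanerInterval, this, conf,
  getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir());
// ... on shutdown ...
CleanerChore.shutDownChorePool();

// After: an explicit, injectable pool instance.
cleanerPool = new DirScanPool(conf);
this.logCleaner = new LogCleaner(cleanerInterval, this, conf,
  getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir(), cleanerPool);
// ... on shutdown ...
cleanerPool.shutdownNow();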
HMaster.java

@@ -109,7 +109,7 @@ import org.apache.hadoop.hbase.master.balancer.BalancerChore;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
 import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-import org.apache.hadoop.hbase.master.cleaner.CleanerChore;
+import org.apache.hadoop.hbase.master.cleaner.DirScanPool;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
 import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
@@ -380,6 +380,7 @@ public class HMaster extends HRegionServer implements MasterServices {

   private HbckChore hbckChore;
   CatalogJanitor catalogJanitorChore;
+  private DirScanPool cleanerPool;
   private LogCleaner logCleaner;
   private HFileCleaner hfileCleaner;
   private ReplicationBarrierCleaner replicationBarrierCleaner;
@@ -1118,6 +1119,7 @@ public class HMaster extends HRegionServer implements MasterServices {
         (System.currentTimeMillis() - masterActiveTime) / 1000.0f));
     this.masterFinishedInitializationTime = System.currentTimeMillis();
     configurationManager.registerObserver(this.balancer);
+    configurationManager.registerObserver(this.cleanerPool);
     configurationManager.registerObserver(this.hfileCleaner);
     configurationManager.registerObserver(this.logCleaner);
     // Set master as 'initialized'.
@@ -1422,22 +1424,20 @@ public class HMaster extends HRegionServer implements MasterServices {
     this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
     startProcedureExecutor();

-    // Initial cleaner chore
-    CleanerChore.initChorePool(conf);
+    // Create cleaner thread pool
+    cleanerPool = new DirScanPool(conf);
     // Start log cleaner thread
     int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 600 * 1000);
-    this.logCleaner =
-      new LogCleaner(cleanerInterval,
-        this, conf, getMasterWalManager().getFileSystem(),
-        getMasterWalManager().getOldLogDir());
+    this.logCleaner = new LogCleaner(cleanerInterval, this, conf,
+      getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir(), cleanerPool);
     getChoreService().scheduleChore(logCleaner);

     // start the hfile archive cleaner thread
     Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
     Map<String, Object> params = new HashMap<>();
     params.put(MASTER, this);
-    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
-      .getFileSystem(), archiveDir, params);
+    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf,
+      getMasterFileSystem().getFileSystem(), archiveDir, cleanerPool, params);
     getChoreService().scheduleChore(hfileCleaner);

     replicationBarrierCleaner = new ReplicationBarrierCleaner(conf, this, getConnection(),
@@ -1475,7 +1475,10 @@ public class HMaster extends HRegionServer implements MasterServices {
       this.mobCompactThread.close();
     }
     super.stopServiceThreads();
-    CleanerChore.shutDownChorePool();
+    if (cleanerPool != null) {
+      cleanerPool.shutdownNow();
+      cleanerPool = null;
+    }

     LOG.debug("Stopping service threads");

CleanerChore.java

@@ -26,11 +26,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ForkJoinPool;
-import java.util.concurrent.ForkJoinTask;
 import java.util.concurrent.RecursiveTask;
 import java.util.concurrent.atomic.AtomicBoolean;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -38,7 +35,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -56,11 +52,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
  * Abstract Cleaner that uses a chain of delegates to clean a directory of files
  * @param <T> Cleaner delegate class that is dynamically loaded from configuration
  */
-@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD",
-  justification="Static pool will be only updated once.")
 @InterfaceAudience.Private
-public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore
-  implements ConfigurationObserver {
+public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore {

   private static final Logger LOG = LoggerFactory.getLogger(CleanerChore.class);
   private static final int AVAIL_PROCESSORS = Runtime.getRuntime().availableProcessors();
@@ -72,84 +65,9 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore
    * while latter will use only 1 thread for chore to scan dir.
    */
   public static final String CHORE_POOL_SIZE = "hbase.cleaner.scan.dir.concurrent.size";
-  private static final String DEFAULT_CHORE_POOL_SIZE = "0.25";
+  static final String DEFAULT_CHORE_POOL_SIZE = "0.25";

-  private static class DirScanPool {
-    int size;
-    ForkJoinPool pool;
-    int cleanerLatch;
-    AtomicBoolean reconfigNotification;
-
-    DirScanPool(Configuration conf) {
-      String poolSize = conf.get(CHORE_POOL_SIZE, DEFAULT_CHORE_POOL_SIZE);
-      size = calculatePoolSize(poolSize);
-      // poolSize may be 0 or 0.0 from a careless configuration,
-      // double check to make sure.
-      size = size == 0 ? calculatePoolSize(DEFAULT_CHORE_POOL_SIZE) : size;
-      pool = new ForkJoinPool(size);
-      LOG.info("Cleaner pool size is {}", size);
-      reconfigNotification = new AtomicBoolean(false);
-      cleanerLatch = 0;
-    }
-
-    /**
-     * Checks if pool can be updated. If so, mark for update later.
-     * @param conf configuration
-     */
-    synchronized void markUpdate(Configuration conf) {
-      int newSize = calculatePoolSize(conf.get(CHORE_POOL_SIZE, DEFAULT_CHORE_POOL_SIZE));
-      if (newSize == size) {
-        LOG.trace("Size from configuration is same as previous={}, no need to update.", newSize);
-        return;
-      }
-      size = newSize;
-      // Chore is working, update it later.
-      reconfigNotification.set(true);
-    }
-
-    /**
-     * Update pool with new size.
-     */
-    synchronized void updatePool(long timeout) {
-      long stopTime = System.currentTimeMillis() + timeout;
-      while (cleanerLatch != 0 && timeout > 0) {
-        try {
-          wait(timeout);
-          timeout = stopTime - System.currentTimeMillis();
-        } catch (InterruptedException ie) {
-          Thread.currentThread().interrupt();
-          break;
-        }
-      }
-      shutDownNow();
-      LOG.info("Update chore's pool size from {} to {}", pool.getParallelism(), size);
-      pool = new ForkJoinPool(size);
-    }
-
-    synchronized void latchCountUp() {
-      cleanerLatch++;
-    }
-
-    synchronized void latchCountDown() {
-      cleanerLatch--;
-      notifyAll();
-    }
-
-    @SuppressWarnings("FutureReturnValueIgnored")
-    synchronized void submit(ForkJoinTask task) {
-      pool.submit(task);
-    }
-
-    synchronized void shutDownNow() {
-      if (pool == null || pool.isShutdown()) {
-        return;
-      }
-      pool.shutdownNow();
-    }
-  }
-
-  // It may be waste resources for each cleaner chore own its pool,
-  // so let's make pool for all cleaner chores.
-  private static volatile DirScanPool POOL;
+  private final DirScanPool pool;

   protected final FileSystem fs;
   private final Path oldFileDir;
@@ -158,22 +76,9 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore
   private final AtomicBoolean enabled = new AtomicBoolean(true);
   protected List<T> cleanersChain;

-  public static void initChorePool(Configuration conf) {
-    if (POOL == null) {
-      POOL = new DirScanPool(conf);
-    }
-  }
-
-  public static void shutDownChorePool() {
-    if (POOL != null) {
-      POOL.shutDownNow();
-      POOL = null;
-    }
-  }
-
   public CleanerChore(String name, final int sleepPeriod, final Stoppable s, Configuration conf,
-    FileSystem fs, Path oldFileDir, String confKey) {
-    this(name, sleepPeriod, s, conf, fs, oldFileDir, confKey, null);
+    FileSystem fs, Path oldFileDir, String confKey, DirScanPool pool) {
+    this(name, sleepPeriod, s, conf, fs, oldFileDir, confKey, pool, null);
   }

   /**
@@ -184,14 +89,15 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore
    * @param fs handle to the FS
    * @param oldFileDir the path to the archived files
    * @param confKey configuration key for the classes to instantiate
+   * @param pool the thread pool used to scan directories
    * @param params members could be used in cleaner
    */
   public CleanerChore(String name, final int sleepPeriod, final Stoppable s, Configuration conf,
-    FileSystem fs, Path oldFileDir, String confKey, Map<String, Object> params) {
+    FileSystem fs, Path oldFileDir, String confKey, DirScanPool pool, Map<String, Object> params) {
     super(name, s, sleepPeriod);

-    Preconditions.checkNotNull(POOL, "Chore's pool isn't initialized, please call"
-      + "CleanerChore.initChorePool(Configuration) before new a cleaner chore.");
+    Preconditions.checkNotNull(pool, "Chore's pool can not be null");
+    this.pool = pool;
     this.fs = fs;
     this.oldFileDir = oldFileDir;
     this.conf = conf;
@@ -255,11 +161,6 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore
     }
   }

-  @Override
-  public void onConfigurationChange(Configuration conf) {
-    POOL.markUpdate(conf);
-  }
-
   /**
    * A utility method to create new instances of LogCleanerDelegate based on the class name of the
    * LogCleanerDelegate.
@@ -287,22 +188,20 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore
   protected void chore() {
     if (getEnabled()) {
       try {
-        POOL.latchCountUp();
+        pool.latchCountUp();
         if (runCleaner()) {
           LOG.trace("Cleaned all WALs under {}", oldFileDir);
         } else {
           LOG.trace("WALs outstanding under {}", oldFileDir);
         }
       } finally {
-        POOL.latchCountDown();
+        pool.latchCountDown();
       }
       // After each cleaner chore, checks if received reconfigure notification while cleaning.
       // First in cleaner turns off notification, to avoid another cleaner updating pool again.
-      if (POOL.reconfigNotification.compareAndSet(true, false)) {
-        // This cleaner is waiting for other cleaners finishing their jobs.
-        // To avoid missing next chore, only wait 0.8 * period, then shutdown.
-        POOL.updatePool((long) (0.8 * getTimeUnit().toMillis(getPeriod())));
-      }
+      // This cleaner is waiting for other cleaners finishing their jobs.
+      // To avoid missing next chore, only wait 0.8 * period, then shutdown.
+      pool.tryUpdatePoolSize((long) (0.8 * getTimeUnit().toMillis(getPeriod())));
     } else {
       LOG.trace("Cleaner chore disabled! Not cleaning.");
     }
@@ -315,7 +214,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore
   public Boolean runCleaner() {
     preRunCleaner();
     CleanerTask task = new CleanerTask(this.oldFileDir, true);
-    POOL.submit(task);
+    pool.execute(task);
     return task.join();
   }

@@ -467,7 +366,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore

   @VisibleForTesting
   int getChorePoolSize() {
-    return POOL.size;
+    return pool.getSize();
   }

   /**
@@ -485,10 +384,13 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore
   }

   /**
-   * Attemps to clean up a directory, its subdirectories, and files.
-   * Return value is true if everything was deleted. false on partial / total failures.
+   * Attemps to clean up a directory, its subdirectories, and files. Return value is true if
+   * everything was deleted. false on partial / total failures.
    */
-  private class CleanerTask extends RecursiveTask<Boolean> {
+  private final class CleanerTask extends RecursiveTask<Boolean> {
+
+    private static final long serialVersionUID = -5444212174088754172L;
+
     private final Path dir;
     private final boolean root;

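For code outside this patch that subclasses CleanerChore, the visible API change is the extra DirScanPool constructor argument, which is now enforced with Preconditions.checkNotNull. A hypothetical subclass sketch (MyCleaner and its config key are illustrative only, not part of the commit):

public class MyCleaner extends CleanerChore<BaseHFileCleanerDelegate> {
  public MyCleaner(int period, Stoppable stopper, Configuration conf, FileSystem fs, Path dir,
      DirScanPool pool) {
    // Passing a null pool now fails fast in the CleanerChore constructor.
    super("MyCleaner", period, stopper, conf, fs, dir, "my.cleaner.plugins", pool);
  }
}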
DirScanPool.java (new file)

@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.cleaner;
+
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.ForkJoinTask;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.conf.ConfigurationObserver;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The thread pool used for scan directories
+ */
+@InterfaceAudience.Private
+public class DirScanPool implements ConfigurationObserver {
+  private static final Logger LOG = LoggerFactory.getLogger(DirScanPool.class);
+  private volatile int size;
+  private ForkJoinPool pool;
+  private int cleanerLatch;
+  private boolean reconfigNotification;
+
+  public DirScanPool(Configuration conf) {
+    String poolSize = conf.get(CleanerChore.CHORE_POOL_SIZE, CleanerChore.DEFAULT_CHORE_POOL_SIZE);
+    size = CleanerChore.calculatePoolSize(poolSize);
+    // poolSize may be 0 or 0.0 from a careless configuration,
+    // double check to make sure.
+    size = size == 0 ? CleanerChore.calculatePoolSize(CleanerChore.DEFAULT_CHORE_POOL_SIZE) : size;
+    pool = new ForkJoinPool(size);
+    LOG.info("Cleaner pool size is {}", size);
+    cleanerLatch = 0;
+  }
+
+  /**
+   * Checks if pool can be updated. If so, mark for update later.
+   * @param conf configuration
+   */
+  @Override
+  public synchronized void onConfigurationChange(Configuration conf) {
+    int newSize = CleanerChore.calculatePoolSize(
+      conf.get(CleanerChore.CHORE_POOL_SIZE, CleanerChore.DEFAULT_CHORE_POOL_SIZE));
+    if (newSize == size) {
+      LOG.trace("Size from configuration is same as previous={}, no need to update.", newSize);
+      return;
+    }
+    size = newSize;
+    // Chore is working, update it later.
+    reconfigNotification = true;
+  }
+
+  synchronized void latchCountUp() {
+    cleanerLatch++;
+  }
+
+  synchronized void latchCountDown() {
+    cleanerLatch--;
+    notifyAll();
+  }
+
+  synchronized void execute(ForkJoinTask<?> task) {
+    pool.execute(task);
+  }
+
+  public synchronized void shutdownNow() {
+    if (pool == null || pool.isShutdown()) {
+      return;
+    }
+    pool.shutdownNow();
+  }
+
+  synchronized void tryUpdatePoolSize(long timeout) {
+    if (!reconfigNotification) {
+      return;
+    }
+    reconfigNotification = false;
+    long stopTime = System.currentTimeMillis() + timeout;
+    while (cleanerLatch != 0 && timeout > 0) {
+      try {
+        wait(timeout);
+        timeout = stopTime - System.currentTimeMillis();
+      } catch (InterruptedException ie) {
+        Thread.currentThread().interrupt();
+        break;
+      }
+    }
+    shutdownNow();
+    LOG.info("Update chore's pool size from {} to {}", pool.getParallelism(), size);
+    pool = new ForkJoinPool(size);
+  }
+
+  public int getSize() {
+    return size;
+  }
+}
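The pool's lifecycle, as the master wiring above and the test changes below exercise it, reduces to construct, reconfigure, and shut down. A minimal usage sketch (conf and newConf are assumed Configuration instances):

DirScanPool pool = new DirScanPool(conf);  // sizes a ForkJoinPool from hbase.cleaner.scan.dir.concurrent.size
pool.onConfigurationChange(newConf);       // only records the new size and sets a flag
// A cleaner chore later calls pool.tryUpdatePoolSize(timeout), which waits for running
// cleaners (tracked by latchCountUp()/latchCountDown()) and then rebuilds the ForkJoinPool.
pool.shutdownNow();                        // no-op if the pool is already shut down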
HFileCleaner.java

@@ -25,31 +25,33 @@ import java.util.Map;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.StealJobQueue;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;

 /**
  * This Chore, every time it runs, will clear the HFiles in the hfile archive
  * folder that are deletable for each HFile cleaner in the chain.
  */
 @InterfaceAudience.Private
-public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate> {
+public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate>
+  implements ConfigurationObserver {

   public static final String MASTER_HFILE_CLEANER_PLUGINS = "hbase.master.hfilecleaner.plugins";

   public HFileCleaner(final int period, final Stoppable stopper, Configuration conf, FileSystem fs,
-    Path directory) {
-    this(period, stopper, conf, fs, directory, null);
+    Path directory, DirScanPool pool) {
+    this(period, stopper, conf, fs, directory, pool, null);
   }

   // Configuration key for large/small throttle point
@@ -110,12 +112,13 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate>
    * @param conf configuration to use
    * @param fs handle to the FS
    * @param directory directory to be cleaned
+   * @param pool the thread pool used to scan directories
    * @param params params could be used in subclass of BaseHFileCleanerDelegate
    */
   public HFileCleaner(final int period, final Stoppable stopper, Configuration conf, FileSystem fs,
-    Path directory, Map<String, Object> params) {
-    super("HFileCleaner", period, stopper, conf, fs,
-      directory, MASTER_HFILE_CLEANER_PLUGINS, params);
+    Path directory, DirScanPool pool, Map<String, Object> params) {
+    super("HFileCleaner", period, stopper, conf, fs, directory, MASTER_HFILE_CLEANER_PLUGINS, pool,
+      params);
     throttlePoint =
       conf.getInt(HFILE_DELETE_THROTTLE_THRESHOLD, DEFAULT_HFILE_DELETE_THROTTLE_THRESHOLD);
     largeQueueInitSize =
@@ -405,8 +408,6 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate>

   @Override
   public void onConfigurationChange(Configuration conf) {
-    super.onConfigurationChange(conf);
-
     if (!checkAndUpdateConfigurations(conf)) {
       LOG.debug("Update configuration triggered but nothing changed for this cleaner");
       return;
LogCleaner.java

@@ -26,12 +26,12 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -47,7 +47,8 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
  * @see BaseLogCleanerDelegate
  */
 @InterfaceAudience.Private
-public class LogCleaner extends CleanerChore<BaseLogCleanerDelegate> {
+public class LogCleaner extends CleanerChore<BaseLogCleanerDelegate>
+  implements ConfigurationObserver {
   private static final Logger LOG = LoggerFactory.getLogger(LogCleaner.class);

   public static final String OLD_WALS_CLEANER_THREAD_SIZE = "hbase.oldwals.cleaner.thread.size";
@@ -68,15 +69,17 @@ public class LogCleaner extends CleanerChore<BaseLogCleanerDelegate>
    * @param conf configuration to use
    * @param fs handle to the FS
    * @param oldLogDir the path to the archived logs
+   * @param pool the thread pool used to scan directories
    */
   public LogCleaner(final int period, final Stoppable stopper, Configuration conf, FileSystem fs,
-    Path oldLogDir) {
-    super("LogsCleaner", period, stopper, conf, fs, oldLogDir, HBASE_MASTER_LOGCLEANER_PLUGINS);
+    Path oldLogDir, DirScanPool pool) {
+    super("LogsCleaner", period, stopper, conf, fs, oldLogDir, HBASE_MASTER_LOGCLEANER_PLUGINS,
+      pool);
     this.pendingDelete = new LinkedBlockingQueue<>();
     int size = conf.getInt(OLD_WALS_CLEANER_THREAD_SIZE, DEFAULT_OLD_WALS_CLEANER_THREAD_SIZE);
     this.oldWALsCleaner = createOldWalsCleaner(size);
     this.cleanerThreadTimeoutMsec = conf.getLong(OLD_WALS_CLEANER_THREAD_TIMEOUT_MSEC,
       DEFAULT_OLD_WALS_CLEANER_THREAD_TIMEOUT_MSEC);
   }

   @Override
@@ -87,8 +90,6 @@ public class LogCleaner extends CleanerChore<BaseLogCleanerDelegate>

   @Override
   public void onConfigurationChange(Configuration conf) {
-    super.onConfigurationChange(conf);
-
     int newSize = conf.getInt(OLD_WALS_CLEANER_THREAD_SIZE, DEFAULT_OLD_WALS_CLEANER_THREAD_SIZE);
     if (newSize == oldWALsCleaner.size()) {
       LOG.debug("Size from configuration is the same as previous which "
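Design note: because CleanerChore itself no longer implements ConfigurationObserver, HFileCleaner and LogCleaner declare the interface directly and drop their super.onConfigurationChange(conf) calls. Pool resizing is not lost, since HMaster registers the pool as an observer in its own right (repeated from the HMaster hunk above):

configurationManager.registerObserver(this.cleanerPool);
configurationManager.registerObserver(this.hfileCleaner);
configurationManager.registerObserver(this.logCleaner);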
TestHFileArchiving.java

@@ -27,7 +27,6 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -41,6 +40,7 @@ import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.cleaner.DirScanPool;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -80,6 +80,7 @@ public class TestHFileArchiving {
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
   private static final byte[] TEST_FAM = Bytes.toBytes("fam");

+  private static DirScanPool POOL;
   @Rule
   public TestName name = new TestName();

@@ -93,6 +94,8 @@ public class TestHFileArchiving {

     // We don't want the cleaner to remove files. The tests do that.
     UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().cancel(true);
+
+    POOL = new DirScanPool(UTIL.getConfiguration());
   }

   private static void setupConf(Configuration conf) {
@@ -111,20 +114,13 @@ public class TestHFileArchiving {
   @After
   public void tearDown() throws Exception {
     // cleanup the archive directory
-    try {
-      clearArchiveDirectory();
-    } catch (IOException e) {
-      Assert.fail("Failure to delete archive directory:" + e.getMessage());
-    }
+    clearArchiveDirectory();
   }

   @AfterClass
   public static void cleanupTest() throws Exception {
-    try {
-      UTIL.shutdownMiniCluster();
-    } catch (Exception e) {
-      // NOOP;
-    }
+    UTIL.shutdownMiniCluster();
+    POOL.shutdownNow();
   }

   @Test
@@ -474,7 +470,7 @@ public class TestHFileArchiving {
     Stoppable stoppable = new StoppableImplementation();

     // The cleaner should be looping without long pauses to reproduce the race condition.
-    HFileCleaner cleaner = new HFileCleaner(1, stoppable, conf, fs, archiveDir);
+    HFileCleaner cleaner = new HFileCleaner(1, stoppable, conf, fs, archiveDir, POOL);
     try {
       choreService.scheduleChore(cleaner);

TestZooKeeperTableArchiveClient.java

@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
-import org.apache.hadoop.hbase.master.cleaner.CleanerChore;
+import org.apache.hadoop.hbase.master.cleaner.DirScanPool;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -88,6 +88,7 @@ public class TestZooKeeperTableArchiveClient {
   private final List<Path> toCleanup = new ArrayList<>();
   private static ClusterConnection CONNECTION;
   private static RegionServerServices rss;
+  private static DirScanPool POOL;

   /**
    * Setup the config for the cluster
@@ -103,6 +104,7 @@ public class TestZooKeeperTableArchiveClient {
     String archivingZNode = ZKTableArchiveClient.getArchiveZNode(UTIL.getConfiguration(), watcher);
     ZKUtil.createWithParents(watcher, archivingZNode);
     rss = mock(RegionServerServices.class);
+    POOL = new DirScanPool(UTIL.getConfiguration());
   }

   private static void setupConf(Configuration conf) {
@@ -130,12 +132,9 @@ public class TestZooKeeperTableArchiveClient {

   @AfterClass
   public static void cleanupTest() throws Exception {
-    try {
-      CONNECTION.close();
-      UTIL.shutdownMiniZKCluster();
-    } catch (Exception e) {
-      LOG.warn("problem shutting down cluster", e);
-    }
+    CONNECTION.close();
+    UTIL.shutdownMiniZKCluster();
+    POOL.shutdownNow();
   }

   /**
@@ -176,7 +175,6 @@ public class TestZooKeeperTableArchiveClient {
     Configuration conf = UTIL.getConfiguration();
     // setup the delegate
     Stoppable stop = new StoppableImplementation();
-    CleanerChore.initChorePool(conf);
     HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
     List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
     final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
@@ -231,7 +229,6 @@ public class TestZooKeeperTableArchiveClient {
     // setup the delegate
     Stoppable stop = new StoppableImplementation();
     final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
-    CleanerChore.initChorePool(conf);
     HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
     List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
     final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
@@ -330,7 +327,7 @@ public class TestZooKeeperTableArchiveClient {
       Stoppable stop) {
     conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
       LongTermArchivingHFileCleaner.class.getCanonicalName());
-    return new HFileCleaner(1000, stop, conf, fs, archiveDir);
+    return new HFileCleaner(1000, stop, conf, fs, archiveDir, POOL);
   }

   /**
TestCleanerChore.java

@@ -53,21 +53,22 @@ public class TestCleanerChore {

   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
     HBaseClassTestRule.forClass(TestCleanerChore.class);

   private static final Logger LOG = LoggerFactory.getLogger(TestCleanerChore.class);
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  private static DirScanPool POOL;

   @BeforeClass
   public static void setup() {
-    CleanerChore.initChorePool(UTIL.getConfiguration());
+    POOL = new DirScanPool(UTIL.getConfiguration());
   }

   @AfterClass
   public static void cleanup() throws Exception {
     // delete and recreate the test directory, ensuring a clean test dir between tests
     UTIL.cleanupTestDir();
-    CleanerChore.shutDownChorePool();
+    POOL.shutdownNow();
   }

   @Test
@@ -79,7 +80,8 @@ public class TestCleanerChore {
     String confKey = "hbase.test.cleaner.delegates";
     conf.set(confKey, NeverDelete.class.getName());

-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
+    AllValidPaths chore =
+      new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL);

     // create the directory layout in the directory to clean
     Path parent = new Path(testDir, "parent");
@@ -121,7 +123,8 @@ public class TestCleanerChore {
     }
     };

-    AllValidPaths chore = new AllValidPaths("test-retry-ioe", stop, conf, filtered, testDir, confKey);
+    AllValidPaths chore =
+      new AllValidPaths("test-retry-ioe", stop, conf, filtered, testDir, confKey, POOL);

     // trouble talking to the filesystem
     Boolean result = chore.runCleaner();
@@ -152,7 +155,8 @@ public class TestCleanerChore {
     String confKey = "hbase.test.cleaner.delegates";
     conf.set(confKey, AlwaysDelete.class.getName());

-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
+    AllValidPaths chore =
+      new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL);

     // create the directory layout in the directory to clean
     Path parent = new Path(testDir, "parent");
@@ -193,7 +197,8 @@ public class TestCleanerChore {
     String confKey = "hbase.test.cleaner.delegates";
     conf.set(confKey, AlwaysDelete.class.getName());

-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
+    AllValidPaths chore =
+      new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL);
     // spy on the delegate to ensure that we don't check for directories
     AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
     AlwaysDelete spy = Mockito.spy(delegate);
@@ -224,7 +229,8 @@ public class TestCleanerChore {
     String confKey = "hbase.test.cleaner.delegates";
     conf.set(confKey, AlwaysDelete.class.getName());

-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
+    AllValidPaths chore =
+      new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL);

     // also create a file in the top level directory
     Path topFile = new Path(testDir, "topFile");
@@ -255,7 +261,8 @@ public class TestCleanerChore {
     String confKey = "hbase.test.cleaner.delegates";
     conf.set(confKey, AlwaysDelete.class.getName());

-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
+    AllValidPaths chore =
+      new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL);
     // spy on the delegate to ensure that we don't check for directories
     AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
     AlwaysDelete spy = Mockito.spy(delegate);
@@ -314,7 +321,8 @@ public class TestCleanerChore {
     String confKey = "hbase.test.cleaner.delegates";
     conf.set(confKey, AlwaysDelete.class.getName());

-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
+    AllValidPaths chore =
+      new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL);
     // spy on the delegate to ensure that we don't check for directories
     AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
     AlwaysDelete spy = Mockito.spy(delegate);
@@ -358,7 +366,8 @@ public class TestCleanerChore {
     String confKey = "hbase.test.cleaner.delegates";
     conf.set(confKey, AlwaysDelete.class.getName());

-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
+    AllValidPaths chore =
+      new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL);

     // Enable cleaner
     chore.setEnabled(true);
@@ -391,7 +400,8 @@ public class TestCleanerChore {
     String confKey = "hbase.test.cleaner.delegates";
     conf.set(confKey, AlwaysDelete.class.getName());

-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
+    AllValidPaths chore =
+      new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL);

     // Disable cleaner
     chore.setEnabled(false);
@@ -423,7 +433,7 @@ public class TestCleanerChore {
     }

     // have at least 2 available processors/cores
     int initPoolSize = availableProcessorNum / 2;
     int changedPoolSize = availableProcessorNum;

     Stoppable stop = new StoppableImplementation();
@@ -433,7 +443,8 @@ public class TestCleanerChore {
     String confKey = "hbase.test.cleaner.delegates";
     conf.set(confKey, AlwaysDelete.class.getName());
     conf.set(CleanerChore.CHORE_POOL_SIZE, String.valueOf(initPoolSize));
-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
+    AllValidPaths chore =
+      new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL);
     chore.setEnabled(true);
     // Create subdirs under testDir
     int dirNums = 6;
@@ -452,7 +463,7 @@ public class TestCleanerChore {
     t.start();
     // Change size of chore's pool
     conf.set(CleanerChore.CHORE_POOL_SIZE, String.valueOf(changedPoolSize));
-    chore.onConfigurationChange(conf);
+    POOL.onConfigurationChange(conf);
     assertEquals(changedPoolSize, chore.getChorePoolSize());
     // Stop chore
     t.join();
@@ -460,21 +471,17 @@ public class TestCleanerChore {

   @Test
   public void testMinimumNumberOfThreads() throws Exception {
-    Stoppable stop = new StoppableImplementation();
     Configuration conf = UTIL.getConfiguration();
-    Path testDir = UTIL.getDataTestDir();
-    FileSystem fs = UTIL.getTestFileSystem();
     String confKey = "hbase.test.cleaner.delegates";
     conf.set(confKey, AlwaysDelete.class.getName());
     conf.set(CleanerChore.CHORE_POOL_SIZE, "2");
-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
     int numProcs = Runtime.getRuntime().availableProcessors();
     // Sanity
-    assertEquals(numProcs, chore.calculatePoolSize(Integer.toString(numProcs)));
+    assertEquals(numProcs, CleanerChore.calculatePoolSize(Integer.toString(numProcs)));
     // The implementation does not allow us to set more threads than we have processors
-    assertEquals(numProcs, chore.calculatePoolSize(Integer.toString(numProcs + 2)));
+    assertEquals(numProcs, CleanerChore.calculatePoolSize(Integer.toString(numProcs + 2)));
     // Force us into the branch that is multiplying 0.0 against the number of processors
-    assertEquals(1, chore.calculatePoolSize("0.0"));
+    assertEquals(1, CleanerChore.calculatePoolSize("0.0"));
   }

   private void createFiles(FileSystem fs, Path parentDir, int numOfFiles) throws IOException {
@@ -494,8 +501,8 @@ public class TestCleanerChore {
   private static class AllValidPaths extends CleanerChore<BaseHFileCleanerDelegate> {

     public AllValidPaths(String name, Stoppable s, Configuration conf, FileSystem fs,
-      Path oldFileDir, String confkey) {
-      super(name, Integer.MAX_VALUE, s, conf, fs, oldFileDir, confkey);
+      Path oldFileDir, String confkey, DirScanPool pool) {
+      super(name, Integer.MAX_VALUE, s, conf, fs, oldFileDir, confkey, pool);
     }

     // all paths are valid
@ -63,16 +63,19 @@ public class TestHFileCleaner {
|
||||||
|
|
||||||
private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
|
private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
|
||||||
|
|
||||||
|
private static DirScanPool POOL;
|
||||||
|
|
||||||
@BeforeClass
|
@BeforeClass
|
||||||
public static void setupCluster() throws Exception {
|
public static void setupCluster() throws Exception {
|
||||||
// have to use a minidfs cluster because the localfs doesn't modify file times correctly
|
// have to use a minidfs cluster because the localfs doesn't modify file times correctly
|
||||||
UTIL.startMiniDFSCluster(1);
|
UTIL.startMiniDFSCluster(1);
|
||||||
CleanerChore.initChorePool(UTIL.getConfiguration());
|
POOL = new DirScanPool(UTIL.getConfiguration());
|
||||||
}
|
}
|
||||||
|
|
||||||
@AfterClass
|
@AfterClass
|
||||||
public static void shutdownCluster() throws IOException {
|
public static void shutdownCluster() throws IOException {
|
||||||
UTIL.shutdownMiniDFSCluster();
|
UTIL.shutdownMiniDFSCluster();
|
||||||
|
POOL.shutdownNow();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@@ -114,9 +117,10 @@ public class TestHFileCleaner
       "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
     conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
     Server server = new DummyServer();
-    Path archivedHfileDir = new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
+    Path archivedHfileDir =
+        new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
     FileSystem fs = FileSystem.get(conf);
-    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
+    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir, POOL);

     // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
     final long createTime = System.currentTimeMillis();
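The same signature change repeats through the rest of this file: every `HFileCleaner` now takes the class-level `POOL` as its last argument. A consolidated sketch of the setup under the new signature (assuming this class's mini-DFS fixtures and `DummyServer`; the first constructor argument is the chore period in milliseconds):

```java
// Sketch: constructing and running an HFileCleaner after this change.
Configuration conf = UTIL.getConfiguration();
FileSystem fs = FileSystem.get(conf);
Path archivedHfileDir =
    new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
Server server = new DummyServer();
HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir, POOL);
cleaner.chore(); // one pass over the archive directory
```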
@@ -179,11 +183,12 @@ public class TestHFileCleaner
     // no cleaner policies = delete all files
     conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
     Server server = new DummyServer();
-    Path archivedHfileDir = new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
+    Path archivedHfileDir =
+        new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);

     // setup the cleaner
     FileSystem fs = UTIL.getDFSCluster().getFileSystem();
-    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
+    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir, POOL);

     // make all the directories for archiving files
     Path table = new Path(archivedHfileDir, "table");
@@ -291,7 +296,7 @@ public class TestHFileCleaner

     // setup the cleaner
     FileSystem fs = UTIL.getDFSCluster().getFileSystem();
-    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
+    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir, POOL);
     // clean up archive directory
     fs.delete(archivedHfileDir, true);
     fs.mkdirs(archivedHfileDir);
@@ -320,7 +325,7 @@ public class TestHFileCleaner

     // setup the cleaner
     FileSystem fs = UTIL.getDFSCluster().getFileSystem();
-    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
+    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir, POOL);
     // clean up archive directory
     fs.delete(archivedHfileDir, true);
     fs.mkdirs(archivedHfileDir);
@@ -361,7 +366,7 @@ public class TestHFileCleaner

     // setup the cleaner
     FileSystem fs = UTIL.getDFSCluster().getFileSystem();
-    final HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
+    final HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir, POOL);
     Assert.assertEquals(ORIGINAL_THROTTLE_POINT, cleaner.getThrottlePoint());
     Assert.assertEquals(ORIGINAL_QUEUE_INIT_SIZE, cleaner.getLargeQueueInitSize());
     Assert.assertEquals(ORIGINAL_QUEUE_INIT_SIZE, cleaner.getSmallQueueInitSize());
@@ -30,18 +30,21 @@ import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
@@ -49,21 +52,32 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;

 /**
- * Test the HFileLink Cleaner.
- * HFiles with links cannot be deleted until a link is present.
+ * Test the HFileLink Cleaner. HFiles with links cannot be deleted until a link is present.
  */
-@Category({MasterTests.class, MediumTests.class})
+@Category({ MasterTests.class, MediumTests.class })
 public class TestHFileLinkCleaner {

   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestHFileLinkCleaner.class);

   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

+  private static DirScanPool POOL;
+
   @Rule
   public TestName name = new TestName();

+  @BeforeClass
+  public static void setUp() {
+    POOL = new DirScanPool(TEST_UTIL.getConfiguration());
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    POOL.shutdownNow();
+  }
+
   @Test
   public void testHFileLinkCleaning() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
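TestHFileLinkCleaner previously initialized the static chore pool inline in the test body (removed in the -107 hunk below), so it gains the class-level lifecycle here. A slightly defensive variant for reference (the null guard is illustrative, not part of this patch):

```java
@AfterClass
public static void tearDown() {
  if (POOL != null) { // illustrative guard, not in the patch
    POOL.shutdownNow();
  }
}
```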
@@ -77,14 +91,12 @@ public class TestHFileLinkCleaner
     final String hfileName = "1234567890";
     final String familyName = "cf";

-    HRegionInfo hri = new HRegionInfo(tableName);
-    HRegionInfo hriLink = new HRegionInfo(tableLinkName);
+    RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build();
+    RegionInfo hriLink = RegionInfoBuilder.newBuilder(tableLinkName).build();

     Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
     Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
       tableName, hri.getEncodedName(), familyName);
-    Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
-      tableLinkName, hriLink.getEncodedName(), familyName);

     // Create hfile /hbase/table-link/region/cf/getEncodedName.HFILE(conf);
     Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
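`HRegionInfo` is deprecated in favor of the `RegionInfo`/`RegionInfoBuilder` client API, hence the swap above (the unused `archiveLinkStoreDir` goes away at the same time). For reference, the builder can also carry region boundaries; a sketch of the fuller form (the boundary setters are illustrative and unused in this test):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class RegionInfoExamples {
  static RegionInfo minimal(TableName tableName) {
    // Minimal form, as used in this diff:
    return RegionInfoBuilder.newBuilder(tableName).build();
  }

  static RegionInfo bounded(TableName tableName) {
    // Illustrative fuller form with explicit boundaries (unused in this test):
    return RegionInfoBuilder.newBuilder(tableName)
        .setStartKey(Bytes.toBytes("a"))
        .setEndKey(Bytes.toBytes("z"))
        .build();
  }
}
```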
@@ -93,8 +105,8 @@ public class TestHFileLinkCleaner
     fs.createNewFile(hfilePath);

     // Create link to hfile
-    Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
-      hriLink.getEncodedName(), familyName);
+    Path familyLinkPath =
+        getFamilyDirPath(rootDir, tableLinkName, hriLink.getEncodedName(), familyName);
     fs.mkdirs(familyLinkPath);
     HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
     Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
@@ -107,8 +119,7 @@ public class TestHFileLinkCleaner
     final long ttl = 1000;
     conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
     Server server = new DummyServer();
-    CleanerChore.initChorePool(conf);
-    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);
+    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir, POOL);

     // Link backref cannot be removed
     cleaner.chore();
@@ -30,7 +30,6 @@ import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
 import java.util.concurrent.ThreadLocalRandom;
-
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -86,17 +85,20 @@ public class TestLogsCleaner

   private static Configuration conf;

+  private static DirScanPool POOL;
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL.startMiniZKCluster();
     TEST_UTIL.startMiniDFSCluster(1);
-    CleanerChore.initChorePool(TEST_UTIL.getConfiguration());
+    POOL = new DirScanPool(TEST_UTIL.getConfiguration());
   }

   @AfterClass
   public static void tearDownAfterClass() throws Exception {
     TEST_UTIL.shutdownMiniZKCluster();
     TEST_UTIL.shutdownMiniDFSCluster();
+    POOL.shutdownNow();
   }

   @Before
@@ -198,7 +200,7 @@ public class TestLogsCleaner
     // 10 procedure WALs
     assertEquals(10, fs.listStatus(OLD_PROCEDURE_WALS_DIR).length);

-    LogCleaner cleaner = new LogCleaner(1000, server, conf, fs, OLD_WALS_DIR);
+    LogCleaner cleaner = new LogCleaner(1000, server, conf, fs, OLD_WALS_DIR, POOL);
    cleaner.chore();

     // In oldWALs we end up with the current WAL, a newer WAL, the 3 old WALs which
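The `LogCleaner` constructor change mirrors the `HFileCleaner` one. Its old-WALs deletion threads remain separately configurable; a hedged sketch of tuning them in a test like this one (the config key string is assumed from `LogCleaner`'s constants — verify against `LogCleaner.OLD_WALS_CLEANER_THREAD_SIZE` before relying on it):

```java
// Hedged sketch: bump the old-WALs deletion threads before building the cleaner.
conf.setInt("hbase.oldwals.cleaner.thread.size", 4); // key assumed from LogCleaner
LogCleaner cleaner = new LogCleaner(1000, server, conf, fs, OLD_WALS_DIR, POOL);
assertEquals(4, cleaner.getSizeOfCleaners());
```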
@@ -292,7 +294,7 @@ public class TestLogsCleaner
     Server server = new DummyServer();

     FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
-    LogCleaner cleaner = new LogCleaner(3000, server, conf, fs, OLD_WALS_DIR);
+    LogCleaner cleaner = new LogCleaner(3000, server, conf, fs, OLD_WALS_DIR, POOL);
     assertEquals(LogCleaner.DEFAULT_OLD_WALS_CLEANER_THREAD_SIZE, cleaner.getSizeOfCleaners());
     assertEquals(LogCleaner.DEFAULT_OLD_WALS_CLEANER_THREAD_TIMEOUT_MSEC,
       cleaner.getCleanerThreadTimeoutMsec());