SOLR-8995: Use Lambdas for Thread/Runnable

Noble Paul 2016-07-11 19:29:15 +05:30
parent 42e1caf2bf
commit 74de196565
14 changed files with 119 additions and 174 deletions
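Every hunk below applies the same Java 8 rewrite: an anonymous Thread subclass that overrides run() becomes a Thread handed a Runnable lambda or method reference, and thread names move into the Thread(Runnable, String) constructor. A minimal, self-contained sketch of the before and after shapes (class and method names here are illustrative, not from the Solr sources):

    public class LambdaThreadSketch {
      public static void main(String[] args) {
        // Before: an anonymous Thread subclass overriding run().
        new Thread() {
          @Override
          public void run() {
            System.out.println("anonymous subclass");
          }
        }.start();

        // After: pass the body as a Runnable lambda to Thread's constructor.
        new Thread(() -> System.out.println("lambda")).start();

        // A method reference works when the body is a single call.
        new Thread(LambdaThreadSketch::work).start();

        // Thread(Runnable target, String name) replaces subclassing Thread(String name).
        new Thread(LambdaThreadSketch::work, "worker-thread").start();
      }

      static void work() {
        System.out.println("method reference");
      }
    }

The lambda form drops the @Override/run() boilerplate without changing when or where the body executes.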


@@ -455,12 +455,7 @@ public class DataImporter {
   }
   public void runAsync(final RequestInfo reqParams, final DIHWriter sw) {
-    new Thread() {
-      @Override
-      public void run() {
-        runCmd(reqParams, sw);
-      }
-    }.start();
+    new Thread(() -> runCmd(reqParams, sw)).start();
   }
   void runCmd(RequestInfo reqParams, DIHWriter sw) {


@@ -261,13 +261,8 @@ public class Overseer implements Closeable {
       }
     } finally {
       log.info("Overseer Loop exiting : {}", LeaderElector.getNodeName(myId));
-      new Thread("OverseerExitThread"){
       //do this in a separate thread because any wait is interrupted in this main thread
-        @Override
-        public void run() {
-          checkIfIamStillLeader();
-        }
-      }.start();
+      new Thread(this::checkIfIamStillLeader, "OverseerExitThread").start();
     }
   }
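This hunk is the first use of a bound method reference: this::checkIfIamStillLeader captures the enclosing instance, and the thread name moves from the old Thread(String) super-constructor into the second argument of Thread(Runnable, String). A small sketch, with the method body a stand-in:

    public class MethodRefSketch {
      void checkIfIamStillLeader() { /* illustrative stand-in for the Overseer method */ }

      void exitHook() {
        // this::checkIfIamStillLeader is equivalent to () -> this.checkIfIamStillLeader();
        // it captures the enclosing instance. The runnable comes first, the name second.
        new Thread(this::checkIfIamStillLeader, "OverseerExitThread").start();
      }

      public static void main(String[] args) {
        new MethodRefSketch().exitHook();
      }
    }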


@@ -578,17 +578,15 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler
     }
     //if there are too many nodes this command may time out. And most likely dedicated
     // overseers are created when there are too many nodes . So , do this operation in a separate thread
-    new Thread(){
-      @Override
-      public void run() {
+    new Thread(() -> {
       try {
         overseerPrioritizer.prioritizeOverseerNodes(myId);
       } catch (Exception e) {
         log.error("Error in prioritizing Overseer", e);
       }
-      }
-    }.start();
+    }).start();
   }
   @SuppressWarnings("unchecked")


@@ -2435,10 +2435,8 @@ public final class ZkController {
     final Set<Runnable> listeners = confDirectoryListeners.get(zkDir);
     if (listeners != null && !listeners.isEmpty()) {
       final Set<Runnable> listenersCopy = new HashSet<>(listeners);
-      new Thread() {
       // run these in a separate thread because this can be long running
-        @Override
-        public void run() {
+      new Thread(() -> {
         log.info("Running listeners for {}", zkDir);
         for (final Runnable listener : listenersCopy) {
           try {
@@ -2447,8 +2445,8 @@ public final class ZkController {
             log.warn("listener throws error", e);
           }
         }
-        }
-      }.start();
+      }).start();
     }
   }
   return true;


@@ -533,8 +533,8 @@ public class CoreContainer {
     } finally {
       if (asyncSolrCoreLoad && futures != null) {
-        Thread shutdownThread = new Thread() {
-          public void run() {
+        coreContainerWorkExecutor.submit((Runnable) () -> {
           try {
             for (Future<SolrCore> future : futures) {
               try {
@@ -548,9 +548,7 @@ public class CoreContainer {
           } finally {
             ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
           }
-          }
-        };
-        coreContainerWorkExecutor.submit(shutdownThread);
+        });
       } else {
         ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
       }
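Here a hand-rolled shutdown Thread becomes a task submitted straight to the existing executor. The (Runnable) cast selects the submit(Runnable) overload of ExecutorService explicitly; for this void block lambda only that overload applies, so the cast chiefly documents intent, but for value-returning lambdas the overload genuinely changes what the returned Future carries. A small sketch of the distinction (the pool and tasks are illustrative):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class SubmitOverloadSketch {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();

        // submit(Callable<T>): the lambda returns a value, the Future carries it.
        Future<Integer> answer = pool.submit(() -> 41 + 1);

        // submit(Runnable): the cast forces the void-returning overload, so the
        // Future's result is null. This mirrors the (Runnable) cast in CoreContainer.
        Future<?> done = pool.submit((Runnable) () -> System.out.println("shutdown work"));

        System.out.println(answer.get()); // 42
        done.get();                       // null; signals completion only
        pool.shutdown();
      }
    }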


@@ -2595,18 +2595,14 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
   final String myIndexDir = getIndexDir();
   final String coreName = getName();
   if (myDirFactory != null && myDataDir != null && myIndexDir != null) {
-    Thread cleanupThread = new Thread() {
-      @Override
-      public void run() {
+    Thread cleanupThread = new Thread(() -> {
       log.info("Looking for old index directories to cleanup for core {} in {}", coreName, myDataDir);
       try {
         myDirFactory.cleanupOldIndexDirectories(myDataDir, myIndexDir);
       } catch (Exception exc) {
         log.error("Failed to cleanup old index directories for core "+coreName, exc);
       }
-      }
-    };
-    cleanupThread.setName("OldIndexDirectoryCleanupThreadForCore-"+coreName);
+    }, "OldIndexDirectoryCleanupThreadForCore-"+coreName);
     cleanupThread.setDaemon(true);
     cleanupThread.start();
   }
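The cleanup thread keeps its name through the two-argument constructor and stays a daemon, so an in-flight directory sweep never blocks JVM exit. A tiny runnable sketch of that daemon behaviour (names and the sleep are illustrative):

    public class DaemonSketch {
      public static void main(String[] args) {
        Thread cleanup = new Thread(() -> {
          try {
            Thread.sleep(60_000); // stand-in for a long-running cleanup
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        }, "cleanup-sketch");
        cleanup.setDaemon(true); // the JVM may exit while this thread still runs
        cleanup.start();
        // main returns immediately; a non-daemon cleanup thread would keep the JVM alive
      }
    }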


@@ -174,9 +174,7 @@ public class ZkContainer {
   }
   public void registerInZk(final SolrCore core, boolean background) {
-    Thread thread = new Thread() {
-      @Override
-      public void run() {
+    Runnable r = () -> {
       MDCLoggingContext.setCore(core);
       try {
         try {
@@ -199,17 +197,15 @@ public class ZkContainer {
       } finally {
         MDCLoggingContext.clear();
       }
-      }
     };
     if (zkController != null) {
       if (background) {
-        coreZkRegister.execute(thread);
+        coreZkRegister.execute(r);
       } else {
         MDCLoggingContext.setCore(core);
         try {
-          thread.run();
+          r.run();
         } finally {
           MDCLoggingContext.clear();
         }
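ZkContainer is the one place the commit extracts a plain Runnable instead of constructing a Thread: the same body is either handed to the coreZkRegister executor (background) or run synchronously on the calling thread via r.run(). Calling run() directly is an ordinary method call and never spawns a thread. A sketch of that dual-path shape (the executor and work are illustrative):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class DualPathSketch {
      private static final ExecutorService EXEC = Executors.newCachedThreadPool();

      static void register(boolean background) {
        Runnable r = () -> System.out.println("registering on " + Thread.currentThread().getName());
        if (background) {
          EXEC.execute(r);  // runs on a pool thread
        } else {
          r.run();          // plain call: runs right here, on the caller's thread
        }
      }

      public static void main(String[] args) {
        register(false);
        register(true);
        EXEC.shutdown();
      }
    }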


@@ -768,9 +768,7 @@ public class IndexFetcher {
   private void reloadCore() {
     final CountDownLatch latch = new CountDownLatch(1);
-    new Thread() {
-      @Override
-      public void run() {
+    new Thread(() -> {
       try {
         solrCore.getCoreDescriptor().getCoreContainer().reload(solrCore.getName());
       } catch (Exception e) {
@@ -778,8 +776,7 @@ public class IndexFetcher {
       } finally {
         latch.countDown();
       }
-      }
-    }.start();
+    }).start();
     try {
       latch.await();
     } catch (InterruptedException e) {
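The IndexFetcher hunk keeps the original synchronization: the reload runs on a fresh thread, and the caller blocks on a one-shot CountDownLatch until it finishes. A self-contained sketch of that handshake (the work is a stand-in):

    import java.util.concurrent.CountDownLatch;

    public class LatchSketch {
      public static void main(String[] args) {
        final CountDownLatch latch = new CountDownLatch(1);
        new Thread(() -> {
          try {
            System.out.println("reloading (stand-in work)");
          } finally {
            latch.countDown(); // release the waiter even if the work throws
          }
        }).start();
        try {
          latch.await(); // block until the worker counts down
          System.out.println("reload finished");
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }
    }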


@@ -275,12 +275,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
       return;
     }
     final SolrParams paramsCopy = new ModifiableSolrParams(solrParams);
-    Thread fetchThread = new Thread("explicit-fetchindex-cmd") {
-      @Override
-      public void run() {
-        doFetch(paramsCopy, false);
-      }
-    };
+    Thread fetchThread = new Thread(() -> doFetch(paramsCopy, false), "explicit-fetchindex-cmd") ;
     fetchThread.setDaemon(false);
     fetchThread.start();
     if (solrParams.getBool(WAIT, false)) {
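The explicit fetchThread.setDaemon(false) is not redundant: a new Thread inherits the daemon flag of the thread that creates it, so the call pins the fetch to non-daemon status regardless of which thread triggered the command. A short demonstration of the inheritance rule:

    public class DaemonInheritanceSketch {
      public static void main(String[] args) throws Exception {
        Thread parent = new Thread(() -> {
          Thread child = new Thread(() -> {});
          // A thread inherits its creator's daemon flag, hence the explicit
          // setDaemon(false) on fetchThread above.
          System.out.println("child daemon? " + child.isDaemon()); // prints true here
        });
        parent.setDaemon(true);
        parent.start();
        parent.join();
      }
    }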


@@ -128,12 +128,7 @@ public class SnapShooter {
   }
   protected void deleteSnapAsync(final ReplicationHandler replicationHandler) {
-    new Thread() {
-      @Override
-      public void run() {
-        deleteNamedSnapshot(replicationHandler);
-      }
-    }.start();
+    new Thread(() -> deleteNamedSnapshot(replicationHandler)).start();
   }
   public void validateCreateSnapshot() throws IOException {
@@ -170,9 +165,8 @@ public class SnapShooter {
   public void createSnapAsync(final IndexCommit indexCommit, final int numberToKeep, Consumer<NamedList> result) {
     solrCore.getDeletionPolicy().saveCommitPoint(indexCommit.getGeneration());
-    new Thread() { //TODO should use Solr's ExecutorUtil
-      @Override
-      public void run() {
+    //TODO should use Solr's ExecutorUtil
+    new Thread(() -> {
       try {
         result.accept(createSnapshot(indexCommit));
       } catch (Exception e) {
@@ -190,8 +184,8 @@ public class SnapShooter {
         LOG.warn("Unable to delete old snapshots ", e);
       }
     }
-      }
-    }.start();
+    }).start();
   }
   // note: remember to reserve the indexCommit first so it won't get deleted concurrently
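The TODO carried through this hunk points at replacing the raw Thread with a pooled executor. A hypothetical sketch of that follow-up using only java.util.concurrent; the pool field, its sizing, and the method shape are assumptions, not code from this commit or from Solr's ExecutorUtil:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class SnapshotExecutorSketch {
      // Hypothetical shared pool standing in for "Solr's ExecutorUtil" from the TODO.
      private final ExecutorService snapshotPool = Executors.newSingleThreadExecutor();

      void createSnapAsync(Runnable snapshotWork) {
        // Submitting to a pool bounds thread creation and centralizes shutdown,
        // which is what the TODO asks for over new Thread(...).start().
        snapshotPool.execute(snapshotWork);
      }

      void close() {
        snapshotPool.shutdown();
      }

      public static void main(String[] args) {
        SnapshotExecutorSketch s = new SnapshotExecutorSketch();
        s.createSnapAsync(() -> System.out.println("snapshot (stand-in)"));
        s.close();
      }
    }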


@@ -206,9 +206,7 @@ public class SolrConfigHandler extends RequestHandlerBase implements SolrCoreAwa
       log.info("I already have the expected version {} of params", expectedVersion);
     }
     if (checkStale && req.getCore().getResourceLoader() instanceof ZkSolrResourceLoader) {
-      new Thread(SolrConfigHandler.class.getSimpleName() + "-refreshconf") {
-        @Override
-        public void run() {
+      new Thread(() -> {
         if (!reloadLock.tryLock()) {
           log.info("Another reload is in progress . Not doing anything");
           return;
@@ -221,8 +219,7 @@ public class SolrConfigHandler extends RequestHandlerBase implements SolrCoreAwa
         } finally {
           reloadLock.unlock();
         }
-        }
-      }.start();
+      }, SolrConfigHandler.class.getSimpleName() + "-refreshconf").start();
     } else {
       log.info("checkStale {} , resourceloader {}", checkStale, req.getCore().getResourceLoader().getClass().getName());
     }
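The refreshconf body relies on ReentrantLock.tryLock() to make reloads single-flight: a thread that cannot take the lock logs and returns instead of queueing behind the holder. A compact sketch of that guard (the lock name and work are illustrative):

    import java.util.concurrent.locks.ReentrantLock;

    public class SingleFlightReloadSketch {
      private static final ReentrantLock reloadLock = new ReentrantLock();

      static void refreshConf() {
        if (!reloadLock.tryLock()) {   // non-blocking attempt
          System.out.println("Another reload is in progress. Not doing anything");
          return;                      // skip rather than wait
        }
        try {
          System.out.println("reloading config (stand-in)");
        } finally {
          reloadLock.unlock();
        }
      }

      public static void main(String[] args) throws InterruptedException {
        Thread a = new Thread(SingleFlightReloadSketch::refreshConf);
        Thread b = new Thread(SingleFlightReloadSketch::refreshConf);
        a.start(); b.start();
        a.join(); b.join();
      }
    }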


@@ -582,9 +582,7 @@ enum CoreAdminOperation {
     public void call(final CallInfo callInfo) throws IOException {
       final SolrParams params = callInfo.req.getParams();
       log.info("It has been requested that we recover: core="+params.get(CoreAdminParams.CORE));
-      Thread thread = new Thread() {
-        @Override
-        public void run() {
+      new Thread(() -> {
         String cname = params.get(CoreAdminParams.CORE);
         if (cname == null) {
           cname = "";
@@ -596,10 +594,8 @@ enum CoreAdminOperation {
           SolrException.log(log, "Could not find core to call recovery:" + cname);
         }
       }
-        }
-      };
-      thread.start();
+      }).start();
     }
   },
   REQUESTSYNCSHARD_OP(REQUESTSYNCSHARD) {


@@ -16,10 +16,6 @@
  */
 package org.apache.solr.util;
-import org.apache.solr.common.util.Cache;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import java.lang.invoke.MethodHandles;
 import java.lang.ref.WeakReference;
 import java.util.LinkedHashMap;
@@ -30,6 +26,10 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantLock;
+import org.apache.solr.common.util.Cache;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 /**
  * A LFU cache implementation based upon ConcurrentHashMap.
  * <p>
@@ -139,12 +139,7 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V> {
     // in this method.
     if (currentSize > upperWaterMark && !isCleaning) {
       if (newThreadForCleanup) {
-        new Thread() {
-          @Override
-          public void run() {
-            markAndSweep();
-          }
-        }.start();
+        new Thread(this::markAndSweep).start();
       } else if (cleanupThread != null) {
         cleanupThread.wakeThread();
       } else {


@@ -136,12 +136,7 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
     // in this method.
     if (currentSize > upperWaterMark && !isCleaning) {
       if (newThreadForCleanup) {
-        new Thread() {
-          @Override
-          public void run() {
-            markAndSweep();
-          }
-        }.start();
+        new Thread(this::markAndSweep).start();
       } else if (cleanupThread != null){
        cleanupThread.wakeThread();
      } else {