Remove some unused code (#27792)

This commit removes some unused code.
Tanguy Leroux 2017-12-13 16:45:55 +01:00, committed by GitHub
parent 247efa86bf
commit b69923f112
25 changed files with 18 additions and 105 deletions

View File

@@ -41,6 +41,10 @@ public class FailureTrackingResponseListenerTests extends RestClientTestCase {
MockResponseListener responseListener = new MockResponseListener();
RestClient.FailureTrackingResponseListener listener = new RestClient.FailureTrackingResponseListener(responseListener);
final Response response = mockResponse();
listener.onSuccess(response);
assertSame(response, responseListener.response.get());
assertNull(responseListener.exception.get());
}
public void testOnFailure() {

View File

@@ -185,7 +185,6 @@ public class BulkProcessor implements Closeable {
private BulkRequest bulkRequest;
private final BulkRequestHandler bulkRequestHandler;
private final Scheduler scheduler;
private final Runnable onClose;
private volatile boolean closed = false;
@@ -196,7 +195,6 @@ public class BulkProcessor implements Closeable {
this.bulkActions = bulkActions;
this.bulkSize = bulkSize.getBytes();
this.bulkRequest = new BulkRequest();
this.scheduler = scheduler;
this.bulkRequestHandler = new BulkRequestHandler(consumer, backoffPolicy, listener, scheduler, concurrentRequests);
// Start period flushing task after everything is setup
this.cancellableFlushTask = startFlushTask(flushInterval, scheduler);

View File

@@ -24,7 +24,6 @@ import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
@@ -69,7 +68,6 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
@Override
protected void resolveRequest(ClusterState state, InternalRequest request) {
IndexMetaData indexMeta = state.getMetaData().index(request.concreteIndex());
// update the routing (request#index here is possibly an alias)
request.request().routing(state.metaData().resolveIndexRouting(request.request().parent(), request.request().routing(), request.request().index()));
// Fail fast on the node that received the request.

View File

@@ -46,9 +46,9 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.os.OsProbe;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.InternalSettingsPreparer;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeValidationException;
import org.elasticsearch.node.InternalSettingsPreparer;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
@@ -57,8 +57,8 @@ import java.io.UnsupportedEncodingException;
import java.net.URISyntaxException;
import java.nio.file.Path;
import java.security.NoSuchAlgorithmException;
import java.util.List;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
/**
@@ -239,7 +239,6 @@ final class Bootstrap {
}
private static Environment createEnvironment(
final boolean foreground,
final Path pidFile,
final SecureSettings secureSettings,
final Settings initialSettings,
@@ -283,7 +282,7 @@ final class Bootstrap {
INSTANCE = new Bootstrap();
final SecureSettings keystore = loadSecureSettings(initialEnv);
final Environment environment = createEnvironment(foreground, pidFile, keystore, initialEnv.settings(), initialEnv.configFile());
final Environment environment = createEnvironment(pidFile, keystore, initialEnv.settings(), initialEnv.configFile());
try {
LogConfigurator.configure(environment);
} catch (IOException e) {

View File

@@ -39,7 +39,6 @@ import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
@@ -357,8 +356,6 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
// Retrieve which nodes we can potentially send the query to
final Set<String> nodeIds = getAllNodeIds(shards);
final int nodeCount = nodeIds.size();
final Map<String, Optional<ResponseCollectorService.ComputedNodeStats>> nodeStats = getNodeStats(nodeIds, collector);
// Retrieve all the nodes the shards exist on
@@ -424,16 +421,6 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
}
}
/**
* Returns true if no primaries are active or initializing for this shard
*/
private boolean noPrimariesActive() {
if (!primaryAsList.isEmpty() && !primaryAsList.get(0).active() && !primaryAsList.get(0).initializing()) {
return true;
}
return false;
}
/**
* Returns an iterator only on the primary shard.
*/

View File

@@ -632,7 +632,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
// if the activeReplica was relocating before this call to failShard, its relocation was cancelled earlier when we
// failed initializing replica shards (and moved replica relocation source back to started)
assert activeReplica.started() : "replica relocation should have been cancelled: " + activeReplica;
ShardRouting primarySwappedCandidate = promoteActiveReplicaShardToPrimary(activeReplica);
promoteActiveReplicaShardToPrimary(activeReplica);
routingChangesObserver.replicaPromoted(activeReplica);
}

View File

@@ -19,7 +19,6 @@ package org.elasticsearch.common.inject.spi;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Binder;
import org.elasticsearch.common.inject.Binding;
import org.elasticsearch.common.inject.Key;
import org.elasticsearch.common.inject.MembersInjector;
import org.elasticsearch.common.inject.Module;
@@ -60,18 +59,6 @@ import java.util.Set;
* @since 2.0
*/
public final class Elements {
private static final BindingTargetVisitor<Object, Object> GET_INSTANCE_VISITOR
= new DefaultBindingTargetVisitor<Object, Object>() {
@Override
public Object visit(InstanceBinding<?> binding) {
return binding.getInstance();
}
@Override
protected Object visitOther(Binding<?> binding) {
throw new IllegalArgumentException();
}
};
/**
* Records the elements executed by {@code modules}.

View File

@@ -749,7 +749,6 @@ public class BigArrays implements Releasable {
* @param size the initial length of the array
*/
public <T> ObjectArray<T> newObjectArray(long size) {
final ObjectArray<T> array;
if (size > OBJECT_PAGE_SIZE) {
// when allocating big arrays, we want to first ensure we have the capacity by
// checking with the circuit breaker before attempting to allocate

View File

@@ -20,7 +20,6 @@
package org.elasticsearch.common.util.concurrent;
import java.util.concurrent.BlockingQueue;
import org.elasticsearch.common.SuppressForbidden;
/**
* Extends the {@code SizeBlockingQueue} to add the {@code adjustCapacity} method, which will adjust
@@ -35,12 +34,6 @@ final class ResizableBlockingQueue<E> extends SizeBlockingQueue<E> {
this.capacity = initialCapacity;
}
@SuppressForbidden(reason = "optimalCapacity is non-negative, therefore the difference cannot be < -Integer.MAX_VALUE")
private int getChangeAmount(int optimalCapacity) {
assert optimalCapacity >= 0 : "optimal capacity should always be positive, got: " + optimalCapacity;
return Math.abs(optimalCapacity - this.capacity);
}
@Override
public int capacity() {
return this.capacity;

View File

@@ -278,13 +278,6 @@ public final class NodeEnvironment implements Closeable {
return path.resolve(NODES_FOLDER).resolve(Integer.toString(nodeLockId));
}
/** Returns true if the directory is empty */
private static boolean dirEmpty(final Path path) throws IOException {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(path)) {
return stream.iterator().hasNext() == false;
}
}
private static void releaseAndNullLocks(Lock[] locks) {
for (int i = 0; i < locks.length; i++) {
if (locks[i] != null) {

View File

@@ -44,8 +44,6 @@ import java.util.Objects;
public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder> {
public static final String NAME = "script";
private static final ParseField PARAMS_FIELD = new ParseField("params");
private final Script script;
public ScriptQueryBuilder(Script script) {

View File

@@ -303,7 +303,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
} else {
cachingPolicy = new UsageTrackingQueryCachingPolicy();
}
indexShardOperationPermits = new IndexShardOperationPermits(shardId, logger, threadPool);
indexShardOperationPermits = new IndexShardOperationPermits(shardId, threadPool);
searcherWrapper = indexSearcherWrapper;
primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardId.id());
refreshListeners = buildRefreshListeners();

View File

@@ -49,7 +49,6 @@ import java.util.function.Supplier;
final class IndexShardOperationPermits implements Closeable {
private final ShardId shardId;
private final Logger logger;
private final ThreadPool threadPool;
static final int TOTAL_PERMITS = Integer.MAX_VALUE;
@@ -62,12 +61,10 @@ final class IndexShardOperationPermits implements Closeable {
* Construct operation permits for the specified shards.
*
* @param shardId the shard
* @param logger the logger for the shard
* @param threadPool the thread pool (used to execute delayed operations)
*/
IndexShardOperationPermits(final ShardId shardId, final Logger logger, final ThreadPool threadPool) {
IndexShardOperationPermits(final ShardId shardId, final ThreadPool threadPool) {
this.shardId = shardId;
this.logger = logger;
this.threadPool = threadPool;
}

View File

@@ -33,9 +33,7 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
public final class ShardPath {
public static final String INDEX_FOLDER_NAME = "index";
@@ -198,13 +196,6 @@ public final class ShardPath {
NodeEnvironment.NodePath bestPath = getPathWithMostFreeSpace(env);
if (paths.length != 1) {
int shardCount = indexSettings.getNumberOfShards();
// Maximum number of shards that a path should have for a particular index assuming
// all the shards were assigned to this node. For example, with a node with 4 data
// paths and an index with 9 primary shards, the maximum number of shards per path
// would be 3.
int maxShardsPerPath = Math.floorDiv(shardCount, paths.length) + ((shardCount % paths.length) == 0 ? 0 : 1);
Map<NodeEnvironment.NodePath, Long> pathToShardCount = env.shardCountPerPath(shardId.getIndex());
// Compute how much space there is on each path

View File

@@ -1499,7 +1499,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
}
}
}
final RecoveryState.Index index = recoveryState.getIndex();
if (filesToRecover.isEmpty()) {
logger.trace("no files to recover, all exists within the local store");
}

View File

@@ -194,7 +194,6 @@ public class RestShardsAction extends AbstractCatAction {
table.addCell(shard.getIndexName());
table.addCell(shard.id());
IndexMetaData indexMeta = state.getState().getMetaData().getIndexSafe(shard.index());
if (shard.primary()) {
table.addCell("p");
} else {

View File

@@ -31,8 +31,6 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@@ -47,9 +45,6 @@ import java.util.function.Consumer;
public final class DirectCandidateGeneratorBuilder implements CandidateGenerator {
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(
Loggers.getLogger(DirectCandidateGeneratorBuilder.class));
private static final String TYPE = "direct_generator";
public static final ParseField DIRECT_GENERATOR_FIELD = new ParseField(TYPE);

View File

@@ -30,8 +30,6 @@ import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryShardContext;
@@ -69,8 +67,6 @@ import static org.elasticsearch.search.suggest.phrase.DirectCandidateGeneratorBu
*/
public class TermSuggestionBuilder extends SuggestionBuilder<TermSuggestionBuilder> {
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(TermSuggestionBuilder.class));
private static final String SUGGESTION_NAME = "term";
private SuggestMode suggestMode = SuggestMode.MISSING;

View File

@@ -85,7 +85,7 @@ public class IndexShardOperationPermitsTests extends ESTestCase {
@Before
public void createIndexShardOperationsLock() {
permits = new IndexShardOperationPermits(new ShardId("blubb", "id", 0), logger, threadPool);
permits = new IndexShardOperationPermits(new ShardId("blubb", "id", 0), threadPool);
}
@After

View File

@@ -25,7 +25,6 @@ import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.SpecialPermission;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
@@ -36,12 +35,12 @@ import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.transport.TransportService;
import java.io.UncheckedIOException;
import java.io.BufferedReader;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Closeable;
import java.io.UncheckedIOException;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.StandardCharsets;
@@ -56,8 +55,6 @@ import java.util.function.Supplier;
public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable {
private static Logger logger = Loggers.getLogger(Ec2DiscoveryPlugin.class);
private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
public static final String EC2 = "ec2";
static {

View File

@@ -23,7 +23,6 @@ import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
@@ -59,7 +58,6 @@ import java.util.function.Supplier;
public class FileBasedDiscoveryPlugin extends Plugin implements DiscoveryPlugin {
private static final Logger logger = Loggers.getLogger(FileBasedDiscoveryPlugin.class);
private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
private final Settings settings;
private final Path configPath;

View File

@@ -23,7 +23,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.env.Environment;
@@ -38,7 +37,6 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -67,8 +65,6 @@ class FileBasedUnicastHostsProvider extends AbstractComponent implements Unicast
private final Path unicastHostsFilePath;
private final AtomicLong nodeIdGenerator = new AtomicLong(); // generates unique ids for the node
private final TimeValue resolveTimeout;
FileBasedUnicastHostsProvider(Environment environment, TransportService transportService, ExecutorService executorService) {

View File

@@ -29,7 +29,6 @@ import org.elasticsearch.cloud.gce.GceInstancesServiceImpl;
import org.elasticsearch.cloud.gce.GceMetadataService;
import org.elasticsearch.cloud.gce.network.GceNameResolver;
import org.elasticsearch.cloud.gce.util.Access;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
@@ -53,7 +52,6 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close
public static final String GCE = "gce";
private final Settings settings;
private static final Logger logger = Loggers.getLogger(GceDiscoveryPlugin.class);
private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
// stashed when created in order to properly close
private final SetOnce<GceInstancesServiceImpl> gceInstancesService = new SetOnce<>();

View File

@@ -19,10 +19,6 @@
package org.elasticsearch.repositories.s3;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
@@ -41,6 +37,10 @@ import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;
class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Service {
@@ -139,14 +139,6 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se
}
}
/** Returns the value for a given setting from the repository, or returns the fallback value. */
private static <T> T getRepoValue(Settings repositorySettings, Setting<T> repositorySetting, T fallback) {
if (repositorySetting.exists(repositorySettings)) {
return repositorySetting.get(repositorySettings);
}
return fallback;
}
@Override
protected void doStart() throws ElasticsearchException {
}

View File

@@ -31,7 +31,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotR
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ClusterAdminClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.repositories.RepositoryMissingException;
import org.elasticsearch.repositories.RepositoryVerificationException;
@@ -181,7 +180,6 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
Settings settings = internalCluster().getInstance(Settings.class);
Settings bucket = settings.getByPrefix("repositories.s3.");
RepositoryMetaData metadata = new RepositoryMetaData("test-repo", "fs", Settings.EMPTY);
AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client(repositorySettings);
String bucketName = bucket.get("bucket");