Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-02-28 07:59:10 +00:00)
Make logging message String constant to allow static checks
commit 04e55ecf6b
parent 718876a941
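The diff below rewrites string-concatenated log messages as constant format strings with {} placeholders, which lets a static checker verify that every log call's message is a compile-time constant and that the placeholder count matches the arguments. A minimal sketch of the before/after pattern, assuming an ESLogger-style API that takes the format string first, then the optional Throwable, then the placeholder arguments (the ESLoggerLike interface and names here are illustrative stand-ins, not the real class):

    public class LoggingPatternSketch {
        // Stand-in for the ESLogger-style API used throughout this diff; note
        // the Throwable comes before the placeholder arguments.
        interface ESLoggerLike {
            void warn(String format, Throwable t, Object... params);
        }

        static void report(ESLoggerLike logger, String actionName, Exception e) {
            // Before: the message is assembled with '+', so a static check cannot
            // see the final string, and concatenation runs even when WARN is off.
            // logger.warn("Failed to send response for " + actionName, e);

            // After: a constant format string with a {} placeholder; substitution
            // happens only if the message is actually logged.
            logger.warn("Failed to send response for {}", e, actionName);
        }
    }

Note the argument order: unlike SLF4J's convention of passing the Throwable last, this ESLogger-style API puts the exception immediately after the format string, which is why lines such as logger.warn("... [{}]", e1, action) below pass the exception before the placeholder values.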
@@ -270,7 +270,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 try {
 channel.sendResponse(e);
 } catch (Throwable e1) {
-logger.warn("Failed to send response for " + actionName, e1);
+logger.warn("Failed to send response for {}", e1, actionName);
 }
 }
 });
@@ -395,7 +395,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 try {
 channel.sendResponse(t);
 } catch (IOException responseException) {
-logger.warn("failed to send error message back to client for action [" + transportReplicaAction + "]", responseException);
+logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction);
 logger.warn("actual Exception", t);
 }
 }
@@ -1107,7 +1107,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 try {
 channel.sendResponse(finalResponse);
 } catch (IOException responseException) {
-logger.warn("failed to send error message back to client for action [" + transportReplicaAction + "]", responseException);
+logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction);
 }
 if (logger.isTraceEnabled()) {
 logger.trace("action [{}] completed on all replicas [{}] for request [{}]", transportReplicaAction, shardId, replicaRequest);
@@ -76,7 +76,7 @@ class JNANatives {
 softLimit = rlimit.rlim_cur.longValue();
 hardLimit = rlimit.rlim_max.longValue();
 } else {
-logger.warn("Unable to retrieve resource limits: " + JNACLibrary.strerror(Native.getLastError()));
+logger.warn("Unable to retrieve resource limits: {}", JNACLibrary.strerror(Native.getLastError()));
 }
 }
 } catch (UnsatisfiedLinkError e) {
@@ -85,18 +85,19 @@ class JNANatives {
 }

 // mlockall failed for some reason
-logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg);
+logger.warn("Unable to lock JVM Memory: error={}, reason={}", errno , errMsg);
 logger.warn("This can result in part of the JVM being swapped out.");
 if (errno == JNACLibrary.ENOMEM) {
 if (rlimitSuccess) {
-logger.warn("Increase RLIMIT_MEMLOCK, soft limit: " + rlimitToString(softLimit) + ", hard limit: " + rlimitToString(hardLimit));
+logger.warn("Increase RLIMIT_MEMLOCK, soft limit: {}, hard limit: {}", rlimitToString(softLimit), rlimitToString(hardLimit));
 if (Constants.LINUX) {
 // give specific instructions for the linux case to make it easy
 String user = System.getProperty("user.name");
 logger.warn("These can be adjusted by modifying /etc/security/limits.conf, for example: \n" +
-"\t# allow user '" + user + "' mlockall\n" +
-"\t" + user + " soft memlock unlimited\n" +
-"\t" + user + " hard memlock unlimited"
+"\t# allow user '{}' mlockall\n" +
+"\t{} soft memlock unlimited\n" +
+"\t{} hard memlock unlimited",
+user, user, user
 );
 logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect.");
 }
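Note that the rewritten RLIMIT_MEMLOCK hint above stays checkable even though it spans several source lines: in Java, adjacent string literals joined with + are folded into a single compile-time constant (JLS §15.28), so the checker still sees one constant format string with three {} placeholders matched by user, user, user. A small illustration of that folding (class and field names are illustrative):

    class ConstantFoldingSketch {
        // Concatenation of string literals is a constant expression, so this
        // whole multi-line format is one compile-time constant with three
        // {} placeholders that a static log-message check can inspect.
        static final String MEMLOCK_HINT =
                "These can be adjusted by modifying /etc/security/limits.conf, for example: \n"
                + "\t# allow user '{}' mlockall\n"
                + "\t{} soft memlock unlimited\n"
                + "\t{} hard memlock unlimited";
    }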
@@ -155,7 +156,7 @@ class JNANatives {
 // the amount of memory we wish to lock, plus a small overhead (1MB).
 SizeT size = new SizeT(JvmInfo.jvmInfo().getMem().getHeapInit().getBytes() + (1024 * 1024));
 if (!kernel.SetProcessWorkingSetSize(process, size, size)) {
-logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code " + Native.getLastError());
+logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code {}", Native.getLastError());
 } else {
 JNAKernel32Library.MemoryBasicInformation memInfo = new JNAKernel32Library.MemoryBasicInformation();
 long address = 0;
@@ -188,7 +189,7 @@ class JNANatives {
 if (result) {
 logger.debug("console ctrl handler correctly set");
 } else {
-logger.warn("unknown error " + Native.getLastError() + " when adding console ctrl handler:");
+logger.warn("unknown error {} when adding console ctrl handler", Native.getLastError());
 }
 } catch (UnsatisfiedLinkError e) {
 // this will have already been logged by Kernel32Library, no need to repeat it
@@ -200,7 +200,7 @@ final class JVMCheck {
 HotSpotCheck bug = JVM_BROKEN_HOTSPOT_VERSIONS.get(Constants.JVM_VERSION);
 if (bug != null && bug.check()) {
 if (bug.getWorkaround().isPresent() && ManagementFactory.getRuntimeMXBean().getInputArguments().contains(bug.getWorkaround().get())) {
-Loggers.getLogger(JVMCheck.class).warn(bug.getWarningMessage().get());
+Loggers.getLogger(JVMCheck.class).warn("{}", bug.getWarningMessage().get());
 } else {
 throw new RuntimeException(bug.getErrorMessage());
 }
@@ -394,7 +394,7 @@ final class Seccomp {
 method = 0;
 int errno1 = Native.getLastError();
 if (logger.isDebugEnabled()) {
-logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): " + JNACLibrary.strerror(errno1) + ", falling back to prctl(PR_SET_SECCOMP)...");
+logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", JNACLibrary.strerror(errno1));
 }
 if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pointer, 0, 0) != 0) {
 int errno2 = Native.getLastError();
@@ -119,7 +119,7 @@ public class TransportClientNodesService extends AbstractComponent {
 this.ignoreClusterName = CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings);

 if (logger.isDebugEnabled()) {
-logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]");
+logger.debug("node_sampler_interval[{}]", nodesSamplerInterval);
 }

 if (CLIENT_TRANSPORT_SNIFF.get(this.settings)) {
@@ -318,7 +318,7 @@ public class TransportClientNodesService extends AbstractComponent {
 transportService.connectToNode(node);
 } catch (Throwable e) {
 it.remove();
-logger.debug("failed to connect to discovered node [" + node + "]", e);
+logger.debug("failed to connect to discovered node [{}]", e, node);
 }
 }
 }
@@ -321,7 +321,7 @@ public class ShardStateAction extends AbstractComponent {
 if (numberOfUnassignedShards > 0) {
 String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards);
 if (logger.isTraceEnabled()) {
-logger.trace(reason + ", scheduling a reroute");
+logger.trace("{}, scheduling a reroute", reason);
 }
 routingService.reroute(reason);
 }
@@ -436,11 +436,8 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
 } catch (Throwable e) {
 TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
 if (logger.isTraceEnabled()) {
-StringBuilder sb = new StringBuilder("failed to execute cluster state update in [").append(executionTime).append("], state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
-sb.append(previousClusterState.nodes().prettyPrint());
-sb.append(previousClusterState.routingTable().prettyPrint());
-sb.append(previousClusterState.getRoutingNodes().prettyPrint());
-logger.trace(sb.toString(), e);
+logger.trace("failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}", e, executionTime, previousClusterState.version(), source,
+previousClusterState.nodes().prettyPrint(), previousClusterState.routingTable().prettyPrint(), previousClusterState.getRoutingNodes().prettyPrint());
 }
 warnAboutSlowTaskIfNeeded(executionTime, source);
 batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder().failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e).build(previousClusterState);
@@ -523,9 +520,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
 newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED);

 if (logger.isTraceEnabled()) {
-StringBuilder sb = new StringBuilder("cluster state updated, source [").append(source).append("]\n");
-sb.append(newClusterState.prettyPrint());
-logger.trace(sb.toString());
+logger.trace("cluster state updated, source [{}]\n{}", source, newClusterState.prettyPrint());
 } else if (logger.isDebugEnabled()) {
 logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
 }
@@ -612,11 +607,9 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
 warnAboutSlowTaskIfNeeded(executionTime, source);
 } catch (Throwable t) {
 TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
-StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n");
-sb.append(newClusterState.nodes().prettyPrint());
-sb.append(newClusterState.routingTable().prettyPrint());
-sb.append(newClusterState.getRoutingNodes().prettyPrint());
-logger.warn(sb.toString(), t);
+logger.warn("failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}{}{}", t, executionTime,
+newClusterState.version(), newClusterState.stateUUID(), source, newClusterState.nodes().prettyPrint(),
+newClusterState.routingTable().prettyPrint(), newClusterState.getRoutingNodes().prettyPrint());
 // TODO: do we want to call updateTask.onFailure here?
 }
@@ -93,7 +93,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
 final String message = "[" + this.name + "] Data too large, data for [" +
 fieldName + "] would be larger than limit of [" +
 memoryBytesLimit + "/" + new ByteSizeValue(memoryBytesLimit) + "]";
-logger.debug(message);
+logger.debug("{}", message);
 throw new CircuitBreakingException(message,
 bytesNeeded, this.memoryBytesLimit);
 }
@@ -81,7 +81,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
 this.trippedCount.incrementAndGet();
 final String message = "Data too large, data for field [" + fieldName + "] would be larger than limit of [" +
 memoryBytesLimit + "/" + new ByteSizeValue(memoryBytesLimit) + "]";
-logger.debug(message);
+logger.debug("{}", message);
 throw new CircuitBreakingException(message);
 }
@@ -394,7 +394,7 @@ public class PolygonBuilder extends ShapeBuilder {
 for (int i = 0; i < result.length; i++) {
 LOGGER.debug("Component [{}]:", i);
 for (int j = 0; j < result[i].length; j++) {
-LOGGER.debug("\t" + Arrays.toString(result[i][j]));
+LOGGER.debug("\t{}", Arrays.toString(result[i][j]));
 }
 }
 }
@@ -444,7 +444,7 @@ public class PolygonBuilder extends ShapeBuilder {
 // is an arbitrary point of the hole. The polygon edge next to this point
 // is part of the polygon the hole belongs to.
 if (debugEnabled()) {
-LOGGER.debug("Holes: " + Arrays.toString(holes));
+LOGGER.debug("Holes: {}", Arrays.toString(holes));
 }
 for (int i = 0; i < numHoles; i++) {
 final Edge current = new Edge(holes[i].coordinate, holes[i].next);
@@ -464,9 +464,9 @@ public class PolygonBuilder extends ShapeBuilder {
 final int component = -edges[index].component - numHoles - 1;

 if(debugEnabled()) {
-LOGGER.debug("\tposition ("+index+") of edge "+current+": " + edges[index]);
-LOGGER.debug("\tComponent: " + component);
-LOGGER.debug("\tHole intersections ("+current.coordinate.x+"): " + Arrays.toString(edges));
+LOGGER.debug("\tposition ({}) of edge {}: {}", index, current, edges[index]);
+LOGGER.debug("\tComponent: {}", component);
+LOGGER.debug("\tHole intersections ({}): {}", current.coordinate.x, Arrays.toString(edges));
 }

 components.get(component).add(points[i]);
@@ -19,6 +19,8 @@

 package org.elasticsearch.common.logging;

+import org.elasticsearch.common.SuppressLoggerChecks;
+
 /**
 * A logger that logs deprecation notices.
 */
@@ -45,6 +47,7 @@ public class DeprecationLogger {
 /**
 * Logs a deprecated message.
 */
+@SuppressLoggerChecks(reason = "safely delegates to logger")
 public void deprecated(String msg, Object... params) {
 logger.debug(msg, params);
 }
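DeprecationLogger forwards a caller-supplied message, so its format string can never be a constant; the @SuppressLoggerChecks annotation added above opts such delegating wrappers out of the static check and records the reason for reviewers. A hedged sketch of the kind of wrapper that needs the annotation (this helper is hypothetical, not part of the commit):

    // Any method that forwards or decorates a caller-supplied format string
    // cannot satisfy the constant-message check, so it carries the annotation
    // with an explanation for reviewers.
    @SuppressLoggerChecks(reason = "forwards a caller-supplied format string")
    void warnWithPrefix(ESLogger logger, String format, Object... params) {
        // Runtime concatenation is exactly what the check forbids elsewhere,
        // which is why this wrapper must be explicitly suppressed.
        logger.warn("[prefixed] " + format, params);
    }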
@@ -104,7 +104,7 @@ final class IfConfig {
 msg.append(formatFlags(nic));
 msg.append(System.lineSeparator());
 }
-logger.debug("configuration:" + System.lineSeparator() + "{}", msg.toString());
+logger.debug("configuration:{}{}", System.lineSeparator(), msg);
 }

 /** format internet address: java's default doesn't include everything useful */
@@ -823,7 +823,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
 return null;
 }
 if (logger.isTraceEnabled()) {
-StringBuilder sb = new StringBuilder("full ping responses:");
+StringBuilder sb = new StringBuilder();
 if (fullPingResponses.length == 0) {
 sb.append(" {none}");
 } else {
@@ -831,7 +831,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
 sb.append("\n\t--> ").append(pingResponse);
 }
 }
-logger.trace(sb.toString());
+logger.trace("full ping responses:{}", sb);
 }

 // filter responses
@@ -848,7 +848,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
 }

 if (logger.isDebugEnabled()) {
-StringBuilder sb = new StringBuilder("filtered ping responses: (filter_client[").append(masterElectionFilterClientNodes).append("], filter_data[").append(masterElectionFilterDataNodes).append("])");
+StringBuilder sb = new StringBuilder();
 if (pingResponses.isEmpty()) {
 sb.append(" {none}");
 } else {
@@ -856,7 +856,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
 sb.append("\n\t--> ").append(pingResponse);
 }
 }
-logger.debug(sb.toString());
+logger.debug("filtered ping responses: (filter_client[{}], filter_data[{}]){}", masterElectionFilterClientNodes,
+masterElectionFilterDataNodes, sb);
 }

 final DiscoveryNode localNode = clusterService.localNode();
@@ -918,7 +919,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
 // *** called from within an cluster state update task *** //
 assert Thread.currentThread().getName().contains(InternalClusterService.UPDATE_THREAD_NAME);

-logger.warn(reason + ", current nodes: {}", clusterState.nodes());
+logger.warn("{}, current nodes: {}", reason, clusterState.nodes());
 nodesFD.stop();
 masterFD.stop(reason);
@@ -250,7 +250,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
 // We do some I/O in here, so skip this if DEBUG/INFO are not enabled:
 if (logger.isDebugEnabled()) {
 // Log one line per path.data:
-StringBuilder sb = new StringBuilder("node data locations details:");
+StringBuilder sb = new StringBuilder();
 for (NodePath nodePath : nodePaths) {
 sb.append('\n').append(" -> ").append(nodePath.path.toAbsolutePath());
@@ -278,7 +278,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
 .append(fsPath.getType())
 .append(']');
 }
-logger.debug(sb.toString());
+logger.debug("node data locations details:{}", sb);
 } else if (logger.isInfoEnabled()) {
 FsInfo.Path totFSPath = new FsInfo.Path();
 Set<String> allTypes = new HashSet<>();
@@ -306,14 +306,8 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
 }

 // Just log a 1-line summary:
-logger.info(String.format(Locale.ROOT,
-"using [%d] data paths, mounts [%s], net usable_space [%s], net total_space [%s], spins? [%s], types [%s]",
-nodePaths.length,
-allMounts,
-totFSPath.getAvailable(),
-totFSPath.getTotal(),
-toString(allSpins),
-toString(allTypes)));
+logger.info("using [{}] data paths, mounts [{}], net usable_space [{}], net total_space [{}], spins? [{}], types [{}]",
+nodePaths.length, allMounts, totFSPath.getAvailable(), totFSPath.getTotal(), toString(allSpins), toString(allTypes));
 }
 }
@@ -202,7 +202,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
 try (DirectoryStream<Path> stream = Files.newDirectoryStream(stateLocation)) {
 for (Path stateFile : stream) {
 if (logger.isTraceEnabled()) {
-logger.trace("[upgrade]: processing [" + stateFile.getFileName() + "]");
+logger.trace("[upgrade]: processing [{}]", stateFile.getFileName());
 }
 final String name = stateFile.getFileName().toString();
 if (name.startsWith("metadata-")) {
@@ -161,11 +161,14 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
 if (state.nodes().masterNodeId() == null) {
 logger.debug("not recovering from gateway, no master elected yet");
 } else if (recoverAfterNodes != -1 && (nodes.masterAndDataNodes().size()) < recoverAfterNodes) {
-logger.debug("not recovering from gateway, nodes_size (data+master) [" + nodes.masterAndDataNodes().size() + "] < recover_after_nodes [" + recoverAfterNodes + "]");
+logger.debug("not recovering from gateway, nodes_size (data+master) [{}] < recover_after_nodes [{}]",
+nodes.masterAndDataNodes().size(), recoverAfterNodes);
 } else if (recoverAfterDataNodes != -1 && nodes.dataNodes().size() < recoverAfterDataNodes) {
-logger.debug("not recovering from gateway, nodes_size (data) [" + nodes.dataNodes().size() + "] < recover_after_data_nodes [" + recoverAfterDataNodes + "]");
+logger.debug("not recovering from gateway, nodes_size (data) [{}] < recover_after_data_nodes [{}]",
+nodes.dataNodes().size(), recoverAfterDataNodes);
 } else if (recoverAfterMasterNodes != -1 && nodes.masterNodes().size() < recoverAfterMasterNodes) {
-logger.debug("not recovering from gateway, nodes_size (master) [" + nodes.masterNodes().size() + "] < recover_after_master_nodes [" + recoverAfterMasterNodes + "]");
+logger.debug("not recovering from gateway, nodes_size (master) [{}] < recover_after_master_nodes [{}]",
+nodes.masterNodes().size(), recoverAfterMasterNodes);
 } else {
 boolean enforceRecoverAfterTime;
 String reason;
@@ -262,7 +262,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer

 // validate max content length
 if (maxContentLength.bytes() > Integer.MAX_VALUE) {
-logger.warn("maxContentLength[" + maxContentLength + "] set to high value, resetting it to [100mb]");
+logger.warn("maxContentLength[{}] set to high value, resetting it to [100mb]", maxContentLength);
 maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB);
 }
 this.maxContentLength = maxContentLength;
@@ -155,7 +155,7 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable
 // because analyzers are aliased, they might be closed several times
 // an NPE is thrown in this case, so ignore....
 } catch (Exception e) {
-logger.debug("failed to close analyzer " + analyzer);
+logger.debug("failed to close analyzer {}", analyzer);
 }
 }
 }
@@ -129,9 +129,9 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler {
 merge.rateLimiter.getMBPerSec());

 if (tookMS > 20000) { // if more than 20 seconds, DEBUG log it
-logger.debug(message);
+logger.debug("{}", message);
 } else if (logger.isTraceEnabled()) {
-logger.trace(message);
+logger.trace("{}", message);
 }
 }
 }
@@ -230,13 +230,13 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
 IndexFieldData.Builder builder = null;
 String format = type.getFormat(indexSettings.getSettings());
 if (format != null && FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(format) && !docValues) {
-logger.warn("field [" + fieldName + "] has no doc values, will use default field data format");
+logger.warn("field [{}] has no doc values, will use default field data format", fieldName);
 format = null;
 }
 if (format != null) {
 builder = buildersByTypeAndFormat.get(Tuple.tuple(type.getType(), format));
 if (builder == null) {
-logger.warn("failed to find format [" + format + "] for field [" + fieldName + "], will use default");
+logger.warn("failed to find format [{}] for field [{}], will use default", format, fieldName);
 }
 }
 if (builder == null && docValues) {
@@ -256,7 +256,7 @@ public class TypeParsers {
 (indexVersionCreated.after(Version.V_2_0_1) && indexVersionCreated.before(Version.V_2_1_0))) {
 throw new MapperParsingException("copy_to in multi fields is not allowed. Found the copy_to in field [" + name + "] which is within a multi field.");
 } else {
-ESLoggerFactory.getLogger("mapping [" + parserContext.type() + "]").warn("Found a copy_to in field [" + name + "] which is within a multi field. This feature has been removed and the copy_to will be removed from the mapping.");
+ESLoggerFactory.getLogger("mapping [" + parserContext.type() + "]").warn("Found a copy_to in field [{}] which is within a multi field. This feature has been removed and the copy_to will be removed from the mapping.", name);
 }
 } else {
 parseCopyFields(propNode, builder);
@@ -155,7 +155,7 @@ public final class ElasticsearchMergePolicy extends MergePolicy {

 // TODO: Use IndexUpgradeMergePolicy instead. We should be comparing codecs,
 // for now we just assume every minor upgrade has a new format.
-logger.debug("Adding segment " + info.info.name + " to be upgraded");
+logger.debug("Adding segment {} to be upgraded", info.info.name);
 spec.add(new OneMerge(Collections.singletonList(info)));
 }
@@ -163,14 +163,14 @@ public final class ElasticsearchMergePolicy extends MergePolicy {

 if (spec.merges.size() == MAX_CONCURRENT_UPGRADE_MERGES) {
 // hit our max upgrades, so return the spec. we will get a cascaded call to continue.
-logger.debug("Returning " + spec.merges.size() + " merges for upgrade");
+logger.debug("Returning {} merges for upgrade", spec.merges.size());
 return spec;
 }
 }

 // We must have less than our max upgrade merges, so the next return will be our last in upgrading mode.
 if (spec.merges.isEmpty() == false) {
-logger.debug("Returning " + spec.merges.size() + " merges for end of upgrade");
+logger.debug("Returning {} merges for end of upgrade", spec.merges.size());
 return spec;
 }
@@ -128,9 +128,8 @@ final class StoreRecovery {
 assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING : "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]";

 if (logger.isTraceEnabled()) {
-StringBuilder sb = new StringBuilder();
-sb.append("recovery completed from ").append("shard_store").append(", took [").append(timeValueMillis(recoveryState.getTimer().time())).append("]\n");
 RecoveryState.Index index = recoveryState.getIndex();
+StringBuilder sb = new StringBuilder();
 sb.append(" index : files [").append(index.totalFileCount()).append("] with total_size [")
 .append(new ByteSizeValue(index.totalBytes())).append("], took[")
 .append(TimeValue.timeValueMillis(index.time())).append("]\n");
@@ -142,7 +141,7 @@ final class StoreRecovery {
 .append(timeValueMillis(recoveryState.getVerifyIndex().checkIndexTime())).append("]\n");
 sb.append(" translog : number_of_operations [").append(recoveryState.getTranslog().recoveredOperations())
 .append("], took [").append(TimeValue.timeValueMillis(recoveryState.getTranslog().time())).append("]");
-logger.trace(sb.toString());
+logger.trace("recovery completed from [shard_store], took [{}]\n{}", timeValueMillis(recoveryState.getTimer().time()), sb);
 } else if (logger.isDebugEnabled()) {
 logger.debug("recovery completed from [shard_store], took [{}]", timeValueMillis(recoveryState.getTimer().time()));
 }
@@ -379,7 +379,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
 if (isClosed.compareAndSet(false, true)) {
 // only do this once!
 decRef();
-logger.debug("store reference count on close: " + refCounter.refCount());
+logger.debug("store reference count on close: {}", refCounter.refCount());
 }
 }
@@ -193,7 +193,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
 try {
 removeIndex(index, "shutdown", false);
 } catch (Throwable e) {
-logger.warn("failed to remove index on stop " + index + "", e);
+logger.warn("failed to remove index on stop [{}]", e, index);
 } finally {
 latch.countDown();
 }
@@ -218,7 +218,7 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
 "operations")
 .append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]")
 .append("\n");
-logger.trace(sb.toString());
+logger.trace("{}", sb);
 } else {
 logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), recoveryTarget.sourceNode(), recoveryTime);
 }
@@ -287,7 +287,7 @@ public class IndicesTTLService extends AbstractLifecycleComponent<IndicesTTLServ
 logger.error("bulk deletion failures for [{}]/[{}] items", failedItems, bulkResponse.getItems().length);
 }
 } else {
-logger.trace("bulk deletion took " + bulkResponse.getTookInMillis() + "ms");
+logger.trace("bulk deletion took {}ms", bulkResponse.getTookInMillis());
 }
 }
@@ -176,7 +176,7 @@ public class RestController extends AbstractLifecycleComponent<RestController> {
 try {
 channel.sendResponse(new BytesRestResponse(channel, e));
 } catch (Throwable e1) {
-logger.error("failed to send failure response for uri [" + request.uri() + "]", e1);
+logger.error("failed to send failure response for uri [{}]", e1, request.uri());
 }
 }
 } else {
@@ -275,7 +275,7 @@ public class RestController extends AbstractLifecycleComponent<RestController> {
 try {
 channel.sendResponse(new BytesRestResponse(channel, e));
 } catch (IOException e1) {
-logger.error("Failed to send failure response for uri [" + request.uri() + "]", e1);
+logger.error("Failed to send failure response for uri [{}]", e1, request.uri());
 }
 }
 }
@@ -343,9 +343,9 @@ public class SnapshotShardsService extends AbstractLifecycleComponent<SnapshotSh
 indexShardRepository.snapshot(snapshotId, shardId, snapshotIndexCommit, snapshotStatus);
 if (logger.isDebugEnabled()) {
 StringBuilder sb = new StringBuilder();
-sb.append("snapshot (").append(snapshotId.getSnapshot()).append(") completed to ").append(indexShardRepository).append(", took [").append(TimeValue.timeValueMillis(snapshotStatus.time())).append("]\n");
 sb.append(" index : version [").append(snapshotStatus.indexVersion()).append("], number_of_files [").append(snapshotStatus.numberOfFiles()).append("] with total_size [").append(new ByteSizeValue(snapshotStatus.totalSize())).append("]\n");
-logger.debug(sb.toString());
+logger.debug("snapshot ({}) completed to {}, took [{}]\n{}", snapshotId.getSnapshot(), indexShardRepository,
+TimeValue.timeValueMillis(snapshotStatus.time()), sb);
 }
 } finally {
 indexShard.releaseSnapshot(snapshotIndexCommit);
@@ -380,7 +380,7 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
 try {
 channel.sendResponse(e);
 } catch (Throwable e1) {
-logger.warn("failed to notify channel of error message for action [" + action + "]", e1);
+logger.warn("failed to notify channel of error message for action [{}]", e1, action);
 logger.warn("actual exception", e);
 }
 }
@@ -391,7 +391,7 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
 try {
 channel.sendResponse(e);
 } catch (Throwable e1) {
-logger.warn("failed to notify channel of error message for action [" + action + "]", e1);
+logger.warn("failed to notify channel of error message for action [{}]", e1, action);
 logger.warn("actual exception", e1);
 }
 }
@@ -272,7 +272,7 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
 handleException(handler, new RemoteTransportException(nodeName(), localAddress, action, e));
 }
 } else {
-logger.warn("Failed to receive message for action [" + action + "]", e);
+logger.warn("Failed to receive message for action [{}]", e, action);
 }
 }
 }
@@ -314,7 +314,7 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
 try {
 transportChannel.sendResponse(e);
 } catch (Throwable e1) {
-logger.warn("Failed to send error message back to client for action [" + action + "]", e1);
+logger.warn("Failed to send error message back to client for action [{}]", e1, action);
 logger.warn("Actual Exception", e);
 }
 }
@@ -325,7 +325,7 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
 try {
 transportChannel.sendResponse(e);
 } catch (Throwable e1) {
-logger.warn("Failed to send error message back to client for action [" + action + "]", e);
+logger.warn("Failed to send error message back to client for action [{}]", e, action);
 logger.warn("Actual Exception", e1);
 }
@@ -274,7 +274,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
 try {
 transportChannel.sendResponse(e);
 } catch (IOException e1) {
-logger.warn("Failed to send error message back to client for action [" + action + "]", e);
+logger.warn("Failed to send error message back to client for action [{}]", e, action);
 logger.warn("Actual Exception", e1);
 }
 }
@@ -336,7 +336,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
 try {
 transportChannel.sendResponse(e);
 } catch (Throwable e1) {
-logger.warn("Failed to send error message back to client for action [" + reg.getAction() + "]", e1);
+logger.warn("Failed to send error message back to client for action [{}]", e1, reg.getAction());
 logger.warn("Actual Exception", e);
 }
 }
@@ -19,12 +19,14 @@

 package org.elasticsearch.transport.netty;

+import org.elasticsearch.common.SuppressLoggerChecks;
 import org.elasticsearch.common.logging.ESLogger;
 import org.jboss.netty.logging.AbstractInternalLogger;

 /**
 *
 */
+@SuppressLoggerChecks(reason = "safely delegates to logger")
 public class NettyInternalESLogger extends AbstractInternalLogger {

 private final ESLogger logger;
@@ -231,7 +231,7 @@ public class VersionTests extends ESTestCase {
 assertTrue(constantName + " should be final", Modifier.isFinal(versionConstant.getModifiers()));

 Version v = (Version) versionConstant.get(Version.class);
-logger.info("Checking " + v);
+logger.info("Checking {}", v);
 assertEquals("Version id " + field.getName() + " does not point to " + constantName, v, Version.fromId(versionId));
 assertEquals("Version " + constantName + " does not have correct id", versionId, v.id);
 if (v.major >= 2) {
@@ -294,14 +294,14 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
 actions[i] = new TestNodesAction(Settings.EMPTY, "testAction", clusterName, threadPool, testNodes[i].clusterService, testNodes[i].transportService) {
 @Override
 protected NodeResponse nodeOperation(NodeRequest request) {
-logger.info("Action on node " + node);
+logger.info("Action on node {}", node);
 actionLatch.countDown();
 try {
 checkLatch.await();
 } catch (InterruptedException ex) {
 Thread.currentThread().interrupt();
 }
-logger.info("Action on node " + node + " finished");
+logger.info("Action on node {} finished", node);
 return new NodeResponse(testNodes[node].discoveryNode);
 }
 };
@@ -565,7 +565,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
 actions[i] = new TestNodesAction(Settings.EMPTY, "testAction", clusterName, threadPool, testNodes[i].clusterService, testNodes[i].transportService) {
 @Override
 protected NodeResponse nodeOperation(NodeRequest request) {
-logger.info("Action on node " + node);
+logger.info("Action on node {}", node);
 throw new RuntimeException("Test exception");
 }
 };
@@ -604,9 +604,9 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
 tasksActions[i] = new TestTasksAction(Settings.EMPTY, "testTasksAction", clusterName, threadPool, testNodes[i].clusterService, testNodes[i].transportService) {
 @Override
 protected TestTaskResponse taskOperation(TestTasksRequest request, Task task) {
-logger.info("Task action on node " + node);
+logger.info("Task action on node {}", node);
 if (failTaskOnNode == node && task.getParentTaskId().isSet() == false) {
-logger.info("Failing on node " + node);
+logger.info("Failing on node {}", node);
 throw new RuntimeException("Task level failure");
 }
 return new TestTaskResponse("Success on node " + node);
@@ -134,7 +134,7 @@ public class UpgradeIT extends ESBackcompatTestCase {
 // means we can never generate ancient segments in this test (unless Lucene major version bumps but ES major version does not):
 assertFalse(hasAncientSegments(client(), indexToUpgrade));

-logger.info("--> Running upgrade on index " + indexToUpgrade);
+logger.info("--> Running upgrade on index {}", indexToUpgrade);
 assertNoFailures(client().admin().indices().prepareUpgrade(indexToUpgrade).get());
 awaitBusy(() -> {
 try {
@@ -228,7 +228,7 @@ public class UpgradeIT extends ESBackcompatTestCase {
 ESLogger logger = Loggers.getLogger(UpgradeIT.class);
 int toUpgrade = 0;
 for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
-logger.info("Index: " + status.getIndex() + ", total: " + status.getTotalBytes() + ", toUpgrade: " + status.getToUpgradeBytes());
+logger.info("Index: {}, total: {}, toUpgrade: {}", status.getIndex(), status.getTotalBytes(), status.getToUpgradeBytes());
 toUpgrade += status.getToUpgradeBytes();
 }
 return toUpgrade == 0;
@@ -162,7 +162,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
 singleDataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER);
 assertFalse(Files.exists(singleDataPath));
 Files.createDirectories(singleDataPath);
-logger.info("--> Single data path: " + singleDataPath.toString());
+logger.info("--> Single data path: {}", singleDataPath);

 // find multi data path dirs
 nodePaths = internalCluster().getInstance(NodeEnvironment.class, multiDataPathNode.get()).nodeDataPaths();
@@ -173,7 +173,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
 assertFalse(Files.exists(multiDataPath[1]));
 Files.createDirectories(multiDataPath[0]);
 Files.createDirectories(multiDataPath[1]);
-logger.info("--> Multi data paths: " + multiDataPath[0].toString() + ", " + multiDataPath[1].toString());
+logger.info("--> Multi data paths: {}, {}", multiDataPath[0], multiDataPath[1]);

 replicas.get(); // wait for replicas
 }
@@ -239,13 +239,13 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
 public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
 if (file.getFileName().toString().equals(IndexWriter.WRITE_LOCK_NAME)) {
 // skip lock file, we don't need it
-logger.trace("Skipping lock file: " + file.toString());
+logger.trace("Skipping lock file: {}", file);
 return FileVisitResult.CONTINUE;
 }

 Path relativeFile = src.relativize(file);
 Path destFile = destinationDataPath.resolve(indexName).resolve(relativeFile);
-logger.trace("--> Moving " + relativeFile.toString() + " to " + destFile.toString());
+logger.trace("--> Moving {} to {}", relativeFile, destFile);
 Files.move(file, destFile);
 assertFalse(Files.exists(file));
 assertTrue(Files.exists(destFile));
@@ -269,7 +269,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {

 for (String index : indexes) {
 if (expectedVersions.remove(index) == false) {
-logger.warn("Old indexes tests contain extra index: " + index);
+logger.warn("Old indexes tests contain extra index: {}", index);
 }
 }
 if (expectedVersions.isEmpty() == false) {
@@ -287,9 +287,9 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
 Collections.shuffle(indexes, random());
 for (String index : indexes) {
 long startTime = System.currentTimeMillis();
-logger.info("--> Testing old index " + index);
+logger.info("--> Testing old index {}", index);
 assertOldIndexWorks(index);
-logger.info("--> Done testing " + index + ", took " + ((System.currentTimeMillis() - startTime) / 1000.0) + " seconds");
+logger.info("--> Done testing {}, took {} seconds", index, (System.currentTimeMillis() - startTime) / 1000.0);
 }
 }
@@ -344,7 +344,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
 SearchResponse searchRsp = searchReq.get();
 ElasticsearchAssertions.assertNoFailures(searchRsp);
 long numDocs = searchRsp.getHits().getTotalHits();
-logger.info("Found " + numDocs + " in old index");
+logger.info("Found {} in old index", numDocs);

 logger.info("--> testing basic search with sort");
 searchReq.addSort("long_sort", SortOrder.ASC);
@@ -523,7 +523,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
 for (String indexFile : indexes) {
 String indexName = indexFile.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-");
 Path nodeDir = getNodeDir(indexFile);
-logger.info("Parsing cluster state files from index [" + indexName + "]");
+logger.info("Parsing cluster state files from index [{}]", indexName);
 assertNotNull(globalFormat.loadLatestState(logger, nodeDir)); // no exception
 Path indexDir = nodeDir.resolve("indices").resolve(indexName);
 assertNotNull(indexFormat.loadLatestState(logger, indexDir)); // no exception
@@ -28,7 +28,7 @@ public class RecoveryWithUnsupportedIndicesIT extends StaticIndexBackwardCompati
 public void testUpgradeStartClusterOn_0_20_6() throws Exception {
 String indexName = "unsupported-0.20.6";

-logger.info("Checking static index " + indexName);
+logger.info("Checking static index {}", indexName);
 Settings nodeSettings = prepareBackwardsDataDir(getBwcIndicesPath().resolve(indexName + ".zip"), NetworkModule.HTTP_ENABLED.getKey(), true);
 try {
 internalCluster().startNode(nodeSettings);
@@ -108,7 +108,7 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase {

 for (String repoVersion : repoVersions) {
 if (expectedVersions.remove(repoVersion) == false) {
-logger.warn("Old repositories tests contain extra repo: " + repoVersion);
+logger.warn("Old repositories tests contain extra repo: {}", repoVersion);
 }
 }
 if (expectedVersions.isEmpty() == false) {
@@ -36,7 +36,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 public class StaticIndexBackwardCompatibilityIT extends ESIntegTestCase {

 public void loadIndex(String index, Object... settings) throws Exception {
-logger.info("Checking static index " + index);
+logger.info("Checking static index {}", index);
 Settings nodeSettings = prepareBackwardsDataDir(getDataPath(index + ".zip"), settings);
 internalCluster().startNode(nodeSettings);
 ensureGreen(index);
@@ -608,13 +608,13 @@ public class ClusterServiceIT extends ESIntegTestCase {

 @Override
 public void onMaster() {
-logger.info("on master [" + clusterService.localNode() + "]");
+logger.info("on master [{}]", clusterService.localNode());
 master = true;
 }

 @Override
 public void offMaster() {
-logger.info("off master [" + clusterService.localNode() + "]");
+logger.info("off master [{}]", clusterService.localNode());
 master = false;
 }
@@ -235,7 +235,7 @@ public class NoMasterNodeIT extends ESIntegTestCase {
 ensureSearchable("test1", "test2");

 ClusterStateResponse clusterState = client().admin().cluster().prepareState().get();
-logger.info("Cluster state:\n" + clusterState.getState().prettyPrint());
+logger.info("Cluster state:\n{}", clusterState.getState().prettyPrint());

 internalCluster().stopRandomDataNode();
 assertTrue(awaitBusy(() -> {
@@ -163,7 +163,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
 for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStoreStatuses : storeStatuses) {
 int shardId = shardStoreStatuses.key;
 IndicesShardStoresResponse.StoreStatus storeStatus = randomFrom(shardStoreStatuses.value);
-logger.info("--> adding allocation command for shard " + shardId);
+logger.info("--> adding allocation command for shard {}", shardId);
 // force allocation based on node id
 if (useStaleReplica) {
 rerouteBuilder.add(new AllocateStalePrimaryAllocationCommand("test", shardId, storeStatus.getNode().getId(), true));
@@ -63,7 +63,7 @@ public class RoutingTableTests extends ESAllocationTestCase {
 this.numberOfReplicas = randomIntBetween(1, 5);
 this.shardsPerIndex = this.numberOfShards * (this.numberOfReplicas + 1);
 this.totalNumberOfShards = this.shardsPerIndex * 2;
-logger.info("Setup test with " + this.numberOfShards + " shards and " + this.numberOfReplicas + " replicas.");
+logger.info("Setup test with {} shards and {} replicas.", this.numberOfShards, this.numberOfReplicas);
 this.emptyRoutingTable = new RoutingTable.Builder().build();
 MetaData metaData = MetaData.builder()
 .put(createIndexMetaData(TEST_INDEX_1))
@@ -81,7 +81,7 @@ public class RoutingTableTests extends ESAllocationTestCase {
 * puts primary shard routings into initializing state
 */
 private void initPrimaries() {
-logger.info("adding " + (this.numberOfReplicas + 1) + " nodes and performing rerouting");
+logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1);
 Builder discoBuilder = DiscoveryNodes.builder();
 for (int i = 0; i < this.numberOfReplicas + 1; i++) {
 discoBuilder = discoBuilder.put(newNode("node" + i));
@@ -95,7 +95,7 @@ public class RoutingTableTests extends ESAllocationTestCase {

 private void startInitializingShards(String index) {
 this.clusterState = ClusterState.builder(clusterState).routingTable(this.testRoutingTable).build();
-logger.info("start primary shards for index " + index);
+logger.info("start primary shards for index {}", index);
 RoutingAllocation.Result rerouteResult = ALLOCATION_SERVICE.applyStartedShards(this.clusterState, this.clusterState.getRoutingNodes().shardsWithState(index, INITIALIZING));
 this.clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
 this.testRoutingTable = rerouteResult.routingTable();
@@ -301,7 +301,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {

 RoutingTable routingTable = routingTableBuilder.build();

-logger.info("start " + numberOfNodes + " nodes");
+logger.info("start {} nodes", numberOfNodes);
 DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
 for (int i = 0; i < numberOfNodes; i++) {
 nodes.put(newNode("node" + i));
@@ -221,18 +221,10 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
 routingTable = strategy.reroute(clusterState, "reroute").routingTable();
 clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

-for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(INITIALIZING)) {
-logger.info(shard.toString());
-}
-for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(STARTED)) {
-logger.info(shard.toString());
-}
-for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(RELOCATING)) {
-logger.info(shard.toString());
-}
-for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) {
-logger.info(shard.toString());
-}
+logger.info("Initializing shards: {}", clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
+logger.info("Started shards: {}", clusterState.getRoutingNodes().shardsWithState(STARTED));
+logger.info("Relocating shards: {}", clusterState.getRoutingNodes().shardsWithState(RELOCATING));
+logger.info("Unassigned shards: {}", clusterState.getRoutingNodes().shardsWithState(UNASSIGNED));

 assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5));
@@ -147,12 +147,12 @@ public abstract class CatAllocationTestCase extends ESAllocationTestCase {
 if (initializing.isEmpty()) {
 break;
 }
-logger.debug(initializing.toString());
+logger.debug("Initializing shards: {}", initializing);
 numRelocations += initializing.size();
 routingTable = strategy.applyStartedShards(clusterState, initializing).routingTable();
 clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
 }
-logger.debug("--> num relocations to get balance: " + numRelocations);
+logger.debug("--> num relocations to get balance: {}", numRelocations);
 return clusterState;
 }
@@ -409,14 +409,16 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
 String fromId = r.currentNodeId();
 assertThat(fromId, notNullValue());
 assertThat(toId, notNullValue());
-logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
+logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, routingNodes.node(fromId).node().version(),
+toId, routingNodes.node(toId).node().version());
 assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version()));
 } else {
 ShardRouting primary = routingNodes.activePrimary(r);
 assertThat(primary, notNullValue());
 String fromId = primary.currentNodeId();
 String toId = r.relocatingNodeId();
-logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
+logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, routingNodes.node(fromId).node().version(),
+toId, routingNodes.node(toId).node().version());
 assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version()));
 }
 }
@@ -428,7 +430,8 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
 assertThat(primary, notNullValue());
 String fromId = primary.currentNodeId();
 String toId = r.currentNodeId();
-logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
+logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, routingNodes.node(fromId).node().version(),
+toId, routingNodes.node(toId).node().version());
 assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version()));
 }
 }
@@ -212,7 +212,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
 assertThat(shardRouting.getIndexName(), equalTo("test1"));
 }

-logger.info("update " + ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey() + " for test, see that things move");
+logger.info("update {} for test, see that things move", ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey());
 metaData = MetaData.builder(metaData)
 .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
@@ -55,7 +55,7 @@ public class CacheTests extends ESTestCase {
 public void setUp() throws Exception {
 super.setUp();
 numberOfEntries = randomIntBetween(1000, 10000);
-logger.debug("numberOfEntries: " + numberOfEntries);
+logger.debug("numberOfEntries: {}", numberOfEntries);
 }

 // cache some entries, then randomly lookup keys that do not exist, then check the stats
@@ -40,7 +40,7 @@ public class WriteConsistencyLevelIT extends ESIntegTestCase {
 prepareCreate("test", 1, Settings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 2)).execute().actionGet();

 ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(1).setWaitForYellowStatus().execute().actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
@@ -60,7 +60,7 @@ public class WriteConsistencyLevelIT extends ESIntegTestCase {
 allowNodes("test", 2);

 clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(2).setWaitForYellowStatus().execute().actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
@@ -82,7 +82,7 @@ public class WriteConsistencyLevelIT extends ESIntegTestCase {

 allowNodes("test", 3);
 clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(3).setWaitForGreenStatus().execute().actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
@@ -208,7 +208,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {

 // Figure out what is the elected master node
 final String masterNode = internalCluster().getMasterName();
-logger.info("---> legit elected master node=" + masterNode);
+logger.info("---> legit elected master node={}", masterNode);

 // Pick a node that isn't the elected master.
 Set<String> nonMasters = new HashSet<>(nodes);
@@ -496,7 +496,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 }

 int docsPerIndexer = randomInt(3);
-logger.info("indexing " + docsPerIndexer + " docs per indexer before partition");
+logger.info("indexing {} docs per indexer before partition", docsPerIndexer);
 countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size()));
 for (Semaphore semaphore : semaphores) {
 semaphore.release(docsPerIndexer);
@@ -508,7 +508,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 disruptionScheme.startDisrupting();

 docsPerIndexer = 1 + randomInt(5);
-logger.info("indexing " + docsPerIndexer + " docs per indexer during partition");
+logger.info("indexing {} docs per indexer during partition", docsPerIndexer);
 countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size()));
 Collections.shuffle(semaphores, random());
 for (Semaphore semaphore : semaphores) {
@@ -539,11 +539,11 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 }
 } finally {
 if (exceptedExceptions.size() > 0) {
-StringBuilder sb = new StringBuilder("Indexing exceptions during disruption:");
+StringBuilder sb = new StringBuilder();
 for (Exception e : exceptedExceptions) {
 sb.append("\n").append(e.getMessage());
 }
-logger.debug(sb.toString());
+logger.debug("Indexing exceptions during disruption: {}", sb);
 }
 logger.info("shutting down indexers");
 stop.set(true);
@@ -731,7 +731,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 IndexResponse indexResponse = internalCluster().client(notIsolatedNode).prepareIndex("test", "type").setSource("field", "value").get();
 assertThat(indexResponse.getVersion(), equalTo(1L));

-logger.info("Verifying if document exists via node[" + notIsolatedNode + "]");
+logger.info("Verifying if document exists via node[{}]", notIsolatedNode);
 GetResponse getResponse = internalCluster().client(notIsolatedNode).prepareGet("test", "type", indexResponse.getId())
 .setPreference("_local")
 .get();
@ -745,7 +745,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
|
||||
ensureGreen("test");
|
||||
|
||||
for (String node : nodes) {
|
||||
logger.info("Verifying if document exists after isolating node[" + isolatedNode + "] via node[" + node + "]");
|
||||
logger.info("Verifying if document exists after isolating node[{}] via node[{}]", isolatedNode, node);
|
||||
getResponse = internalCluster().client(node).prepareGet("test", "type", indexResponse.getId())
|
||||
.setPreference("_local")
|
||||
.get();
|
||||
@ -764,7 +764,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
|
||||
List<String> nodes = startCluster(4, -1, new int[]{0});
|
||||
// Figure out what is the elected master node
|
||||
final String masterNode = internalCluster().getMasterName();
|
||||
logger.info("---> legit elected master node=" + masterNode);
|
||||
logger.info("---> legit elected master node={}", masterNode);
|
||||
List<String> otherNodes = new ArrayList<>(nodes);
|
||||
otherNodes.remove(masterNode);
|
||||
otherNodes.remove(nodes.get(0)); // <-- Don't isolate the node that is in the unicast endpoint for all the other nodes.
|
||||
|
@ -225,7 +225,7 @@ public class MetaDataStateFormatTests extends ESTestCase {
|
||||
msg.append(" after: [").append(checksumAfterCorruption).append("]");
|
||||
msg.append(" checksum value after corruption: ").append(actualChecksumAfterCorruption).append("]");
|
||||
msg.append(" file: ").append(fileToCorrupt.getFileName().toString()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString()));
|
||||
logger.debug(msg.toString());
|
||||
logger.debug("{}", msg.toString());
|
||||
assumeTrue("Checksum collision - " + msg.toString(),
|
||||
checksumAfterCorruption != checksumBeforeCorruption // collision
|
||||
|| actualChecksumAfterCorruption != checksumBeforeCorruption); // checksum corrupted
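Editor's note: when the entire message is assembled dynamically, as in the hunk above, the commit wraps it in a constant "{}" rather than passing it as the format string itself. That keeps the format argument constant for the static check and sidesteps any placeholder interpretation of the message content. The same idiom reappears later in AwsSigner and ESIntegTestCase. A short SLF4J-flavoured sketch (hypothetical class, for illustration):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class MessagePassing {
        private static final Logger logger = LoggerFactory.getLogger(MessagePassing.class);

        static void warnVerbatim(Exception e) {
            // logger.warn(e.getMessage()) would use a dynamic format string;
            // a constant "{}" keeps the call site checkable and logs the text verbatim.
            logger.warn("{}", e.getMessage());
        }
    }
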
|
@ -82,7 +82,7 @@ public class QuorumGatewayIT extends ESIntegTestCase {
assertTrue(awaitBusy(() -> {
logger.info("--> running cluster_health (wait for the shards to startup)");
ClusterHealthResponse clusterHealth = activeClient.admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("2").waitForActiveShards(test.numPrimaries * 2)).actionGet();
logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
logger.info("--> done cluster_health, status {}", clusterHealth.getStatus());
return (!clusterHealth.isTimedOut()) && clusterHealth.getStatus() == ClusterHealthStatus.YELLOW;
}, 30, TimeUnit.SECONDS));
logger.info("--> one node is closed -- index 1 document into the remaining nodes");
|
@ -145,7 +145,7 @@ public class FilterFieldDataTests extends AbstractFieldDataTestCase {
}
writer.addDocument(d);
}
logger.debug(hundred + " " + ten + " " + five);
logger.debug("{} {} {}", hundred, ten, five);
writer.forceMerge(1, true);
LeafReaderContext context = refreshReader();
String[] formats = new String[] { "paged_bytes"};
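Editor's note: beyond enabling static checks, the placeholder form defers string construction. With concatenation, the argument string is built before the logger can test the level; with {}, only references are passed and formatting happens after the level check. A sketch under that assumption (SLF4J shown; the explicit level guard becomes optional because the arguments are already cheap):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class CounterLogging {
        private static final Logger logger = LoggerFactory.getLogger(CounterLogging.class);

        static void report(int hundred, int ten, int five) {
            // Concatenation would build "100 10 5" even with DEBUG disabled:
            //   logger.debug(hundred + " " + ten + " " + five);
            // The placeholder form boxes three ints and formats only when DEBUG is on:
            logger.debug("{} {} {}", hundred, ten, five);
        }
    }
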
|
@ -272,7 +272,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {

DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
String mapping = mappingBuilder.endObject().endObject().bytes().toUtf8();
logger.info(mapping);
logger.info("Mapping: {}", mapping);
DocumentMapper docMapper = parser.parse("test", new CompressedXContent(mapping));
String builtMapping = docMapper.mappingSource().string();
// reparse it
|
@ -112,7 +112,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase {
assertThat(mappingMetaData, not(nullValue()));
Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource));
logger.info("Keys: " + aField.keySet());
logger.info("Keys: {}", aField.keySet());
assertThat(aField.size(), equalTo(2));
assertThat(aField.get("type").toString(), equalTo("geo_point"));
assertThat(aField.get("fields"), notNullValue());
|
@ -77,7 +77,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase {

logger.info("Running Cluster Health");
ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

@ -92,7 +92,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase {
// first wait for 2 nodes in the cluster
logger.info("Running Cluster Health");
clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
final String node2 = getLocalNodeId(server_2);
@ -171,7 +171,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase {
// verify health
logger.info("Running Cluster Health");
clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));

|
@ -282,7 +282,7 @@ public class IndicesRequestCacheTests extends ESTestCase {
assertEquals("foo", value1.toUtf8());
BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes());
assertEquals("bar", value2.toUtf8());
logger.info(requestCacheStats.stats().getMemorySize().toString());
logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize());
BytesReference value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes());
assertEquals("baz", value3.toUtf8());
assertEquals(2, cache.count());
@ -319,7 +319,7 @@ public class IndicesRequestCacheTests extends ESTestCase {
assertEquals("foo", value1.toUtf8());
BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes());
assertEquals("bar", value2.toUtf8());
logger.info(requestCacheStats.stats().getMemorySize().toString());
logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize());
BytesReference value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes());
assertEquals("baz", value3.toUtf8());
assertEquals(3, cache.count());
|
@ -48,7 +48,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
assertAcked(prepareCreate("test", 2));
logger.info("Running Cluster Health");
ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());

NumShards numShards = getNumShards("test");

@ -75,7 +75,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 2)).execute().actionGet());
logger.info("Running Cluster Health");
clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@ -88,7 +88,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

logger.info("Running Cluster Health");
clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes(">=3").execute().actionGet();
logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@ -106,7 +106,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

logger.info("Running Cluster Health");
clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes(">=3").execute().actionGet();
logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@ -128,7 +128,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

logger.info("--> running cluster health");
ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
logger.info("--> done cluster health, status " + clusterHealth.getStatus());
logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@ -140,7 +140,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

logger.info("--> running cluster health");
clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 3).setWaitForNodes(">=3").execute().actionGet();
logger.info("--> done cluster health, status " + clusterHealth.getStatus());
logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@ -153,7 +153,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

logger.info("--> running cluster health");
clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 2).setWaitForNodes(">=2").execute().actionGet();
logger.info("--> done cluster health, status " + clusterHealth.getStatus());
logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@ -166,7 +166,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

logger.info("--> running cluster health");
clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=1").setWaitForActiveShards(numShards.numPrimaries).execute().actionGet();
logger.info("--> done cluster health, status " + clusterHealth.getStatus());
logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@ -183,7 +183,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

logger.info("--> running cluster health");
ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
logger.info("--> done cluster health, status " + clusterHealth.getStatus());
logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@ -195,7 +195,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

logger.info("--> running cluster health");
clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 3).execute().actionGet();
logger.info("--> done cluster health, status " + clusterHealth.getStatus());
logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@ -208,7 +208,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

logger.info("--> running cluster health");
clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=2").setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
logger.info("--> done cluster health, status " + clusterHealth.getStatus());
logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@ -221,7 +221,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

logger.info("--> running cluster health");
clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNodes(">=1").setWaitForActiveShards(numShards.numPrimaries).execute().actionGet();
logger.info("--> done cluster health, status " + clusterHealth.getStatus());
logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@ -237,7 +237,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

logger.info("--> running cluster health");
ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 3).execute().actionGet();
logger.info("--> done cluster health, status " + clusterHealth.getStatus());
logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
@ -253,7 +253,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

logger.info("--> running cluster health");
clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 4).execute().actionGet();
logger.info("--> done cluster health, status " + clusterHealth.getStatus());
logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
|
@ -258,7 +258,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
assertThat(waitForShardDeletion(node_3, "test", 0), equalTo(false));

Path server2Shard = shardDirectory(node_2, "test", 0);
logger.info("--> stopping node " + node_2);
logger.info("--> stopping node {}", node_2);
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_2));

logger.info("--> running cluster_health");
@ -268,7 +268,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
.setWaitForRelocatingShards(0)
.get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
logger.info("--> done cluster_health, status {}", clusterHealth.getStatus());

assertThat(Files.exists(server2Shard), equalTo(true));

|
@ -131,7 +131,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
.addField("field1").addField("field2")
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
logger.warn("failed search " + Arrays.toString(searchResponse.getShardFailures()));
logger.warn("failed search {}", Arrays.toString(searchResponse.getShardFailures()));
}
assertHitCount(searchResponse, 1);
assertThat(searchResponse.getHits().getAt(0).field("field1").value().toString(), equalTo("value1"));
|
@ -47,11 +47,11 @@ public class SimpleNodesInfoIT extends ESIntegTestCase {
final String node_2 = nodesIds.get(1);

ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get();
logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
logger.info("--> done cluster_health, status {}", clusterHealth.getStatus());

String server1NodeId = internalCluster().getInstance(ClusterService.class, node_1).state().nodes().localNodeId();
String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().localNodeId();
logger.info("--> started nodes: " + server1NodeId + " and " + server2NodeId);
logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId);

NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().execute().actionGet();
assertThat(response.getNodes().length, is(2));
@ -91,11 +91,11 @@ public class SimpleNodesInfoIT extends ESIntegTestCase {
final String node_2 = nodesIds.get(1);

ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get();
logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
logger.info("--> done cluster_health, status {}", clusterHealth.getStatus());

String server1NodeId = internalCluster().getInstance(ClusterService.class, node_1).state().nodes().localNodeId();
String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().localNodeId();
logger.info("--> started nodes: " + server1NodeId + " and " + server2NodeId);
logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId);

NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().execute().actionGet();

|
@ -1072,7 +1072,7 @@ public class PercolatorIT extends ESIntegTestCase {
int numLevels = randomIntBetween(1, 25);
long numQueriesPerLevel = randomIntBetween(10, 250);
long totalQueries = numLevels * numQueriesPerLevel;
logger.info("--> register " + totalQueries + " queries");
logger.info("--> register {} queries", totalQueries);
for (int level = 1; level <= numLevels; level++) {
for (int query = 1; query <= numQueriesPerLevel; query++) {
client().prepareIndex("my-index", PercolatorService.TYPE_NAME, level + "-" + query)
@ -1166,7 +1166,7 @@ public class PercolatorIT extends ESIntegTestCase {

Map<Integer, NavigableSet<Integer>> controlMap = new HashMap<>();
long numQueries = randomIntBetween(100, 250);
logger.info("--> register " + numQueries + " queries");
logger.info("--> register {} queries", numQueries);
for (int i = 0; i < numQueries; i++) {
int value = randomInt(10);
client().prepareIndex("my-index", PercolatorService.TYPE_NAME, Integer.toString(i))
|
@ -131,7 +131,7 @@ public class RecoveryPercolatorIT extends ESIntegTestCase {

logger.info("Running Cluster Health (wait for the shards to startup)");
ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
SearchResponse countResponse = client().prepareSearch().setSize(0).setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).get();
assertHitCount(countResponse, 1L);
@ -140,7 +140,7 @@ public class RecoveryPercolatorIT extends ESIntegTestCase {
assertThat(actionGet.isAcknowledged(), equalTo(true));
assertAcked(prepareCreate("test").addMapping("type1", "field1", "type=text").addMapping(PercolatorService.TYPE_NAME, "color", "type=text"));
clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(client().prepareSearch().setSize(0).setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).get().getHits().totalHits(), equalTo(0L));

|
@ -123,7 +123,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
if (action.equals(RecoveryTargetService.Actions.FILE_CHUNK)) {
RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
logger.debug("file chunk [" + req.toString() + "] lastChunk: " + req.lastChunk());
logger.debug("file chunk [{}] lastChunk: {}", req, req.lastChunk());
if ((req.name().endsWith("cfs") || req.name().endsWith("fdt")) && req.lastChunk() && truncate.get()) {
latch.countDown();
throw new RuntimeException("Caused some truncated files for fun and profit");
|
@ -185,7 +185,7 @@ children("to_comment", "comment")
assertThat(categoryTerms.getBuckets().size(), equalTo(3));

for (Terms.Bucket bucket : categoryTerms.getBuckets()) {
logger.info("bucket=" + bucket.getKey());
logger.info("bucket={}", bucket.getKey());
Children childrenBucket = bucket.getAggregations().get("to_comment");
TopHits topHits = childrenBucket.getAggregations().get("top_comments");
logger.info("total_hits={}", topHits.getHits().getTotalHits());
|
@ -84,7 +84,7 @@ public class NestedIT extends ESIntegTestCase {
numParents = randomIntBetween(3, 10);
numChildren = new int[numParents];
aggCollectionMode = randomFrom(SubAggCollectionMode.values());
logger.info("AGG COLLECTION MODE: " + aggCollectionMode);
logger.info("AGG COLLECTION MODE: {}", aggCollectionMode);
int totalChildren = 0;
for (int i = 0; i < numParents; ++i) {
if (i == numParents - 1 && totalChildren == 0) {
|
@ -433,7 +433,7 @@ public class TopHitsIT extends ESIntegTestCase {
assertThat(hits.totalHits(), equalTo(controlHits.totalHits()));
assertThat(hits.getHits().length, equalTo(controlHits.getHits().length));
for (int i = 0; i < hits.getHits().length; i++) {
logger.info(i + ": top_hits: [" + hits.getAt(i).id() + "][" + hits.getAt(i).sortValues()[0] + "] control: [" + controlHits.getAt(i).id() + "][" + controlHits.getAt(i).sortValues()[0] + "]");
logger.info("{}: top_hits: [{}][{}] control: [{}][{}]", i, hits.getAt(i).id(), hits.getAt(i).sortValues()[0], controlHits.getAt(i).id(), controlHits.getAt(i).sortValues()[0]);
assertThat(hits.getAt(i).id(), equalTo(controlHits.getAt(i).id()));
assertThat(hits.getAt(i).sortValues()[0], equalTo(controlHits.getAt(i).sortValues()[0]));
}
@ -609,7 +609,7 @@ public class TopHitsIT extends ESIntegTestCase {
public void testTrackScores() throws Exception {
boolean[] trackScores = new boolean[]{true, false};
for (boolean trackScore : trackScores) {
logger.info("Track score=" + trackScore);
logger.info("Track score={}", trackScore);
SearchResponse response = client().prepareSearch("idx").setTypes("field-collapsing")
.setQuery(matchQuery("text", "term rare"))
.addAggregation(terms("terms")
|
@ -142,7 +142,7 @@ public class SearchWhileRelocatingIT extends ESIntegTestCase {
}
assertThat("numberOfReplicas: " + numberOfReplicas + " failed in iteration " + i + ", verification: " + verified, thrownExceptions, Matchers.emptyIterable());
// if we hit only non-critical exceptions we only make sure that the post search works
logger.info("Non-CriticalExceptions: " + nonCriticalExceptions.toString());
logger.info("Non-CriticalExceptions: {}", nonCriticalExceptions);
assertThat("numberOfReplicas: " + numberOfReplicas + " failed in iteration " + i + ", verification: " + verified, postSearchOK, is(true));
}
}
|
@ -90,7 +90,7 @@ public class TransportSearchFailuresIT extends ESIntegTestCase {
.cluster()
.health(clusterHealthRequest("test").waitForYellowStatus().waitForRelocatingShards(0)
.waitForActiveShards(test.totalNumShards)).actionGet();
logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), anyOf(equalTo(ClusterHealthStatus.YELLOW), equalTo(ClusterHealthStatus.GREEN)));
assertThat(clusterHealth.getActiveShards(), equalTo(test.totalNumShards));
|
@ -299,7 +299,7 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase {
// Create a random geometry collection.
GeometryCollectionBuilder gcb = RandomShapeGenerator.createGeometryCollection(getRandom());

logger.info("Created Random GeometryCollection containing " + gcb.numShapes() + " shapes");
logger.info("Created Random GeometryCollection containing {} shapes", gcb.numShapes());

client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape,tree=quadtree")
.execute().actionGet();
|
@ -2044,7 +2044,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
.query(multiMatchQueryBuilder)
.highlighter(highlight().highlightQuery(randomBoolean() ? multiMatchQueryBuilder : null).highlighterType(highlighterType)
.field(new Field("field1").requireFieldMatch(true).preTags("<field1>").postTags("</field1>")));
logger.info("Running multi-match type: [" + matchQueryType + "] highlight with type: [" + highlighterType + "]");
logger.info("Running multi-match type: [{}] highlight with type: [{}]", matchQueryType, highlighterType);
SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
assertHitCount(searchResponse, 1L);
assertHighlight(searchResponse, 0, "field1", 0, anyOf(equalTo("<field1>The quick brown fox</field1> jumps over"),
|
@ -419,7 +419,7 @@ public class MoreLikeThisIT extends ESIntegTestCase {
.minTermFreq(1)
.minDocFreq(1)
.minimumShouldMatch(minimumShouldMatch);
logger.info("Testing with minimum_should_match = " + minimumShouldMatch);
logger.info("Testing with minimum_should_match = {}", minimumShouldMatch);
SearchResponse response = client().prepareSearch("test").setTypes("type1")
.setQuery(mltQuery).get();
assertSearchResponse(response);
|
@ -72,7 +72,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
int iters = between(20, 100);
for (int i = 0; i < iters; i++) {
QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3);
logger.info(q.toString());
logger.info("Query: {}", q);

SearchResponse resp = client().prepareSearch()
.setQuery(q)
@ -126,8 +126,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
int iters = between(1, 10);
for (int i = 0; i < iters; i++) {
QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3);
logger.info(q.toString());

logger.info("Query: {}", q);

SearchRequestBuilder vanilla = client().prepareSearch("test")
.setQuery(q)
@ -309,7 +308,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
refresh();

QueryBuilder q = QueryBuilders.boolQuery();
logger.info(q.toString());
logger.info("Query: {}", q);

SearchResponse resp = client().prepareSearch()
.setQuery(q)
@ -360,8 +359,7 @@ public class QueryProfilerIT extends ESIntegTestCase {

QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one"))));


logger.info(q.toString());
logger.info("Query: {}", q);

SearchResponse resp = client().prepareSearch()
.setQuery(q)
@ -408,7 +406,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
QueryBuilder q = QueryBuilders.boostingQuery(QueryBuilders.matchQuery("field1", "one"), QueryBuilders.matchQuery("field1", "two"))
.boost(randomFloat())
.negativeBoost(randomFloat());
logger.info(q.toString());
logger.info("Query: {}", q);

SearchResponse resp = client().prepareSearch()
.setQuery(q)
@ -455,7 +453,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
QueryBuilder q = QueryBuilders.disMaxQuery()
.boost(0.33703882f)
.add(QueryBuilders.rangeQuery("field2").from(null).to(73).includeLower(true).includeUpper(true));
logger.info(q.toString());
logger.info("Query: {}", q);

SearchResponse resp = client().prepareSearch()
.setQuery(q)
@ -501,7 +499,7 @@ public class QueryProfilerIT extends ESIntegTestCase {

QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5);

logger.info(q.toString());
logger.info("Query: {}", q.toString());

SearchResponse resp = client().prepareSearch()
.setQuery(q)
@ -547,7 +545,7 @@ public class QueryProfilerIT extends ESIntegTestCase {

QueryBuilder q = QueryBuilders.matchPhraseQuery("field1", "one two");

logger.info(q.toString());
logger.info("Query: {}", q);

SearchResponse resp = client().prepareSearch()
.setQuery(q)
@ -559,7 +557,7 @@ public class QueryProfilerIT extends ESIntegTestCase {

if (resp.getShardFailures().length > 0) {
for (ShardSearchFailure f : resp.getShardFailures()) {
logger.error(f.toString());
logger.error("Shard search failure: {}", f);
}
fail();
}
@ -603,7 +601,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
refresh();
QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5);

logger.info(q.toString());
logger.info("Query: {}", q);

SearchResponse resp = client().prepareSearch().setQuery(q).setProfile(false).execute().actionGet();
assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0));
|
@ -282,8 +282,8 @@ public class DuelScrollIT extends ESIntegTestCase {
}
assertEquals(control.getHits().getTotalHits(), scrollDocs);
} catch (AssertionError e) {
logger.info("Control:\n" + control);
logger.info("Scroll size=" + size + ", from=" + scrollDocs + ":\n" + scroll);
logger.info("Control:\n{}", control);
logger.info("Scroll size={}, from={}:\n{}", size, scrollDocs, scroll);
throw e;
} finally {
clearScroll(scroll.getScrollId());
|
@ -78,8 +78,8 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
GeoPoint[] d2Points = {new GeoPoint(5, 1), new GeoPoint(6, 2)};
createShuffeldJSONArray(d2Builder, d2Points);

logger.info(d1Builder.string());
logger.info(d2Builder.string());
logger.info("d1: {}", d1Builder);
logger.info("d2: {}", d2Builder);
indexRandom(true,
client().prepareIndex("index", "type", "d1").setSource(d1Builder),
client().prepareIndex("index", "type", "d2").setSource(d2Builder));
|
@ -761,7 +761,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
Client client = client();

Path repo = randomRepoPath();
logger.info("--> creating repository at " + repo.toAbsolutePath());
logger.info("--> creating repository at {}", repo.toAbsolutePath());
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
.setType("fs").setSettings(Settings.settingsBuilder()
.put("location", repo)
@ -817,7 +817,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
Client client = client();

Path repo = randomRepoPath();
logger.info("--> creating repository at " + repo.toAbsolutePath());
logger.info("--> creating repository at {}", repo.toAbsolutePath());
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
.setType("fs").setSettings(Settings.settingsBuilder()
.put("location", repo)
@ -855,7 +855,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
Client client = client();

Path repo = randomRepoPath();
logger.info("--> creating repository at " + repo.toAbsolutePath());
logger.info("--> creating repository at {}", repo.toAbsolutePath());
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
.setType("fs").setSettings(Settings.settingsBuilder()
.put("location", repo)
@ -889,7 +889,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
Client client = client();

Path repo = randomRepoPath();
logger.info("--> creating repository at " + repo.toAbsolutePath());
logger.info("--> creating repository at {}", repo.toAbsolutePath());
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
.setType("fs").setSettings(Settings.settingsBuilder()
.put("location", repo)
@ -2159,7 +2159,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
public void testListCorruptedSnapshot() throws Exception {
Client client = client();
Path repo = randomRepoPath();
logger.info("--> creating repository at " + repo.toAbsolutePath());
logger.info("--> creating repository at {}", repo.toAbsolutePath());
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
.setType("fs").setSettings(Settings.settingsBuilder()
.put("location", repo)
|
@ -215,7 +215,7 @@ public class SnapshotBackwardsCompatibilityIT extends ESBackcompatTestCase {
logger.info("--> move from 0 to 1 replica");
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)).get();
}
logger.debug("---> repo exists: " + Files.exists(tempDir.resolve("indices/test/0")) + " files: " + Arrays.toString(FileSystemUtils.files(tempDir.resolve("indices/test/0")))); // it's only one shard!
logger.debug("---> repo exists: {} files: {}", Files.exists(tempDir.resolve("indices/test/0")), Arrays.toString(FileSystemUtils.files(tempDir.resolve("indices/test/0")))); // it's only one shard!
CreateSnapshotResponse createSnapshotResponseSecond = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-1").setWaitForCompletion(true).setIndices("test").get();
assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), greaterThan(0));
assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseSecond.getSnapshotInfo().totalShards()));
|
@ -120,7 +120,7 @@ public class MockRepository extends FsRepository {
blockOnInitialization = repositorySettings.settings().getAsBoolean("block_on_init", false);
randomPrefix = repositorySettings.settings().get("random", "default");
waitAfterUnblock = repositorySettings.settings().getAsLong("wait_after_unblock", 0L);
logger.info("starting mock repository with random prefix " + randomPrefix);
logger.info("starting mock repository with random prefix {}", randomPrefix);
mockBlobStore = new MockBlobStore(super.blobStore());
}

|
@ -157,7 +157,7 @@ public class NettyTransportIT extends ESIntegTestCase {
try {
transportChannel.sendResponse(e);
} catch (IOException e1) {
logger.warn("Failed to send error message back to client for action [" + action + "]", e);
logger.warn("Failed to send error message back to client for action [{}]", e, action);
logger.warn("Actual Exception", e1);
}
}
@ -194,7 +194,7 @@ public class NettyTransportIT extends ESIntegTestCase {
try {
transportChannel.sendResponse(e);
} catch (Throwable e1) {
logger.warn("Failed to send error message back to client for action [" + reg.getAction() + "]", e1);
logger.warn("Failed to send error message back to client for action [{}]", e1, reg.getAction());
logger.warn("Actual Exception", e);
}
} }
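Editor's note: note the argument order in the rewritten warn() calls above -- the Throwable comes directly after the format string, and the placeholder values follow it. That is the shape of the logger interface this codebase uses, and it differs from SLF4J, where the exception is appended after the arguments. A minimal sketch of that shape (hypothetical types, not the real ESLogger source):

    interface ThrowableFirstLogger {
        // format string first, then the exception, then the placeholder values
        void warn(String format, Throwable cause, Object... params);
    }

    final class ConsoleWarnLogger implements ThrowableFirstLogger {
        @Override
        public void warn(String format, Throwable cause, Object... params) {
            StringBuilder out = new StringBuilder();
            int arg = 0;
            for (int i = 0; i < format.length(); ) {
                if (i + 1 < format.length() && format.charAt(i) == '{' && format.charAt(i + 1) == '}') {
                    out.append(params[arg++]);
                    i += 2;
                } else {
                    out.append(format.charAt(i++));
                }
            }
            System.err.println("WARN " + out);
            if (cause != null) {
                cause.printStackTrace();
            }
        }
    }

With this shape, warn("... action [{}]", e1, reg.getAction()) renders the action name into the placeholder and prints the stack trace of e1, matching the calls in the hunks above.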
|
@ -316,7 +316,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
});
} catch (Throwable e) {
if (logger.isTraceEnabled()) {
logger.trace("failed to run " + compiledScript, e);
logger.trace("failed to run {}", e, compiledScript);
}
throw new ScriptException("failed to run " + compiledScript, e);
}
|
@ -332,7 +332,7 @@ public class EquivalenceTests extends ESIntegTestCase {
createIndex("idx");

final int numDocs = scaledRandomIntBetween(2500, 5000);
logger.info("Indexing [" + numDocs +"] docs");
logger.info("Indexing [{}] docs", numDocs);
List<IndexRequestBuilder> indexingRequests = new ArrayList<>();
for (int i = 0; i < numDocs; ++i) {
indexingRequests.add(client().prepareIndex("idx", "type", Integer.toString(i)).setSource("double_value", randomDouble()));
|
@ -543,7 +543,7 @@ public class ExtendedStatsTests extends AbstractNumericTestCase {
ShardSearchFailure[] failures = response.getShardFailures();
if (failures.length != expectedFailures) {
for (ShardSearchFailure failure : failures) {
logger.error("Shard Failure: {}", failure);
logger.error("Shard Failure: {}", failure.getCause(), failure);
}
fail("Unexpected shard failures!");
}
|
@ -116,7 +116,7 @@ public class SearchStatsTests extends ESIntegTestCase {
}

IndicesStatsResponse indicesStats = client().admin().indices().prepareStats().execute().actionGet();
logger.debug("###### indices search stats: " + indicesStats.getTotal().getSearch());
logger.debug("###### indices search stats: {}", indicesStats.getTotal().getSearch());
assertThat(indicesStats.getTotal().getSearch().getTotal().getQueryCount(), greaterThan(0L));
assertThat(indicesStats.getTotal().getSearch().getTotal().getQueryTimeInMillis(), greaterThan(0L));
assertThat(indicesStats.getTotal().getSearch().getTotal().getFetchCount(), greaterThan(0L));
|
@ -404,7 +404,7 @@ public class StatsTests extends AbstractNumericTestCase {
ShardSearchFailure[] failures = response.getShardFailures();
if (failures.length != expectedFailures) {
for (ShardSearchFailure failure : failures) {
logger.error("Shard Failure: {}", failure);
logger.error("Shard Failure: {}", failure.getCause(), failure);
}
fail("Unexpected shard failures!");
}
|
@ -243,7 +243,7 @@ public class StringTermsTests extends AbstractTermsTestCase {
ExecutionMode[] executionModes = new ExecutionMode[] { null, ExecutionMode.GLOBAL_ORDINALS, ExecutionMode.GLOBAL_ORDINALS_HASH,
ExecutionMode.GLOBAL_ORDINALS_LOW_CARDINALITY };
for (ExecutionMode executionMode : executionModes) {
logger.info("Execution mode:" + executionMode);
logger.info("Execution mode: {}", executionMode);
SearchResponse response = client()
.prepareSearch("idx")
.setTypes("type")
|
@ -191,7 +191,7 @@ public final class MustacheScriptEngineService extends AbstractComponent impleme
}
});
} catch (Exception e) {
logger.error("Error running " + template, e);
logger.error("Error running {}", e, template);
throw new ScriptException("Error running " + template, e);
}
return result.bytes();
|
@ -55,7 +55,7 @@ public class AwsSigner {
try {
validateSignerType(signer);
} catch (IllegalArgumentException e) {
logger.warn(e.getMessage());
logger.warn("{}", e.getMessage());
}

configuration.setSignerOverride(signer);
|
@ -64,7 +64,7 @@ public class AwsSigner {
try {
validateSignerType(signer, endpoint);
} catch (IllegalArgumentException e) {
logger.warn(e.getMessage());
logger.warn("{}", e.getMessage());
}

configuration.setSignerOverride(signer);
|
@ -88,11 +88,11 @@ public final class CorruptionUtils {
// we need to add assumptions here that the checksums actually really don't match there is a small chance to get collisions
// in the checksum which is ok though....
StringBuilder msg = new StringBuilder();
msg.append("Checksum before: [").append(checksumBeforeCorruption).append("]");
msg.append(" after: [").append(checksumAfterCorruption).append("]");
msg.append(" checksum value after corruption: ").append(actualChecksumAfterCorruption).append("]");
msg.append(" file: ").append(fileToCorrupt.getFileName()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString()));
logger.info(msg.toString());
msg.append("before: [").append(checksumBeforeCorruption).append("] ");
msg.append("after: [").append(checksumAfterCorruption).append("] ");
msg.append("checksum value after corruption: ").append(actualChecksumAfterCorruption).append("] ");
msg.append("file: ").append(fileToCorrupt.getFileName()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString()));
logger.info("Checksum {}", msg);
assumeTrue("Checksum collision - " + msg.toString(),
checksumAfterCorruption != checksumBeforeCorruption // collision
|| actualChecksumAfterCorruption != checksumBeforeCorruption); // checksum corrupted
|
@ -883,7 +883,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
sb.append("\n-> _index: [").append(hit.getIndex()).append("] type [").append(hit.getType())
.append("] id [").append(hit.id()).append("]");
}
logger.warn(sb.toString());
logger.warn("{}", sb);
fail(failMsg);
}
}
|
@ -151,8 +151,7 @@ public class RestClient implements Closeable {

HttpRequestBuilder httpRequestBuilder = callApiBuilder(apiName, requestParams, body);
for (Map.Entry<String, String> header : headers.entrySet()) {
logger.error("Adding header " + header.getKey());
logger.error(" with value " + header.getValue());
logger.error("Adding header {}\n with value {}", header.getKey(), header.getValue());
httpRequestBuilder.addHeader(header.getKey(), header.getValue());
}
logger.debug("calling api [{}]", apiName);
|
@ -61,7 +61,7 @@ public class HttpResponse {
try {
httpResponse.close();
} catch (IOException e) {
logger.error(e.getMessage(), e);
logger.error("Failed closing response", e);
}
}
} else {