Make logging message String constant to allow static checks
commit 04e55ecf6b (parent 718876a941)
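The change is mechanical throughout: every log call that assembled its message by string concatenation now passes a compile-time constant format string with {} placeholders and supplies the variable parts as arguments, so a static check can inspect the message text and match placeholders against arguments. A minimal before/after sketch (hypothetical call site; Loggers.getLogger and the ESLogger warn overload that takes the Throwable before the format arguments, unlike SLF4J, are as used in the hunks below):

    import org.elasticsearch.common.logging.ESLogger;
    import org.elasticsearch.common.logging.Loggers;

    class LoggingExample {
        private static final ESLogger logger = Loggers.getLogger(LoggingExample.class);

        void onSendFailure(String actionName, Throwable e1) {
            // Before: the message is built at the call site, so its text cannot be
            // checked statically, and the concatenation runs even when WARN is disabled.
            logger.warn("Failed to send response for " + actionName, e1);

            // After: the format string is a constant; note the exception is passed
            // ahead of the format arguments in this API.
            logger.warn("Failed to send response for {}", e1, actionName);
        }
    }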
@@ -270,7 +270,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 try {
 channel.sendResponse(e);
 } catch (Throwable e1) {
-logger.warn("Failed to send response for " + actionName, e1);
+logger.warn("Failed to send response for {}", e1, actionName);
 }
 }
 });
@@ -395,7 +395,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 try {
 channel.sendResponse(t);
 } catch (IOException responseException) {
-logger.warn("failed to send error message back to client for action [" + transportReplicaAction + "]", responseException);
+logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction);
 logger.warn("actual Exception", t);
 }
 }
@@ -1107,7 +1107,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 try {
 channel.sendResponse(finalResponse);
 } catch (IOException responseException) {
-logger.warn("failed to send error message back to client for action [" + transportReplicaAction + "]", responseException);
+logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction);
 }
 if (logger.isTraceEnabled()) {
 logger.trace("action [{}] completed on all replicas [{}] for request [{}]", transportReplicaAction, shardId, replicaRequest);
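Nothing in this commit ships the checker itself; the rewrite only makes every message a constant so that such a check becomes possible. As a rough illustration of the kind of verification this enables (a hypothetical standalone helper, not part of the commit or of Elasticsearch), a placeholder count can now be compared against the argument count:

    // Hypothetical sketch: count "{}" placeholders in a constant format string
    // and verify the caller supplied a matching number of arguments.
    final class LoggerCheckSketch {
        static void checkPlaceholders(String format, Object... args) {
            int placeholders = 0;
            int i = format.indexOf("{}");
            while (i != -1) {
                placeholders++;
                i = format.indexOf("{}", i + 2);
            }
            if (placeholders != args.length) {
                throw new IllegalArgumentException("format [" + format + "] has "
                        + placeholders + " placeholders but " + args.length + " arguments");
            }
        }
    }

A check like this only works when the format string is a literal; with the old concatenated messages there was no constant text to analyze.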
@@ -76,7 +76,7 @@ class JNANatives {
 softLimit = rlimit.rlim_cur.longValue();
 hardLimit = rlimit.rlim_max.longValue();
 } else {
-logger.warn("Unable to retrieve resource limits: " + JNACLibrary.strerror(Native.getLastError()));
+logger.warn("Unable to retrieve resource limits: {}", JNACLibrary.strerror(Native.getLastError()));
 }
 }
 } catch (UnsatisfiedLinkError e) {
@@ -85,19 +85,20 @@ class JNANatives {
 }

 // mlockall failed for some reason
-logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg);
+logger.warn("Unable to lock JVM Memory: error={}, reason={}", errno , errMsg);
 logger.warn("This can result in part of the JVM being swapped out.");
 if (errno == JNACLibrary.ENOMEM) {
 if (rlimitSuccess) {
-logger.warn("Increase RLIMIT_MEMLOCK, soft limit: " + rlimitToString(softLimit) + ", hard limit: " + rlimitToString(hardLimit));
+logger.warn("Increase RLIMIT_MEMLOCK, soft limit: {}, hard limit: {}", rlimitToString(softLimit), rlimitToString(hardLimit));
 if (Constants.LINUX) {
 // give specific instructions for the linux case to make it easy
 String user = System.getProperty("user.name");
 logger.warn("These can be adjusted by modifying /etc/security/limits.conf, for example: \n" +
-"\t# allow user '" + user + "' mlockall\n" +
-"\t" + user + " soft memlock unlimited\n" +
-"\t" + user + " hard memlock unlimited"
-);
+"\t# allow user '{}' mlockall\n" +
+"\t{} soft memlock unlimited\n" +
+"\t{} hard memlock unlimited",
+user, user, user
+);
 logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect.");
 }
 } else {
@@ -155,7 +156,7 @@ class JNANatives {
 // the amount of memory we wish to lock, plus a small overhead (1MB).
 SizeT size = new SizeT(JvmInfo.jvmInfo().getMem().getHeapInit().getBytes() + (1024 * 1024));
 if (!kernel.SetProcessWorkingSetSize(process, size, size)) {
-logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code " + Native.getLastError());
+logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code {}", Native.getLastError());
 } else {
 JNAKernel32Library.MemoryBasicInformation memInfo = new JNAKernel32Library.MemoryBasicInformation();
 long address = 0;
@@ -188,7 +189,7 @@ class JNANatives {
 if (result) {
 logger.debug("console ctrl handler correctly set");
 } else {
-logger.warn("unknown error " + Native.getLastError() + " when adding console ctrl handler:");
+logger.warn("unknown error {} when adding console ctrl handler", Native.getLastError());
 }
 } catch (UnsatisfiedLinkError e) {
 // this will have already been logged by Kernel32Library, no need to repeat it
@@ -200,7 +200,7 @@ final class JVMCheck {
 HotSpotCheck bug = JVM_BROKEN_HOTSPOT_VERSIONS.get(Constants.JVM_VERSION);
 if (bug != null && bug.check()) {
 if (bug.getWorkaround().isPresent() && ManagementFactory.getRuntimeMXBean().getInputArguments().contains(bug.getWorkaround().get())) {
-Loggers.getLogger(JVMCheck.class).warn(bug.getWarningMessage().get());
+Loggers.getLogger(JVMCheck.class).warn("{}", bug.getWarningMessage().get());
 } else {
 throw new RuntimeException(bug.getErrorMessage());
 }
@@ -394,7 +394,7 @@ final class Seccomp {
 method = 0;
 int errno1 = Native.getLastError();
 if (logger.isDebugEnabled()) {
-logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): " + JNACLibrary.strerror(errno1) + ", falling back to prctl(PR_SET_SECCOMP)...");
+logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", JNACLibrary.strerror(errno1));
 }
 if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pointer, 0, 0) != 0) {
 int errno2 = Native.getLastError();
@@ -119,7 +119,7 @@ public class TransportClientNodesService extends AbstractComponent {
 this.ignoreClusterName = CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings);

 if (logger.isDebugEnabled()) {
-logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]");
+logger.debug("node_sampler_interval[{}]", nodesSamplerInterval);
 }

 if (CLIENT_TRANSPORT_SNIFF.get(this.settings)) {
@@ -318,7 +318,7 @@ public class TransportClientNodesService extends AbstractComponent {
 transportService.connectToNode(node);
 } catch (Throwable e) {
 it.remove();
-logger.debug("failed to connect to discovered node [" + node + "]", e);
+logger.debug("failed to connect to discovered node [{}]", e, node);
 }
 }
 }
@@ -321,7 +321,7 @@ public class ShardStateAction extends AbstractComponent {
 if (numberOfUnassignedShards > 0) {
 String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards);
 if (logger.isTraceEnabled()) {
-logger.trace(reason + ", scheduling a reroute");
+logger.trace("{}, scheduling a reroute", reason);
 }
 routingService.reroute(reason);
 }
@@ -436,11 +436,8 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
 } catch (Throwable e) {
 TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
 if (logger.isTraceEnabled()) {
-StringBuilder sb = new StringBuilder("failed to execute cluster state update in [").append(executionTime).append("], state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
-sb.append(previousClusterState.nodes().prettyPrint());
-sb.append(previousClusterState.routingTable().prettyPrint());
-sb.append(previousClusterState.getRoutingNodes().prettyPrint());
-logger.trace(sb.toString(), e);
+logger.trace("failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}", e, executionTime, previousClusterState.version(), source,
+previousClusterState.nodes().prettyPrint(), previousClusterState.routingTable().prettyPrint(), previousClusterState.getRoutingNodes().prettyPrint());
 }
 warnAboutSlowTaskIfNeeded(executionTime, source);
 batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder().failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e).build(previousClusterState);
@@ -523,9 +520,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
 newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED);

 if (logger.isTraceEnabled()) {
-StringBuilder sb = new StringBuilder("cluster state updated, source [").append(source).append("]\n");
-sb.append(newClusterState.prettyPrint());
-logger.trace(sb.toString());
+logger.trace("cluster state updated, source [{}]\n{}", source, newClusterState.prettyPrint());
 } else if (logger.isDebugEnabled()) {
 logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
 }
@@ -612,11 +607,9 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
 warnAboutSlowTaskIfNeeded(executionTime, source);
 } catch (Throwable t) {
 TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
-StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n");
-sb.append(newClusterState.nodes().prettyPrint());
-sb.append(newClusterState.routingTable().prettyPrint());
-sb.append(newClusterState.getRoutingNodes().prettyPrint());
-logger.warn(sb.toString(), t);
+logger.warn("failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}{}{}", t, executionTime,
+newClusterState.version(), newClusterState.stateUUID(), source, newClusterState.nodes().prettyPrint(),
+newClusterState.routingTable().prettyPrint(), newClusterState.getRoutingNodes().prettyPrint());
 // TODO: do we want to call updateTask.onFailure here?
 }

@@ -93,7 +93,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
 final String message = "[" + this.name + "] Data too large, data for [" +
 fieldName + "] would be larger than limit of [" +
 memoryBytesLimit + "/" + new ByteSizeValue(memoryBytesLimit) + "]";
-logger.debug(message);
+logger.debug("{}", message);
 throw new CircuitBreakingException(message,
 bytesNeeded, this.memoryBytesLimit);
 }
@@ -81,7 +81,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
 this.trippedCount.incrementAndGet();
 final String message = "Data too large, data for field [" + fieldName + "] would be larger than limit of [" +
 memoryBytesLimit + "/" + new ByteSizeValue(memoryBytesLimit) + "]";
-logger.debug(message);
+logger.debug("{}", message);
 throw new CircuitBreakingException(message);
 }

@@ -394,7 +394,7 @@ public class PolygonBuilder extends ShapeBuilder {
 for (int i = 0; i < result.length; i++) {
 LOGGER.debug("Component [{}]:", i);
 for (int j = 0; j < result[i].length; j++) {
-LOGGER.debug("\t" + Arrays.toString(result[i][j]));
+LOGGER.debug("\t{}", Arrays.toString(result[i][j]));
 }
 }
 }
@@ -444,7 +444,7 @@ public class PolygonBuilder extends ShapeBuilder {
 // is an arbitrary point of the hole. The polygon edge next to this point
 // is part of the polygon the hole belongs to.
 if (debugEnabled()) {
-LOGGER.debug("Holes: " + Arrays.toString(holes));
+LOGGER.debug("Holes: {}", Arrays.toString(holes));
 }
 for (int i = 0; i < numHoles; i++) {
 final Edge current = new Edge(holes[i].coordinate, holes[i].next);
@@ -464,9 +464,9 @@ public class PolygonBuilder extends ShapeBuilder {
 final int component = -edges[index].component - numHoles - 1;

 if(debugEnabled()) {
-LOGGER.debug("\tposition ("+index+") of edge "+current+": " + edges[index]);
-LOGGER.debug("\tComponent: " + component);
-LOGGER.debug("\tHole intersections ("+current.coordinate.x+"): " + Arrays.toString(edges));
+LOGGER.debug("\tposition ({}) of edge {}: {}", index, current, edges[index]);
+LOGGER.debug("\tComponent: {}", component);
+LOGGER.debug("\tHole intersections ({}): {}", current.coordinate.x, Arrays.toString(edges));
 }

 components.get(component).add(points[i]);
@@ -19,6 +19,8 @@

 package org.elasticsearch.common.logging;

+import org.elasticsearch.common.SuppressLoggerChecks;
+
 /**
 * A logger that logs deprecation notices.
 */
@@ -45,6 +47,7 @@ public class DeprecationLogger {
 /**
 * Logs a deprecated message.
 */
+@SuppressLoggerChecks(reason = "safely delegates to logger")
 public void deprecated(String msg, Object... params) {
 logger.debug(msg, params);
 }
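Call sites that deliberately forward a caller-supplied format string, such as DeprecationLogger.deprecated above, would trip a message-constant check, so the commit marks them with @SuppressLoggerChecks and a mandatory reason. The annotation's own definition is not part of this diff; a plausible minimal shape, assuming it only needs to carry a justification and be visible on the types and methods it is applied to, would be:

    package org.elasticsearch.common;

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;

    // Hypothetical sketch; the real definition is not shown in this diff.
    // Marks a class or method as exempt from the logger-message static check.
    @Retention(RetentionPolicy.CLASS)
    @Target({ElementType.TYPE, ElementType.METHOD})
    public @interface SuppressLoggerChecks {
        String reason();
    }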
@@ -31,14 +31,14 @@ import java.net.SocketException;
 import java.util.List;
 import java.util.Locale;

 /**
 * Simple class to log {@code ifconfig}-style output at DEBUG logging.
 */
 final class IfConfig {

 private static final ESLogger logger = Loggers.getLogger(IfConfig.class);
 private static final String INDENT = " ";

 /** log interface configuration at debug level, if its enabled */
 static void logIfNecessary() {
 if (logger.isDebugEnabled()) {
@@ -49,7 +49,7 @@ final class IfConfig {
 }
 }
 }

 /** perform actual logging: might throw exception if things go wrong */
 private static void doLogging() throws IOException {
 StringBuilder msg = new StringBuilder();
@@ -59,14 +59,14 @@ final class IfConfig {
 // ordinary name
 msg.append(nic.getName());
 msg.append(System.lineSeparator());

 // display name (e.g. on windows)
 if (!nic.getName().equals(nic.getDisplayName())) {
 msg.append(INDENT);
 msg.append(nic.getDisplayName());
 msg.append(System.lineSeparator());
 }

 // addresses: v4 first, then v6
 List<InterfaceAddress> addresses = nic.getInterfaceAddresses();
 for (InterfaceAddress address : addresses) {
@@ -76,7 +76,7 @@ final class IfConfig {
 msg.append(System.lineSeparator());
 }
 }

 for (InterfaceAddress address : addresses) {
 if (address.getAddress() instanceof Inet6Address) {
 msg.append(INDENT);
@@ -84,7 +84,7 @@ final class IfConfig {
 msg.append(System.lineSeparator());
 }
 }

 // hardware address
 byte hardware[] = nic.getHardwareAddress();
 if (hardware != null) {
@@ -98,19 +98,19 @@ final class IfConfig {
 }
 msg.append(System.lineSeparator());
 }

 // attributes
 msg.append(INDENT);
 msg.append(formatFlags(nic));
 msg.append(System.lineSeparator());
 }
-logger.debug("configuration:" + System.lineSeparator() + "{}", msg.toString());
+logger.debug("configuration:{}{}", System.lineSeparator(), msg);
 }

 /** format internet address: java's default doesn't include everything useful */
 private static String formatAddress(InterfaceAddress interfaceAddress) throws IOException {
 StringBuilder sb = new StringBuilder();

 InetAddress address = interfaceAddress.getAddress();
 if (address instanceof Inet6Address) {
 sb.append("inet6 ");
@@ -122,10 +122,10 @@ final class IfConfig {
 sb.append(NetworkAddress.formatAddress(address));
 int netmask = 0xFFFFFFFF << (32 - interfaceAddress.getNetworkPrefixLength());
 sb.append(" netmask:" + NetworkAddress.formatAddress(InetAddress.getByAddress(new byte[] {
 (byte)(netmask >>> 24),
 (byte)(netmask >>> 16 & 0xFF),
 (byte)(netmask >>> 8 & 0xFF),
 (byte)(netmask & 0xFF)
 })));
 InetAddress broadcast = interfaceAddress.getBroadcast();
 if (broadcast != null) {
@@ -141,7 +141,7 @@ final class IfConfig {
 }
 return sb.toString();
 }

 /** format network interface flags */
 private static String formatFlags(NetworkInterface nic) throws SocketException {
 StringBuilder flags = new StringBuilder();
@@ -823,7 +823,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
 return null;
 }
 if (logger.isTraceEnabled()) {
-StringBuilder sb = new StringBuilder("full ping responses:");
+StringBuilder sb = new StringBuilder();
 if (fullPingResponses.length == 0) {
 sb.append(" {none}");
 } else {
@@ -831,7 +831,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
 sb.append("\n\t--> ").append(pingResponse);
 }
 }
-logger.trace(sb.toString());
+logger.trace("full ping responses:{}", sb);
 }

 // filter responses
@@ -848,7 +848,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
 }

 if (logger.isDebugEnabled()) {
-StringBuilder sb = new StringBuilder("filtered ping responses: (filter_client[").append(masterElectionFilterClientNodes).append("], filter_data[").append(masterElectionFilterDataNodes).append("])");
+StringBuilder sb = new StringBuilder();
 if (pingResponses.isEmpty()) {
 sb.append(" {none}");
 } else {
@@ -856,7 +856,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
 sb.append("\n\t--> ").append(pingResponse);
 }
 }
-logger.debug(sb.toString());
+logger.debug("filtered ping responses: (filter_client[{}], filter_data[{}]){}", masterElectionFilterClientNodes,
+masterElectionFilterDataNodes, sb);
 }

 final DiscoveryNode localNode = clusterService.localNode();
@@ -918,7 +919,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
 // *** called from within an cluster state update task *** //
 assert Thread.currentThread().getName().contains(InternalClusterService.UPDATE_THREAD_NAME);

-logger.warn(reason + ", current nodes: {}", clusterState.nodes());
+logger.warn("{}, current nodes: {}", reason, clusterState.nodes());
 nodesFD.stop();
 masterFD.stop(reason);

@@ -250,7 +250,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
 // We do some I/O in here, so skip this if DEBUG/INFO are not enabled:
 if (logger.isDebugEnabled()) {
 // Log one line per path.data:
-StringBuilder sb = new StringBuilder("node data locations details:");
+StringBuilder sb = new StringBuilder();
 for (NodePath nodePath : nodePaths) {
 sb.append('\n').append(" -> ").append(nodePath.path.toAbsolutePath());

@@ -278,7 +278,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
 .append(fsPath.getType())
 .append(']');
 }
-logger.debug(sb.toString());
+logger.debug("node data locations details:{}", sb);
 } else if (logger.isInfoEnabled()) {
 FsInfo.Path totFSPath = new FsInfo.Path();
 Set<String> allTypes = new HashSet<>();
@@ -306,14 +306,8 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
 }

 // Just log a 1-line summary:
-logger.info(String.format(Locale.ROOT,
-"using [%d] data paths, mounts [%s], net usable_space [%s], net total_space [%s], spins? [%s], types [%s]",
-nodePaths.length,
-allMounts,
-totFSPath.getAvailable(),
-totFSPath.getTotal(),
-toString(allSpins),
-toString(allTypes)));
+logger.info("using [{}] data paths, mounts [{}], net usable_space [{}], net total_space [{}], spins? [{}], types [{}]",
+nodePaths.length, allMounts, totFSPath.getAvailable(), totFSPath.getTotal(), toString(allSpins), toString(allTypes));
 }
 }

@@ -202,7 +202,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
 try (DirectoryStream<Path> stream = Files.newDirectoryStream(stateLocation)) {
 for (Path stateFile : stream) {
 if (logger.isTraceEnabled()) {
-logger.trace("[upgrade]: processing [" + stateFile.getFileName() + "]");
+logger.trace("[upgrade]: processing [{}]", stateFile.getFileName());
 }
 final String name = stateFile.getFileName().toString();
 if (name.startsWith("metadata-")) {
@@ -161,11 +161,14 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
 if (state.nodes().masterNodeId() == null) {
 logger.debug("not recovering from gateway, no master elected yet");
 } else if (recoverAfterNodes != -1 && (nodes.masterAndDataNodes().size()) < recoverAfterNodes) {
-logger.debug("not recovering from gateway, nodes_size (data+master) [" + nodes.masterAndDataNodes().size() + "] < recover_after_nodes [" + recoverAfterNodes + "]");
+logger.debug("not recovering from gateway, nodes_size (data+master) [{}] < recover_after_nodes [{}]",
+nodes.masterAndDataNodes().size(), recoverAfterNodes);
 } else if (recoverAfterDataNodes != -1 && nodes.dataNodes().size() < recoverAfterDataNodes) {
-logger.debug("not recovering from gateway, nodes_size (data) [" + nodes.dataNodes().size() + "] < recover_after_data_nodes [" + recoverAfterDataNodes + "]");
+logger.debug("not recovering from gateway, nodes_size (data) [{}] < recover_after_data_nodes [{}]",
+nodes.dataNodes().size(), recoverAfterDataNodes);
 } else if (recoverAfterMasterNodes != -1 && nodes.masterNodes().size() < recoverAfterMasterNodes) {
-logger.debug("not recovering from gateway, nodes_size (master) [" + nodes.masterNodes().size() + "] < recover_after_master_nodes [" + recoverAfterMasterNodes + "]");
+logger.debug("not recovering from gateway, nodes_size (master) [{}] < recover_after_master_nodes [{}]",
+nodes.masterNodes().size(), recoverAfterMasterNodes);
 } else {
 boolean enforceRecoverAfterTime;
 String reason;
@@ -262,7 +262,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer

 // validate max content length
 if (maxContentLength.bytes() > Integer.MAX_VALUE) {
-logger.warn("maxContentLength[" + maxContentLength + "] set to high value, resetting it to [100mb]");
+logger.warn("maxContentLength[{}] set to high value, resetting it to [100mb]", maxContentLength);
 maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB);
 }
 this.maxContentLength = maxContentLength;
@@ -155,7 +155,7 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable
 // because analyzers are aliased, they might be closed several times
 // an NPE is thrown in this case, so ignore....
 } catch (Exception e) {
-logger.debug("failed to close analyzer " + analyzer);
+logger.debug("failed to close analyzer {}", analyzer);
 }
 }
 }
@@ -129,9 +129,9 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler {
 merge.rateLimiter.getMBPerSec());

 if (tookMS > 20000) { // if more than 20 seconds, DEBUG log it
-logger.debug(message);
+logger.debug("{}", message);
 } else if (logger.isTraceEnabled()) {
-logger.trace(message);
+logger.trace("{}", message);
 }
 }
 }
@@ -230,13 +230,13 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
 IndexFieldData.Builder builder = null;
 String format = type.getFormat(indexSettings.getSettings());
 if (format != null && FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(format) && !docValues) {
-logger.warn("field [" + fieldName + "] has no doc values, will use default field data format");
+logger.warn("field [{}] has no doc values, will use default field data format", fieldName);
 format = null;
 }
 if (format != null) {
 builder = buildersByTypeAndFormat.get(Tuple.tuple(type.getType(), format));
 if (builder == null) {
-logger.warn("failed to find format [" + format + "] for field [" + fieldName + "], will use default");
+logger.warn("failed to find format [{}] for field [{}], will use default", format, fieldName);
 }
 }
 if (builder == null && docValues) {
@@ -256,7 +256,7 @@ public class TypeParsers {
 (indexVersionCreated.after(Version.V_2_0_1) && indexVersionCreated.before(Version.V_2_1_0))) {
 throw new MapperParsingException("copy_to in multi fields is not allowed. Found the copy_to in field [" + name + "] which is within a multi field.");
 } else {
-ESLoggerFactory.getLogger("mapping [" + parserContext.type() + "]").warn("Found a copy_to in field [" + name + "] which is within a multi field. This feature has been removed and the copy_to will be removed from the mapping.");
+ESLoggerFactory.getLogger("mapping [" + parserContext.type() + "]").warn("Found a copy_to in field [{}] which is within a multi field. This feature has been removed and the copy_to will be removed from the mapping.", name);
 }
 } else {
 parseCopyFields(propNode, builder);
@@ -49,7 +49,7 @@ import java.util.Map;
 * be stored as payloads to numeric doc values.
 */
 public final class ElasticsearchMergePolicy extends MergePolicy {

 private static ESLogger logger = Loggers.getLogger(ElasticsearchMergePolicy.class);

 private final MergePolicy delegate;
@@ -69,7 +69,7 @@ public final class ElasticsearchMergePolicy extends MergePolicy {

 /** Return an "upgraded" view of the reader. */
 static CodecReader filter(CodecReader reader) throws IOException {
 // TODO: remove 0.90.x/1.x freqs/prox/payloads from _uid?
 // the previous code never did this, so some indexes carry around trash.
 return reader;
 }
@@ -155,7 +155,7 @@ public final class ElasticsearchMergePolicy extends MergePolicy {

 // TODO: Use IndexUpgradeMergePolicy instead. We should be comparing codecs,
 // for now we just assume every minor upgrade has a new format.
-logger.debug("Adding segment " + info.info.name + " to be upgraded");
+logger.debug("Adding segment {} to be upgraded", info.info.name);
 spec.add(new OneMerge(Collections.singletonList(info)));
 }

@@ -163,14 +163,14 @@ public final class ElasticsearchMergePolicy extends MergePolicy {

 if (spec.merges.size() == MAX_CONCURRENT_UPGRADE_MERGES) {
 // hit our max upgrades, so return the spec. we will get a cascaded call to continue.
-logger.debug("Returning " + spec.merges.size() + " merges for upgrade");
+logger.debug("Returning {} merges for upgrade", spec.merges.size());
 return spec;
 }
 }

 // We must have less than our max upgrade merges, so the next return will be our last in upgrading mode.
 if (spec.merges.isEmpty() == false) {
-logger.debug("Returning " + spec.merges.size() + " merges for end of upgrade");
+logger.debug("Returning {} merges for end of upgrade", spec.merges.size());
 return spec;
 }

@@ -128,9 +128,8 @@ final class StoreRecovery {
 assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING : "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]";

 if (logger.isTraceEnabled()) {
-StringBuilder sb = new StringBuilder();
-sb.append("recovery completed from ").append("shard_store").append(", took [").append(timeValueMillis(recoveryState.getTimer().time())).append("]\n");
 RecoveryState.Index index = recoveryState.getIndex();
+StringBuilder sb = new StringBuilder();
 sb.append(" index : files [").append(index.totalFileCount()).append("] with total_size [")
 .append(new ByteSizeValue(index.totalBytes())).append("], took[")
 .append(TimeValue.timeValueMillis(index.time())).append("]\n");
@@ -142,7 +141,7 @@ final class StoreRecovery {
 .append(timeValueMillis(recoveryState.getVerifyIndex().checkIndexTime())).append("]\n");
 sb.append(" translog : number_of_operations [").append(recoveryState.getTranslog().recoveredOperations())
 .append("], took [").append(TimeValue.timeValueMillis(recoveryState.getTranslog().time())).append("]");
-logger.trace(sb.toString());
+logger.trace("recovery completed from [shard_store], took [{}]\n{}", timeValueMillis(recoveryState.getTimer().time()), sb);
 } else if (logger.isDebugEnabled()) {
 logger.debug("recovery completed from [shard_store], took [{}]", timeValueMillis(recoveryState.getTimer().time()));
 }
@@ -379,7 +379,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
 if (isClosed.compareAndSet(false, true)) {
 // only do this once!
 decRef();
-logger.debug("store reference count on close: " + refCounter.refCount());
+logger.debug("store reference count on close: {}", refCounter.refCount());
 }
 }

@@ -193,7 +193,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
 try {
 removeIndex(index, "shutdown", false);
 } catch (Throwable e) {
-logger.warn("failed to remove index on stop " + index + "", e);
+logger.warn("failed to remove index on stop [{}]", e, index);
 } finally {
 latch.countDown();
 }
@@ -218,7 +218,7 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
 "operations")
 .append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]")
 .append("\n");
-logger.trace(sb.toString());
+logger.trace("{}", sb);
 } else {
 logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), recoveryTarget.sourceNode(), recoveryTime);
 }
@@ -287,7 +287,7 @@ public class IndicesTTLService extends AbstractLifecycleComponent<IndicesTTLServ
 logger.error("bulk deletion failures for [{}]/[{}] items", failedItems, bulkResponse.getItems().length);
 }
 } else {
-logger.trace("bulk deletion took " + bulkResponse.getTookInMillis() + "ms");
+logger.trace("bulk deletion took {}ms", bulkResponse.getTookInMillis());
 }
 }

@@ -176,7 +176,7 @@ public class RestController extends AbstractLifecycleComponent<RestController> {
 try {
 channel.sendResponse(new BytesRestResponse(channel, e));
 } catch (Throwable e1) {
-logger.error("failed to send failure response for uri [" + request.uri() + "]", e1);
+logger.error("failed to send failure response for uri [{}]", e1, request.uri());
 }
 }
 } else {
@@ -275,7 +275,7 @@ public class RestController extends AbstractLifecycleComponent<RestController> {
 try {
 channel.sendResponse(new BytesRestResponse(channel, e));
 } catch (IOException e1) {
-logger.error("Failed to send failure response for uri [" + request.uri() + "]", e1);
+logger.error("Failed to send failure response for uri [{}]", e1, request.uri());
 }
 }
 }
@@ -343,9 +343,9 @@ public class SnapshotShardsService extends AbstractLifecycleComponent<SnapshotSh
 indexShardRepository.snapshot(snapshotId, shardId, snapshotIndexCommit, snapshotStatus);
 if (logger.isDebugEnabled()) {
 StringBuilder sb = new StringBuilder();
-sb.append("snapshot (").append(snapshotId.getSnapshot()).append(") completed to ").append(indexShardRepository).append(", took [").append(TimeValue.timeValueMillis(snapshotStatus.time())).append("]\n");
 sb.append(" index : version [").append(snapshotStatus.indexVersion()).append("], number_of_files [").append(snapshotStatus.numberOfFiles()).append("] with total_size [").append(new ByteSizeValue(snapshotStatus.totalSize())).append("]\n");
-logger.debug(sb.toString());
+logger.debug("snapshot ({}) completed to {}, took [{}]\n{}", snapshotId.getSnapshot(), indexShardRepository,
+TimeValue.timeValueMillis(snapshotStatus.time()), sb);
 }
 } finally {
 indexShard.releaseSnapshot(snapshotIndexCommit);
@@ -380,7 +380,7 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
 try {
 channel.sendResponse(e);
 } catch (Throwable e1) {
-logger.warn("failed to notify channel of error message for action [" + action + "]", e1);
+logger.warn("failed to notify channel of error message for action [{}]", e1, action);
 logger.warn("actual exception", e);
 }
 }
@@ -391,7 +391,7 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
 try {
 channel.sendResponse(e);
 } catch (Throwable e1) {
-logger.warn("failed to notify channel of error message for action [" + action + "]", e1);
+logger.warn("failed to notify channel of error message for action [{}]", e1, action);
 logger.warn("actual exception", e1);
 }
 }
@@ -272,7 +272,7 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
 handleException(handler, new RemoteTransportException(nodeName(), localAddress, action, e));
 }
 } else {
-logger.warn("Failed to receive message for action [" + action + "]", e);
+logger.warn("Failed to receive message for action [{}]", e, action);
 }
 }
 }
@@ -314,7 +314,7 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
 try {
 transportChannel.sendResponse(e);
 } catch (Throwable e1) {
-logger.warn("Failed to send error message back to client for action [" + action + "]", e1);
+logger.warn("Failed to send error message back to client for action [{}]", e1, action);
 logger.warn("Actual Exception", e);
 }
 }
@@ -325,7 +325,7 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
 try {
 transportChannel.sendResponse(e);
 } catch (Throwable e1) {
-logger.warn("Failed to send error message back to client for action [" + action + "]", e);
+logger.warn("Failed to send error message back to client for action [{}]", e, action);
 logger.warn("Actual Exception", e1);
 }
 }

@@ -274,7 +274,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
 try {
 transportChannel.sendResponse(e);
 } catch (IOException e1) {
-logger.warn("Failed to send error message back to client for action [" + action + "]", e);
+logger.warn("Failed to send error message back to client for action [{}]", e, action);
 logger.warn("Actual Exception", e1);
 }
 }
@@ -336,7 +336,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
 try {
 transportChannel.sendResponse(e);
 } catch (Throwable e1) {
-logger.warn("Failed to send error message back to client for action [" + reg.getAction() + "]", e1);
+logger.warn("Failed to send error message back to client for action [{}]", e1, reg.getAction());
 logger.warn("Actual Exception", e);
 }
 }
@@ -19,12 +19,14 @@

 package org.elasticsearch.transport.netty;

+import org.elasticsearch.common.SuppressLoggerChecks;
 import org.elasticsearch.common.logging.ESLogger;
 import org.jboss.netty.logging.AbstractInternalLogger;

 /**
 *
 */
+@SuppressLoggerChecks(reason = "safely delegates to logger")
 public class NettyInternalESLogger extends AbstractInternalLogger {

 private final ESLogger logger;
@@ -231,7 +231,7 @@ public class VersionTests extends ESTestCase {
 assertTrue(constantName + " should be final", Modifier.isFinal(versionConstant.getModifiers()));

 Version v = (Version) versionConstant.get(Version.class);
-logger.info("Checking " + v);
+logger.info("Checking {}", v);
 assertEquals("Version id " + field.getName() + " does not point to " + constantName, v, Version.fromId(versionId));
 assertEquals("Version " + constantName + " does not have correct id", versionId, v.id);
 if (v.major >= 2) {
@ -294,14 +294,14 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
|
|||||||
actions[i] = new TestNodesAction(Settings.EMPTY, "testAction", clusterName, threadPool, testNodes[i].clusterService, testNodes[i].transportService) {
|
actions[i] = new TestNodesAction(Settings.EMPTY, "testAction", clusterName, threadPool, testNodes[i].clusterService, testNodes[i].transportService) {
|
||||||
@Override
|
@Override
|
||||||
protected NodeResponse nodeOperation(NodeRequest request) {
|
protected NodeResponse nodeOperation(NodeRequest request) {
|
||||||
logger.info("Action on node " + node);
|
logger.info("Action on node {}", node);
|
||||||
actionLatch.countDown();
|
actionLatch.countDown();
|
||||||
try {
|
try {
|
||||||
checkLatch.await();
|
checkLatch.await();
|
||||||
} catch (InterruptedException ex) {
|
} catch (InterruptedException ex) {
|
||||||
Thread.currentThread().interrupt();
|
Thread.currentThread().interrupt();
|
||||||
}
|
}
|
||||||
logger.info("Action on node " + node + " finished");
|
logger.info("Action on node {} finished", node);
|
||||||
return new NodeResponse(testNodes[node].discoveryNode);
|
return new NodeResponse(testNodes[node].discoveryNode);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -565,7 +565,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
|
|||||||
actions[i] = new TestNodesAction(Settings.EMPTY, "testAction", clusterName, threadPool, testNodes[i].clusterService, testNodes[i].transportService) {
|
actions[i] = new TestNodesAction(Settings.EMPTY, "testAction", clusterName, threadPool, testNodes[i].clusterService, testNodes[i].transportService) {
|
||||||
@Override
|
@Override
|
||||||
protected NodeResponse nodeOperation(NodeRequest request) {
|
protected NodeResponse nodeOperation(NodeRequest request) {
|
||||||
logger.info("Action on node " + node);
|
logger.info("Action on node {}", node);
|
||||||
throw new RuntimeException("Test exception");
|
throw new RuntimeException("Test exception");
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -604,9 +604,9 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
|
|||||||
tasksActions[i] = new TestTasksAction(Settings.EMPTY, "testTasksAction", clusterName, threadPool, testNodes[i].clusterService, testNodes[i].transportService) {
|
tasksActions[i] = new TestTasksAction(Settings.EMPTY, "testTasksAction", clusterName, threadPool, testNodes[i].clusterService, testNodes[i].transportService) {
|
||||||
@Override
|
@Override
|
||||||
protected TestTaskResponse taskOperation(TestTasksRequest request, Task task) {
|
protected TestTaskResponse taskOperation(TestTasksRequest request, Task task) {
|
||||||
logger.info("Task action on node " + node);
|
logger.info("Task action on node {}", node);
|
||||||
if (failTaskOnNode == node && task.getParentTaskId().isSet() == false) {
|
if (failTaskOnNode == node && task.getParentTaskId().isSet() == false) {
|
||||||
logger.info("Failing on node " + node);
|
logger.info("Failing on node {}", node);
|
||||||
throw new RuntimeException("Task level failure");
|
throw new RuntimeException("Task level failure");
|
||||||
}
|
}
|
||||||
return new TestTaskResponse("Success on node " + node);
|
return new TestTaskResponse("Success on node " + node);
|
||||||
|
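The pattern is the same in every hunk of this commit: a log message assembled by string concatenation becomes a constant format string with {} placeholders. A constant message lets a static check verify that the placeholder count matches the argument count, and the text is only rendered when the log level is actually enabled. A minimal sketch of the idiom, reusing the logger and node names from the hunks above:

    // Before: the message is concatenated eagerly, and nothing can
    // statically verify that the text and the arguments stay in sync.
    logger.info("Action on node " + node);

    // After: the format string is a literal; "{}" is substituted lazily,
    // and a checker can count placeholders against arguments.
    logger.info("Action on node {}", node);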
@@ -134,7 +134,7 @@ public class UpgradeIT extends ESBackcompatTestCase {
 // means we can never generate ancient segments in this test (unless Lucene major version bumps but ES major version does not):
 assertFalse(hasAncientSegments(client(), indexToUpgrade));

-logger.info("--> Running upgrade on index " + indexToUpgrade);
+logger.info("--> Running upgrade on index {}", indexToUpgrade);
 assertNoFailures(client().admin().indices().prepareUpgrade(indexToUpgrade).get());
 awaitBusy(() -> {
 try {

@@ -228,7 +228,7 @@ public class UpgradeIT extends ESBackcompatTestCase {
 ESLogger logger = Loggers.getLogger(UpgradeIT.class);
 int toUpgrade = 0;
 for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
-logger.info("Index: " + status.getIndex() + ", total: " + status.getTotalBytes() + ", toUpgrade: " + status.getToUpgradeBytes());
+logger.info("Index: {}, total: {}, toUpgrade: {}", status.getIndex(), status.getTotalBytes(), status.getToUpgradeBytes());
 toUpgrade += status.getToUpgradeBytes();
 }
 return toUpgrade == 0;
@@ -162,7 +162,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
 singleDataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER);
 assertFalse(Files.exists(singleDataPath));
 Files.createDirectories(singleDataPath);
-logger.info("--> Single data path: " + singleDataPath.toString());
+logger.info("--> Single data path: {}", singleDataPath);

 // find multi data path dirs
 nodePaths = internalCluster().getInstance(NodeEnvironment.class, multiDataPathNode.get()).nodeDataPaths();

@@ -173,7 +173,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
 assertFalse(Files.exists(multiDataPath[1]));
 Files.createDirectories(multiDataPath[0]);
 Files.createDirectories(multiDataPath[1]);
-logger.info("--> Multi data paths: " + multiDataPath[0].toString() + ", " + multiDataPath[1].toString());
+logger.info("--> Multi data paths: {}, {}", multiDataPath[0], multiDataPath[1]);

 replicas.get(); // wait for replicas
 }

@@ -239,13 +239,13 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
 public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
 if (file.getFileName().toString().equals(IndexWriter.WRITE_LOCK_NAME)) {
 // skip lock file, we don't need it
-logger.trace("Skipping lock file: " + file.toString());
+logger.trace("Skipping lock file: {}", file);
 return FileVisitResult.CONTINUE;
 }

 Path relativeFile = src.relativize(file);
 Path destFile = destinationDataPath.resolve(indexName).resolve(relativeFile);
-logger.trace("--> Moving " + relativeFile.toString() + " to " + destFile.toString());
+logger.trace("--> Moving {} to {}", relativeFile, destFile);
 Files.move(file, destFile);
 assertFalse(Files.exists(file));
 assertTrue(Files.exists(destFile));

@@ -269,7 +269,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {

 for (String index : indexes) {
 if (expectedVersions.remove(index) == false) {
-logger.warn("Old indexes tests contain extra index: " + index);
+logger.warn("Old indexes tests contain extra index: {}", index);
 }
 }
 if (expectedVersions.isEmpty() == false) {

@@ -287,9 +287,9 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
 Collections.shuffle(indexes, random());
 for (String index : indexes) {
 long startTime = System.currentTimeMillis();
-logger.info("--> Testing old index " + index);
+logger.info("--> Testing old index {}", index);
 assertOldIndexWorks(index);
-logger.info("--> Done testing " + index + ", took " + ((System.currentTimeMillis() - startTime) / 1000.0) + " seconds");
+logger.info("--> Done testing {}, took {} seconds", index, (System.currentTimeMillis() - startTime) / 1000.0);
 }
 }

@@ -344,7 +344,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
 SearchResponse searchRsp = searchReq.get();
 ElasticsearchAssertions.assertNoFailures(searchRsp);
 long numDocs = searchRsp.getHits().getTotalHits();
-logger.info("Found " + numDocs + " in old index");
+logger.info("Found {} in old index", numDocs);

 logger.info("--> testing basic search with sort");
 searchReq.addSort("long_sort", SortOrder.ASC);

@@ -523,7 +523,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
 for (String indexFile : indexes) {
 String indexName = indexFile.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-");
 Path nodeDir = getNodeDir(indexFile);
-logger.info("Parsing cluster state files from index [" + indexName + "]");
+logger.info("Parsing cluster state files from index [{}]", indexName);
 assertNotNull(globalFormat.loadLatestState(logger, nodeDir)); // no exception
 Path indexDir = nodeDir.resolve("indices").resolve(indexName);
 assertNotNull(indexFormat.loadLatestState(logger, indexDir)); // no exception
@@ -28,7 +28,7 @@ public class RecoveryWithUnsupportedIndicesIT extends StaticIndexBackwardCompati
 public void testUpgradeStartClusterOn_0_20_6() throws Exception {
 String indexName = "unsupported-0.20.6";

-logger.info("Checking static index " + indexName);
+logger.info("Checking static index {}", indexName);
 Settings nodeSettings = prepareBackwardsDataDir(getBwcIndicesPath().resolve(indexName + ".zip"), NetworkModule.HTTP_ENABLED.getKey(), true);
 try {
 internalCluster().startNode(nodeSettings);
@@ -108,7 +108,7 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase {

 for (String repoVersion : repoVersions) {
 if (expectedVersions.remove(repoVersion) == false) {
-logger.warn("Old repositories tests contain extra repo: " + repoVersion);
+logger.warn("Old repositories tests contain extra repo: {}", repoVersion);
 }
 }
 if (expectedVersions.isEmpty() == false) {
@@ -36,7 +36,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 public class StaticIndexBackwardCompatibilityIT extends ESIntegTestCase {

 public void loadIndex(String index, Object... settings) throws Exception {
-logger.info("Checking static index " + index);
+logger.info("Checking static index {}", index);
 Settings nodeSettings = prepareBackwardsDataDir(getDataPath(index + ".zip"), settings);
 internalCluster().startNode(nodeSettings);
 ensureGreen(index);
@@ -608,13 +608,13 @@ public class ClusterServiceIT extends ESIntegTestCase {

 @Override
 public void onMaster() {
-logger.info("on master [" + clusterService.localNode() + "]");
+logger.info("on master [{}]", clusterService.localNode());
 master = true;
 }

 @Override
 public void offMaster() {
-logger.info("off master [" + clusterService.localNode() + "]");
+logger.info("off master [{}]", clusterService.localNode());
 master = false;
 }

@@ -235,7 +235,7 @@ public class NoMasterNodeIT extends ESIntegTestCase {
 ensureSearchable("test1", "test2");

 ClusterStateResponse clusterState = client().admin().cluster().prepareState().get();
-logger.info("Cluster state:\n" + clusterState.getState().prettyPrint());
+logger.info("Cluster state:\n{}", clusterState.getState().prettyPrint());

 internalCluster().stopRandomDataNode();
 assertTrue(awaitBusy(() -> {
@@ -163,7 +163,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
 for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStoreStatuses : storeStatuses) {
 int shardId = shardStoreStatuses.key;
 IndicesShardStoresResponse.StoreStatus storeStatus = randomFrom(shardStoreStatuses.value);
-logger.info("--> adding allocation command for shard " + shardId);
+logger.info("--> adding allocation command for shard {}", shardId);
 // force allocation based on node id
 if (useStaleReplica) {
 rerouteBuilder.add(new AllocateStalePrimaryAllocationCommand("test", shardId, storeStatus.getNode().getId(), true));
@@ -63,7 +63,7 @@ public class RoutingTableTests extends ESAllocationTestCase {
 this.numberOfReplicas = randomIntBetween(1, 5);
 this.shardsPerIndex = this.numberOfShards * (this.numberOfReplicas + 1);
 this.totalNumberOfShards = this.shardsPerIndex * 2;
-logger.info("Setup test with " + this.numberOfShards + " shards and " + this.numberOfReplicas + " replicas.");
+logger.info("Setup test with {} shards and {} replicas.", this.numberOfShards, this.numberOfReplicas);
 this.emptyRoutingTable = new RoutingTable.Builder().build();
 MetaData metaData = MetaData.builder()
 .put(createIndexMetaData(TEST_INDEX_1))

@@ -81,7 +81,7 @@ public class RoutingTableTests extends ESAllocationTestCase {
 * puts primary shard routings into initializing state
 */
 private void initPrimaries() {
-logger.info("adding " + (this.numberOfReplicas + 1) + " nodes and performing rerouting");
+logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1);
 Builder discoBuilder = DiscoveryNodes.builder();
 for (int i = 0; i < this.numberOfReplicas + 1; i++) {
 discoBuilder = discoBuilder.put(newNode("node" + i));

@@ -95,7 +95,7 @@ public class RoutingTableTests extends ESAllocationTestCase {

 private void startInitializingShards(String index) {
 this.clusterState = ClusterState.builder(clusterState).routingTable(this.testRoutingTable).build();
-logger.info("start primary shards for index " + index);
+logger.info("start primary shards for index {}", index);
 RoutingAllocation.Result rerouteResult = ALLOCATION_SERVICE.applyStartedShards(this.clusterState, this.clusterState.getRoutingNodes().shardsWithState(index, INITIALIZING));
 this.clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
 this.testRoutingTable = rerouteResult.routingTable();
@@ -301,7 +301,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {

 RoutingTable routingTable = routingTableBuilder.build();

-logger.info("start " + numberOfNodes + " nodes");
+logger.info("start {} nodes", numberOfNodes);
 DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
 for (int i = 0; i < numberOfNodes; i++) {
 nodes.put(newNode("node" + i));
@@ -221,18 +221,10 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
 routingTable = strategy.reroute(clusterState, "reroute").routingTable();
 clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

-for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(INITIALIZING)) {
-logger.info(shard.toString());
-}
-for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(STARTED)) {
-logger.info(shard.toString());
-}
-for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(RELOCATING)) {
-logger.info(shard.toString());
-}
-for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) {
-logger.info(shard.toString());
-}
+logger.info("Initializing shards: {}", clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
+logger.info("Started shards: {}", clusterState.getRoutingNodes().shardsWithState(STARTED));
+logger.info("Relocating shards: {}", clusterState.getRoutingNodes().shardsWithState(RELOCATING));
+logger.info("Unassigned shards: {}", clusterState.getRoutingNodes().shardsWithState(UNASSIGNED));

 assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5));
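The AwarenessAllocationTests hunk above goes a step further: instead of looping over a collection and logging each element, the whole collection is handed to a single placeholder, which renders it through its toString(). A minimal sketch of that simplification, assuming a hypothetical List<ShardRouting> shards variable:

    // One parameterized call replaces the per-element loop:
    //   for (ShardRouting shard : shards) {
    //       logger.info(shard.toString());
    //   }
    logger.info("Initializing shards: {}", shards);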
@@ -147,12 +147,12 @@ public abstract class CatAllocationTestCase extends ESAllocationTestCase {
 if (initializing.isEmpty()) {
 break;
 }
-logger.debug(initializing.toString());
+logger.debug("Initializing shards: {}", initializing);
 numRelocations += initializing.size();
 routingTable = strategy.applyStartedShards(clusterState, initializing).routingTable();
 clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
 }
-logger.debug("--> num relocations to get balance: " + numRelocations);
+logger.debug("--> num relocations to get balance: {}", numRelocations);
 return clusterState;
 }

@@ -409,14 +409,16 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
 String fromId = r.currentNodeId();
 assertThat(fromId, notNullValue());
 assertThat(toId, notNullValue());
-logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
+logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, routingNodes.node(fromId).node().version(),
+    toId, routingNodes.node(toId).node().version());
 assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version()));
 } else {
 ShardRouting primary = routingNodes.activePrimary(r);
 assertThat(primary, notNullValue());
 String fromId = primary.currentNodeId();
 String toId = r.relocatingNodeId();
-logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
+logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, routingNodes.node(fromId).node().version(),
+    toId, routingNodes.node(toId).node().version());
 assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version()));
 }
 }

@@ -428,7 +430,8 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
 assertThat(primary, notNullValue());
 String fromId = primary.currentNodeId();
 String toId = r.currentNodeId();
-logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
+logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, routingNodes.node(fromId).node().version(),
+    toId, routingNodes.node(toId).node().version());
 assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version()));
 }
 }
@@ -212,7 +212,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
 assertThat(shardRouting.getIndexName(), equalTo("test1"));
 }

-logger.info("update " + ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey() + " for test, see that things move");
+logger.info("update {} for test, see that things move", ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey());
 metaData = MetaData.builder(metaData)
 .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
@@ -55,7 +55,7 @@ public class CacheTests extends ESTestCase {
 public void setUp() throws Exception {
 super.setUp();
 numberOfEntries = randomIntBetween(1000, 10000);
-logger.debug("numberOfEntries: " + numberOfEntries);
+logger.debug("numberOfEntries: {}", numberOfEntries);
 }

 // cache some entries, then randomly lookup keys that do not exist, then check the stats
@@ -40,7 +40,7 @@ public class WriteConsistencyLevelIT extends ESIntegTestCase {
 prepareCreate("test", 1, Settings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 2)).execute().actionGet();

 ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(1).setWaitForYellowStatus().execute().actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

@@ -60,7 +60,7 @@ public class WriteConsistencyLevelIT extends ESIntegTestCase {
 allowNodes("test", 2);

 clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(2).setWaitForYellowStatus().execute().actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

@@ -82,7 +82,7 @@ public class WriteConsistencyLevelIT extends ESIntegTestCase {

 allowNodes("test", 3);
 clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(3).setWaitForGreenStatus().execute().actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));

@@ -208,7 +208,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {

 // Figure out what is the elected master node
 final String masterNode = internalCluster().getMasterName();
-logger.info("---> legit elected master node=" + masterNode);
+logger.info("---> legit elected master node={}", masterNode);

 // Pick a node that isn't the elected master.
 Set<String> nonMasters = new HashSet<>(nodes);

@@ -496,7 +496,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 }

 int docsPerIndexer = randomInt(3);
-logger.info("indexing " + docsPerIndexer + " docs per indexer before partition");
+logger.info("indexing {} docs per indexer before partition", docsPerIndexer);
 countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size()));
 for (Semaphore semaphore : semaphores) {
 semaphore.release(docsPerIndexer);

@@ -508,7 +508,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 disruptionScheme.startDisrupting();

 docsPerIndexer = 1 + randomInt(5);
-logger.info("indexing " + docsPerIndexer + " docs per indexer during partition");
+logger.info("indexing {} docs per indexer during partition", docsPerIndexer);
 countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size()));
 Collections.shuffle(semaphores, random());
 for (Semaphore semaphore : semaphores) {

@@ -539,11 +539,11 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 }
 } finally {
 if (exceptedExceptions.size() > 0) {
-StringBuilder sb = new StringBuilder("Indexing exceptions during disruption:");
+StringBuilder sb = new StringBuilder();
 for (Exception e : exceptedExceptions) {
 sb.append("\n").append(e.getMessage());
 }
-logger.debug(sb.toString());
+logger.debug("Indexing exceptions during disruption: {}", sb);
 }
 logger.info("shutting down indexers");
 stop.set(true);

@@ -731,7 +731,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 IndexResponse indexResponse = internalCluster().client(notIsolatedNode).prepareIndex("test", "type").setSource("field", "value").get();
 assertThat(indexResponse.getVersion(), equalTo(1L));

-logger.info("Verifying if document exists via node[" + notIsolatedNode + "]");
+logger.info("Verifying if document exists via node[{}]", notIsolatedNode);
 GetResponse getResponse = internalCluster().client(notIsolatedNode).prepareGet("test", "type", indexResponse.getId())
 .setPreference("_local")
 .get();

@@ -745,7 +745,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 ensureGreen("test");

 for (String node : nodes) {
-logger.info("Verifying if document exists after isolating node[" + isolatedNode + "] via node[" + node + "]");
+logger.info("Verifying if document exists after isolating node[{}] via node[{}]", isolatedNode, node);
 getResponse = internalCluster().client(node).prepareGet("test", "type", indexResponse.getId())
 .setPreference("_local")
 .get();

@@ -764,7 +764,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 List<String> nodes = startCluster(4, -1, new int[]{0});
 // Figure out what is the elected master node
 final String masterNode = internalCluster().getMasterName();
-logger.info("---> legit elected master node=" + masterNode);
+logger.info("---> legit elected master node={}", masterNode);
 List<String> otherNodes = new ArrayList<>(nodes);
 otherNodes.remove(masterNode);
 otherNodes.remove(nodes.get(0)); // <-- Don't isolate the node that is in the unicast endpoint for all the other nodes.
@@ -225,7 +225,7 @@ public class MetaDataStateFormatTests extends ESTestCase {
 msg.append(" after: [").append(checksumAfterCorruption).append("]");
 msg.append(" checksum value after corruption: ").append(actualChecksumAfterCorruption).append("]");
 msg.append(" file: ").append(fileToCorrupt.getFileName().toString()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString()));
-logger.debug(msg.toString());
+logger.debug("{}", msg.toString());
 assumeTrue("Checksum collision - " + msg.toString(),
 checksumAfterCorruption != checksumBeforeCorruption // collision
 || actualChecksumAfterCorruption != checksumBeforeCorruption); // checksum corrupted
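When the message itself is dynamic, as with the StringBuilder in the MetaDataStateFormatTests hunk above, the idiom still applies: the format string stays a constant "{}" and the assembled text becomes the sole argument. A minimal sketch mirroring the hunk, with a shortened hypothetical builder:

    StringBuilder msg = new StringBuilder("checksum before: [").append(checksumBeforeCorruption).append("]");
    // The literal "{}" keeps the call statically checkable even though
    // the payload is built at runtime.
    logger.debug("{}", msg.toString());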
@@ -82,7 +82,7 @@ public class QuorumGatewayIT extends ESIntegTestCase {
 assertTrue(awaitBusy(() -> {
 logger.info("--> running cluster_health (wait for the shards to startup)");
 ClusterHealthResponse clusterHealth = activeClient.admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("2").waitForActiveShards(test.numPrimaries * 2)).actionGet();
-logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+logger.info("--> done cluster_health, status {}", clusterHealth.getStatus());
 return (!clusterHealth.isTimedOut()) && clusterHealth.getStatus() == ClusterHealthStatus.YELLOW;
 }, 30, TimeUnit.SECONDS));
 logger.info("--> one node is closed -- index 1 document into the remaining nodes");
@@ -145,7 +145,7 @@ public class FilterFieldDataTests extends AbstractFieldDataTestCase {
 }
 writer.addDocument(d);
 }
-logger.debug(hundred + " " + ten + " " + five);
+logger.debug("{} {} {}", hundred, ten, five);
 writer.forceMerge(1, true);
 LeafReaderContext context = refreshReader();
 String[] formats = new String[] { "paged_bytes"};
@@ -272,7 +272,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {

 DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
 String mapping = mappingBuilder.endObject().endObject().bytes().toUtf8();
-logger.info(mapping);
+logger.info("Mapping: {}", mapping);
 DocumentMapper docMapper = parser.parse("test", new CompressedXContent(mapping));
 String builtMapping = docMapper.mappingSource().string();
 // reparse it
@@ -112,7 +112,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase {
 assertThat(mappingMetaData, not(nullValue()));
 Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
 Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource));
-logger.info("Keys: " + aField.keySet());
+logger.info("Keys: {}", aField.keySet());
 assertThat(aField.size(), equalTo(2));
 assertThat(aField.get("type").toString(), equalTo("geo_point"));
 assertThat(aField.get("fields"), notNullValue());
@@ -77,7 +77,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase {

 logger.info("Running Cluster Health");
 ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

@@ -92,7 +92,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase {
 // first wait for 2 nodes in the cluster
 logger.info("Running Cluster Health");
 clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
 final String node2 = getLocalNodeId(server_2);

@@ -171,7 +171,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase {
 // verify health
 logger.info("Running Cluster Health");
 clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));

@@ -282,7 +282,7 @@ public class IndicesRequestCacheTests extends ESTestCase {
 assertEquals("foo", value1.toUtf8());
 BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes());
 assertEquals("bar", value2.toUtf8());
-logger.info(requestCacheStats.stats().getMemorySize().toString());
+logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize());
 BytesReference value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes());
 assertEquals("baz", value3.toUtf8());
 assertEquals(2, cache.count());

@@ -319,7 +319,7 @@ public class IndicesRequestCacheTests extends ESTestCase {
 assertEquals("foo", value1.toUtf8());
 BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes());
 assertEquals("bar", value2.toUtf8());
-logger.info(requestCacheStats.stats().getMemorySize().toString());
+logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize());
 BytesReference value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes());
 assertEquals("baz", value3.toUtf8());
 assertEquals(3, cache.count());
@@ -48,7 +48,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
 assertAcked(prepareCreate("test", 2));
 logger.info("Running Cluster Health");
 ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());

 NumShards numShards = getNumShards("test");

@@ -75,7 +75,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
 assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 2)).execute().actionGet());
 logger.info("Running Cluster Health");
 clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
 assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));

@@ -88,7 +88,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

 logger.info("Running Cluster Health");
 clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes(">=3").execute().actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
 assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));

@@ -106,7 +106,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

 logger.info("Running Cluster Health");
 clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes(">=3").execute().actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
 assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));

@@ -128,7 +128,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

 logger.info("--> running cluster health");
 ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
-logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
 assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));

@@ -140,7 +140,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

 logger.info("--> running cluster health");
 clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 3).setWaitForNodes(">=3").execute().actionGet();
-logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
 assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));

@@ -153,7 +153,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

 logger.info("--> running cluster health");
 clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 2).setWaitForNodes(">=2").execute().actionGet();
-logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
 assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));

@@ -166,7 +166,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

 logger.info("--> running cluster health");
 clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=1").setWaitForActiveShards(numShards.numPrimaries).execute().actionGet();
-logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
 assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));

@@ -183,7 +183,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

 logger.info("--> running cluster health");
 ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
-logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
 assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));

@@ -195,7 +195,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

 logger.info("--> running cluster health");
 clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 3).execute().actionGet();
-logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
 assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));

@@ -208,7 +208,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

 logger.info("--> running cluster health");
 clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=2").setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
-logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
 assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));

@@ -221,7 +221,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

 logger.info("--> running cluster health");
 clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNodes(">=1").setWaitForActiveShards(numShards.numPrimaries).execute().actionGet();
-logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
 assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));

@@ -237,7 +237,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

 logger.info("--> running cluster health");
 ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 3).execute().actionGet();
-logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
 assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));

@@ -253,7 +253,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {

 logger.info("--> running cluster health");
 clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 4).execute().actionGet();
-logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
|
logger.info("--> done cluster health, status {}", clusterHealth.getStatus());
|
||||||
assertThat(clusterHealth.isTimedOut(), equalTo(false));
|
assertThat(clusterHealth.isTimedOut(), equalTo(false));
|
||||||
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
|
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
|
||||||
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
|
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
|
||||||
|
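Note on the pattern this commit applies throughout: string concatenation builds the log message eagerly and leaves no constant format string for static checks, while a "{}" placeholder message is a compile-time constant whose rendering is deferred. A minimal sketch of the two forms, reusing the clusterHealth variable from the hunks above:

    // Before: the message is assembled on every call, even when INFO is
    // disabled, and the text cannot be verified by a static checker.
    logger.info("--> done cluster health, status " + clusterHealth.getStatus());

    // After: constant format string; getStatus() is still evaluated here,
    // but the message itself is rendered lazily and is statically checkable.
    logger.info("--> done cluster health, status {}", clusterHealth.getStatus());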
@@ -258,7 +258,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
 assertThat(waitForShardDeletion(node_3, "test", 0), equalTo(false));
 
 Path server2Shard = shardDirectory(node_2, "test", 0);
-logger.info("--> stopping node " + node_2);
+logger.info("--> stopping node {}", node_2);
 internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_2));
 
 logger.info("--> running cluster_health");
@@ -268,7 +268,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
 .setWaitForRelocatingShards(0)
 .get();
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
-logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+logger.info("--> done cluster_health, status {}", clusterHealth.getStatus());
 
 assertThat(Files.exists(server2Shard), equalTo(true));
 
@@ -131,7 +131,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
 .addField("field1").addField("field2")
 .execute().actionGet();
 if (searchResponse.getFailedShards() > 0) {
-logger.warn("failed search " + Arrays.toString(searchResponse.getShardFailures()));
+logger.warn("failed search {}", Arrays.toString(searchResponse.getShardFailures()));
 }
 assertHitCount(searchResponse, 1);
 assertThat(searchResponse.getHits().getAt(0).field("field1").value().toString(), equalTo("value1"));
@@ -47,11 +47,11 @@ public class SimpleNodesInfoIT extends ESIntegTestCase {
 final String node_2 = nodesIds.get(1);
 
 ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get();
-logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+logger.info("--> done cluster_health, status {}", clusterHealth.getStatus());
 
 String server1NodeId = internalCluster().getInstance(ClusterService.class, node_1).state().nodes().localNodeId();
 String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().localNodeId();
-logger.info("--> started nodes: " + server1NodeId + " and " + server2NodeId);
+logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId);
 
 NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().execute().actionGet();
 assertThat(response.getNodes().length, is(2));
@@ -91,11 +91,11 @@ public class SimpleNodesInfoIT extends ESIntegTestCase {
 final String node_2 = nodesIds.get(1);
 
 ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get();
-logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+logger.info("--> done cluster_health, status {}", clusterHealth.getStatus());
 
 String server1NodeId = internalCluster().getInstance(ClusterService.class, node_1).state().nodes().localNodeId();
 String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().localNodeId();
-logger.info("--> started nodes: " + server1NodeId + " and " + server2NodeId);
+logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId);
 
 NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().execute().actionGet();
 
@@ -1072,7 +1072,7 @@ public class PercolatorIT extends ESIntegTestCase {
 int numLevels = randomIntBetween(1, 25);
 long numQueriesPerLevel = randomIntBetween(10, 250);
 long totalQueries = numLevels * numQueriesPerLevel;
-logger.info("--> register " + totalQueries + " queries");
+logger.info("--> register {} queries", totalQueries);
 for (int level = 1; level <= numLevels; level++) {
 for (int query = 1; query <= numQueriesPerLevel; query++) {
 client().prepareIndex("my-index", PercolatorService.TYPE_NAME, level + "-" + query)
@@ -1166,7 +1166,7 @@ public class PercolatorIT extends ESIntegTestCase {
 
 Map<Integer, NavigableSet<Integer>> controlMap = new HashMap<>();
 long numQueries = randomIntBetween(100, 250);
-logger.info("--> register " + numQueries + " queries");
+logger.info("--> register {} queries", numQueries);
 for (int i = 0; i < numQueries; i++) {
 int value = randomInt(10);
 client().prepareIndex("my-index", PercolatorService.TYPE_NAME, Integer.toString(i))
@@ -131,7 +131,7 @@ public class RecoveryPercolatorIT extends ESIntegTestCase {
 
 logger.info("Running Cluster Health (wait for the shards to startup)");
 ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 SearchResponse countResponse = client().prepareSearch().setSize(0).setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).get();
 assertHitCount(countResponse, 1L);
@@ -140,7 +140,7 @@ public class RecoveryPercolatorIT extends ESIntegTestCase {
 assertThat(actionGet.isAcknowledged(), equalTo(true));
 assertAcked(prepareCreate("test").addMapping("type1", "field1", "type=text").addMapping(PercolatorService.TYPE_NAME, "color", "type=text"));
 clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(client().prepareSearch().setSize(0).setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).get().getHits().totalHits(), equalTo(0L));
 
@@ -123,7 +123,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
 public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
 if (action.equals(RecoveryTargetService.Actions.FILE_CHUNK)) {
 RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
-logger.debug("file chunk [" + req.toString() + "] lastChunk: " + req.lastChunk());
+logger.debug("file chunk [{}] lastChunk: {}", req, req.lastChunk());
 if ((req.name().endsWith("cfs") || req.name().endsWith("fdt")) && req.lastChunk() && truncate.get()) {
 latch.countDown();
 throw new RuntimeException("Caused some truncated files for fun and profit");
@@ -185,7 +185,7 @@ children("to_comment", "comment")
 assertThat(categoryTerms.getBuckets().size(), equalTo(3));
 
 for (Terms.Bucket bucket : categoryTerms.getBuckets()) {
-logger.info("bucket=" + bucket.getKey());
+logger.info("bucket={}", bucket.getKey());
 Children childrenBucket = bucket.getAggregations().get("to_comment");
 TopHits topHits = childrenBucket.getAggregations().get("top_comments");
 logger.info("total_hits={}", topHits.getHits().getTotalHits());
@@ -84,7 +84,7 @@ public class NestedIT extends ESIntegTestCase {
 numParents = randomIntBetween(3, 10);
 numChildren = new int[numParents];
 aggCollectionMode = randomFrom(SubAggCollectionMode.values());
-logger.info("AGG COLLECTION MODE: " + aggCollectionMode);
+logger.info("AGG COLLECTION MODE: {}", aggCollectionMode);
 int totalChildren = 0;
 for (int i = 0; i < numParents; ++i) {
 if (i == numParents - 1 && totalChildren == 0) {
@@ -433,7 +433,7 @@ public class TopHitsIT extends ESIntegTestCase {
 assertThat(hits.totalHits(), equalTo(controlHits.totalHits()));
 assertThat(hits.getHits().length, equalTo(controlHits.getHits().length));
 for (int i = 0; i < hits.getHits().length; i++) {
-logger.info(i + ": top_hits: [" + hits.getAt(i).id() + "][" + hits.getAt(i).sortValues()[0] + "] control: [" + controlHits.getAt(i).id() + "][" + controlHits.getAt(i).sortValues()[0] + "]");
+logger.info("{}: top_hits: [{}][{}] control: [{}][{}]", i, hits.getAt(i).id(), hits.getAt(i).sortValues()[0], controlHits.getAt(i).id(), controlHits.getAt(i).sortValues()[0]);
 assertThat(hits.getAt(i).id(), equalTo(controlHits.getAt(i).id()));
 assertThat(hits.getAt(i).sortValues()[0], equalTo(controlHits.getAt(i).sortValues()[0]));
 }
@@ -609,7 +609,7 @@ public class TopHitsIT extends ESIntegTestCase {
 public void testTrackScores() throws Exception {
 boolean[] trackScores = new boolean[]{true, false};
 for (boolean trackScore : trackScores) {
-logger.info("Track score=" + trackScore);
+logger.info("Track score={}", trackScore);
 SearchResponse response = client().prepareSearch("idx").setTypes("field-collapsing")
 .setQuery(matchQuery("text", "term rare"))
 .addAggregation(terms("terms")
@@ -142,7 +142,7 @@ public class SearchWhileRelocatingIT extends ESIntegTestCase {
 }
 assertThat("numberOfReplicas: " + numberOfReplicas + " failed in iteration " + i + ", verification: " + verified, thrownExceptions, Matchers.emptyIterable());
 // if we hit only non-critical exceptions we only make sure that the post search works
-logger.info("Non-CriticalExceptions: " + nonCriticalExceptions.toString());
+logger.info("Non-CriticalExceptions: {}", nonCriticalExceptions);
 assertThat("numberOfReplicas: " + numberOfReplicas + " failed in iteration " + i + ", verification: " + verified, postSearchOK, is(true));
 }
 }
@@ -90,7 +90,7 @@ public class TransportSearchFailuresIT extends ESIntegTestCase {
 .cluster()
 .health(clusterHealthRequest("test").waitForYellowStatus().waitForRelocatingShards(0)
 .waitForActiveShards(test.totalNumShards)).actionGet();
-logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), anyOf(equalTo(ClusterHealthStatus.YELLOW), equalTo(ClusterHealthStatus.GREEN)));
 assertThat(clusterHealth.getActiveShards(), equalTo(test.totalNumShards));
@@ -299,7 +299,7 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase {
 // Create a random geometry collection.
 GeometryCollectionBuilder gcb = RandomShapeGenerator.createGeometryCollection(getRandom());
 
-logger.info("Created Random GeometryCollection containing " + gcb.numShapes() + " shapes");
+logger.info("Created Random GeometryCollection containing {} shapes", gcb.numShapes());
 
 client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape,tree=quadtree")
 .execute().actionGet();
@@ -2044,7 +2044,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
 .query(multiMatchQueryBuilder)
 .highlighter(highlight().highlightQuery(randomBoolean() ? multiMatchQueryBuilder : null).highlighterType(highlighterType)
 .field(new Field("field1").requireFieldMatch(true).preTags("<field1>").postTags("</field1>")));
-logger.info("Running multi-match type: [" + matchQueryType + "] highlight with type: [" + highlighterType + "]");
+logger.info("Running multi-match type: [{}] highlight with type: [{}]", matchQueryType, highlighterType);
 SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
 assertHitCount(searchResponse, 1L);
 assertHighlight(searchResponse, 0, "field1", 0, anyOf(equalTo("<field1>The quick brown fox</field1> jumps over"),
@@ -419,7 +419,7 @@ public class MoreLikeThisIT extends ESIntegTestCase {
 .minTermFreq(1)
 .minDocFreq(1)
 .minimumShouldMatch(minimumShouldMatch);
-logger.info("Testing with minimum_should_match = " + minimumShouldMatch);
+logger.info("Testing with minimum_should_match = {}", minimumShouldMatch);
 SearchResponse response = client().prepareSearch("test").setTypes("type1")
 .setQuery(mltQuery).get();
 assertSearchResponse(response);
@@ -72,7 +72,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
 int iters = between(20, 100);
 for (int i = 0; i < iters; i++) {
 QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3);
-logger.info(q.toString());
+logger.info("Query: {}", q);
 
 SearchResponse resp = client().prepareSearch()
 .setQuery(q)
@@ -126,8 +126,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
 int iters = between(1, 10);
 for (int i = 0; i < iters; i++) {
 QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3);
-logger.info(q.toString());
-
+logger.info("Query: {}", q);
 
 SearchRequestBuilder vanilla = client().prepareSearch("test")
 .setQuery(q)
@@ -309,7 +308,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
 refresh();
 
 QueryBuilder q = QueryBuilders.boolQuery();
-logger.info(q.toString());
+logger.info("Query: {}", q);
 
 SearchResponse resp = client().prepareSearch()
 .setQuery(q)
@@ -360,8 +359,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
 
 QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one"))));
 
-
-logger.info(q.toString());
+logger.info("Query: {}", q);
 
 SearchResponse resp = client().prepareSearch()
 .setQuery(q)
@@ -408,7 +406,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
 QueryBuilder q = QueryBuilders.boostingQuery(QueryBuilders.matchQuery("field1", "one"), QueryBuilders.matchQuery("field1", "two"))
 .boost(randomFloat())
 .negativeBoost(randomFloat());
-logger.info(q.toString());
+logger.info("Query: {}", q);
 
 SearchResponse resp = client().prepareSearch()
 .setQuery(q)
@@ -455,7 +453,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
 QueryBuilder q = QueryBuilders.disMaxQuery()
 .boost(0.33703882f)
 .add(QueryBuilders.rangeQuery("field2").from(null).to(73).includeLower(true).includeUpper(true));
-logger.info(q.toString());
+logger.info("Query: {}", q);
 
 SearchResponse resp = client().prepareSearch()
 .setQuery(q)
@@ -501,7 +499,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
 
 QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5);
 
-logger.info(q.toString());
+logger.info("Query: {}", q.toString());
 
 SearchResponse resp = client().prepareSearch()
 .setQuery(q)
@@ -547,7 +545,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
 
 QueryBuilder q = QueryBuilders.matchPhraseQuery("field1", "one two");
 
-logger.info(q.toString());
+logger.info("Query: {}", q);
 
 SearchResponse resp = client().prepareSearch()
 .setQuery(q)
@@ -559,7 +557,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
 
 if (resp.getShardFailures().length > 0) {
 for (ShardSearchFailure f : resp.getShardFailures()) {
-logger.error(f.toString());
+logger.error("Shard search failure: {}", f);
 }
 fail();
 }
@@ -603,7 +601,7 @@ public class QueryProfilerIT extends ESIntegTestCase {
 refresh();
 QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5);
 
-logger.info(q.toString());
+logger.info("Query: {}", q);
 
 SearchResponse resp = client().prepareSearch().setQuery(q).setProfile(false).execute().actionGet();
 assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0));
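The QueryProfilerIT hunks above also stop calling q.toString() at the call site (one spot keeps "Query: {}", q.toString()); handing the QueryBuilder itself to the logger defers the relatively expensive toString() rendering until the message is actually written. A sketch of the preferred form, assuming any QueryBuilder q:

    QueryBuilder q = QueryBuilders.matchPhraseQuery("field1", "one two");
    // The logger invokes q.toString() only if this log level is enabled.
    logger.info("Query: {}", q);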
@@ -282,8 +282,8 @@ public class DuelScrollIT extends ESIntegTestCase {
 }
 assertEquals(control.getHits().getTotalHits(), scrollDocs);
 } catch (AssertionError e) {
-logger.info("Control:\n" + control);
-logger.info("Scroll size=" + size + ", from=" + scrollDocs + ":\n" + scroll);
+logger.info("Control:\n{}", control);
+logger.info("Scroll size={}, from={}:\n{}", size, scrollDocs, scroll);
 throw e;
 } finally {
 clearScroll(scroll.getScrollId());
@@ -78,8 +78,8 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
 GeoPoint[] d2Points = {new GeoPoint(5, 1), new GeoPoint(6, 2)};
 createShuffeldJSONArray(d2Builder, d2Points);
 
-logger.info(d1Builder.string());
-logger.info(d2Builder.string());
+logger.info("d1: {}", d1Builder);
+logger.info("d2: {}", d2Builder);
 indexRandom(true,
 client().prepareIndex("index", "type", "d1").setSource(d1Builder),
 client().prepareIndex("index", "type", "d2").setSource(d2Builder));
@@ -761,7 +761,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
 Client client = client();
 
 Path repo = randomRepoPath();
-logger.info("--> creating repository at " + repo.toAbsolutePath());
+logger.info("--> creating repository at {}", repo.toAbsolutePath());
 assertAcked(client.admin().cluster().preparePutRepository("test-repo")
 .setType("fs").setSettings(Settings.settingsBuilder()
 .put("location", repo)
@@ -817,7 +817,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
 Client client = client();
 
 Path repo = randomRepoPath();
-logger.info("--> creating repository at " + repo.toAbsolutePath());
+logger.info("--> creating repository at {}", repo.toAbsolutePath());
 assertAcked(client.admin().cluster().preparePutRepository("test-repo")
 .setType("fs").setSettings(Settings.settingsBuilder()
 .put("location", repo)
@@ -855,7 +855,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
 Client client = client();
 
 Path repo = randomRepoPath();
-logger.info("--> creating repository at " + repo.toAbsolutePath());
+logger.info("--> creating repository at {}", repo.toAbsolutePath());
 assertAcked(client.admin().cluster().preparePutRepository("test-repo")
 .setType("fs").setSettings(Settings.settingsBuilder()
 .put("location", repo)
@@ -889,7 +889,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
 Client client = client();
 
 Path repo = randomRepoPath();
-logger.info("--> creating repository at " + repo.toAbsolutePath());
+logger.info("--> creating repository at {}", repo.toAbsolutePath());
 assertAcked(client.admin().cluster().preparePutRepository("test-repo")
 .setType("fs").setSettings(Settings.settingsBuilder()
 .put("location", repo)
@@ -2159,7 +2159,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
 public void testListCorruptedSnapshot() throws Exception {
 Client client = client();
 Path repo = randomRepoPath();
-logger.info("--> creating repository at " + repo.toAbsolutePath());
+logger.info("--> creating repository at {}", repo.toAbsolutePath());
 assertAcked(client.admin().cluster().preparePutRepository("test-repo")
 .setType("fs").setSettings(Settings.settingsBuilder()
 .put("location", repo)
@@ -215,7 +215,7 @@ public class SnapshotBackwardsCompatibilityIT extends ESBackcompatTestCase {
 logger.info("--> move from 0 to 1 replica");
 client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)).get();
 }
-logger.debug("---> repo exists: " + Files.exists(tempDir.resolve("indices/test/0")) + " files: " + Arrays.toString(FileSystemUtils.files(tempDir.resolve("indices/test/0")))); // it's only one shard!
+logger.debug("---> repo exists: {} files: {}", Files.exists(tempDir.resolve("indices/test/0")), Arrays.toString(FileSystemUtils.files(tempDir.resolve("indices/test/0")))); // it's only one shard!
 CreateSnapshotResponse createSnapshotResponseSecond = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-1").setWaitForCompletion(true).setIndices("test").get();
 assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), greaterThan(0));
 assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseSecond.getSnapshotInfo().totalShards()));
@@ -120,7 +120,7 @@ public class MockRepository extends FsRepository {
 blockOnInitialization = repositorySettings.settings().getAsBoolean("block_on_init", false);
 randomPrefix = repositorySettings.settings().get("random", "default");
 waitAfterUnblock = repositorySettings.settings().getAsLong("wait_after_unblock", 0L);
-logger.info("starting mock repository with random prefix " + randomPrefix);
+logger.info("starting mock repository with random prefix {}", randomPrefix);
 mockBlobStore = new MockBlobStore(super.blobStore());
 }
 
@@ -157,7 +157,7 @@ public class NettyTransportIT extends ESIntegTestCase {
 try {
 transportChannel.sendResponse(e);
 } catch (IOException e1) {
-logger.warn("Failed to send error message back to client for action [" + action + "]", e);
+logger.warn("Failed to send error message back to client for action [{}]", e, action);
 logger.warn("Actual Exception", e1);
 }
 }
@@ -194,7 +194,7 @@ public class NettyTransportIT extends ESIntegTestCase {
 try {
 transportChannel.sendResponse(e);
 } catch (Throwable e1) {
-logger.warn("Failed to send error message back to client for action [" + reg.getAction() + "]", e1);
+logger.warn("Failed to send error message back to client for action [{}]", e1, reg.getAction());
 logger.warn("Actual Exception", e);
 }
 } }
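Worth noting in the two NettyTransportIT hunks above: the Throwable is passed between the format string and the placeholder arguments. That matches the ESLogger convention used throughout this commit, apparently warn(String format, Throwable cause, Object... params), which differs from SLF4J, where the Throwable comes last. A sketch mirroring the call above:

    } catch (Throwable e1) {
        // Cause first, then the value substituted for {}; the stack trace of
        // e1 is still printed, and the format string stays a constant.
        logger.warn("Failed to send error message back to client for action [{}]", e1, reg.getAction());
    }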
@@ -316,7 +316,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
 });
 } catch (Throwable e) {
 if (logger.isTraceEnabled()) {
-logger.trace("failed to run " + compiledScript, e);
+logger.trace("failed to run {}", e, compiledScript);
 }
 throw new ScriptException("failed to run " + compiledScript, e);
 }
@@ -332,7 +332,7 @@ public class EquivalenceTests extends ESIntegTestCase {
 createIndex("idx");
 
 final int numDocs = scaledRandomIntBetween(2500, 5000);
-logger.info("Indexing [" + numDocs +"] docs");
+logger.info("Indexing [{}] docs", numDocs);
 List<IndexRequestBuilder> indexingRequests = new ArrayList<>();
 for (int i = 0; i < numDocs; ++i) {
 indexingRequests.add(client().prepareIndex("idx", "type", Integer.toString(i)).setSource("double_value", randomDouble()));
@@ -543,7 +543,7 @@ public class ExtendedStatsTests extends AbstractNumericTestCase {
 ShardSearchFailure[] failures = response.getShardFailures();
 if (failures.length != expectedFailures) {
 for (ShardSearchFailure failure : failures) {
-logger.error("Shard Failure: {}", failure);
+logger.error("Shard Failure: {}", failure.getCause(), failure);
 }
 fail("Unexpected shard failures!");
 }
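In this hunk (and the identical one in StatsTests below) the change is not only cosmetic: failure.getCause() is now passed in the Throwable slot, so the shard failure's underlying stack trace is logged alongside the rendered failure summary. Sketch:

    for (ShardSearchFailure failure : failures) {
        // Throwable slot gets the cause; {} is filled with the failure itself.
        logger.error("Shard Failure: {}", failure.getCause(), failure);
    }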
@@ -116,7 +116,7 @@ public class SearchStatsTests extends ESIntegTestCase {
 }
 
 IndicesStatsResponse indicesStats = client().admin().indices().prepareStats().execute().actionGet();
-logger.debug("###### indices search stats: " + indicesStats.getTotal().getSearch());
+logger.debug("###### indices search stats: {}", indicesStats.getTotal().getSearch());
 assertThat(indicesStats.getTotal().getSearch().getTotal().getQueryCount(), greaterThan(0L));
 assertThat(indicesStats.getTotal().getSearch().getTotal().getQueryTimeInMillis(), greaterThan(0L));
 assertThat(indicesStats.getTotal().getSearch().getTotal().getFetchCount(), greaterThan(0L));
@@ -404,7 +404,7 @@ public class StatsTests extends AbstractNumericTestCase {
 ShardSearchFailure[] failures = response.getShardFailures();
 if (failures.length != expectedFailures) {
 for (ShardSearchFailure failure : failures) {
-logger.error("Shard Failure: {}", failure);
+logger.error("Shard Failure: {}", failure.getCause(), failure);
 }
 fail("Unexpected shard failures!");
 }
@@ -243,7 +243,7 @@ public class StringTermsTests extends AbstractTermsTestCase {
 ExecutionMode[] executionModes = new ExecutionMode[] { null, ExecutionMode.GLOBAL_ORDINALS, ExecutionMode.GLOBAL_ORDINALS_HASH,
 ExecutionMode.GLOBAL_ORDINALS_LOW_CARDINALITY };
 for (ExecutionMode executionMode : executionModes) {
-logger.info("Execution mode:" + executionMode);
+logger.info("Execution mode: {}", executionMode);
 SearchResponse response = client()
 .prepareSearch("idx")
 .setTypes("type")
@@ -191,7 +191,7 @@ public final class MustacheScriptEngineService extends AbstractComponent impleme
 }
 });
 } catch (Exception e) {
-logger.error("Error running " + template, e);
+logger.error("Error running {}", e, template);
 throw new ScriptException("Error running " + template, e);
 }
 return result.bytes();
@@ -55,7 +55,7 @@ public class AwsSigner {
 try {
 validateSignerType(signer);
 } catch (IllegalArgumentException e) {
-logger.warn(e.getMessage());
+logger.warn("{}", e.getMessage());
 }
 
 configuration.setSignerOverride(signer);
@@ -64,7 +64,7 @@ public class AwsSigner {
 try {
 validateSignerType(signer, endpoint);
 } catch (IllegalArgumentException e) {
-logger.warn(e.getMessage());
+logger.warn("{}", e.getMessage());
 }
 
 configuration.setSignerOverride(signer);
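The AwsSigner hunks wrap a runtime value in an explicit "{}" format: logger.warn(e.getMessage()) passed a variable string where the static check introduced by this commit expects a constant, whereas logger.warn("{}", e.getMessage()) keeps the format string constant and treats the exception text purely as data. Sketch:

    } catch (IllegalArgumentException e) {
        // Constant format string; the message is an argument, not a format.
        logger.warn("{}", e.getMessage());
    }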
@@ -88,11 +88,11 @@ public final class CorruptionUtils {
 // we need to add assumptions here that the checksums actually really don't match there is a small chance to get collisions
 // in the checksum which is ok though....
 StringBuilder msg = new StringBuilder();
-msg.append("Checksum before: [").append(checksumBeforeCorruption).append("]");
-msg.append(" after: [").append(checksumAfterCorruption).append("]");
-msg.append(" checksum value after corruption: ").append(actualChecksumAfterCorruption).append("]");
-msg.append(" file: ").append(fileToCorrupt.getFileName()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString()));
-logger.info(msg.toString());
+msg.append("before: [").append(checksumBeforeCorruption).append("] ");
+msg.append("after: [").append(checksumAfterCorruption).append("] ");
+msg.append("checksum value after corruption: ").append(actualChecksumAfterCorruption).append("] ");
+msg.append("file: ").append(fileToCorrupt.getFileName()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString()));
+logger.info("Checksum {}", msg);
 assumeTrue("Checksum collision - " + msg.toString(),
 checksumAfterCorruption != checksumBeforeCorruption // collision
 || actualChecksumAfterCorruption != checksumBeforeCorruption); // checksum corrupted
@@ -883,7 +883,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
 sb.append("\n-> _index: [").append(hit.getIndex()).append("] type [").append(hit.getType())
 .append("] id [").append(hit.id()).append("]");
 }
-logger.warn(sb.toString());
+logger.warn("{}", sb);
 fail(failMsg);
 }
 }
@@ -151,8 +151,7 @@ public class RestClient implements Closeable {
 
 HttpRequestBuilder httpRequestBuilder = callApiBuilder(apiName, requestParams, body);
 for (Map.Entry<String, String> header : headers.entrySet()) {
-logger.error("Adding header " + header.getKey());
-logger.error(" with value " + header.getValue());
+logger.error("Adding header {}\n with value {}", header.getKey(), header.getValue());
 httpRequestBuilder.addHeader(header.getKey(), header.getValue());
 }
 logger.debug("calling api [{}]", apiName);
@@ -61,7 +61,7 @@ public class HttpResponse {
 try {
 httpResponse.close();
 } catch (IOException e) {
-logger.error(e.getMessage(), e);
+logger.error("Failed closing response", e);
 }
 }
 } else {