commit df03ed94a0
    merge from master
@@ -97,6 +97,7 @@ subprojects {
 // the "value" -quiet is added, separated by a space. This is ok since the javadoc
 // command already adds -quiet, so we are just duplicating it
 // see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959
+javadoc.options.encoding='UTF8'
 javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet')
 }
 }

@@ -78,7 +78,7 @@ class RandomizedTestingTask extends DefaultTask {
 @Input
 String argLine = null

-Map<String, String> systemProperties = new HashMap<>()
+Map<String, Object> systemProperties = new HashMap<>()
 PatternFilterable patternSet = new PatternSet()

 RandomizedTestingTask() {
@@ -100,7 +100,7 @@ class RandomizedTestingTask extends DefaultTask {
 jvmArgs.add(argument)
 }

-void systemProperty(String property, String value) {
+void systemProperty(String property, Object value) {
 systemProperties.put(property, value)
 }

@@ -245,8 +245,8 @@ class RandomizedTestingTask extends DefaultTask {
 exclude(name: excludePattern)
 }
 }
-for (Map.Entry<String, String> prop : systemProperties) {
-sysproperty key: prop.getKey(), value: prop.getValue()
+for (Map.Entry<String, Object> prop : systemProperties) {
+sysproperty key: prop.getKey(), value: prop.getValue().toString()
 }
 makeListeners()
 }

@@ -61,11 +61,14 @@ public class ForbiddenPatternsTask extends DefaultTask {
 // add mandatory rules
 patterns.put('nocommit', /nocommit/)
 patterns.put('tab', /\t/)
+
+inputs.property("excludes", filesFilter.excludes)
+inputs.property("rules", patterns)
 }

 /** Adds a file glob pattern to be excluded */
 public void exclude(String... excludes) {
-this.filesFilter.exclude(excludes)
+filesFilter.exclude(excludes)
 }

 /** Adds a pattern to forbid. T */

@@ -33,10 +33,10 @@ class ClusterConfiguration {
 int numNodes = 1

 @Input
-int baseHttpPort = 9400
+int httpPort = 0

 @Input
-int baseTransportPort = 9500
+int transportPort = 0

 @Input
 boolean daemonize = true
@@ -55,7 +55,7 @@ class ClusterConfiguration {
 @Input
 Closure waitCondition = { NodeInfo node, AntBuilder ant ->
 File tmpFile = new File(node.cwd, 'wait.success')
-ant.get(src: "http://localhost:${node.httpPort()}",
+ant.get(src: "http://${node.httpUri()}",
 dest: tmpFile.toString(),
 ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
 retries: 10)

@@ -38,8 +38,10 @@ class ClusterFormationTasks {

 /**
  * Adds dependent tasks to the given task to start and stop a cluster with the given configuration.
+ *
+ * Returns an object that will resolve at execution time of the given task to a uri for the cluster.
  */
-static void setup(Project project, Task task, ClusterConfiguration config) {
+static Object setup(Project project, Task task, ClusterConfiguration config) {
 if (task.getEnabled() == false) {
 // no need to add cluster formation tasks if the task won't run!
 return
@@ -55,6 +57,9 @@ class ClusterFormationTasks {

 Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks)
 task.dependsOn(wait)
+
+// delay the resolution of the uri by wrapping in a closure, so it is not used until read for tests
+return "${-> nodes[0].transportUri()}"
 }

 /** Adds a dependency on the given distribution */
@@ -200,17 +205,24 @@ class ClusterFormationTasks {
 /** Adds a task to write elasticsearch.yml for the given node configuration */
 static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node) {
 Map esConfig = [
-'cluster.name' : node.clusterName,
-'http.port' : node.httpPort(),
-'transport.tcp.port' : node.transportPort(),
-'pidfile' : node.pidFile,
-'discovery.zen.ping.unicast.hosts': (0..<node.config.numNodes).collect{"127.0.0.1:${node.config.baseTransportPort + it}"}.join(','),
-'path.repo' : "${node.homeDir}/repo",
-'path.shared_data' : "${node.homeDir}/../",
-// Define a node attribute so we can test that it exists
-'node.testattr' : 'test',
-'repositories.url.allowed_urls' : 'http://snapshot.test*'
+'cluster.name' : node.clusterName,
+'pidfile' : node.pidFile,
+'path.repo' : "${node.homeDir}/repo",
+'path.shared_data' : "${node.homeDir}/../",
+// Define a node attribute so we can test that it exists
+'node.testattr' : 'test',
+'repositories.url.allowed_urls': 'http://snapshot.test*'
 ]
+if (node.config.numNodes == 1) {
+esConfig['http.port'] = node.config.httpPort
+esConfig['transport.tcp.port'] = node.config.transportPort
+} else {
+// TODO: fix multi node so it doesn't use hardcoded prots
+esConfig['http.port'] = 9400 + node.nodeNum
+esConfig['transport.tcp.port'] = 9500 + node.nodeNum
+esConfig['discovery.zen.ping.unicast.hosts'] = (0..<node.config.numNodes).collect{"localhost:${9500 + it}"}.join(',')
+
+}
 esConfig.putAll(node.config.settings)

 Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
@@ -400,7 +412,12 @@ class ClusterFormationTasks {
 resourceexists {
 file(file: node.pidFile.toString())
 }
-socket(server: '127.0.0.1', port: node.httpPort())
+resourceexists {
+file(file: node.httpPortsFile.toString())
+}
+resourceexists {
+file(file: node.transportPortsFile.toString())
+}
 }
 }
 }
@@ -444,6 +461,8 @@ class ClusterFormationTasks {
 logger.error("|-----------------------------------------")
 logger.error("| failure marker exists: ${node.failedMarker.exists()}")
 logger.error("| pid file exists: ${node.pidFile.exists()}")
+logger.error("| http ports file exists: ${node.httpPortsFile.exists()}")
+logger.error("| transport ports file exists: ${node.transportPortsFile.exists()}")
 // the waitfor failed, so dump any output we got (if info logging this goes directly to stdout)
 logger.error("|\n| [ant output]")
 node.buffer.toString('UTF-8').eachLine { line -> logger.error("| ${line}") }

@@ -43,6 +43,12 @@ class NodeInfo {
 /** the pid file the node will use */
 File pidFile
+
+/** a file written by elasticsearch containing the ports of each bound address for http */
+File httpPortsFile
+
+/** a file written by elasticsearch containing the ports of each bound address for transport */
+File transportPortsFile

 /** elasticsearch home dir */
 File homeDir

@@ -92,6 +98,10 @@ class NodeInfo {
 homeDir = homeDir(baseDir, config.distribution)
 confDir = confDir(baseDir, config.distribution)
 configFile = new File(confDir, 'elasticsearch.yml')
+// even for rpm/deb, the logs are under home because we dont start with real services
+File logsDir = new File(homeDir, 'logs')
+httpPortsFile = new File(logsDir, 'http.ports')
+transportPortsFile = new File(logsDir, 'transport.ports')
 cwd = new File(baseDir, "cwd")
 failedMarker = new File(cwd, 'run.failed')
 startLog = new File(cwd, 'run.log')
@@ -119,6 +129,7 @@ class NodeInfo {
 'JAVA_HOME' : project.javaHome,
 'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
 ]
+args.add("-Des.tests.portsfile=true")
 args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" })
 for (Map.Entry<String, String> property : System.properties.entrySet()) {
 if (property.getKey().startsWith('es.')) {
@@ -159,14 +170,14 @@ class NodeInfo {
 wrapperScript.setText("\"${esScript}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
 }

-/** Returns the http port for this node */
-int httpPort() {
-return config.baseHttpPort + nodeNum
+/** Returns an address and port suitable for a uri to connect to this node over http */
+String httpUri() {
+return httpPortsFile.readLines("UTF-8").get(0)
 }

-/** Returns the transport port for this node */
-int transportPort() {
-return config.baseTransportPort + nodeNum
+/** Returns an address and port suitable for a uri to connect to this node over transport protocol */
+String transportUri() {
+return transportPortsFile.readLines("UTF-8").get(0)
 }

 /** Returns the directory elasticsearch home is contained in for the given distribution */

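Note on the NodeInfo change above: httpUri() and transportUri() now return whatever address the node actually bound to, taken from the first line of the ports file that Elasticsearch writes when -Des.tests.portsfile=true is passed. As a rough illustration only (plain JDK, hypothetical paths, not code from this commit), reading such a file back could look like:

    import java.io.IOException;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.List;

    public class PortsFileExample {
        public static void main(String[] args) throws IOException {
            // hypothetical location; the diff above puts the file under <homeDir>/logs/http.ports
            Path portsFile = Paths.get("build", "cluster", "node0", "logs", "http.ports");
            List<String> lines = Files.readAllLines(portsFile, StandardCharsets.UTF_8);
            String addressAndPort = lines.get(0);          // e.g. "127.0.0.1:39200"
            URL url = new URL("http://" + addressAndPort); // usable for the wait task or tests.cluster
            System.out.println(url);
        }
    }
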
@@ -57,12 +57,12 @@ public class RestIntegTestTask extends RandomizedTestingTask {
 RestSpecHack.configureDependencies(project)
 project.afterEvaluate {
 dependsOn(RestSpecHack.configureTask(project, includePackaged))
-systemProperty('tests.cluster', "localhost:${clusterConfig.baseTransportPort}")
 }
 // this must run after all projects have been configured, so we know any project
 // references can be accessed as a fully configured
 project.gradle.projectsEvaluated {
-ClusterFormationTasks.setup(project, this, clusterConfig)
+Object clusterUri = ClusterFormationTasks.setup(project, this, clusterConfig)
+systemProperty('tests.cluster', clusterUri)
 }
 }

@@ -8,7 +8,7 @@ import org.gradle.util.ConfigureUtil

 public class RunTask extends DefaultTask {

-ClusterConfiguration clusterConfig = new ClusterConfiguration(baseHttpPort: 9200, baseTransportPort: 9300, daemonize: false)
+ClusterConfiguration clusterConfig = new ClusterConfiguration(httpPort: 9200, transportPort: 9300, daemonize: false)

 public RunTask() {
 description = "Runs elasticsearch with '${project.path}'"

@@ -123,3 +123,6 @@ java.lang.System#getProperties() @ Use BootstrapInfo.getSystemProperties for a r
 java.util.Collections#EMPTY_LIST
 java.util.Collections#EMPTY_MAP
 java.util.Collections#EMPTY_SET
+
+java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness
+java.util.Random#<init>() @ Use org.elasticsearch.common.random.Randomness#create for reproducible sources of randomness

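For context, the reproducible alternative that the new Collections#shuffle signature points to is simply passing an explicitly seeded Random. A minimal JDK-only sketch (not part of the commit):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Random;

    public class SeededShuffleExample {
        public static void main(String[] args) {
            List<Integer> list = new ArrayList<>();
            for (int i = 0; i < 5; i++) {
                list.add(i);
            }
            Collections.shuffle(list, new Random(42L)); // same seed -> same order on every run
            System.out.println(list);
        }
    }
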
@@ -54,7 +54,6 @@ public class MapperQueryParser extends QueryParser {
 static {
 Map<String, FieldQueryExtension> fieldQueryExtensions = new HashMap<>();
 fieldQueryExtensions.put(ExistsFieldQueryExtension.NAME, new ExistsFieldQueryExtension());
-fieldQueryExtensions.put(MissingFieldQueryExtension.NAME, new MissingFieldQueryExtension());
 FIELD_QUERY_EXTENSIONS = unmodifiableMap(fieldQueryExtensions);
 }

@@ -31,6 +31,7 @@ import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
 import org.elasticsearch.cluster.metadata.RepositoryMetaData;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.repositories.RepositoryMissingException;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -38,7 +39,9 @@ import org.elasticsearch.transport.TransportService;

 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.LinkedHashSet;
 import java.util.List;
+import java.util.Set;

 /**
  * Transport action for get repositories operation
@@ -78,8 +81,20 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadActio
 }
 } else {
 if (repositories != null) {
+Set<String> repositoriesToGet = new LinkedHashSet<>(); // to keep insertion order
+for (String repositoryOrPattern : request.repositories()) {
+if (Regex.isSimpleMatchPattern(repositoryOrPattern) == false) {
+repositoriesToGet.add(repositoryOrPattern);
+} else {
+for (RepositoryMetaData repository : repositories.repositories()) {
+if (Regex.simpleMatch(repositoryOrPattern, repository.name())) {
+repositoriesToGet.add(repository.name());
+}
+}
+}
+}
 List<RepositoryMetaData> repositoryListBuilder = new ArrayList<>();
-for (String repository : request.repositories()) {
+for (String repository : repositoriesToGet) {
 RepositoryMetaData repositoryMetaData = repositories.repository(repository);
 if (repositoryMetaData == null) {
 listener.onFailure(new RepositoryMissingException(repository));

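The resolution logic added above keeps literal names as-is, expands wildcard patterns against the known repository names, and uses a LinkedHashSet so request order is preserved while duplicates are dropped. A self-contained sketch of the same idea (simpleMatch below is a simplified stand-in for org.elasticsearch.common.regex.Regex#simpleMatch, not the real implementation):

    import java.util.Arrays;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    public class WildcardResolveExample {
        // stand-in for Regex.simpleMatch: supports a single '*' wildcard
        static boolean simpleMatch(String pattern, String value) {
            int star = pattern.indexOf('*');
            if (star == -1) {
                return pattern.equals(value);
            }
            return value.startsWith(pattern.substring(0, star))
                    && value.endsWith(pattern.substring(star + 1));
        }

        public static void main(String[] args) {
            List<String> known = Arrays.asList("backup-1", "backup-2", "archive");
            String[] requested = {"archive", "backup-*"};
            Set<String> toGet = new LinkedHashSet<>(); // keeps insertion order
            for (String nameOrPattern : requested) {
                if (nameOrPattern.contains("*") == false) {
                    toGet.add(nameOrPattern);
                } else {
                    for (String name : known) {
                        if (simpleMatch(nameOrPattern, name)) {
                            toGet.add(name);
                        }
                    }
                }
            }
            System.out.println(toGet); // [archive, backup-1, backup-2]
        }
    }
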
@@ -29,6 +29,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.SnapshotId;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.snapshots.Snapshot;
 import org.elasticsearch.snapshots.SnapshotInfo;
@@ -38,7 +39,9 @@ import org.elasticsearch.transport.TransportService;

 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.LinkedHashSet;
 import java.util.List;
+import java.util.Set;

 /**
  * Transport Action for get snapshots operation
@@ -84,8 +87,24 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
 snapshotInfoBuilder.add(new SnapshotInfo(snapshot));
 }
 } else {
-for (int i = 0; i < request.snapshots().length; i++) {
-SnapshotId snapshotId = new SnapshotId(request.repository(), request.snapshots()[i]);
+Set<String> snapshotsToGet = new LinkedHashSet<>(); // to keep insertion order
+List<Snapshot> snapshots = null;
+for (String snapshotOrPattern : request.snapshots()) {
+if (Regex.isSimpleMatchPattern(snapshotOrPattern) == false) {
+snapshotsToGet.add(snapshotOrPattern);
+} else {
+if (snapshots == null) { // lazily load snapshots
+snapshots = snapshotsService.snapshots(request.repository(), request.ignoreUnavailable());
+}
+for (Snapshot snapshot : snapshots) {
+if (Regex.simpleMatch(snapshotOrPattern, snapshot.name())) {
+snapshotsToGet.add(snapshot.name());
+}
+}
+}
+}
+for (String snapshot : snapshotsToGet) {
+SnapshotId snapshotId = new SnapshotId(request.repository(), snapshot);
 snapshotInfoBuilder.add(new SnapshotInfo(snapshotsService.snapshot(snapshotId)));
 }
 }

@@ -51,12 +51,12 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
 builder.startObject(opType);
 if (failure == null) {
 response.toXContent(builder, params);
-builder.field(Fields.STATUS, response.status());
+builder.field(Fields.STATUS, response.status().getStatus());
 } else {
 builder.field(Fields._INDEX, failure.getIndex());
 builder.field(Fields._TYPE, failure.getType());
 builder.field(Fields._ID, failure.getId());
-builder.field(Fields.STATUS, failure.getStatus());
+builder.field(Fields.STATUS, failure.getStatus().getStatus());
 builder.startObject(Fields.ERROR);
 ElasticsearchException.toXContent(builder, params, failure.getCause());
 builder.endObject();

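The effect of appending getStatus() above is that the bulk item "status" field is written as the numeric HTTP code rather than the enum value. A toy sketch of the difference (the RestStatus enum here is a stand-in, not the Elasticsearch class):

    public class StatusFieldExample {
        // stand-in mirroring the shape of org.elasticsearch.rest.RestStatus
        enum RestStatus {
            CREATED(201), NOT_FOUND(404);
            private final int status;
            RestStatus(int status) { this.status = status; }
            int getStatus() { return status; }
        }

        public static void main(String[] args) {
            RestStatus status = RestStatus.CREATED;
            System.out.println("\"status\": \"" + status + "\"");    // before: "status": "CREATED"
            System.out.println("\"status\": " + status.getStatus()); // after:  "status": 201
        }
    }
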
@@ -238,17 +238,7 @@ public class PercolateSourceBuilder extends ToXContentToBytes {

 @Override
 public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-XContentType contentType = XContentFactory.xContentType(doc);
-if (contentType == builder.contentType()) {
-builder.rawField("doc", doc);
-} else {
-try (XContentParser parser = XContentFactory.xContent(contentType).createParser(doc)) {
-parser.nextToken();
-builder.field("doc");
-builder.copyCurrentStructure(parser);
-}
-}
-return builder;
+return builder.rawField("doc", doc);
 }
 }

@@ -19,8 +19,16 @@

 package org.elasticsearch.action.support.broadcast.node;

-import org.elasticsearch.action.*;
-import org.elasticsearch.action.support.*;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.IndicesRequest;
+import org.elasticsearch.action.NoShardAvailableActionException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.TransportActions;
 import org.elasticsearch.action.support.broadcast.BroadcastRequest;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
@@ -37,7 +45,14 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.BaseTransportResponseHandler;
+import org.elasticsearch.transport.NodeShouldNotConnectException;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestHandler;
+import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportService;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -394,7 +409,15 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
 e.setIndex(shardRouting.getIndex());
 e.setShard(shardRouting.shardId());
 shardResults[shardIndex] = e;
-logger.debug("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary());
+if (TransportActions.isShardNotAvailableException(t)) {
+if (logger.isTraceEnabled()) {
+logger.trace("[{}] failed to execute operation for shard [{}]", t, actionName, shardRouting.shortSummary());
+}
+} else {
+if (logger.isDebugEnabled()) {
+logger.debug("[{}] failed to execute operation for shard [{}]", t, actionName, shardRouting.shortSummary());
+}
+}
 }
 }
 }

@@ -300,11 +300,15 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 @Override
 public void onFailure(Throwable t) {
 if (t instanceof RetryOnReplicaException) {
-logger.trace("Retrying operation on replica, action [{}], request [{}]", t, actionName, request);
+logger.trace("Retrying operation on replica, action [{}], request [{}]", t, transportReplicaAction, request);
 observer.waitForNextChange(new ClusterStateObserver.Listener() {
 @Override
 public void onNewClusterState(ClusterState state) {
-threadPool.executor(executor).execute(AsyncReplicaAction.this);
+// Forking a thread on local node via transport service so that custom transport service have an
+// opportunity to execute custom logic before the replica operation begins
+String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
+TransportChannelResponseHandler<TransportResponse.Empty> handler = TransportChannelResponseHandler.emptyResponseHandler(logger, channel, extraMessage);
+transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler);
 }

 @Override

@@ -164,10 +164,6 @@ public class ClusterModule extends AbstractModule {
 registerClusterDynamicSetting(IndicesTTLService.INDICES_TTL_INTERVAL, Validator.TIME);
 registerClusterDynamicSetting(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT, Validator.TIME);
 registerClusterDynamicSetting(MetaData.SETTING_READ_ONLY, Validator.EMPTY);
-registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, Validator.POSITIVE_BYTES_SIZE);
-registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS, Validator.INTEGER);
-registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE, Validator.BYTES_SIZE);
-registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_COMPRESS, Validator.EMPTY);
 registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, Validator.POSITIVE_INTEGER);
 registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, Validator.POSITIVE_INTEGER);
 registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);

@@ -746,8 +746,6 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
 /** All known byte-sized cluster settings. */
 public static final Set<String> CLUSTER_BYTES_SIZE_SETTINGS = unmodifiableSet(newHashSet(
 IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC,
-RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE,
-RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE,
 RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC));


@@ -37,7 +37,6 @@ import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.NodeServicesProvider;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.InvalidTypeNameException;
 import org.elasticsearch.percolator.PercolatorService;
@@ -251,11 +250,8 @@ public class MetaDataMappingService extends AbstractComponent {
 newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
 if (existingMapper != null) {
-// first, simulate
-MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
-// if we have conflicts, throw an exception
-if (mergeResult.hasConflicts()) {
-throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(mergeResult.buildConflicts()) + "}");
-}
+// this will just throw exceptions in case of problems
+existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
 } else {
 // TODO: can we find a better place for this validation?
 // The reason this validation is here is that the mapper service doesn't learn about

@@ -27,18 +27,11 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.index.shard.ShardId;

-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import java.util.*;
 import java.util.function.Predicate;

 /**
@@ -673,7 +666,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
 }

 public void shuffle() {
-Collections.shuffle(unassigned);
+Randomness.shuffle(unassigned);
 }

 /**

@@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
 import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;

@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.common.settings.Settings;
+
+import java.lang.reflect.Method;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ * Provides factory methods for producing reproducible sources of
+ * randomness. Reproducible sources of randomness contribute to
+ * reproducible tests. When running the Elasticsearch test suite, the
+ * test runner will establish a global random seed accessible via the
+ * system property "tests.seed". By seeding a random number generator
+ * with this global seed, we ensure that instances of Random produced
+ * with this class produce reproducible sources of randomness under
+ * when running under the Elasticsearch test suite. Alternatively,
+ * a reproducible source of randomness can be produced by providing a
+ * setting a reproducible seed. When running the Elasticsearch server
+ * process, non-reproducible sources of randomness are provided (unless
+ * a setting is provided for a module that exposes a seed setting (e.g.,
+ * DiscoveryService#SETTING_DISCOVERY_SEED)).
+ */
+public final class Randomness {
+private static final Method currentMethod;
+private static final Method getRandomMethod;
+
+static {
+Method maybeCurrentMethod;
+Method maybeGetRandomMethod;
+try {
+Class<?> clazz = Class.forName("com.carrotsearch.randomizedtesting.RandomizedContext");
+maybeCurrentMethod = clazz.getMethod("current");
+maybeGetRandomMethod = clazz.getMethod("getRandom");
+} catch (Throwable t) {
+maybeCurrentMethod = null;
+maybeGetRandomMethod = null;
+}
+currentMethod = maybeCurrentMethod;
+getRandomMethod = maybeGetRandomMethod;
+}
+
+private Randomness() {}
+
+/**
+ * Provides a reproducible source of randomness seeded by a long
+ * seed in the settings with the key setting.
+ *
+ * @param settings the settings containing the seed
+ * @param setting the key to access the seed
+ * @return a reproducible source of randomness
+ */
+public static Random get(Settings settings, String setting) {
+Long maybeSeed = settings.getAsLong(setting, null);
+if (maybeSeed != null) {
+return new Random(maybeSeed);
+} else {
+return get();
+}
+}
+
+/**
+ * Provides a source of randomness that is reproducible when
+ * running under the Elasticsearch test suite, and otherwise
+ * produces a non-reproducible source of randomness. Reproducible
+ * sources of randomness are created when the system property
+ * "tests.seed" is set and the security policy allows reading this
+ * system property. Otherwise, non-reproducible sources of
+ * randomness are created.
+ *
+ * @return a source of randomness
+ * @throws IllegalStateException if running tests but was not able
+ *                               to acquire an instance of Random from
+ *                               RandomizedContext or tests are
+ *                               running but tests.seed is not set
+ */
+public static Random get() {
+if (currentMethod != null && getRandomMethod != null) {
+try {
+Object randomizedContext = currentMethod.invoke(null);
+return (Random) getRandomMethod.invoke(randomizedContext);
+} catch (ReflectiveOperationException e) {
+// unexpected, bail
+throw new IllegalStateException("running tests but failed to invoke RandomizedContext#getRandom", e);
+}
+} else {
+return getWithoutSeed();
+}
+}
+
+private static Random getWithoutSeed() {
+assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random";
+return ThreadLocalRandom.current();
+}
+
+public static void shuffle(List<?> list) {
+Collections.shuffle(list, get());
+}
+}

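A minimal usage sketch that compiles against the Randomness class added above (not itself part of the commit). Under the test runner the result is driven by the global tests.seed; outside of it the helper falls back to ThreadLocalRandom:

    import org.elasticsearch.common.Randomness;

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Random;

    public class RandomnessExample {
        public static void main(String[] args) {
            Random random = Randomness.get();
            List<Integer> ids = new ArrayList<>();
            for (int i = 0; i < 5; i++) {
                ids.add(i);
            }
            Randomness.shuffle(ids); // reproducible when a global test seed is in effect
            System.out.println(ids + " " + random.nextInt(100));
        }
    }
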
@@ -20,6 +20,8 @@
 package org.elasticsearch.common.io;

+import java.nio.charset.StandardCharsets;

+import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.common.util.Callback;

 import java.io.BufferedReader;
@@ -68,6 +70,7 @@ public abstract class Streams {
 public static long copy(InputStream in, OutputStream out, byte[] buffer) throws IOException {
 Objects.requireNonNull(in, "No InputStream specified");
 Objects.requireNonNull(out, "No OutputStream specified");
+boolean success = false;
 try {
 long byteCount = 0;
 int bytesRead;
@@ -76,17 +79,13 @@ public abstract class Streams {
 byteCount += bytesRead;
 }
 out.flush();
+success = true;
 return byteCount;
 } finally {
-try {
-in.close();
-} catch (IOException ex) {
-// do nothing
-}
-try {
-out.close();
-} catch (IOException ex) {
-// do nothing
+if (success) {
+IOUtils.close(in, out);
+} else {
+IOUtils.closeWhileHandlingException(in, out);
 }
 }
 }
@@ -130,6 +129,7 @@ public abstract class Streams {
 public static int copy(Reader in, Writer out) throws IOException {
 Objects.requireNonNull(in, "No Reader specified");
 Objects.requireNonNull(out, "No Writer specified");
+boolean success = false;
 try {
 int byteCount = 0;
 char[] buffer = new char[BUFFER_SIZE];
@@ -139,17 +139,13 @@ public abstract class Streams {
 byteCount += bytesRead;
 }
 out.flush();
+success = true;
 return byteCount;
 } finally {
-try {
-in.close();
-} catch (IOException ex) {
-// do nothing
-}
-try {
-out.close();
-} catch (IOException ex) {
-// do nothing
+if (success) {
+IOUtils.close(in, out);
+} else {
+IOUtils.closeWhileHandlingException(in, out);
 }
 }
 }

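The close handling introduced above separates the success and failure paths: on success IOUtils.close lets any close failure propagate, while on failure IOUtils.closeWhileHandlingException suppresses secondary exceptions so the original one is not masked. A standalone sketch of the same pattern (assumes lucene-core on the classpath; not code from this commit):

    import org.apache.lucene.util.IOUtils;

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.nio.charset.StandardCharsets;

    public class CopyExample {
        static long copy(InputStream in, OutputStream out) throws IOException {
            byte[] buffer = new byte[8192];
            boolean success = false;
            try {
                long count = 0;
                int read;
                while ((read = in.read(buffer)) != -1) {
                    out.write(buffer, 0, read);
                    count += read;
                }
                out.flush();
                success = true;
                return count;
            } finally {
                if (success) {
                    IOUtils.close(in, out);                       // report close failures
                } else {
                    IOUtils.closeWhileHandlingException(in, out); // keep the original exception
                }
            }
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            long copied = copy(new ByteArrayInputStream("hello".getBytes(StandardCharsets.UTF_8)), out);
            System.out.println(copied + " bytes: " + out.toString("UTF-8"));
        }
    }
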
@@ -150,7 +150,7 @@ public class Regex {
 pFlags |= Pattern.LITERAL;
 } else if ("COMMENTS".equals(s)) {
 pFlags |= Pattern.COMMENTS;
-} else if ("UNICODE_CHAR_CLASS".equals(s)) {
+} else if (("UNICODE_CHAR_CLASS".equals(s)) || ("UNICODE_CHARACTER_CLASS".equals(s))) {
 pFlags |= UNICODE_CHARACTER_CLASS;
 } else {
 throw new IllegalArgumentException("Unknown regex flag [" + s + "]");

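For reference, the flag name accepted above maps onto java.util.regex.Pattern.UNICODE_CHARACTER_CLASS, which makes predefined classes such as \w match their Unicode forms. A small JDK-only illustration (not part of the commit):

    import java.util.regex.Pattern;

    public class RegexFlagExample {
        public static void main(String[] args) {
            Pattern ascii = Pattern.compile("\\w+");
            Pattern unicode = Pattern.compile("\\w+", Pattern.UNICODE_CHARACTER_CLASS);
            System.out.println(ascii.matcher("héllo").matches());   // false: 'é' is not an ASCII word char
            System.out.println(unicode.matcher("héllo").matches()); // true with UNICODE_CHARACTER_CLASS
        }
    }
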
@@ -45,11 +45,6 @@ public interface XContent {
  */
 XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException;

-/**
- * Creates a new generator using the provided writer.
- */
-XContentGenerator createGenerator(Writer writer) throws IOException;
-
 /**
  * Creates a parser over the provided string content.
  */

@@ -920,23 +920,18 @@ public final class XContentBuilder implements BytesStream, Releasable {
 return this;
 }

-public XContentBuilder rawField(String fieldName, byte[] content) throws IOException {
-generator.writeRawField(fieldName, content, bos);
-return this;
-}
-
-public XContentBuilder rawField(String fieldName, byte[] content, int offset, int length) throws IOException {
-generator.writeRawField(fieldName, content, offset, length, bos);
-return this;
-}
-
-public XContentBuilder rawField(String fieldName, InputStream content, XContentType contentType) throws IOException {
-generator.writeRawField(fieldName, content, bos, contentType);
+public XContentBuilder rawField(String fieldName, InputStream content) throws IOException {
+generator.writeRawField(fieldName, content);
 return this;
 }

 public XContentBuilder rawField(String fieldName, BytesReference content) throws IOException {
-generator.writeRawField(fieldName, content, bos);
+generator.writeRawField(fieldName, content);
 return this;
 }

+public XContentBuilder rawValue(BytesReference content) throws IOException {
+generator.writeRawValue(content);
+return this;
+}
+
@@ -1202,24 +1197,12 @@ public final class XContentBuilder implements BytesStream, Releasable {
 return this.generator;
 }

-public OutputStream stream() {
-return this.bos;
-}
-
 @Override
 public BytesReference bytes() {
 close();
 return ((BytesStream) bos).bytes();
 }

-/**
- * Returns the actual stream used.
- */
-public BytesStream bytesStream() throws IOException {
-close();
-return (BytesStream) bos;
-}
-
 /**
  * Returns a string representation of the builder (only applicable for text based xcontent).
  */

@@ -24,7 +24,6 @@ import org.elasticsearch.common.bytes.BytesReference;
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.OutputStream;

 /**
  *
@@ -112,13 +111,11 @@ public interface XContentGenerator extends Closeable {

 void writeObjectFieldStart(XContentString fieldName) throws IOException;

-void writeRawField(String fieldName, byte[] content, OutputStream bos) throws IOException;
+void writeRawField(String fieldName, InputStream content) throws IOException;

-void writeRawField(String fieldName, byte[] content, int offset, int length, OutputStream bos) throws IOException;
+void writeRawField(String fieldName, BytesReference content) throws IOException;

-void writeRawField(String fieldName, InputStream content, OutputStream bos, XContentType contentType) throws IOException;
-
-void writeRawField(String fieldName, BytesReference content, OutputStream bos) throws IOException;
+void writeRawValue(BytesReference content) throws IOException;

 void copyCurrentStructure(XContentParser parser) throws IOException;

@@ -27,7 +27,6 @@ import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.compress.Compressor;
 import org.elasticsearch.common.compress.CompressorFactory;
-import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.xcontent.ToXContent.Params;

 import java.io.BufferedInputStream;
@@ -102,9 +101,7 @@ public class XContentHelper {
 BytesArray bytesArray = bytes.toBytesArray();
 return new String(bytesArray.array(), bytesArray.arrayOffset(), bytesArray.length(), StandardCharsets.UTF_8);
 }
-XContentParser parser = null;
-try {
-parser = XContentFactory.xContent(xContentType).createParser(bytes.streamInput());
+try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(bytes.streamInput())) {
 parser.nextToken();
 XContentBuilder builder = XContentFactory.jsonBuilder();
 if (prettyPrint) {
@@ -112,10 +109,6 @@ public class XContentHelper {
 }
 builder.copyCurrentStructure(parser);
 return builder.string();
-} finally {
-if (parser != null) {
-parser.close();
-}
 }
 }

@@ -128,9 +121,7 @@ public class XContentHelper {
 if (xContentType == XContentType.JSON && !reformatJson) {
 return new String(data, offset, length, StandardCharsets.UTF_8);
 }
-XContentParser parser = null;
-try {
-parser = XContentFactory.xContent(xContentType).createParser(data, offset, length);
+try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(data, offset, length)) {
 parser.nextToken();
 XContentBuilder builder = XContentFactory.jsonBuilder();
 if (prettyPrint) {
@@ -138,10 +129,6 @@ public class XContentHelper {
 }
 builder.copyCurrentStructure(parser);
 return builder.string();
-} finally {
-if (parser != null) {
-parser.close();
-}
 }
 }

@@ -378,38 +365,6 @@ public class XContentHelper {
 }
 }

-/**
- * Directly writes the source to the output builder
- */
-public static void writeDirect(BytesReference source, XContentBuilder rawBuilder, ToXContent.Params params) throws IOException {
-Compressor compressor = CompressorFactory.compressor(source);
-if (compressor != null) {
-InputStream compressedStreamInput = compressor.streamInput(source.streamInput());
-if (compressedStreamInput.markSupported() == false) {
-compressedStreamInput = new BufferedInputStream(compressedStreamInput);
-}
-XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
-if (contentType == rawBuilder.contentType()) {
-Streams.copy(compressedStreamInput, rawBuilder.stream());
-} else {
-try (XContentParser parser = XContentFactory.xContent(contentType).createParser(compressedStreamInput)) {
-parser.nextToken();
-rawBuilder.copyCurrentStructure(parser);
-}
-}
-} else {
-XContentType contentType = XContentFactory.xContentType(source);
-if (contentType == rawBuilder.contentType()) {
-source.writeTo(rawBuilder.stream());
-} else {
-try (XContentParser parser = XContentFactory.xContent(contentType).createParser(source)) {
-parser.nextToken();
-rawBuilder.copyCurrentStructure(parser);
-}
-}
-}
-}
-
 /**
  * Writes a "raw" (bytes) field, handling cases where the bytes are compressed, and tries to optimize writing using
  * {@link XContentBuilder#rawField(String, org.elasticsearch.common.bytes.BytesReference)}.
@@ -418,30 +373,9 @@ public class XContentHelper {
 Compressor compressor = CompressorFactory.compressor(source);
 if (compressor != null) {
 InputStream compressedStreamInput = compressor.streamInput(source.streamInput());
-if (compressedStreamInput.markSupported() == false) {
-compressedStreamInput = new BufferedInputStream(compressedStreamInput);
-}
-XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
-if (contentType == builder.contentType()) {
-builder.rawField(field, compressedStreamInput, contentType);
-} else {
-try (XContentParser parser = XContentFactory.xContent(contentType).createParser(compressedStreamInput)) {
-parser.nextToken();
-builder.field(field);
-builder.copyCurrentStructure(parser);
-}
-}
+builder.rawField(field, compressedStreamInput);
 } else {
-XContentType contentType = XContentFactory.xContentType(source);
-if (contentType == builder.contentType()) {
-builder.rawField(field, source);
-} else {
-try (XContentParser parser = XContentFactory.xContent(contentType).createParser(source)) {
-parser.nextToken();
-builder.field(field);
-builder.copyCurrentStructure(parser);
-}
-}
+builder.rawField(field, source);
 }
 }
 }

@@ -61,17 +61,12 @@ public class CborXContent implements XContent {

 @Override
 public XContentGenerator createGenerator(OutputStream os) throws IOException {
-return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8));
+return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8), os);
 }

 @Override
 public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
-return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8), filters);
-}
-
-@Override
-public XContentGenerator createGenerator(Writer writer) throws IOException {
-return new CborXContentGenerator(cborFactory.createGenerator(writer));
+return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8), os, filters);
 }

 @Override

@@ -20,13 +20,9 @@
 package org.elasticsearch.common.xcontent.cbor;

 import com.fasterxml.jackson.core.JsonGenerator;
-import com.fasterxml.jackson.dataformat.cbor.CBORParser;
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;

 import java.io.IOException;
-import java.io.InputStream;
 import java.io.OutputStream;

 /**
@@ -34,8 +30,8 @@ import java.io.OutputStream;
  */
 public class CborXContentGenerator extends JsonXContentGenerator {

-public CborXContentGenerator(JsonGenerator jsonGenerator, String... filters) {
-super(jsonGenerator, filters);
+public CborXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
+super(jsonGenerator, os, filters);
 }

 @Override
@@ -49,46 +45,7 @@ public class CborXContentGenerator extends JsonXContentGenerator {
 }

 @Override
-public void writeRawField(String fieldName, InputStream content, OutputStream bos, XContentType contentType) throws IOException {
-writeFieldName(fieldName);
-try (CBORParser parser = CborXContent.cborFactory.createParser(content)) {
-parser.nextToken();
-generator.copyCurrentStructure(parser);
-}
-}
-
-@Override
-public void writeRawField(String fieldName, byte[] content, OutputStream bos) throws IOException {
-writeFieldName(fieldName);
-try (CBORParser parser = CborXContent.cborFactory.createParser(content)) {
-parser.nextToken();
-generator.copyCurrentStructure(parser);
-}
-}
-
-@Override
-protected void writeObjectRaw(String fieldName, BytesReference content, OutputStream bos) throws IOException {
-writeFieldName(fieldName);
-CBORParser parser;
-if (content.hasArray()) {
-parser = CborXContent.cborFactory.createParser(content.array(), content.arrayOffset(), content.length());
-} else {
-parser = CborXContent.cborFactory.createParser(content.streamInput());
-}
-try {
-parser.nextToken();
-generator.copyCurrentStructure(parser);
-} finally {
-parser.close();
-}
-}
-
-@Override
-public void writeRawField(String fieldName, byte[] content, int offset, int length, OutputStream bos) throws IOException {
-writeFieldName(fieldName);
-try (CBORParser parser = CborXContent.cborFactory.createParser(content, offset, length)) {
-parser.nextToken();
-generator.copyCurrentStructure(parser);
-}
+protected boolean supportsRawWrites() {
+return false;
 }
 }

@@ -65,17 +65,12 @@ public class JsonXContent implements XContent {

 @Override
 public XContentGenerator createGenerator(OutputStream os) throws IOException {
-return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8));
+return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8), os);
 }

 @Override
 public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
-return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8), filters);
-}
-
-@Override
-public XContentGenerator createGenerator(Writer writer) throws IOException {
-return new JsonXContentGenerator(jsonFactory.createGenerator(writer));
+return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8), os, filters);
 }

 @Override

@@ -26,13 +26,13 @@ import com.fasterxml.jackson.core.filter.FilteringGeneratorDelegate;
 import com.fasterxml.jackson.core.io.SerializedString;
 import com.fasterxml.jackson.core.util.DefaultIndenter;
 import com.fasterxml.jackson.core.util.DefaultPrettyPrinter;
-import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.util.CollectionUtils;
 import org.elasticsearch.common.xcontent.*;
 import org.elasticsearch.common.xcontent.support.filtering.FilterPathBasedFilter;

+import java.io.BufferedInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -58,11 +58,14 @@ public class JsonXContentGenerator implements XContentGenerator {
  */
 private final FilteringGeneratorDelegate filter;

+private final OutputStream os;
+
 private boolean writeLineFeedAtEnd;
 private static final SerializedString LF = new SerializedString("\n");
 private static final DefaultPrettyPrinter.Indenter INDENTER = new DefaultIndenter(" ", LF.getValue());
+private boolean prettyPrint = false;

-public JsonXContentGenerator(JsonGenerator jsonGenerator, String... filters) {
+public JsonXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
 if (jsonGenerator instanceof GeneratorBase) {
 this.base = (GeneratorBase) jsonGenerator;
 } else {
@@ -76,6 +79,8 @@ public class JsonXContentGenerator implements XContentGenerator {
 this.filter = new FilteringGeneratorDelegate(jsonGenerator, new FilterPathBasedFilter(filters), true, true);
 this.generator = this.filter;
 }
+
+this.os = os;
 }

 @Override
@@ -86,6 +91,7 @@ public class JsonXContentGenerator implements XContentGenerator {
 @Override
 public final void usePrettyPrint() {
 generator.setPrettyPrinter(new DefaultPrettyPrinter().withObjectIndenter(INDENTER));
+prettyPrint = true;
 }

 @Override
@@ -323,22 +329,16 @@ public class JsonXContentGenerator implements XContentGenerator {
 }

 @Override
-public void writeRawField(String fieldName, byte[] content, OutputStream bos) throws IOException {
-writeRawField(fieldName, new BytesArray(content), bos);
-}
-
-@Override
-public void writeRawField(String fieldName, byte[] content, int offset, int length, OutputStream bos) throws IOException {
-writeRawField(fieldName, new BytesArray(content, offset, length), bos);
-}
-
-@Override
-public void writeRawField(String fieldName, InputStream content, OutputStream bos, XContentType contentType) throws IOException {
-if (isFiltered() || (contentType != contentType())) {
-// When the current generator is filtered (ie filter != null)
-// or the content is in a different format than the current generator,
-// we need to copy the whole structure so that it will be correctly
-// filtered or converted
+public void writeRawField(String fieldName, InputStream content) throws IOException {
+if (content.markSupported() == false) {
+// needed for the XContentFactory.xContentType call
+content = new BufferedInputStream(content);
+}
+XContentType contentType = XContentFactory.xContentType(content);
+if (contentType == null) {
+throw new IllegalArgumentException("Can't write raw bytes whose xcontent-type can't be guessed");
+}
+if (mayWriteRawData(contentType) == false) {
 try (XContentParser parser = XContentFactory.xContent(contentType).createParser(content)) {
 parser.nextToken();
 writeFieldName(fieldName);
@@ -347,49 +347,59 @@ public class JsonXContentGenerator implements XContentGenerator {
 } else {
 writeStartRaw(fieldName);
 flush();
-Streams.copy(content, bos);
+Streams.copy(content, os);
 writeEndRaw();
 }
 }

 @Override
-public final void writeRawField(String fieldName, BytesReference content, OutputStream bos) throws IOException {
+public final void writeRawField(String fieldName, BytesReference content) throws IOException {
 XContentType contentType = XContentFactory.xContentType(content);
-if (contentType != null) {
-if (isFiltered() || (contentType != contentType())) {
-// When the current generator is filtered (ie filter != null)
-// or the content is in a different format than the current generator,
-// we need to copy the whole structure so that it will be correctly
-// filtered or converted
-copyRawField(fieldName, content, contentType.xContent());
-} else {
-// Otherwise, the generator is not filtered and has the same type: we can potentially optimize the write
-writeObjectRaw(fieldName, content, bos);
-}
-} else {
+if (contentType == null) {
+throw new IllegalArgumentException("Can't write raw bytes whose xcontent-type can't be guessed");
+}
+if (mayWriteRawData(contentType) == false) {
 writeFieldName(fieldName);
-// we could potentially optimize this to not rely on exception logic...
-String sValue = content.toUtf8();
-try {
-writeNumber(Long.parseLong(sValue));
-} catch (NumberFormatException e) {
-try {
-writeNumber(Double.parseDouble(sValue));
-} catch (NumberFormatException e1) {
-writeString(sValue);
-}
-}
+copyRawValue(content, contentType.xContent());
+} else {
+writeStartRaw(fieldName);
+flush();
+content.writeTo(os);
+writeEndRaw();
 }
 }

-protected void writeObjectRaw(String fieldName, BytesReference content, OutputStream bos) throws IOException {
-writeStartRaw(fieldName);
-flush();
-content.writeTo(bos);
-writeEndRaw();
+public final void writeRawValue(BytesReference content) throws IOException {
+XContentType contentType = XContentFactory.xContentType(content);
+if (contentType == null) {
+throw new IllegalArgumentException("Can't write raw bytes whose xcontent-type can't be guessed");
+}
+if (mayWriteRawData(contentType) == false) {
+copyRawValue(content, contentType.xContent());
+} else {
+flush();
+content.writeTo(os);
+writeEndRaw();
+}
 }

-protected void copyRawField(String fieldName, BytesReference content, XContent xContent) throws IOException {
+private boolean mayWriteRawData(XContentType contentType) {
+// When the current generator is filtered (ie filter != null)
+// or the content is in a different format than the current generator,
+// we need to copy the whole structure so that it will be correctly
+// filtered or converted
+return supportsRawWrites()
+&& isFiltered() == false
+&& contentType == contentType()
+&& prettyPrint == false;
+}
+
+/** Whether this generator supports writing raw data directly */
+protected boolean supportsRawWrites() {
+return true;
+}
+
+protected void copyRawValue(BytesReference content, XContent xContent) throws IOException {
 XContentParser parser = null;
 try {
 if (content.hasArray()) {
@@ -397,9 +407,6 @@ public class JsonXContentGenerator implements XContentGenerator {
 } else {
 parser = xContent.createParser(content.streamInput());
 }
-if (fieldName != null) {
-writeFieldName(fieldName);
-}
 copyCurrentStructure(parser);
 } finally {
 if (parser != null) {

@@ -62,17 +62,12 @@ public class SmileXContent implements XContent {

 @Override
 public XContentGenerator createGenerator(OutputStream os) throws IOException {
-return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8));
+return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), os);
 }

 @Override
 public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
-return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), filters);
-}
-
-@Override
-public XContentGenerator createGenerator(Writer writer) throws IOException {
-return new SmileXContentGenerator(smileFactory.createGenerator(writer));
+return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), os, filters);
 }

 @Override

@ -20,13 +20,9 @@
|
|||
package org.elasticsearch.common.xcontent.smile;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonGenerator;
|
||||
import com.fasterxml.jackson.dataformat.smile.SmileParser;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
|
||||
/**
|
||||
|
@ -34,8 +30,8 @@ import java.io.OutputStream;
|
|||
*/
|
||||
public class SmileXContentGenerator extends JsonXContentGenerator {
|
||||
|
||||
public SmileXContentGenerator(JsonGenerator jsonGenerator, String... filters) {
|
||||
super(jsonGenerator, filters);
|
||||
public SmileXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
|
||||
super(jsonGenerator, os, filters);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -49,46 +45,7 @@ public class SmileXContentGenerator extends JsonXContentGenerator {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void writeRawField(String fieldName, InputStream content, OutputStream bos, XContentType contentType) throws IOException {
|
||||
writeFieldName(fieldName);
|
||||
try (SmileParser parser = SmileXContent.smileFactory.createParser(content)) {
|
||||
parser.nextToken();
|
||||
generator.copyCurrentStructure(parser);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeRawField(String fieldName, byte[] content, OutputStream bos) throws IOException {
|
||||
writeFieldName(fieldName);
|
||||
try (SmileParser parser = SmileXContent.smileFactory.createParser(content)) {
|
||||
parser.nextToken();
|
||||
generator.copyCurrentStructure(parser);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void writeObjectRaw(String fieldName, BytesReference content, OutputStream bos) throws IOException {
|
||||
writeFieldName(fieldName);
|
||||
SmileParser parser;
|
||||
if (content.hasArray()) {
|
||||
parser = SmileXContent.smileFactory.createParser(content.array(), content.arrayOffset(), content.length());
|
||||
} else {
|
||||
parser = SmileXContent.smileFactory.createParser(content.streamInput());
|
||||
}
|
||||
try {
|
||||
parser.nextToken();
|
||||
generator.copyCurrentStructure(parser);
|
||||
} finally {
|
||||
parser.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeRawField(String fieldName, byte[] content, int offset, int length, OutputStream bos) throws IOException {
|
||||
writeFieldName(fieldName);
|
||||
try (SmileParser parser = SmileXContent.smileFactory.createParser(content, offset, length)) {
|
||||
parser.nextToken();
|
||||
generator.copyCurrentStructure(parser);
|
||||
}
|
||||
protected boolean supportsRawWrites() {
|
||||
return false;
|
||||
}
|
||||
}
@@ -60,17 +60,12 @@ public class YamlXContent implements XContent {
@Override
|
||||
public XContentGenerator createGenerator(OutputStream os) throws IOException {
|
||||
return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8));
|
||||
return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8), os);
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
|
||||
return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8), filters);
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentGenerator createGenerator(Writer writer) throws IOException {
|
||||
return new YamlXContentGenerator(yamlFactory.createGenerator(writer));
|
||||
return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8), os, filters);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -20,13 +20,9 @@
|
|||
package org.elasticsearch.common.xcontent.yaml;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonGenerator;
|
||||
import com.fasterxml.jackson.dataformat.yaml.YAMLParser;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
|
||||
/**
|
||||
|
@ -34,8 +30,8 @@ import java.io.OutputStream;
|
|||
*/
|
||||
public class YamlXContentGenerator extends JsonXContentGenerator {
|
||||
|
||||
public YamlXContentGenerator(JsonGenerator jsonGenerator, String... filters) {
|
||||
super(jsonGenerator, filters);
|
||||
public YamlXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
|
||||
super(jsonGenerator, os, filters);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -49,46 +45,7 @@ public class YamlXContentGenerator extends JsonXContentGenerator {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void writeRawField(String fieldName, InputStream content, OutputStream bos, XContentType contentType) throws IOException {
|
||||
writeFieldName(fieldName);
|
||||
try (YAMLParser parser = YamlXContent.yamlFactory.createParser(content)) {
|
||||
parser.nextToken();
|
||||
generator.copyCurrentStructure(parser);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeRawField(String fieldName, byte[] content, OutputStream bos) throws IOException {
|
||||
writeFieldName(fieldName);
|
||||
try (YAMLParser parser = YamlXContent.yamlFactory.createParser(content)) {
|
||||
parser.nextToken();
|
||||
generator.copyCurrentStructure(parser);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void writeObjectRaw(String fieldName, BytesReference content, OutputStream bos) throws IOException {
|
||||
writeFieldName(fieldName);
|
||||
YAMLParser parser;
|
||||
if (content.hasArray()) {
|
||||
parser = YamlXContent.yamlFactory.createParser(content.array(), content.arrayOffset(), content.length());
|
||||
} else {
|
||||
parser = YamlXContent.yamlFactory.createParser(content.streamInput());
|
||||
}
|
||||
try {
|
||||
parser.nextToken();
|
||||
generator.copyCurrentStructure(parser);
|
||||
} finally {
|
||||
parser.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeRawField(String fieldName, byte[] content, int offset, int length, OutputStream bos) throws IOException {
|
||||
writeFieldName(fieldName);
|
||||
try (YAMLParser parser = YamlXContent.yamlFactory.createParser(content, offset, length)) {
|
||||
parser.nextToken();
|
||||
generator.copyCurrentStructure(parser);
|
||||
}
|
||||
protected boolean supportsRawWrites() {
|
||||
return false;
|
||||
}
|
||||
}

@@ -23,6 +23,7 @@ import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@@ -41,7 +42,6 @@ public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryServic
public static final String SETTING_INITIAL_STATE_TIMEOUT = "discovery.initial_state_timeout";
public static final String SETTING_DISCOVERY_SEED = "discovery.id.seed";


private static class InitialStateListener implements InitialStateDiscoveryListener {

private final CountDownLatch latch = new CountDownLatch(1);
@@ -132,10 +132,7 @@ public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryServic
}

public static String generateNodeId(Settings settings) {
String seed = settings.get(DiscoveryService.SETTING_DISCOVERY_SEED);
if (seed != null) {
return Strings.randomBase64UUID(new Random(Long.parseLong(seed)));
}
return Strings.randomBase64UUID();
Random random = Randomness.get(settings, DiscoveryService.SETTING_DISCOVERY_SEED);
return Strings.randomBase64UUID(random);
}
}
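The change above routes node-id generation through Randomness.get(settings, seed setting), so a configured discovery.id.seed still yields reproducible ids while the unseeded path stays random. A rough standalone sketch of the same idea (java.util.UUID is used as a stand-in for the project's base64 UUIDs, and the helper name is invented):

// Sketch only: deterministic ids when a seed is supplied, random otherwise.
import java.util.Random;
import java.util.UUID;

class NodeIdSketch {
    static String generateNodeId(Long seedOrNull) {
        Random random = seedOrNull != null ? new Random(seedOrNull) : new Random();
        byte[] bytes = new byte[16];
        random.nextBytes(bytes);          // same seed => same byte sequence => same id
        return UUID.nameUUIDFromBytes(bytes).toString();
    }
}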

@@ -805,10 +805,14 @@ public class InternalEngine extends Engine {
// we need to fail the engine. it might have already been failed before
// but we are double-checking it's failed and closed
if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) {
failEngine("already closed by tragic event", indexWriter.getTragicException());
failEngine("already closed by tragic event on the index writer", indexWriter.getTragicException());
} else if (translog.isOpen() == false && translog.getTragicException() != null) {
failEngine("already closed by tragic event on the translog", translog.getTragicException());
}
return true;
} else if (t != null && indexWriter.isOpen() == false && indexWriter.getTragicException() == t) {
} else if (t != null &&
((indexWriter.isOpen() == false && indexWriter.getTragicException() == t)
|| (translog.isOpen() == false && translog.getTragicException() == t))) {
// this spot on - we are handling the tragic event exception here so we have to fail the engine
// right away
failEngine(source, t);
@@ -52,6 +52,7 @@ import org.elasticsearch.search.internal.SearchContext;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
|
@@ -117,7 +118,7 @@ public class DocumentMapper implements ToXContent {
|
|||
|
||||
private volatile CompressedXContent mappingSource;
|
||||
|
||||
private final Mapping mapping;
|
||||
private volatile Mapping mapping;
|
||||
|
||||
private final DocumentParser documentParser;
|
||||

@@ -352,16 +353,19 @@ public class DocumentMapper implements ToXContent {
mapperService.addMappers(type, objectMappers, fieldMappers);
}

public MergeResult merge(Mapping mapping, boolean simulate, boolean updateAllTypes) {
public void merge(Mapping mapping, boolean simulate, boolean updateAllTypes) {
try (ReleasableLock lock = mappingWriteLock.acquire()) {
mapperService.checkMappersCompatibility(type, mapping, updateAllTypes);
final MergeResult mergeResult = new MergeResult(simulate, updateAllTypes);
this.mapping.merge(mapping, mergeResult);
// do the merge even if simulate == false so that we get exceptions
Mapping merged = this.mapping.merge(mapping, updateAllTypes);
if (simulate == false) {
addMappers(mergeResult.getNewObjectMappers(), mergeResult.getNewFieldMappers(), updateAllTypes);
this.mapping = merged;
Collection<ObjectMapper> objectMappers = new ArrayList<>();
Collection<FieldMapper> fieldMappers = new ArrayList<>(Arrays.asList(merged.metadataMappers));
MapperUtils.collect(merged.root, objectMappers, fieldMappers);
addMappers(objectMappers, fieldMappers, updateAllTypes);
refreshSource();
}
return mergeResult;
}
}

@@ -267,7 +267,7 @@ class DocumentParser implements Closeable {
if (update == null) {
update = newUpdate;
} else {
MapperUtils.merge(update, newUpdate);
update = update.merge(newUpdate, false);
}
}
}
@@ -759,7 +759,7 @@ class DocumentParser implements Closeable {
private static <M extends Mapper> M parseAndMergeUpdate(M mapper, ParseContext context) throws IOException {
final Mapper update = parseObjectOrField(context, mapper);
if (update != null) {
MapperUtils.merge(mapper, update);
mapper = (M) mapper.merge(update, false);
}
return mapper;
}
@@ -47,7 +47,7 @@ import java.util.List;
|
|||
import java.util.Locale;
|
||||
import java.util.stream.StreamSupport;
|
||||
|
||||
public abstract class FieldMapper extends Mapper {
|
||||
public abstract class FieldMapper extends Mapper implements Cloneable {
|
||||
|
||||
public abstract static class Builder<T extends Builder, Y extends FieldMapper> extends Mapper.Builder<T, Y> {
|
||||
|
||||
|
@@ -84,8 +84,13 @@ public abstract class FieldMapper extends Mapper {
|
|||
* if the fieldType has a non-null option we are all good it might have been set through a different
|
||||
* call.
|
||||
*/
|
||||
final IndexOptions options = getDefaultIndexOption();
|
||||
assert options != IndexOptions.NONE : "default IndexOptions is NONE can't enable indexing";
|
||||
IndexOptions options = getDefaultIndexOption();
|
||||
if (options == IndexOptions.NONE) {
|
||||
// can happen when an existing type on the same index has disabled indexing
|
||||
// since we inherit the default field type from the first mapper that is
|
||||
// created on an index
|
||||
throw new IllegalArgumentException("mapper [" + name + "] has different [index] values from other types of the same index");
|
||||
}
|
||||
fieldType.setIndexOptions(options);
|
||||
}
|
||||
} else {
|
||||
|
@@ -270,7 +275,7 @@ public abstract class FieldMapper extends Mapper {
|
|||
|
||||
protected MappedFieldTypeReference fieldTypeRef;
|
||||
protected final MappedFieldType defaultFieldType;
|
||||
protected final MultiFields multiFields;
|
||||
protected MultiFields multiFields;
|
||||
protected CopyTo copyTo;
|
||||
protected final boolean indexCreatedBefore2x;
|
||||
|
||||
|
@@ -359,26 +364,41 @@ public abstract class FieldMapper extends Mapper {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
protected FieldMapper clone() {
|
||||
try {
|
||||
return (FieldMapper) super.clone();
|
||||
} catch (CloneNotSupportedException e) {
|
||||
throw new AssertionError(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public FieldMapper merge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
FieldMapper merged = clone();
|
||||
merged.doMerge(mergeWith, updateAllTypes);
|
||||
return merged;
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge changes coming from {@code mergeWith} in place.
|
||||
* @param updateAllTypes TODO
|
||||
*/
|
||||
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
String mergedType = mergeWith.getClass().getSimpleName();
|
||||
if (mergeWith instanceof FieldMapper) {
|
||||
mergedType = ((FieldMapper) mergeWith).contentType();
|
||||
}
|
||||
mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]");
|
||||
// different types, return
|
||||
return;
|
||||
throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]");
|
||||
}
|
||||
FieldMapper fieldMergeWith = (FieldMapper) mergeWith;
|
||||
multiFields.merge(mergeWith, mergeResult);
|
||||
multiFields = multiFields.merge(fieldMergeWith.multiFields);
|
||||
|
||||
if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
|
||||
// apply changeable values
|
||||
MappedFieldType fieldType = fieldMergeWith.fieldType().clone();
|
||||
fieldType.freeze();
|
||||
fieldTypeRef.set(fieldType);
|
||||
this.copyTo = fieldMergeWith.copyTo;
|
||||
}
|
||||
// apply changeable values
|
||||
MappedFieldType fieldType = fieldMergeWith.fieldType().clone();
|
||||
fieldType.freeze();
|
||||
fieldTypeRef.set(fieldType);
|
||||
this.copyTo = fieldMergeWith.copyTo;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -565,18 +585,20 @@ public abstract class FieldMapper extends Mapper {
|
|||
}
|
||||
|
||||
private final ContentPath.Type pathType;
|
||||
private volatile ImmutableOpenMap<String, FieldMapper> mappers;
|
||||
private final ImmutableOpenMap<String, FieldMapper> mappers;
|
||||
|
||||
public MultiFields(ContentPath.Type pathType, ImmutableOpenMap<String, FieldMapper> mappers) {
|
||||
private MultiFields(ContentPath.Type pathType, ImmutableOpenMap<String, FieldMapper> mappers) {
|
||||
this.pathType = pathType;
|
||||
this.mappers = mappers;
|
||||
ImmutableOpenMap.Builder<String, FieldMapper> builder = new ImmutableOpenMap.Builder<>();
|
||||
// we disable the all in multi-field mappers
|
||||
for (ObjectCursor<FieldMapper> cursor : mappers.values()) {
|
||||
for (ObjectObjectCursor<String, FieldMapper> cursor : mappers) {
|
||||
FieldMapper mapper = cursor.value;
|
||||
if (mapper instanceof AllFieldMapper.IncludeInAll) {
|
||||
((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll();
|
||||
mapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll();
|
||||
}
|
||||
builder.put(cursor.key, mapper);
|
||||
}
|
||||
this.mappers = builder.build();
|
||||
}
|
||||
|
||||
public void parse(FieldMapper mainField, ParseContext context) throws IOException {
|
||||
|
@@ -598,47 +620,29 @@ public abstract class FieldMapper extends Mapper {
|
|||
context.path().pathType(origPathType);
|
||||
}
|
||||
|
||||
// No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
FieldMapper mergeWithMultiField = (FieldMapper) mergeWith;
|
||||
public MultiFields merge(MultiFields mergeWith) {
|
||||
if (pathType != mergeWith.pathType) {
|
||||
throw new IllegalArgumentException("Can't change path type from [" + pathType + "] to [" + mergeWith.pathType + "]");
|
||||
}
|
||||
ImmutableOpenMap.Builder<String, FieldMapper> newMappersBuilder = ImmutableOpenMap.builder(mappers);
|
||||
|
||||
List<FieldMapper> newFieldMappers = null;
|
||||
ImmutableOpenMap.Builder<String, FieldMapper> newMappersBuilder = null;
|
||||
|
||||
for (ObjectCursor<FieldMapper> cursor : mergeWithMultiField.multiFields.mappers.values()) {
|
||||
for (ObjectCursor<FieldMapper> cursor : mergeWith.mappers.values()) {
|
||||
FieldMapper mergeWithMapper = cursor.value;
|
||||
Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName());
|
||||
FieldMapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName());
|
||||
if (mergeIntoMapper == null) {
|
||||
// no mapping, simply add it if not simulating
|
||||
if (!mergeResult.simulate()) {
|
||||
// we disable the all in multi-field mappers
|
||||
if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) {
|
||||
((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll();
|
||||
}
|
||||
if (newMappersBuilder == null) {
|
||||
newMappersBuilder = ImmutableOpenMap.builder(mappers);
|
||||
}
|
||||
newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper);
|
||||
if (mergeWithMapper instanceof FieldMapper) {
|
||||
if (newFieldMappers == null) {
|
||||
newFieldMappers = new ArrayList<>(2);
|
||||
}
|
||||
newFieldMappers.add(mergeWithMapper);
|
||||
}
|
||||
// we disable the all in multi-field mappers
|
||||
if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) {
|
||||
mergeWithMapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll();
|
||||
}
|
||||
newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper);
|
||||
} else {
|
||||
mergeIntoMapper.merge(mergeWithMapper, mergeResult);
|
||||
FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper, false);
|
||||
newMappersBuilder.put(merged.simpleName(), merged); // override previous definition
|
||||
}
|
||||
}
|
||||
|
||||
// first add all field mappers
|
||||
if (newFieldMappers != null) {
|
||||
mergeResult.addFieldMappers(newFieldMappers);
|
||||
}
|
||||
// now publish mappers
|
||||
if (newMappersBuilder != null) {
|
||||
mappers = newMappersBuilder.build();
|
||||
}
|
||||
ImmutableOpenMap<String, FieldMapper> mappers = newMappersBuilder.build();
|
||||
return new MultiFields(pathType, mappers);
|
||||
}
|
||||
|
||||
public Iterator<Mapper> iterator() {
|
||||

@@ -174,5 +174,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
/** Returns the canonical name which uniquely identifies the mapper against other mappers in a type. */
public abstract String name();

public abstract void merge(Mapper mergeWith, MergeResult mergeResult);
/** Return the merge of {@code mergeWith} into this.
* Both {@code this} and {@code mergeWith} will be left unmodified. */
public abstract Mapper merge(Mapper mergeWith, boolean updateAllTypes);
}
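The new signature replaces the mutate-and-record-conflicts style (merge(Mapper, MergeResult)) with an immutable merge: the receiver and the argument are left untouched, a merged copy is returned, and conflicts surface as exceptions. A tiny sketch of that contract using an invented value type, not one of the project's mapper classes:

// Sketch only: the return-a-copy merge contract in isolation.
final class FieldSpec {
    final String name;
    final boolean stored;

    FieldSpec(String name, boolean stored) {
        this.name = name;
        this.stored = stored;
    }

    /** Neither input is modified; incompatibilities are reported by throwing. */
    FieldSpec merge(FieldSpec other) {
        if (!name.equals(other.name)) {
            throw new IllegalArgumentException("cannot merge [" + name + "] with [" + other.name + "]");
        }
        return new FieldSpec(name, stored || other.stored);
    }
}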
@@ -32,7 +32,6 @@ import org.apache.lucene.util.BytesRef;
|
|||
import org.elasticsearch.ElasticsearchGenerationException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
import org.elasticsearch.common.lucene.search.Queries;
|
||||
|
@ -92,7 +91,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
private final ReleasableLock mappingWriteLock = new ReleasableLock(mappingLock.writeLock());
|
||||
|
||||
private volatile FieldTypeLookup fieldTypes;
|
||||
private volatile ImmutableOpenMap<String, ObjectMapper> fullPathObjectMappers = ImmutableOpenMap.of();
|
||||
private volatile Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>();
|
||||
private boolean hasNested = false; // updated dynamically to true when a nested object is added
|
||||
|
||||
private final DocumentMapperParser documentParser;
|
||||
|
@@ -251,14 +250,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
DocumentMapper oldMapper = mappers.get(mapper.type());
|
||||
|
||||
if (oldMapper != null) {
|
||||
// simulate first
|
||||
MergeResult result = oldMapper.merge(mapper.mapping(), true, updateAllTypes);
|
||||
if (result.hasConflicts()) {
|
||||
throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(result.buildConflicts()) + "}");
|
||||
}
|
||||
// then apply for real
|
||||
result = oldMapper.merge(mapper.mapping(), false, updateAllTypes);
|
||||
assert result.hasConflicts() == false; // we already simulated
|
||||
oldMapper.merge(mapper.mapping(), false, updateAllTypes);
|
||||
return oldMapper;
|
||||
} else {
|
||||
Tuple<Collection<ObjectMapper>, Collection<FieldMapper>> newMappers = checkMappersCompatibility(
|
||||
|
@@ -300,19 +292,56 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
return true;
|
||||
}
|
||||
|
||||
private void checkFieldUniqueness(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
|
||||
final Set<String> objectFullNames = new HashSet<>();
|
||||
for (ObjectMapper objectMapper : objectMappers) {
|
||||
final String fullPath = objectMapper.fullPath();
|
||||
if (objectFullNames.add(fullPath) == false) {
|
||||
throw new IllegalArgumentException("Object mapper [" + fullPath + "] is defined twice in mapping for type [" + type + "]");
|
||||
}
|
||||
}
|
||||
|
||||
if (indexSettings.getIndexVersionCreated().before(Version.V_3_0_0)) {
|
||||
// Before 3.0 some metadata mappers are also registered under the root object mapper
|
||||
// So we avoid false positives by deduplicating mappers
|
||||
// given that we check exact equality, this would still catch the case that a mapper
|
||||
// is defined under the root object
|
||||
Collection<FieldMapper> uniqueFieldMappers = Collections.newSetFromMap(new IdentityHashMap<>());
|
||||
uniqueFieldMappers.addAll(fieldMappers);
|
||||
fieldMappers = uniqueFieldMappers;
|
||||
}
|
||||
|
||||
final Set<String> fieldNames = new HashSet<>();
|
||||
for (FieldMapper fieldMapper : fieldMappers) {
|
||||
final String name = fieldMapper.name();
|
||||
if (objectFullNames.contains(name)) {
|
||||
throw new IllegalArgumentException("Field [" + name + "] is defined both as an object and a field in [" + type + "]");
|
||||
} else if (fieldNames.add(name) == false) {
|
||||
throw new IllegalArgumentException("Field [" + name + "] is defined twice in [" + type + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected void checkMappersCompatibility(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers, boolean updateAllTypes) {
|
||||
assert mappingLock.isWriteLockedByCurrentThread();
|
||||
|
||||
checkFieldUniqueness(type, objectMappers, fieldMappers);
|
||||
|
||||
for (ObjectMapper newObjectMapper : objectMappers) {
|
||||
ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath());
|
||||
if (existingObjectMapper != null) {
|
||||
MergeResult result = new MergeResult(true, updateAllTypes);
|
||||
existingObjectMapper.merge(newObjectMapper, result);
|
||||
if (result.hasConflicts()) {
|
||||
throw new IllegalArgumentException("Mapper for [" + newObjectMapper.fullPath() + "] conflicts with existing mapping in other types" +
|
||||
Arrays.toString(result.buildConflicts()));
|
||||
}
|
||||
// simulate a merge and ignore the result, we are just interested
|
||||
// in exceptions here
|
||||
existingObjectMapper.merge(newObjectMapper, updateAllTypes);
|
||||
}
|
||||
}
|
||||
|
||||
for (FieldMapper fieldMapper : fieldMappers) {
|
||||
if (fullPathObjectMappers.containsKey(fieldMapper.name())) {
|
||||
throw new IllegalArgumentException("Field [{}] is defined as a field in mapping [" + fieldMapper.name() + "] but this name is already used for an object in other types");
|
||||
}
|
||||
}
|
||||
|
||||
fieldTypes.checkCompatibility(type, fieldMappers, updateAllTypes);
|
||||
}
|
||||
|
||||
|
@ -320,9 +349,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
String type, Mapping mapping, boolean updateAllTypes) {
|
||||
List<ObjectMapper> objectMappers = new ArrayList<>();
|
||||
List<FieldMapper> fieldMappers = new ArrayList<>();
|
||||
for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) {
|
||||
fieldMappers.add(metadataMapper);
|
||||
}
|
||||
Collections.addAll(fieldMappers, mapping.metadataMappers);
|
||||
MapperUtils.collect(mapping.root, objectMappers, fieldMappers);
|
||||
checkMappersCompatibility(type, objectMappers, fieldMappers, updateAllTypes);
|
||||
return new Tuple<>(objectMappers, fieldMappers);
|
||||
|
@ -330,14 +357,14 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
|
||||
protected void addMappers(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
|
||||
assert mappingLock.isWriteLockedByCurrentThread();
|
||||
ImmutableOpenMap.Builder<String, ObjectMapper> fullPathObjectMappers = ImmutableOpenMap.builder(this.fullPathObjectMappers);
|
||||
Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers);
|
||||
for (ObjectMapper objectMapper : objectMappers) {
|
||||
fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper);
|
||||
if (objectMapper.nested().isNested()) {
|
||||
hasNested = true;
|
||||
}
|
||||
}
|
||||
this.fullPathObjectMappers = fullPathObjectMappers.build();
|
||||
this.fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers);
|
||||
this.fieldTypes = this.fieldTypes.copyAndAddAll(type, fieldMappers);
|
||||
}
|
||||
|
||||
|
|
|
@@ -27,52 +27,6 @@ import java.util.Collection;
|
|||
public enum MapperUtils {
|
||||
;
|
||||
|
||||
private static MergeResult newStrictMergeResult() {
|
||||
return new MergeResult(false, false) {
|
||||
|
||||
@Override
|
||||
public void addFieldMappers(Collection<FieldMapper> fieldMappers) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addObjectMappers(Collection<ObjectMapper> objectMappers) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<FieldMapper> getNewFieldMappers() {
|
||||
throw new UnsupportedOperationException("Strict merge result does not support new field mappers");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<ObjectMapper> getNewObjectMappers() {
|
||||
throw new UnsupportedOperationException("Strict merge result does not support new object mappers");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addConflict(String mergeFailure) {
|
||||
throw new MapperParsingException("Merging dynamic updates triggered a conflict: " + mergeFailure);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge {@code mergeWith} into {@code mergeTo}. Note: this method only
|
||||
* merges mappings, not lookup structures. Conflicts are returned as exceptions.
|
||||
*/
|
||||
public static void merge(Mapper mergeInto, Mapper mergeWith) {
|
||||
mergeInto.merge(mergeWith, newStrictMergeResult());
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge {@code mergeWith} into {@code mergeTo}. Note: this method only
|
||||
* merges mappings, not lookup structures. Conflicts are returned as exceptions.
|
||||
*/
|
||||
public static void merge(Mapping mergeInto, Mapping mergeWith) {
|
||||
mergeInto.merge(mergeWith, newStrictMergeResult());
|
||||
}
|
||||
|
||||
/** Split mapper and its descendants into object and field mappers. */
|
||||
public static void collect(Mapper mapper, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
|
||||
if (mapper instanceof RootObjectMapper) {
|
||||
|
|
|
@ -27,10 +27,12 @@ import org.elasticsearch.index.mapper.object.RootObjectMapper;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.unmodifiableMap;
|
||||
|
@ -41,25 +43,27 @@ import static java.util.Collections.unmodifiableMap;
|
|||
*/
|
||||
public final class Mapping implements ToXContent {
|
||||
|
||||
public static final List<String> LEGACY_INCLUDE_IN_OBJECT = Arrays.asList("_all", "_id", "_parent", "_routing", "_timestamp", "_ttl");
|
||||
// Set of fields that were included into the root object mapper before 2.0
|
||||
public static final Set<String> LEGACY_INCLUDE_IN_OBJECT = Collections.unmodifiableSet(new HashSet<>(
|
||||
Arrays.asList("_all", "_id", "_parent", "_routing", "_timestamp", "_ttl")));
|
||||
|
||||
final Version indexCreated;
|
||||
final RootObjectMapper root;
|
||||
final MetadataFieldMapper[] metadataMappers;
|
||||
final Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappersMap;
|
||||
volatile Map<String, Object> meta;
|
||||
final Map<String, Object> meta;
|
||||
|
||||
public Mapping(Version indexCreated, RootObjectMapper rootObjectMapper, MetadataFieldMapper[] metadataMappers, Map<String, Object> meta) {
|
||||
this.indexCreated = indexCreated;
|
||||
this.root = rootObjectMapper;
|
||||
this.metadataMappers = metadataMappers;
|
||||
Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappersMap = new HashMap<>();
|
||||
for (MetadataFieldMapper metadataMapper : metadataMappers) {
|
||||
if (indexCreated.before(Version.V_2_0_0_beta1) && LEGACY_INCLUDE_IN_OBJECT.contains(metadataMapper.name())) {
|
||||
root.putMapper(metadataMapper);
|
||||
rootObjectMapper = rootObjectMapper.copyAndPutMapper(metadataMapper);
|
||||
}
|
||||
metadataMappersMap.put(metadataMapper.getClass(), metadataMapper);
|
||||
}
|
||||
this.root = rootObjectMapper;
|
||||
// keep root mappers sorted for consistent serialization
|
||||
Arrays.sort(metadataMappers, new Comparator<Mapper>() {
|
||||
@Override
|
||||
|
@@ -90,21 +94,20 @@ public final class Mapping implements ToXContent {
|
|||
}
|
||||
|
||||
/** @see DocumentMapper#merge(Mapping, boolean, boolean) */
|
||||
public void merge(Mapping mergeWith, MergeResult mergeResult) {
|
||||
assert metadataMappers.length == mergeWith.metadataMappers.length;
|
||||
|
||||
root.merge(mergeWith.root, mergeResult);
|
||||
for (MetadataFieldMapper metadataMapper : metadataMappers) {
|
||||
MetadataFieldMapper mergeWithMetadataMapper = mergeWith.metadataMapper(metadataMapper.getClass());
|
||||
if (mergeWithMetadataMapper != null) {
|
||||
metadataMapper.merge(mergeWithMetadataMapper, mergeResult);
|
||||
public Mapping merge(Mapping mergeWith, boolean updateAllTypes) {
|
||||
RootObjectMapper mergedRoot = root.merge(mergeWith.root, updateAllTypes);
|
||||
Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> mergedMetaDataMappers = new HashMap<>(metadataMappersMap);
|
||||
for (MetadataFieldMapper metaMergeWith : mergeWith.metadataMappers) {
|
||||
MetadataFieldMapper mergeInto = mergedMetaDataMappers.get(metaMergeWith.getClass());
|
||||
MetadataFieldMapper merged;
|
||||
if (mergeInto == null) {
|
||||
merged = metaMergeWith;
|
||||
} else {
|
||||
merged = mergeInto.merge(metaMergeWith, updateAllTypes);
|
||||
}
|
||||
mergedMetaDataMappers.put(merged.getClass(), merged);
|
||||
}
|
||||
|
||||
if (mergeResult.simulate() == false) {
|
||||
// let the merge with attributes to override the attributes
|
||||
meta = mergeWith.meta;
|
||||
}
|
||||
return new Mapping(indexCreated, mergedRoot, mergedMetaDataMappers.values().toArray(new MetadataFieldMapper[0]), mergeWith.meta);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -1,81 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.mapper;
|
||||
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.index.mapper.object.ObjectMapper;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
|
||||
/** A container for tracking results of a mapping merge. */
|
||||
public class MergeResult {
|
||||
|
||||
private final boolean simulate;
|
||||
private final boolean updateAllTypes;
|
||||
|
||||
private final List<String> conflicts = new ArrayList<>();
|
||||
private final List<FieldMapper> newFieldMappers = new ArrayList<>();
|
||||
private final List<ObjectMapper> newObjectMappers = new ArrayList<>();
|
||||
|
||||
public MergeResult(boolean simulate, boolean updateAllTypes) {
|
||||
this.simulate = simulate;
|
||||
this.updateAllTypes = updateAllTypes;
|
||||
}
|
||||
|
||||
public void addFieldMappers(Collection<FieldMapper> fieldMappers) {
|
||||
assert simulate() == false;
|
||||
newFieldMappers.addAll(fieldMappers);
|
||||
}
|
||||
|
||||
public void addObjectMappers(Collection<ObjectMapper> objectMappers) {
|
||||
assert simulate() == false;
|
||||
newObjectMappers.addAll(objectMappers);
|
||||
}
|
||||
|
||||
public Collection<FieldMapper> getNewFieldMappers() {
|
||||
return newFieldMappers;
|
||||
}
|
||||
|
||||
public Collection<ObjectMapper> getNewObjectMappers() {
|
||||
return newObjectMappers;
|
||||
}
|
||||
|
||||
public boolean simulate() {
|
||||
return simulate;
|
||||
}
|
||||
|
||||
public boolean updateAllTypes() {
|
||||
return updateAllTypes;
|
||||
}
|
||||
|
||||
public void addConflict(String mergeFailure) {
|
||||
conflicts.add(mergeFailure);
|
||||
}
|
||||
|
||||
public boolean hasConflicts() {
|
||||
return conflicts.isEmpty() == false;
|
||||
}
|
||||
|
||||
public String[] buildConflicts() {
|
||||
return conflicts.toArray(Strings.EMPTY_ARRAY);
|
||||
}
|
||||
}
|
|
@@ -70,4 +70,8 @@ public abstract class MetadataFieldMapper extends FieldMapper {
*/
public abstract void postParse(ParseContext context) throws IOException;

@Override
public MetadataFieldMapper merge(Mapper mergeWith, boolean updateAllTypes) {
return (MetadataFieldMapper) super.merge(mergeWith, updateAllTypes);
}
}
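The override above narrows the return type: MetadataFieldMapper.merge returns MetadataFieldMapper even though the superclass declares a wider mapper type, so callers keep the more specific type without casting. A short sketch of that covariant-return pattern with placeholder classes (not the project's types):

// Sketch only: covariant return type on an overridden merge-style method.
class Base {
    Base merge(Base other) {
        return this;
    }
}

class Derived extends Base {
    @Override
    Derived merge(Base other) {          // narrower return type is allowed in Java
        return (Derived) super.merge(other);
    }
}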
@@ -616,7 +616,7 @@ public abstract class ParseContext {
|
|||
if (dynamicMappingsUpdate == null) {
|
||||
dynamicMappingsUpdate = mapper;
|
||||
} else {
|
||||
MapperUtils.merge(dynamicMappingsUpdate, mapper);
|
||||
dynamicMappingsUpdate = dynamicMappingsUpdate.merge(mapper, false);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -133,7 +133,7 @@ public class ParsedDocument {
|
|||
if (dynamicMappingsUpdate == null) {
|
||||
dynamicMappingsUpdate = update;
|
||||
} else {
|
||||
MapperUtils.merge(dynamicMappingsUpdate, update);
|
||||
dynamicMappingsUpdate = dynamicMappingsUpdate.merge(update, false);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -77,8 +77,7 @@ public class ByteFieldMapper extends NumberFieldMapper {
|
|||
setupFieldType(context);
|
||||
ByteFieldMapper fieldMapper = new ByteFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
|
||||
coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (ByteFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -605,11 +605,9 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
|
|||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
super.doMerge(mergeWith, updateAllTypes);
|
||||
CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith;
|
||||
if (!mergeResult.simulate()) {
|
||||
this.maxInputLength = fieldMergeWith.maxInputLength;
|
||||
}
|
||||
this.maxInputLength = fieldMergeWith.maxInputLength;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -123,8 +123,7 @@ public class DateFieldMapper extends NumberFieldMapper {
|
|||
fieldType.setNullValue(nullValue);
|
||||
DateFieldMapper fieldMapper = new DateFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
|
||||
coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (DateFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -80,8 +80,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
|
|||
setupFieldType(context);
|
||||
DoubleFieldMapper fieldMapper = new DoubleFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context),
|
||||
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (DoubleFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -81,8 +81,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
|
|||
setupFieldType(context);
|
||||
FloatFieldMapper fieldMapper = new FloatFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context),
|
||||
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (FloatFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -85,8 +85,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {
|
|||
IntegerFieldMapper fieldMapper = new IntegerFieldMapper(name, fieldType, defaultFieldType,
|
||||
ignoreMalformed(context), coerce(context),
|
||||
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (IntegerFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -84,8 +84,7 @@ public class LongFieldMapper extends NumberFieldMapper {
|
|||
setupFieldType(context);
|
||||
LongFieldMapper fieldMapper = new LongFieldMapper(name, fieldType, defaultFieldType,
|
||||
ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (LongFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -183,22 +183,41 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
|
|||
}
|
||||
|
||||
@Override
|
||||
public void includeInAll(Boolean includeInAll) {
|
||||
protected NumberFieldMapper clone() {
|
||||
return (NumberFieldMapper) super.clone();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mapper includeInAll(Boolean includeInAll) {
|
||||
if (includeInAll != null) {
|
||||
this.includeInAll = includeInAll;
|
||||
NumberFieldMapper clone = clone();
|
||||
clone.includeInAll = includeInAll;
|
||||
return clone;
|
||||
} else {
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void includeInAllIfNotSet(Boolean includeInAll) {
|
||||
public Mapper includeInAllIfNotSet(Boolean includeInAll) {
|
||||
if (includeInAll != null && this.includeInAll == null) {
|
||||
this.includeInAll = includeInAll;
|
||||
NumberFieldMapper clone = clone();
|
||||
clone.includeInAll = includeInAll;
|
||||
return clone;
|
||||
} else {
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void unsetIncludeInAll() {
|
||||
includeInAll = null;
|
||||
public Mapper unsetIncludeInAll() {
|
||||
if (includeInAll != null) {
|
||||
NumberFieldMapper clone = clone();
|
||||
clone.includeInAll = null;
|
||||
return clone;
|
||||
} else {
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -254,21 +273,16 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
|
|||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
}
|
||||
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
super.doMerge(mergeWith, updateAllTypes);
|
||||
NumberFieldMapper nfmMergeWith = (NumberFieldMapper) mergeWith;
|
||||
|
||||
if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
|
||||
this.includeInAll = nfmMergeWith.includeInAll;
|
||||
if (nfmMergeWith.ignoreMalformed.explicit()) {
|
||||
this.ignoreMalformed = nfmMergeWith.ignoreMalformed;
|
||||
}
|
||||
if (nfmMergeWith.coerce.explicit()) {
|
||||
this.coerce = nfmMergeWith.coerce;
|
||||
}
|
||||
this.includeInAll = nfmMergeWith.includeInAll;
|
||||
if (nfmMergeWith.ignoreMalformed.explicit()) {
|
||||
this.ignoreMalformed = nfmMergeWith.ignoreMalformed;
|
||||
}
|
||||
if (nfmMergeWith.coerce.explicit()) {
|
||||
this.coerce = nfmMergeWith.coerce;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
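In the NumberFieldMapper changes above, includeInAll, includeInAllIfNotSet, and unsetIncludeInAll no longer mutate the mapper; they clone it and return the adjusted copy, or this when nothing changes. A compact sketch of that copy-on-write idiom with an invented class:

// Sketch only: clone-and-modify instead of in-place mutation.
class Options implements Cloneable {
    Boolean includeInAll;

    @Override
    protected Options clone() {
        try {
            return (Options) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new AssertionError(e);
        }
    }

    Options withIncludeInAll(Boolean value) {
        if (value == null) {
            return this;                 // nothing to change, share the existing instance
        }
        Options copy = clone();
        copy.includeInAll = value;
        return copy;
    }
}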
@ -81,8 +81,7 @@ public class ShortFieldMapper extends NumberFieldMapper {
|
|||
ShortFieldMapper fieldMapper = new ShortFieldMapper(name, fieldType, defaultFieldType,
|
||||
ignoreMalformed(context), coerce(context),
|
||||
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (ShortFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -35,7 +35,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
|
|||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
|
||||
|
||||
|
@ -150,8 +149,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
|
|||
StringFieldMapper fieldMapper = new StringFieldMapper(
|
||||
name, fieldType, defaultFieldType, positionIncrementGap, ignoreAbove,
|
||||
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -257,22 +255,41 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
|
|||
}
|
||||
|
||||
@Override
|
||||
public void includeInAll(Boolean includeInAll) {
|
||||
protected StringFieldMapper clone() {
|
||||
return (StringFieldMapper) super.clone();
|
||||
}
|
||||
|
||||
@Override
|
||||
public StringFieldMapper includeInAll(Boolean includeInAll) {
|
||||
if (includeInAll != null) {
|
||||
this.includeInAll = includeInAll;
|
||||
StringFieldMapper clone = clone();
|
||||
clone.includeInAll = includeInAll;
|
||||
return clone;
|
||||
} else {
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void includeInAllIfNotSet(Boolean includeInAll) {
|
||||
public StringFieldMapper includeInAllIfNotSet(Boolean includeInAll) {
|
||||
if (includeInAll != null && this.includeInAll == null) {
|
||||
this.includeInAll = includeInAll;
|
||||
StringFieldMapper clone = clone();
|
||||
clone.includeInAll = includeInAll;
|
||||
return clone;
|
||||
} else {
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void unsetIncludeInAll() {
|
||||
includeInAll = null;
|
||||
public StringFieldMapper unsetIncludeInAll() {
|
||||
if (includeInAll != null) {
|
||||
StringFieldMapper clone = clone();
|
||||
clone.includeInAll = null;
|
||||
return clone;
|
||||
} else {
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -359,15 +376,10 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
|
|||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
}
|
||||
if (!mergeResult.simulate()) {
|
||||
this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll;
|
||||
this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove;
|
||||
}
|
||||
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
super.doMerge(mergeWith, updateAllTypes);
|
||||
this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll;
|
||||
this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
|
|||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.core.StringFieldMapper.ValueAndBoost;
|
||||
|
||||
|
@ -81,8 +80,7 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
|
|||
TokenCountFieldMapper fieldMapper = new TokenCountFieldMapper(name, fieldType, defaultFieldType,
|
||||
ignoreMalformed(context), coerce(context), context.indexSettings(),
|
||||
analyzer, multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (TokenCountFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -190,14 +188,9 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
}
|
||||
if (!mergeResult.simulate()) {
|
||||
this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer;
|
||||
}
|
||||
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
super.doMerge(mergeWith, updateAllTypes);
|
||||
this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
|
|||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
|
||||
|
@@ -388,17 +387,11 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
|
|||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
}
|
||||
|
||||
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
super.doMerge(mergeWith, updateAllTypes);
|
||||
BaseGeoPointFieldMapper gpfmMergeWith = (BaseGeoPointFieldMapper) mergeWith;
|
||||
if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
|
||||
if (gpfmMergeWith.ignoreMalformed.explicit()) {
|
||||
this.ignoreMalformed = gpfmMergeWith.ignoreMalformed;
|
||||
}
|
||||
if (gpfmMergeWith.ignoreMalformed.explicit()) {
|
||||
this.ignoreMalformed = gpfmMergeWith.ignoreMalformed;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -39,7 +39,6 @@ import org.elasticsearch.index.mapper.ContentPath;
|
|||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.NumberFieldMapper.CustomNumericDocValuesField;
|
||||
|
@ -297,23 +296,18 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
|
|||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
}
|
||||
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
super.doMerge(mergeWith, updateAllTypes);
|
||||
|
||||
GeoPointFieldMapperLegacy gpfmMergeWith = (GeoPointFieldMapperLegacy) mergeWith;
|
||||
if (gpfmMergeWith.coerce.explicit()) {
|
||||
if (coerce.explicit() && coerce.value() != gpfmMergeWith.coerce.value()) {
|
||||
mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] has different [coerce]");
|
||||
throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] has different [coerce]");
|
||||
}
|
||||
}
|
||||
|
||||
if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
|
||||
if (gpfmMergeWith.coerce.explicit()) {
|
||||
this.coerce = gpfmMergeWith.coerce;
|
||||
}
|
||||
if (gpfmMergeWith.coerce.explicit()) {
|
||||
this.coerce = gpfmMergeWith.coerce;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
|
|||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -193,7 +192,8 @@ public class GeoShapeFieldMapper extends FieldMapper {
|
|||
} else if (Names.COERCE.equals(fieldName)) {
|
||||
builder.coerce(nodeBooleanValue(fieldNode));
|
||||
iterator.remove();
|
||||
} else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName)) {
|
||||
} else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName)
|
||||
&& builder.fieldType().strategyName.equals(SpatialStrategy.TERM.getStrategyName()) == false) {
|
||||
builder.fieldType().setPointsOnly(XContentMapValues.nodeBooleanValue(fieldNode));
|
||||
iterator.remove();
|
||||
}
|
||||
|
@ -284,6 +284,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
|
|||
termStrategy = new TermQueryPrefixTreeStrategy(prefixTree, names().indexName());
|
||||
termStrategy.setDistErrPct(distanceErrorPct());
|
||||
defaultStrategy = resolveStrategy(strategyName);
|
||||
defaultStrategy.setPointsOnly(pointsOnly);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -347,6 +348,9 @@ public class GeoShapeFieldMapper extends FieldMapper {
|
|||
public void setStrategyName(String strategyName) {
|
||||
checkIfFrozen();
|
||||
this.strategyName = strategyName;
|
||||
if (this.strategyName.equals(SpatialStrategy.TERM)) {
|
||||
this.pointsOnly = true;
|
||||
}
|
||||
}
|
||||
|
||||
public boolean pointsOnly() {
|
||||
|
@ -406,7 +410,6 @@ public class GeoShapeFieldMapper extends FieldMapper {
|
|||
|
||||
public PrefixTreeStrategy resolveStrategy(String strategyName) {
|
||||
if (SpatialStrategy.RECURSIVE.getStrategyName().equals(strategyName)) {
|
||||
recursiveStrategy.setPointsOnly(pointsOnly());
|
||||
return recursiveStrategy;
|
||||
}
|
||||
if (SpatialStrategy.TERM.getStrategyName().equals(strategyName)) {
|
||||
|
@ -446,7 +449,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
|
|||
}
|
||||
shape = shapeBuilder.build();
|
||||
}
|
||||
if (fieldType().defaultStrategy() instanceof RecursivePrefixTreeStrategy && fieldType().pointsOnly() && !(shape instanceof Point)) {
|
||||
if (fieldType().pointsOnly() && !(shape instanceof Point)) {
|
||||
throw new MapperParsingException("[{" + fieldType().names().fullName() + "}] is configured for points only but a " +
|
||||
((shape instanceof JtsGeometry) ? ((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass()) + " was found");
|
||||
}
|
||||
|
@ -471,17 +474,12 @@ public class GeoShapeFieldMapper extends FieldMapper {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
}
|
||||
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
super.doMerge(mergeWith, updateAllTypes);
|
||||
|
||||
GeoShapeFieldMapper gsfm = (GeoShapeFieldMapper)mergeWith;
|
||||
if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
|
||||
if (gsfm.coerce.explicit()) {
|
||||
this.coerce = gsfm.coerce;
|
||||
}
|
||||
if (gsfm.coerce.explicit()) {
|
||||
this.coerce = gsfm.coerce;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -36,7 +36,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
|
|||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.MetadataFieldMapper;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.query.QueryShardContext;
|
||||
|
@@ -58,11 +57,24 @@ public class AllFieldMapper extends MetadataFieldMapper {
|
|||
|
||||
public interface IncludeInAll {
|
||||
|
||||
void includeInAll(Boolean includeInAll);
|
||||
/**
|
||||
* If {@code includeInAll} is not null then return a copy of this mapper
|
||||
* that will include values in the _all field according to {@code includeInAll}.
|
||||
*/
|
||||
Mapper includeInAll(Boolean includeInAll);
|
||||
|
||||
void includeInAllIfNotSet(Boolean includeInAll);
|
||||
/**
|
||||
* If {@code includeInAll} is not null and not set on this mapper yet, then
|
||||
* return a copy of this mapper that will include values in the _all field
|
||||
* according to {@code includeInAll}.
|
||||
*/
|
||||
Mapper includeInAllIfNotSet(Boolean includeInAll);
|
||||
|
||||
void unsetIncludeInAll();
|
||||
/**
|
||||
* If {@code includeInAll} was already set on this mapper then return a copy
|
||||
* of this mapper that has {@code includeInAll} not set.
|
||||
*/
|
||||
Mapper unsetIncludeInAll();
|
||||
}
|
||||
|
||||
public static final String NAME = "_all";
|
||||
|
@@ -309,11 +321,11 @@ public class AllFieldMapper extends MetadataFieldMapper {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
if (((AllFieldMapper)mergeWith).enabled() != this.enabled() && ((AllFieldMapper)mergeWith).enabledState != Defaults.ENABLED) {
|
||||
mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled());
|
||||
throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled());
|
||||
}
|
||||
super.merge(mergeWith, mergeResult);
|
||||
super.doMerge(mergeWith, updateAllTypes);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
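Note (not part of the commit): the new javadoc on IncludeInAll changes the contract from mutate-in-place to copy-on-write — the setters now return a modified copy instead of void. A minimal, illustrative sketch of that pattern, with hypothetical names, could look like this:

    // Illustrative only; not from the repository. Copy-on-write setter in the style
    // the new IncludeInAll contract implies: never mutate, return a modified clone.
    final class ExampleMapper implements Cloneable {
        private Boolean includeInAll; // null means "not set"

        ExampleMapper includeInAll(Boolean includeInAll) {
            if (includeInAll == null) {
                return this; // nothing to change, reuse this instance
            }
            ExampleMapper copy = clone();
            copy.includeInAll = includeInAll;
            return copy;
        }

        ExampleMapper includeInAllIfNotSet(Boolean includeInAll) {
            if (includeInAll == null || this.includeInAll != null) {
                return this; // already set, keep the existing value
            }
            return includeInAll(includeInAll);
        }

        @Override
        protected ExampleMapper clone() {
            try {
                return (ExampleMapper) super.clone();
            } catch (CloneNotSupportedException e) {
                throw new AssertionError(e);
            }
        }
    }
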
@ -44,7 +44,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;

@ -331,7 +330,7 @@ public class IdFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// do nothing here, no merging, but also no exception
}
}

@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.query.QueryShardContext;

@ -279,12 +278,10 @@ public class IndexFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
IndexFieldMapper indexFieldMapperMergeWith = (IndexFieldMapper) mergeWith;
if (!mergeResult.simulate()) {
if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) {
this.enabledState = indexFieldMapperMergeWith.enabledState;
}
if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) {
this.enabledState = indexFieldMapperMergeWith.enabledState;
}
}

@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;

@ -371,11 +370,11 @@ public class ParentFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
super.merge(mergeWith, mergeResult);
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith;
if (Objects.equals(parentType, fieldMergeWith.parentType) == false) {
mergeResult.addConflict("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]");
throw new IllegalArgumentException("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]");
}

List<String> conflicts = new ArrayList<>();

@ -383,13 +382,13 @@ public class ParentFieldMapper extends MetadataFieldMapper {
parentJoinFieldType.checkCompatibility(fieldMergeWith.parentJoinFieldType, conflicts, true); // same here
if (childJoinFieldType != null) {
// TODO: this can be set to false when the old parent/child impl is removed, we can do eager global ordinals loading per type.
childJoinFieldType.checkCompatibility(fieldMergeWith.childJoinFieldType, conflicts, mergeResult.updateAllTypes() == false);
childJoinFieldType.checkCompatibility(fieldMergeWith.childJoinFieldType, conflicts, updateAllTypes == false);
}
for (String conflict : conflicts) {
mergeResult.addConflict(conflict);
if (conflicts.isEmpty() == false) {
throw new IllegalArgumentException("Merge conflicts: " + conflicts);
}

if (active() && mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
if (active()) {
childJoinFieldType = fieldMergeWith.childJoinFieldType.clone();
}
}

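Note (illustrative, not from the commit): throughout these mapper changes the old MergeResult.addConflict reporting is replaced by the same small pattern — collect every incompatibility into a list, then throw once. A simplified, self-contained sketch of that pattern:

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical helper showing the post-MergeResult conflict handling used above:
    // gather all conflicts first, then fail with a single IllegalArgumentException.
    final class MergeConflictsSketch {
        static void checkParentType(String current, String incoming) {
            List<String> conflicts = new ArrayList<>();
            if (current.equals(incoming) == false) {
                conflicts.add("The _parent field's type option can't be changed: [" + current + "]->[" + incoming + "]");
            }
            if (conflicts.isEmpty() == false) {
                throw new IllegalArgumentException("Merge conflicts: " + conflicts);
            }
        }
    }
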
@ -31,7 +31,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;

@ -249,7 +248,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// do nothing here, no merging, but also no exception
}
}

@ -150,7 +150,7 @@ public class SeqNoFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// nothing to do
}
}

@ -29,13 +29,10 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.Compressor;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;

@ -44,21 +41,17 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue;

/**
 *

@ -72,8 +65,6 @@ public class SourceFieldMapper extends MetadataFieldMapper {
public static class Defaults {
public static final String NAME = SourceFieldMapper.NAME;
public static final boolean ENABLED = true;
public static final long COMPRESS_THRESHOLD = -1;
public static final String FORMAT = null; // default format is to use the one provided

public static final MappedFieldType FIELD_TYPE = new SourceFieldType();

@ -93,12 +84,6 @@ public class SourceFieldMapper extends MetadataFieldMapper {

private boolean enabled = Defaults.ENABLED;

private long compressThreshold = Defaults.COMPRESS_THRESHOLD;

private Boolean compress = null;

private String format = Defaults.FORMAT;

private String[] includes = null;
private String[] excludes = null;

@ -111,21 +96,6 @@ public class SourceFieldMapper extends MetadataFieldMapper {
return this;
}

public Builder compress(boolean compress) {
this.compress = compress;
return this;
}

public Builder compressThreshold(long compressThreshold) {
this.compressThreshold = compressThreshold;
return this;
}

public Builder format(String format) {
this.format = format;
return this;
}

public Builder includes(String[] includes) {
this.includes = includes;
return this;

@ -138,7 +108,7 @@ public class SourceFieldMapper extends MetadataFieldMapper {

@Override
public SourceFieldMapper build(BuilderContext context) {
return new SourceFieldMapper(enabled, format, compress, compressThreshold, includes, excludes, context.indexSettings());
return new SourceFieldMapper(enabled, includes, excludes, context.indexSettings());
}
}

@ -154,24 +124,8 @@ public class SourceFieldMapper extends MetadataFieldMapper {
if (fieldName.equals("enabled")) {
builder.enabled(nodeBooleanValue(fieldNode));
iterator.remove();
} else if (fieldName.equals("compress") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
if (fieldNode != null) {
builder.compress(nodeBooleanValue(fieldNode));
}
iterator.remove();
} else if (fieldName.equals("compress_threshold") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
if (fieldNode != null) {
if (fieldNode instanceof Number) {
builder.compressThreshold(((Number) fieldNode).longValue());
builder.compress(true);
} else {
builder.compressThreshold(ByteSizeValue.parseBytesSizeValue(fieldNode.toString(), "compress_threshold").bytes());
builder.compress(true);
}
}
iterator.remove();
} else if ("format".equals(fieldName)) {
builder.format(nodeStringValue(fieldNode, null));
} else if ("format".equals(fieldName) && parserContext.indexVersionCreated().before(Version.V_3_0_0)) {
// ignore on old indices, reject on and after 3.0
iterator.remove();
} else if (fieldName.equals("includes")) {
List<Object> values = (List<Object>) fieldNode;

@ -242,30 +196,18 @@ public class SourceFieldMapper extends MetadataFieldMapper {
/** indicates whether the source will always exist and be complete, for use by features like the update API */
private final boolean complete;

private Boolean compress;
private long compressThreshold;

private final String[] includes;
private final String[] excludes;

private String format;

private XContentType formatContentType;

private SourceFieldMapper(Settings indexSettings) {
this(Defaults.ENABLED, Defaults.FORMAT, null, -1, null, null, indexSettings);
this(Defaults.ENABLED, null, null, indexSettings);
}

private SourceFieldMapper(boolean enabled, String format, Boolean compress, long compressThreshold,
String[] includes, String[] excludes, Settings indexSettings) {
private SourceFieldMapper(boolean enabled, String[] includes, String[] excludes, Settings indexSettings) {
super(NAME, Defaults.FIELD_TYPE.clone(), Defaults.FIELD_TYPE, indexSettings); // Only stored.
this.enabled = enabled;
this.compress = compress;
this.compressThreshold = compressThreshold;
this.includes = includes;
this.excludes = excludes;
this.format = format;
this.formatContentType = format == null ? null : XContentType.fromRestContentType(format);
this.complete = enabled && includes == null && excludes == null;
}

@ -321,71 +263,11 @@ public class SourceFieldMapper extends MetadataFieldMapper {
Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(source, true);
Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), includes, excludes);
BytesStreamOutput bStream = new BytesStreamOutput();
StreamOutput streamOutput = bStream;
if (compress != null && compress && (compressThreshold == -1 || source.length() > compressThreshold)) {
streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
}
XContentType contentType = formatContentType;
if (contentType == null) {
contentType = mapTuple.v1();
}
XContentBuilder builder = XContentFactory.contentBuilder(contentType, streamOutput).map(filteredSource);
XContentType contentType = mapTuple.v1();
XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource);
builder.close();

source = bStream.bytes();
} else if (compress != null && compress && !CompressorFactory.isCompressed(source)) {
if (compressThreshold == -1 || source.length() > compressThreshold) {
BytesStreamOutput bStream = new BytesStreamOutput();
XContentType contentType = XContentFactory.xContentType(source);
if (formatContentType != null && formatContentType != contentType) {
XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, CompressorFactory.defaultCompressor().streamOutput(bStream));
builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source));
builder.close();
} else {
StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
source.writeTo(streamOutput);
streamOutput.close();
}
source = bStream.bytes();
// update the data in the context, so it can be compressed and stored compressed outside...
context.source(source);
}
} else if (formatContentType != null) {
// see if we need to convert the content type
Compressor compressor = CompressorFactory.compressor(source);
if (compressor != null) {
InputStream compressedStreamInput = compressor.streamInput(source.streamInput());
if (compressedStreamInput.markSupported() == false) {
compressedStreamInput = new BufferedInputStream(compressedStreamInput);
}
XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
if (contentType != formatContentType) {
// we need to reread and store back, compressed....
BytesStreamOutput bStream = new BytesStreamOutput();
StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, streamOutput);
builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(compressedStreamInput));
builder.close();
source = bStream.bytes();
// update the data in the context, so we store it in the translog in this format
context.source(source);
} else {
compressedStreamInput.close();
}
} else {
XContentType contentType = XContentFactory.xContentType(source);
if (contentType != formatContentType) {
// we need to reread and store back
// we need to reread and store back, compressed....
BytesStreamOutput bStream = new BytesStreamOutput();
XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, bStream);
builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source));
builder.close();
source = bStream.bytes();
// update the data in the context, so we store it in the translog in this format
context.source(source);
}
}
}
if (!source.hasArray()) {
source = source.toBytesArray();

@ -403,26 +285,13 @@ public class SourceFieldMapper extends MetadataFieldMapper {
boolean includeDefaults = params.paramAsBoolean("include_defaults", false);

// all are defaults, no need to write it at all
if (!includeDefaults && enabled == Defaults.ENABLED && compress == null && compressThreshold == -1 && includes == null && excludes == null) {
if (!includeDefaults && enabled == Defaults.ENABLED && includes == null && excludes == null) {
return builder;
}
builder.startObject(contentType());
if (includeDefaults || enabled != Defaults.ENABLED) {
builder.field("enabled", enabled);
}
if (includeDefaults || !Objects.equals(format, Defaults.FORMAT)) {
builder.field("format", format);
}
if (compress != null) {
builder.field("compress", compress);
} else if (includeDefaults) {
builder.field("compress", false);
}
if (compressThreshold != -1) {
builder.field("compress_threshold", new ByteSizeValue(compressThreshold).toString());
} else if (includeDefaults) {
builder.field("compress_threshold", -1);
}

if (includes != null) {
builder.field("includes", includes);

@ -441,25 +310,20 @@ public class SourceFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith;
if (mergeResult.simulate()) {
if (this.enabled != sourceMergeWith.enabled) {
mergeResult.addConflict("Cannot update enabled setting for [_source]");
}
if (Arrays.equals(includes(), sourceMergeWith.includes()) == false) {
mergeResult.addConflict("Cannot update includes setting for [_source]");
}
if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) {
mergeResult.addConflict("Cannot update excludes setting for [_source]");
}
} else {
if (sourceMergeWith.compress != null) {
this.compress = sourceMergeWith.compress;
}
if (sourceMergeWith.compressThreshold != -1) {
this.compressThreshold = sourceMergeWith.compressThreshold;
}
List<String> conflicts = new ArrayList<>();
if (this.enabled != sourceMergeWith.enabled) {
conflicts.add("Cannot update enabled setting for [_source]");
}
if (Arrays.equals(includes(), sourceMergeWith.includes()) == false) {
conflicts.add("Cannot update includes setting for [_source]");
}
if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) {
conflicts.add("Cannot update excludes setting for [_source]");
}
if (conflicts.isEmpty() == false) {
throw new IllegalArgumentException("Can't merge because of conflicts: " + conflicts);
}
}

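Note (illustrative, not from the commit): after removing compress/compress_threshold/format, the only remaining _source transformation is includes/excludes filtering via the XContentMapValues.filter helper used in the hunk above. A minimal sketch of what that filtering does to a source map, assuming that helper's Map/String[]/String[] signature:

    import java.util.HashMap;
    import java.util.Map;

    import org.elasticsearch.common.xcontent.support.XContentMapValues;

    public class SourceFilterSketch {
        public static void main(String[] args) {
            Map<String, Object> source = new HashMap<>();
            source.put("title", "doc");
            source.put("secret", "hidden");
            // keep only "title"; "secret" is stripped before _source is stored
            Map<String, Object> filtered =
                XContentMapValues.filter(source, new String[] { "title" }, new String[0]);
            System.out.println(filtered); // {title=doc}
        }
    }
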
@ -32,7 +32,6 @@ import org.elasticsearch.index.analysis.NumericLongAnalyzer;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.SourceToParse;

@ -258,21 +257,19 @@ public class TTLFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
TTLFieldMapper ttlMergeWith = (TTLFieldMapper) mergeWith;
if (((TTLFieldMapper) mergeWith).enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with
if (this.enabledState == EnabledAttributeMapper.ENABLED && ((TTLFieldMapper) mergeWith).enabledState == EnabledAttributeMapper.DISABLED) {
mergeResult.addConflict("_ttl cannot be disabled once it was enabled.");
if (ttlMergeWith.enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with
if (this.enabledState == EnabledAttributeMapper.ENABLED && ttlMergeWith.enabledState == EnabledAttributeMapper.DISABLED) {
throw new IllegalArgumentException("_ttl cannot be disabled once it was enabled.");
} else {
if (!mergeResult.simulate()) {
this.enabledState = ttlMergeWith.enabledState;
}
this.enabledState = ttlMergeWith.enabledState;
}
}
if (ttlMergeWith.defaultTTL != -1) {
// we never build the default when the field is disabled so we should also not set it
// (it does not make a difference though as everything that is not build in toXContent will also not be set in the cluster)
if (!mergeResult.simulate() && (enabledState == EnabledAttributeMapper.ENABLED)) {
if (enabledState == EnabledAttributeMapper.ENABLED) {
this.defaultTTL = ttlMergeWith.defaultTTL;
}
}

@ -33,13 +33,13 @@ import org.elasticsearch.index.analysis.NumericDateAnalyzer;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.core.DateFieldMapper;
import org.elasticsearch.index.mapper.core.LongFieldMapper;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

@ -379,31 +379,32 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
TimestampFieldMapper timestampFieldMapperMergeWith = (TimestampFieldMapper) mergeWith;
super.merge(mergeWith, mergeResult);
if (!mergeResult.simulate()) {
if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) {
this.enabledState = timestampFieldMapperMergeWith.enabledState;
}
} else {
if (timestampFieldMapperMergeWith.defaultTimestamp() == null && defaultTimestamp == null) {
return;
}
if (defaultTimestamp == null) {
mergeResult.addConflict("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
} else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) {
mergeResult.addConflict("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null");
} else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) {
mergeResult.addConflict("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
}
if (this.path != null) {
if (path.equals(timestampFieldMapperMergeWith.path()) == false) {
mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path()));
}
} else if (timestampFieldMapperMergeWith.path() != null) {
mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is missing");
super.doMerge(mergeWith, updateAllTypes);
if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) {
this.enabledState = timestampFieldMapperMergeWith.enabledState;
}
if (timestampFieldMapperMergeWith.defaultTimestamp() == null && defaultTimestamp == null) {
return;
}
List<String> conflicts = new ArrayList<>();
if (defaultTimestamp == null) {
conflicts.add("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
} else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) {
conflicts.add("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null");
} else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) {
conflicts.add("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
}
if (this.path != null) {
if (path.equals(timestampFieldMapperMergeWith.path()) == false) {
conflicts.add("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path()));
}
} else if (timestampFieldMapperMergeWith.path() != null) {
conflicts.add("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is missing");
}
if (conflicts.isEmpty() == false) {
throw new IllegalArgumentException("Conflicts: " + conflicts);
}
}
}

@ -40,7 +40,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;

@ -225,7 +224,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// do nothing here, no merging, but also no exception
}
}

@ -33,7 +33,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParseContext.Document;

@ -225,7 +224,7 @@ public class UidFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// do nothing here, no merging, but also no exception
}
}

@ -30,7 +30,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParseContext.Document;

@ -166,7 +165,7 @@ public class VersionFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// nothing to do
}
}

@ -122,8 +122,7 @@ public class IpFieldMapper extends NumberFieldMapper {
setupFieldType(context);
IpFieldMapper fieldMapper = new IpFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context),
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
fieldMapper.includeInAll(includeInAll);
return fieldMapper;
return (IpFieldMapper) fieldMapper.includeInAll(includeInAll);
}

@Override

@ -160,7 +160,7 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
context.path().remove();

ObjectMapper objectMapper = createMapper(name, context.path().fullPathAsText(name), enabled, nested, dynamic, pathType, mappers, context.indexSettings());
objectMapper.includeInAllIfNotSet(includeInAll);
objectMapper = objectMapper.includeInAllIfNotSet(includeInAll);

return (Y) objectMapper;
}

@ -389,41 +389,53 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
}

@Override
public void includeInAll(Boolean includeInAll) {
public ObjectMapper includeInAll(Boolean includeInAll) {
if (includeInAll == null) {
return;
return this;
}
this.includeInAll = includeInAll;

ObjectMapper clone = clone();
clone.includeInAll = includeInAll;
// when called from outside, apply this on all the inner mappers
for (Mapper mapper : mappers.values()) {
for (Mapper mapper : clone.mappers.values()) {
if (mapper instanceof AllFieldMapper.IncludeInAll) {
((AllFieldMapper.IncludeInAll) mapper).includeInAll(includeInAll);
clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).includeInAll(includeInAll));
}
}
return clone;
}

@Override
public void includeInAllIfNotSet(Boolean includeInAll) {
if (this.includeInAll == null) {
this.includeInAll = includeInAll;
public ObjectMapper includeInAllIfNotSet(Boolean includeInAll) {
if (includeInAll == null || this.includeInAll != null) {
return this;
}

ObjectMapper clone = clone();
clone.includeInAll = includeInAll;
// when called from outside, apply this on all the inner mappers
for (Mapper mapper : mappers.values()) {
for (Mapper mapper : clone.mappers.values()) {
if (mapper instanceof AllFieldMapper.IncludeInAll) {
((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll);
clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll));
}
}
return clone;
}

@Override
public void unsetIncludeInAll() {
includeInAll = null;
public ObjectMapper unsetIncludeInAll() {
if (includeInAll == null) {
return this;
}
ObjectMapper clone = clone();
clone.includeInAll = null;
// when called from outside, apply this on all the inner mappers
for (Mapper mapper : mappers.values()) {
if (mapper instanceof AllFieldMapper.IncludeInAll) {
((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll();
clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll());
}
}
return clone;
}

public Nested nested() {

@ -434,14 +446,9 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
return this.nestedTypeFilter;
}

/**
* Put a new mapper.
* NOTE: this method must be called under the current {@link DocumentMapper}
* lock if concurrent updates are expected.
*/
public void putMapper(Mapper mapper) {
protected void putMapper(Mapper mapper) {
if (mapper instanceof AllFieldMapper.IncludeInAll) {
((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll);
mapper = ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll);
}
mappers = mappers.copyAndPut(mapper.simpleName(), mapper);
}

@ -464,64 +471,43 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
}

@Override
public void merge(final Mapper mergeWith, final MergeResult mergeResult) {
public ObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) {
if (!(mergeWith instanceof ObjectMapper)) {
mergeResult.addConflict("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]");
return;
throw new IllegalArgumentException("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]");
}
ObjectMapper mergeWithObject = (ObjectMapper) mergeWith;

if (nested().isNested()) {
if (!mergeWithObject.nested().isNested()) {
mergeResult.addConflict("object mapping [" + name() + "] can't be changed from nested to non-nested");
return;
}
} else {
if (mergeWithObject.nested().isNested()) {
mergeResult.addConflict("object mapping [" + name() + "] can't be changed from non-nested to nested");
return;
}
}

if (!mergeResult.simulate()) {
if (mergeWithObject.dynamic != null) {
this.dynamic = mergeWithObject.dynamic;
}
}

doMerge(mergeWithObject, mergeResult);

List<Mapper> mappersToPut = new ArrayList<>();
List<ObjectMapper> newObjectMappers = new ArrayList<>();
List<FieldMapper> newFieldMappers = new ArrayList<>();
for (Mapper mapper : mergeWithObject) {
Mapper mergeWithMapper = mapper;
Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName());
if (mergeIntoMapper == null) {
// no mapping, simply add it if not simulating
if (!mergeResult.simulate()) {
mappersToPut.add(mergeWithMapper);
MapperUtils.collect(mergeWithMapper, newObjectMappers, newFieldMappers);
}
} else if (mergeIntoMapper instanceof MetadataFieldMapper == false) {
// root mappers can only exist here for backcompat, and are merged in Mapping
mergeIntoMapper.merge(mergeWithMapper, mergeResult);
}
}
if (!newFieldMappers.isEmpty()) {
mergeResult.addFieldMappers(newFieldMappers);
}
if (!newObjectMappers.isEmpty()) {
mergeResult.addObjectMappers(newObjectMappers);
}
// add the mappers only after the administration have been done, so it will not be visible to parser (which first try to read with no lock)
for (Mapper mapper : mappersToPut) {
putMapper(mapper);
}
ObjectMapper merged = clone();
merged.doMerge(mergeWithObject, updateAllTypes);
return merged;
}

protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) {
protected void doMerge(final ObjectMapper mergeWith, boolean updateAllTypes) {
if (nested().isNested()) {
if (!mergeWith.nested().isNested()) {
throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from nested to non-nested");
}
} else {
if (mergeWith.nested().isNested()) {
throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from non-nested to nested");
}
}

if (mergeWith.dynamic != null) {
this.dynamic = mergeWith.dynamic;
}

for (Mapper mergeWithMapper : mergeWith) {
Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName());
Mapper merged;
if (mergeIntoMapper == null) {
// no mapping, simply add it
merged = mergeWithMapper;
} else {
// root mappers can only exist here for backcompat, and are merged in Mapping
merged = mergeIntoMapper.merge(mergeWithMapper, updateAllTypes);
}
putMapper(merged);
}
}

@Override

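Note (illustrative, not from the commit): the ObjectMapper change above makes merging non-destructive — merge() clones, applies the incoming mapper on the clone, and returns it. A minimal, generic sketch of that flow with hypothetical names:

    // Simplified shape of the new merge flow: `this` is never mutated; the caller
    // receives a merged copy and decides whether to publish it.
    abstract class ImmutableMapperSketch implements Cloneable {
        ImmutableMapperSketch merge(ImmutableMapperSketch mergeWith, boolean updateAllTypes) {
            ImmutableMapperSketch merged = clone();
            merged.doMerge(mergeWith, updateAllTypes);
            return merged;
        }

        protected abstract void doMerge(ImmutableMapperSketch mergeWith, boolean updateAllTypes);

        @Override
        protected ImmutableMapperSketch clone() {
            try {
                return (ImmutableMapperSketch) super.clone();
            } catch (CloneNotSupportedException e) {
                throw new AssertionError(e);
            }
        }
    }
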
@ -205,6 +205,14 @@ public class RootObjectMapper extends ObjectMapper {
this.numericDetection = numericDetection;
}

/** Return a copy of this mapper that has the given {@code mapper} as a
* sub mapper. */
public RootObjectMapper copyAndPutMapper(Mapper mapper) {
RootObjectMapper clone = (RootObjectMapper) clone();
clone.putMapper(mapper);
return clone;
}

@Override
public ObjectMapper mappingUpdate(Mapper mapper) {
RootObjectMapper update = (RootObjectMapper) super.mappingUpdate(mapper);

@ -253,25 +261,29 @@ public class RootObjectMapper extends ObjectMapper {
}

@Override
protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) {
public RootObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) {
return (RootObjectMapper) super.merge(mergeWith, updateAllTypes);
}

@Override
protected void doMerge(ObjectMapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith;
if (!mergeResult.simulate()) {
// merge them
List<DynamicTemplate> mergedTemplates = new ArrayList<>(Arrays.asList(this.dynamicTemplates));
for (DynamicTemplate template : mergeWithObject.dynamicTemplates) {
boolean replaced = false;
for (int i = 0; i < mergedTemplates.size(); i++) {
if (mergedTemplates.get(i).name().equals(template.name())) {
mergedTemplates.set(i, template);
replaced = true;
}
}
if (!replaced) {
mergedTemplates.add(template);
// merge them
List<DynamicTemplate> mergedTemplates = new ArrayList<>(Arrays.asList(this.dynamicTemplates));
for (DynamicTemplate template : mergeWithObject.dynamicTemplates) {
boolean replaced = false;
for (int i = 0; i < mergedTemplates.size(); i++) {
if (mergedTemplates.get(i).name().equals(template.name())) {
mergedTemplates.set(i, template);
replaced = true;
}
}
this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]);
if (!replaced) {
mergedTemplates.add(template);
}
}
this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]);
}

@Override

@ -1,234 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.query;
|
||||
|
||||
import org.apache.lucene.search.BooleanClause;
|
||||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.search.ConstantScoreQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.TermRangeQuery;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.lucene.search.Queries;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper;
|
||||
import org.elasticsearch.index.mapper.object.ObjectMapper;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Constructs a filter that have only null values or no value in the original field.
|
||||
*/
|
||||
public class MissingQueryBuilder extends AbstractQueryBuilder<MissingQueryBuilder> {
|
||||
|
||||
public static final String NAME = "missing";
|
||||
|
||||
public static final boolean DEFAULT_NULL_VALUE = false;
|
||||
|
||||
public static final boolean DEFAULT_EXISTENCE_VALUE = true;
|
||||
|
||||
private final String fieldPattern;
|
||||
|
||||
private final boolean nullValue;
|
||||
|
||||
private final boolean existence;
|
||||
|
||||
static final MissingQueryBuilder PROTOTYPE = new MissingQueryBuilder("field", DEFAULT_NULL_VALUE, DEFAULT_EXISTENCE_VALUE);
|
||||
|
||||
/**
|
||||
* Constructs a filter that returns documents with only null values or no value in the original field.
|
||||
* @param fieldPattern the field to query
|
||||
* @param nullValue should the missing filter automatically include fields with null value configured in the
|
||||
* mappings. Defaults to <tt>false</tt>.
|
||||
* @param existence should the missing filter include documents where the field doesn't exist in the docs.
|
||||
* Defaults to <tt>true</tt>.
|
||||
* @throws IllegalArgumentException when both <tt>existence</tt> and <tt>nullValue</tt> are set to false
|
||||
*/
|
||||
public MissingQueryBuilder(String fieldPattern, boolean nullValue, boolean existence) {
|
||||
if (Strings.isEmpty(fieldPattern)) {
|
||||
throw new IllegalArgumentException("missing query must be provided with a [field]");
|
||||
}
|
||||
if (nullValue == false && existence == false) {
|
||||
throw new IllegalArgumentException("missing query must have either 'existence', or 'null_value', or both set to true");
|
||||
}
|
||||
this.fieldPattern = fieldPattern;
|
||||
this.nullValue = nullValue;
|
||||
this.existence = existence;
|
||||
}
|
||||
|
||||
public MissingQueryBuilder(String fieldPattern) {
|
||||
this(fieldPattern, DEFAULT_NULL_VALUE, DEFAULT_EXISTENCE_VALUE);
|
||||
}
|
||||
|
||||
public String fieldPattern() {
|
||||
return this.fieldPattern;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the missing filter will include documents where the field contains a null value, otherwise
|
||||
* these documents will not be included.
|
||||
*/
|
||||
public boolean nullValue() {
|
||||
return this.nullValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the missing filter will include documents where the field has no values, otherwise
|
||||
* these documents will not be included.
|
||||
*/
|
||||
public boolean existence() {
|
||||
return this.existence;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(NAME);
|
||||
builder.field(MissingQueryParser.FIELD_FIELD.getPreferredName(), fieldPattern);
|
||||
builder.field(MissingQueryParser.NULL_VALUE_FIELD.getPreferredName(), nullValue);
|
||||
builder.field(MissingQueryParser.EXISTENCE_FIELD.getPreferredName(), existence);
|
||||
printBoostAndQueryName(builder);
|
||||
builder.endObject();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getWriteableName() {
|
||||
return NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Query doToQuery(QueryShardContext context) throws IOException {
|
||||
return newFilter(context, fieldPattern, existence, nullValue);
|
||||
}
|
||||
|
||||
public static Query newFilter(QueryShardContext context, String fieldPattern, boolean existence, boolean nullValue) {
|
||||
if (!existence && !nullValue) {
|
||||
throw new QueryShardException(context, "missing must have either existence, or null_value, or both set to true");
|
||||
}
|
||||
|
||||
final FieldNamesFieldMapper.FieldNamesFieldType fieldNamesFieldType = (FieldNamesFieldMapper.FieldNamesFieldType) context.getMapperService().fullName(FieldNamesFieldMapper.NAME);
|
||||
if (fieldNamesFieldType == null) {
|
||||
// can only happen when no types exist, so no docs exist either
|
||||
return Queries.newMatchNoDocsQuery();
|
||||
}
|
||||
|
||||
ObjectMapper objectMapper = context.getObjectMapper(fieldPattern);
|
||||
if (objectMapper != null) {
|
||||
// automatic make the object mapper pattern
|
||||
fieldPattern = fieldPattern + ".*";
|
||||
}
|
||||
|
||||
Collection<String> fields = context.simpleMatchToIndexNames(fieldPattern);
|
||||
if (fields.isEmpty()) {
|
||||
if (existence) {
|
||||
// if we ask for existence of fields, and we found none, then we should match on all
|
||||
return Queries.newMatchAllQuery();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
Query existenceFilter = null;
|
||||
Query nullFilter = null;
|
||||
|
||||
if (existence) {
|
||||
BooleanQuery.Builder boolFilter = new BooleanQuery.Builder();
|
||||
for (String field : fields) {
|
||||
MappedFieldType fieldType = context.fieldMapper(field);
|
||||
Query filter = null;
|
||||
if (fieldNamesFieldType.isEnabled()) {
|
||||
final String f;
|
||||
if (fieldType != null) {
|
||||
f = fieldType.names().indexName();
|
||||
} else {
|
||||
f = field;
|
||||
}
|
||||
filter = fieldNamesFieldType.termQuery(f, context);
|
||||
}
|
||||
// if _field_names are not indexed, we need to go the slow way
|
||||
if (filter == null && fieldType != null) {
|
||||
filter = fieldType.rangeQuery(null, null, true, true);
|
||||
}
|
||||
if (filter == null) {
|
||||
filter = new TermRangeQuery(field, null, null, true, true);
|
||||
}
|
||||
boolFilter.add(filter, BooleanClause.Occur.SHOULD);
|
||||
}
|
||||
|
||||
existenceFilter = boolFilter.build();
|
||||
existenceFilter = Queries.not(existenceFilter);;
|
||||
}
|
||||
|
||||
if (nullValue) {
|
||||
for (String field : fields) {
|
||||
MappedFieldType fieldType = context.fieldMapper(field);
|
||||
if (fieldType != null) {
|
||||
nullFilter = fieldType.nullValueQuery();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Query filter;
|
||||
if (nullFilter != null) {
|
||||
if (existenceFilter != null) {
|
||||
filter = new BooleanQuery.Builder()
|
||||
.add(existenceFilter, BooleanClause.Occur.SHOULD)
|
||||
.add(nullFilter, BooleanClause.Occur.SHOULD)
|
||||
.build();
|
||||
} else {
|
||||
filter = nullFilter;
|
||||
}
|
||||
} else {
|
||||
filter = existenceFilter;
|
||||
}
|
||||
|
||||
if (filter == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return new ConstantScoreQuery(filter);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected MissingQueryBuilder doReadFrom(StreamInput in) throws IOException {
|
||||
return new MissingQueryBuilder(in.readString(), in.readBoolean(), in.readBoolean());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doWriteTo(StreamOutput out) throws IOException {
|
||||
out.writeString(fieldPattern);
|
||||
out.writeBoolean(nullValue);
|
||||
out.writeBoolean(existence);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int doHashCode() {
|
||||
return Objects.hash(fieldPattern, nullValue, existence);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean doEquals(MissingQueryBuilder other) {
|
||||
return Objects.equals(fieldPattern, other.fieldPattern) &&
|
||||
Objects.equals(nullValue, other.nullValue) &&
|
||||
Objects.equals(existence, other.existence);
|
||||
}
|
||||
}
|
|
@ -1,88 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.query;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

/**
 * Parser for missing query
 */
public class MissingQueryParser implements QueryParser<MissingQueryBuilder> {

public static final ParseField FIELD_FIELD = new ParseField("field");
public static final ParseField NULL_VALUE_FIELD = new ParseField("null_value");
public static final ParseField EXISTENCE_FIELD = new ParseField("existence");

@Override
public String[] names() {
return new String[]{MissingQueryBuilder.NAME};
}

@Override
public MissingQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();

String fieldPattern = null;
String queryName = null;
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
boolean nullValue = MissingQueryBuilder.DEFAULT_NULL_VALUE;
boolean existence = MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE;

XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (parseContext.parseFieldMatcher().match(currentFieldName, FIELD_FIELD)) {
fieldPattern = parser.text();
} else if (parseContext.parseFieldMatcher().match(currentFieldName, NULL_VALUE_FIELD)) {
nullValue = parser.booleanValue();
} else if (parseContext.parseFieldMatcher().match(currentFieldName, EXISTENCE_FIELD)) {
existence = parser.booleanValue();
} else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
} else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
}

if (fieldPattern == null) {
throw new ParsingException(parser.getTokenLocation(), "missing must be provided with a [field]");
}
return new MissingQueryBuilder(fieldPattern, nullValue, existence)
.boost(boost)
.queryName(queryName);
}

@Override
public MissingQueryBuilder getBuilderPrototype() {
return MissingQueryBuilder.PROTOTYPE;
}
}

@ -810,27 +810,6 @@ public abstract class QueryBuilders {
return new ExistsQueryBuilder(name);
}

/**
 * A filter to filter only documents where a field does not exists in them.
 * @param name the field to query
 */
public static MissingQueryBuilder missingQuery(String name) {
return missingQuery(name, MissingQueryBuilder.DEFAULT_NULL_VALUE, MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE);
}

/**
 * A filter to filter only documents where a field does not exists in them.
 * @param name the field to query
 * @param nullValue should the missing filter automatically include fields with null value configured in the
 * mappings. Defaults to <tt>false</tt>.
 * @param existence should the missing filter include documents where the field doesn't exist in the docs.
 * Defaults to <tt>true</tt>.
 * @throws IllegalArgumentException when both <tt>existence</tt> and <tt>nullValue</tt> are set to false
 */
public static MissingQueryBuilder missingQuery(String name, boolean nullValue, boolean existence) {
return new MissingQueryBuilder(name, nullValue, existence);
}

private QueryBuilders() {

}

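Note (assumed migration path, not part of this commit): with MissingQueryBuilder, MissingQueryParser, and the QueryBuilders.missingQuery helpers removed, one common equivalent for "field has no value" is a bool query wrapping must_not around an exists query:

    import org.elasticsearch.index.query.QueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    // Replacement sketch for callers of the removed missingQuery() helpers.
    public class MissingQueryMigration {
        public static QueryBuilder missing(String field) {
            return QueryBuilders.boolQuery()
                .mustNot(QueryBuilders.existsQuery(field));
        }
    }
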
@ -19,13 +19,11 @@

package org.elasticsearch.index.query.functionscore;

import java.util.Map;

import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.functionscore.exp.ExponentialDecayFunctionParser;
import org.elasticsearch.index.query.functionscore.fieldvaluefactor.FieldValueFactorFunctionParser;
import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionParser;

@ -74,11 +72,12 @@ public class ScoreFunctionParserMapper {
return functionParsers.get(parserName);
}

private static void addParser(ScoreFunctionParser<?> scoreFunctionParser, Map<String, ScoreFunctionParser<?>> map, NamedWriteableRegistry namedWriteableRegistry) {
private static void addParser(ScoreFunctionParser<? extends ScoreFunctionBuilder> scoreFunctionParser, Map<String, ScoreFunctionParser<?>> map, NamedWriteableRegistry namedWriteableRegistry) {
for (String name : scoreFunctionParser.getNames()) {
map.put(name, scoreFunctionParser);

}
namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, scoreFunctionParser.getBuilderPrototype());
@SuppressWarnings("unchecked") NamedWriteable<? extends ScoreFunctionBuilder> sfb = scoreFunctionParser.getBuilderPrototype();
namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, sfb);
}
}

@ -110,7 +110,7 @@ public class TranslogRecoveryPerformer {
if (currentUpdate == null) {
recoveredTypes.put(type, update);
} else {
MapperUtils.merge(currentUpdate, update);
currentUpdate = currentUpdate.merge(update, false);
}
}

@ -48,22 +48,27 @@ public final class BufferingTranslogWriter extends TranslogWriter {
public Translog.Location add(BytesReference data) throws IOException {
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
operationCounter++;
final long offset = totalOffset;
if (data.length() >= buffer.length) {
flush();
// we use the channel to write, since on windows, writing to the RAF might not be reflected
// when reading through the channel
data.writeTo(channel);
try {
data.writeTo(channel);
} catch (Throwable ex) {
closeWithTragicEvent(ex);
throw ex;
}
writtenOffset += data.length();
totalOffset += data.length();
return new Translog.Location(generation, offset, data.length());
} else {
if (data.length() > buffer.length - bufferCount) {
flush();
}
data.writeTo(bufferOs);
totalOffset += data.length();
}
if (data.length() > buffer.length - bufferCount) {
flush();
}
data.writeTo(bufferOs);
totalOffset += data.length();
operationCounter++;
return new Translog.Location(generation, offset, data.length());
}
}

@ -71,10 +76,17 @@ public final class BufferingTranslogWriter extends TranslogWriter {
protected final void flush() throws IOException {
assert writeLock.isHeldByCurrentThread();
if (bufferCount > 0) {
ensureOpen();
// we use the channel to write, since on windows, writing to the RAF might not be reflected
// when reading through the channel
Channels.writeToChannel(buffer, 0, bufferCount, channel);
writtenOffset += bufferCount;
final int bufferSize = bufferCount;
try {
Channels.writeToChannel(buffer, 0, bufferSize, channel);
} catch (Throwable ex) {
closeWithTragicEvent(ex);
throw ex;
}
writtenOffset += bufferSize;
bufferCount = 0;
}
}

@ -102,20 +114,28 @@ public final class BufferingTranslogWriter extends TranslogWriter {
}

@Override
public void sync() throws IOException {
if (!syncNeeded()) {
return;
}
synchronized (this) {
public synchronized void sync() throws IOException {
if (syncNeeded()) {
ensureOpen(); // this call gives a better exception that the incRef if we are closed by a tragic event
channelReference.incRef();
try {
final long offsetToSync;
final int opsCounter;
try (ReleasableLock lock = writeLock.acquire()) {
flush();
lastSyncedOffset = totalOffset;
offsetToSync = totalOffset;
opsCounter = operationCounter;
}
// we can do this outside of the write lock but we have to protect from
// concurrent syncs
checkpoint(lastSyncedOffset, operationCounter, channelReference);
ensureOpen(); // just for kicks - the checkpoint happens or not either way
try {
checkpoint(offsetToSync, opsCounter, channelReference);
} catch (Throwable ex) {
closeWithTragicEvent(ex);
throw ex;
}
lastSyncedOffset = offsetToSync;
} finally {
channelReference.decRef();
}

@ -115,7 +115,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
private final Path location;
private TranslogWriter current;
private volatile ImmutableTranslogReader currentCommittingTranslog;
private long lastCommittedTranslogFileGeneration = -1; // -1 is safe as it will not cause a translog deletion.
private volatile long lastCommittedTranslogFileGeneration = -1; // -1 is safe as it will not cause a translog deletion.
private final AtomicBoolean closed = new AtomicBoolean();
private final TranslogConfig config;
private final String translogUUID;

@ -279,7 +279,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
}
}

boolean isOpen() {
/** Returns {@code true} if this {@code Translog} is still open. */
public boolean isOpen() {
return closed.get() == false;
}

@ -288,10 +289,14 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
if (closed.compareAndSet(false, true)) {
try (ReleasableLock lock = writeLock.acquire()) {
try {
IOUtils.close(current, currentCommittingTranslog);
current.sync();
} finally {
IOUtils.close(recoveredTranslogs);
recoveredTranslogs.clear();
try {
IOUtils.close(current, currentCommittingTranslog);
} finally {
IOUtils.close(recoveredTranslogs);
recoveredTranslogs.clear();
}
}
} finally {
FutureUtils.cancel(syncScheduler);

@ -354,7 +359,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
TranslogWriter createWriter(long fileGeneration) throws IOException {
TranslogWriter newFile;
try {
newFile = TranslogWriter.create(config.getType(), shardId, translogUUID, fileGeneration, location.resolve(getFilename(fileGeneration)), new OnCloseRunnable(), config.getBufferSize());
newFile = TranslogWriter.create(config.getType(), shardId, translogUUID, fileGeneration, location.resolve(getFilename(fileGeneration)), new OnCloseRunnable(), config.getBufferSize(), getChannelFactory());
} catch (IOException e) {
throw new TranslogException(shardId, "failed to create new translog file", e);
}

@ -393,7 +398,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
* @see Index
* @see org.elasticsearch.index.translog.Translog.Delete
*/
public Location add(Operation operation) throws TranslogException {
public Location add(Operation operation) throws IOException {
final ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(bigArrays);
try {
final BufferedChecksumStreamOutput checksumStreamOutput = new BufferedChecksumStreamOutput(out);

@ -415,7 +420,14 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
assert current.assertBytesAtLocation(location, bytes);
return location;
}
} catch (AlreadyClosedException ex) {
} catch (AlreadyClosedException | IOException ex) {
if (current.getTragicException() != null) {
try {
close();
} catch (Exception inner) {
ex.addSuppressed(inner);
}
}
throw ex;
} catch (Throwable e) {
throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e);

@ -429,6 +441,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
* Snapshots are fixed in time and will not be updated with future operations.
*/
public Snapshot newSnapshot() {
ensureOpen();
try (ReleasableLock lock = readLock.acquire()) {
ArrayList<TranslogReader> toOpen = new ArrayList<>();
toOpen.addAll(recoveredTranslogs);

@ -493,6 +506,15 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
if (closed.get() == false) {
current.sync();
}
} catch (AlreadyClosedException | IOException ex) {
if (current.getTragicException() != null) {
try {
close();
} catch (Exception inner) {
ex.addSuppressed(inner);
}
}
throw ex;
}
}

@ -520,6 +542,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
public boolean ensureSynced(Location location) throws IOException {
try (ReleasableLock lock = readLock.acquire()) {
if (location.generation == current.generation) { // if we have a new one it's already synced
ensureOpen();
return current.syncUpTo(location.translogLocation + location.size);
}
}

@ -548,31 +571,29 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
private final class OnCloseRunnable implements Callback<ChannelReference> {
@Override
public void handle(ChannelReference channelReference) {
try (ReleasableLock lock = writeLock.acquire()) {
if (isReferencedGeneration(channelReference.getGeneration()) == false) {
Path translogPath = channelReference.getPath();
assert channelReference.getPath().getParent().equals(location) : "translog files must be in the location folder: " + location + " but was: " + translogPath;
// if the given translogPath is not the current we can safely delete the file since all references are released
logger.trace("delete translog file - not referenced and not current anymore {}", translogPath);
IOUtils.deleteFilesIgnoringExceptions(translogPath);
IOUtils.deleteFilesIgnoringExceptions(translogPath.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration())));
if (isReferencedGeneration(channelReference.getGeneration()) == false) {
Path translogPath = channelReference.getPath();
assert channelReference.getPath().getParent().equals(location) : "translog files must be in the location folder: " + location + " but was: " + translogPath;
// if the given translogPath is not the current we can safely delete the file since all references are released
logger.trace("delete translog file - not referenced and not current anymore {}", translogPath);
IOUtils.deleteFilesIgnoringExceptions(translogPath);
IOUtils.deleteFilesIgnoringExceptions(translogPath.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration())));
}
try (DirectoryStream<Path> stream = Files.newDirectoryStream(location)) {
for (Path path : stream) {
Matcher matcher = PARSE_STRICT_ID_PATTERN.matcher(path.getFileName().toString());
if (matcher.matches()) {
long generation = Long.parseLong(matcher.group(1));
if (isReferencedGeneration(generation) == false) {
logger.trace("delete translog file - not referenced and not current anymore {}", path);
IOUtils.deleteFilesIgnoringExceptions(path);
IOUtils.deleteFilesIgnoringExceptions(path.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration())));
}
}
try (DirectoryStream<Path> stream = Files.newDirectoryStream(location)) {
for (Path path : stream) {
Matcher matcher = PARSE_STRICT_ID_PATTERN.matcher(path.getFileName().toString());
if (matcher.matches()) {
long generation = Long.parseLong(matcher.group(1));
if (isReferencedGeneration(generation) == false) {
logger.trace("delete translog file - not referenced and not current anymore {}", path);
IOUtils.deleteFilesIgnoringExceptions(path);
IOUtils.deleteFilesIgnoringExceptions(path.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration())));
}
}
} catch (IOException e) {
logger.warn("failed to delete unreferenced translog files", e);
}
} catch (IOException e) {
logger.warn("failed to delete unreferenced translog files", e);
}
}
}

@ -1318,6 +1339,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
throw new IllegalStateException("already committing a translog with generation: " + currentCommittingTranslog.getGeneration());
}
final TranslogWriter oldCurrent = current;
oldCurrent.ensureOpen();
oldCurrent.sync();
currentCommittingTranslog = current.immutableReader();
Path checkpoint = location.resolve(CHECKPOINT_FILE_NAME);

@ -1413,7 +1435,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC

private void ensureOpen() {
if (closed.get()) {
throw new AlreadyClosedException("translog is already closed");
throw new AlreadyClosedException("translog is already closed", current.getTragicException());
}
}

@ -1424,4 +1446,15 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
return outstandingViews.size();
}

TranslogWriter.ChannelFactory getChannelFactory() {
return TranslogWriter.ChannelFactory.DEFAULT;
}

/** If this {@code Translog} was closed as a side-effect of a tragic exception,
* e.g. disk full while flushing a new segment, this returns the root cause exception.
* Otherwise (no tragic exception has occurred) it returns null. */
public Throwable getTragicException() {
return current.getTragicException();
}

}

@ -140,16 +140,16 @@ public abstract class TranslogReader implements Closeable, Comparable<TranslogRe
@Override
public void close() throws IOException {
if (closed.compareAndSet(false, true)) {
doClose();
channelReference.decRef();
}
}

protected void doClose() throws IOException {
channelReference.decRef();
protected final boolean isClosed() {
return closed.get();
}

protected void ensureOpen() {
if (closed.get()) {
if (isClosed()) {
throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed");
}
}

@ -20,6 +20,7 @@
package org.elasticsearch.index.translog;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.OutputStreamDataOutput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;

@ -54,6 +55,9 @@ public class TranslogWriter extends TranslogReader {
protected volatile int operationCounter;
/* the offset in bytes written to the file */
protected volatile long writtenOffset;
/* if we hit an exception that we can't recover from we assign it to this var and ship it with every AlreadyClosedException we throw */
private volatile Throwable tragedy;

public TranslogWriter(ShardId shardId, long generation, ChannelReference channelReference) throws IOException {
super(generation, channelReference, channelReference.getChannel().position());

@ -65,10 +69,10 @@ public class TranslogWriter extends TranslogReader {
this.lastSyncedOffset = channelReference.getChannel().position();
}

public static TranslogWriter create(Type type, ShardId shardId, String translogUUID, long fileGeneration, Path file, Callback<ChannelReference> onClose, int bufferSize) throws IOException {
public static TranslogWriter create(Type type, ShardId shardId, String translogUUID, long fileGeneration, Path file, Callback<ChannelReference> onClose, int bufferSize, ChannelFactory channelFactory) throws IOException {
final BytesRef ref = new BytesRef(translogUUID);
final int headerLength = CodecUtil.headerLength(TRANSLOG_CODEC) + ref.length + RamUsageEstimator.NUM_BYTES_INT;
final FileChannel channel = FileChannel.open(file, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
final FileChannel channel = channelFactory.open(file);
try {
// This OutputStreamDataOutput is intentionally not closed because
// closing it will close the FileChannel

@ -90,6 +94,12 @@ public class TranslogWriter extends TranslogReader {
throw throwable;
}
}
/** If this {@code TranslogWriter} was closed as a side-effect of a tragic exception,
* e.g. disk full while flushing a new segment, this returns the root cause exception.
* Otherwise (no tragic exception has occurred) it returns null. */
public Throwable getTragicException() {
return tragedy;
}

public enum Type {

@ -118,6 +128,16 @@ public class TranslogWriter extends TranslogReader {
}
}

protected final void closeWithTragicEvent(Throwable throwable) throws IOException {
try (ReleasableLock lock = writeLock.acquire()) {
if (tragedy == null) {
tragedy = throwable;
} else {
tragedy.addSuppressed(throwable);
}
close();
}
}

/**
* add the given bytes to the translog and return the location they were written at

@ -127,9 +147,14 @@ public class TranslogWriter extends TranslogReader {
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
position = writtenOffset;
data.writeTo(channel);
try {
data.writeTo(channel);
} catch (Throwable e) {
closeWithTragicEvent(e);
throw e;
}
writtenOffset = writtenOffset + data.length();
operationCounter = operationCounter + 1;
operationCounter++;
}
return new Translog.Location(generation, position, data.length());
}

@ -143,12 +168,13 @@ public class TranslogWriter extends TranslogReader {
/**
* write all buffered ops to disk and fsync file
*/
public void sync() throws IOException {
public synchronized void sync() throws IOException { // synchronized to ensure only one sync happens at a time
// check if we really need to sync here...
if (syncNeeded()) {
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
checkpoint(writtenOffset, operationCounter, channelReference);
lastSyncedOffset = writtenOffset;
checkpoint(lastSyncedOffset, operationCounter, channelReference);
}
}
}

@ -262,15 +288,6 @@ public class TranslogWriter extends TranslogReader {
return false;
}

@Override
protected final void doClose() throws IOException {
try (ReleasableLock lock = writeLock.acquire()) {
sync();
} finally {
super.doClose();
}
}

@Override
protected void readBytes(ByteBuffer buffer, long position) throws IOException {
try (ReleasableLock lock = readLock.acquire()) {

@ -288,4 +305,20 @@ public class TranslogWriter extends TranslogReader {
Checkpoint checkpoint = new Checkpoint(syncPosition, numOperations, generation);
Checkpoint.write(checkpointFile, checkpoint, options);
}

static class ChannelFactory {

static final ChannelFactory DEFAULT = new ChannelFactory();

// only for testing until we have a disk-full FileSystem
public FileChannel open(Path file) throws IOException {
return FileChannel.open(file, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
}
}

protected final void ensureOpen() {
if (isClosed()) {
throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed", tragedy);
}
}
}

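The ChannelFactory above is explicitly a test seam: Translog#getChannelFactory() returns ChannelFactory.DEFAULT in production, and a test can override it to hand back failing channels and drive the new tragic-event path. A minimal sketch of such a helper, assuming it lives in a test in the same package as TranslogWriter; the class name and the simulated error are hypothetical and not part of this commit (imports of java.io.IOException, java.nio.channels.FileChannel and java.nio.file.Path assumed):

// Hypothetical test helper: every open fails as if the disk were full, so
// TranslogWriter.create(...) propagates an IOException and Translog#createWriter
// wraps it in a TranslogException; write-time failures would instead surface via
// closeWithTragicEvent(...) and be visible through Translog#getTragicException().
class FailingChannelFactory extends TranslogWriter.ChannelFactory {
    @Override
    public FileChannel open(Path file) throws IOException {
        throw new IOException("simulated: no space left on device");
    }
}

A test-only Translog subclass would then override getChannelFactory() to return new FailingChannelFactory() and assert on the resulting exceptions.
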
@ -120,7 +120,6 @@ public class IndicesModule extends AbstractModule {
registerQueryParser(GeohashCellQuery.Parser.class);
registerQueryParser(GeoPolygonQueryParser.class);
registerQueryParser(ExistsQueryParser.class);
registerQueryParser(MissingQueryParser.class);
registerQueryParser(MatchNoneQueryParser.class);

if (ShapesAvailability.JTS_AVAILABLE) {

@ -21,6 +21,7 @@ package org.elasticsearch.indices.query;

import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.EmptyQueryBuilder;

@ -40,11 +41,12 @@ public class IndicesQueriesRegistry extends AbstractComponent {
public IndicesQueriesRegistry(Settings settings, Set<QueryParser> injectedQueryParsers, NamedWriteableRegistry namedWriteableRegistry) {
super(settings);
Map<String, QueryParser<?>> queryParsers = new HashMap<>();
for (QueryParser<?> queryParser : injectedQueryParsers) {
for (@SuppressWarnings("unchecked") QueryParser<? extends QueryBuilder> queryParser : injectedQueryParsers) {
for (String name : queryParser.names()) {
queryParsers.put(name, queryParser);
}
namedWriteableRegistry.registerPrototype(QueryBuilder.class, queryParser.getBuilderPrototype());
@SuppressWarnings("unchecked") NamedWriteable<? extends QueryBuilder> qb = queryParser.getBuilderPrototype();
namedWriteableRegistry.registerPrototype(QueryBuilder.class, qb);
}
// EmptyQueryBuilder is not registered as query parser but used internally.
// We need to register it with the NamedWriteableRegistry in order to serialize it

@ -58,4 +60,4 @@ public class IndicesQueriesRegistry extends AbstractComponent {
public Map<String, QueryParser<?>> queryParsers() {
return queryParsers;
}
}
}

@ -40,10 +40,6 @@ import java.util.concurrent.TimeUnit;
*/
public class RecoverySettings extends AbstractComponent implements Closeable {

public static final String INDICES_RECOVERY_FILE_CHUNK_SIZE = "indices.recovery.file_chunk_size";
public static final String INDICES_RECOVERY_TRANSLOG_OPS = "indices.recovery.translog_ops";
public static final String INDICES_RECOVERY_TRANSLOG_SIZE = "indices.recovery.translog_size";
public static final String INDICES_RECOVERY_COMPRESS = "indices.recovery.compress";
public static final String INDICES_RECOVERY_CONCURRENT_STREAMS = "indices.recovery.concurrent_streams";
public static final String INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS = "indices.recovery.concurrent_small_file_streams";
public static final String INDICES_RECOVERY_MAX_BYTES_PER_SEC = "indices.recovery.max_bytes_per_sec";

@ -75,11 +71,7 @@ public class RecoverySettings extends AbstractComponent implements Closeable {

public static final long SMALL_FILE_CUTOFF_BYTES = ByteSizeValue.parseBytesSizeValue("5mb", "SMALL_FILE_CUTOFF_BYTES").bytes();

private volatile ByteSizeValue fileChunkSize;

private volatile boolean compress;
private volatile int translogOps;
private volatile ByteSizeValue translogSize;
public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB);

private volatile int concurrentStreams;
private volatile int concurrentSmallFileStreams;

@ -94,16 +86,12 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
private volatile TimeValue internalActionTimeout;
private volatile TimeValue internalActionLongTimeout;

private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE;

@Inject
public RecoverySettings(Settings settings, NodeSettingsService nodeSettingsService) {
super(settings);

this.fileChunkSize = settings.getAsBytesSize(INDICES_RECOVERY_FILE_CHUNK_SIZE, new ByteSizeValue(512, ByteSizeUnit.KB));
this.translogOps = settings.getAsInt(INDICES_RECOVERY_TRANSLOG_OPS, 1000);
this.translogSize = settings.getAsBytesSize(INDICES_RECOVERY_TRANSLOG_SIZE, new ByteSizeValue(512, ByteSizeUnit.KB));
this.compress = settings.getAsBoolean(INDICES_RECOVERY_COMPRESS, true);

this.retryDelayStateSync = settings.getAsTime(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, TimeValue.timeValueMillis(500));
// doesn't have to be fast as nodes are reconnected every 10s by default (see InternalClusterService.ReconnectToNodes)
// and we want to give the master time to remove a faulty node

@ -132,8 +120,8 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
rateLimiter = new SimpleRateLimiter(maxBytesPerSec.mbFrac());
}

logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}], file_chunk_size [{}], translog_size [{}], translog_ops [{}], and compress [{}]",
maxBytesPerSec, concurrentStreams, fileChunkSize, translogSize, translogOps, compress);
logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}]",
maxBytesPerSec, concurrentStreams);

nodeSettingsService.addListener(new ApplySettings());
}

@ -144,26 +132,6 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
ThreadPool.terminate(concurrentSmallFileStreamPool, 1, TimeUnit.SECONDS);
}

public ByteSizeValue fileChunkSize() {
return fileChunkSize;
}

public boolean compress() {
return compress;
}

public int translogOps() {
return translogOps;
}

public ByteSizeValue translogSize() {
return translogSize;
}

public int concurrentStreams() {
return concurrentStreams;
}

public ThreadPoolExecutor concurrentStreamPool() {
return concurrentStreamPool;
}

@ -196,6 +164,15 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
return internalActionLongTimeout;
}

public ByteSizeValue getChunkSize() { return chunkSize; }

void setChunkSize(ByteSizeValue chunkSize) { // only settable for tests
if (chunkSize.bytesAsInt() <= 0) {
throw new IllegalArgumentException("chunkSize must be > 0");
}
this.chunkSize = chunkSize;
}

class ApplySettings implements NodeSettingsService.Listener {
@Override

@ -213,30 +190,6 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
}
}

ByteSizeValue fileChunkSize = settings.getAsBytesSize(INDICES_RECOVERY_FILE_CHUNK_SIZE, RecoverySettings.this.fileChunkSize);
if (!fileChunkSize.equals(RecoverySettings.this.fileChunkSize)) {
logger.info("updating [indices.recovery.file_chunk_size] from [{}] to [{}]", RecoverySettings.this.fileChunkSize, fileChunkSize);
RecoverySettings.this.fileChunkSize = fileChunkSize;
}

int translogOps = settings.getAsInt(INDICES_RECOVERY_TRANSLOG_OPS, RecoverySettings.this.translogOps);
if (translogOps != RecoverySettings.this.translogOps) {
logger.info("updating [indices.recovery.translog_ops] from [{}] to [{}]", RecoverySettings.this.translogOps, translogOps);
RecoverySettings.this.translogOps = translogOps;
}

ByteSizeValue translogSize = settings.getAsBytesSize(INDICES_RECOVERY_TRANSLOG_SIZE, RecoverySettings.this.translogSize);
if (!translogSize.equals(RecoverySettings.this.translogSize)) {
logger.info("updating [indices.recovery.translog_size] from [{}] to [{}]", RecoverySettings.this.translogSize, translogSize);
RecoverySettings.this.translogSize = translogSize;
}

boolean compress = settings.getAsBoolean(INDICES_RECOVERY_COMPRESS, RecoverySettings.this.compress);
if (compress != RecoverySettings.this.compress) {
logger.info("updating [indices.recovery.compress] from [{}] to [{}]", RecoverySettings.this.compress, compress);
RecoverySettings.this.compress = compress;
}

int concurrentStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_STREAMS, RecoverySettings.this.concurrentStreams);
if (concurrentStreams != RecoverySettings.this.concurrentStreams) {
logger.info("updating [indices.recovery.concurrent_streams] from [{}] to [{}]", RecoverySettings.this.concurrentStreams, concurrentStreams);

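With the per-operation file_chunk_size/translog/compress settings removed above, the remaining knob on this class is the fixed chunk size, and its setter is deliberately package-private ("only settable for tests"). A minimal sketch of how a test in the same package might use it; the recoverySettings variable is assumed to come from the test's node or injector:

// hypothetical test usage of the package-private setter introduced above
recoverySettings.setChunkSize(new ByteSizeValue(128, ByteSizeUnit.KB)); // values <= 0 throw IllegalArgumentException
assert recoverySettings.getChunkSize().bytesAsInt() == 128 * 1024;
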
@ -36,7 +36,9 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.common.util.CancellableThreads.Interruptable;
import org.elasticsearch.index.engine.RecoveryEngineException;

@ -52,6 +54,7 @@ import org.elasticsearch.transport.RemoteTransportException;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;

import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;

@ -60,6 +63,7 @@ import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.stream.StreamSupport;

@ -80,9 +84,9 @@ public class RecoverySourceHandler {
private final StartRecoveryRequest request;
private final RecoverySettings recoverySettings;
private final TransportService transportService;
private final int chunkSizeInBytes;

protected final RecoveryResponse response;
private final TransportRequestOptions requestOptions;

private final CancellableThreads cancellableThreads = new CancellableThreads() {
@Override

@ -109,14 +113,8 @@ public class RecoverySourceHandler {
this.transportService = transportService;
this.indexName = this.request.shardId().index().name();
this.shardId = this.request.shardId().id();

this.chunkSizeInBytes = recoverySettings.getChunkSize().bytesAsInt();
this.response = new RecoveryResponse();
this.requestOptions = TransportRequestOptions.builder()
.withCompress(recoverySettings.compress())
.withType(TransportRequestOptions.Type.RECOVERY)
.withTimeout(recoverySettings.internalActionTimeout())
.build();

}

/**

@ -252,7 +250,7 @@ public class RecoverySourceHandler {
});
// How many bytes we've copied since we last called RateLimiter.pause
final AtomicLong bytesSinceLastPause = new AtomicLong();
final Function<StoreFileMetaData, OutputStream> outputStreamFactories = (md) -> new RecoveryOutputStream(md, bytesSinceLastPause, translogView);
final Function<StoreFileMetaData, OutputStream> outputStreamFactories = (md) -> new BufferedOutputStream(new RecoveryOutputStream(md, bytesSinceLastPause, translogView), chunkSizeInBytes);
sendFiles(store, phase1Files.toArray(new StoreFileMetaData[phase1Files.size()]), outputStreamFactories);
cancellableThreads.execute(() -> {
// Send the CLEAN_FILES request, which takes all of the files that

@ -436,7 +434,7 @@ public class RecoverySourceHandler {
}

final TransportRequestOptions recoveryOptions = TransportRequestOptions.builder()
.withCompress(recoverySettings.compress())
.withCompress(true)
.withType(TransportRequestOptions.Type.RECOVERY)
.withTimeout(recoverySettings.internalActionLongTimeout())
.build();

@ -455,9 +453,9 @@ public class RecoverySourceHandler {
size += operation.estimateSize();
totalOperations++;

// Check if this request is past the size or bytes threshold, and
// Check if this request is past the bytes threshold, and
// if so, send it off
if (ops >= recoverySettings.translogOps() || size >= recoverySettings.translogSize().bytes()) {
if (size >= chunkSizeInBytes) {

// don't throttle translog, since we lock for phase3 indexing,
// so we need to move it as fast as possible. Note, since we

@ -541,7 +539,7 @@ public class RecoverySourceHandler {

@Override
public final void write(int b) throws IOException {
write(new byte[]{(byte) b}, 0, 1);
throw new UnsupportedOperationException("we can't send single bytes over the wire");
}

@Override

@ -552,6 +550,11 @@ public class RecoverySourceHandler {
}

private void sendNextChunk(long position, BytesArray content, boolean lastChunk) throws IOException {
final TransportRequestOptions chunkSendOptions = TransportRequestOptions.builder()
.withCompress(false) // lucene files are already compressed and therefore compressing this won't really help much so we are saving the cpu for other things
.withType(TransportRequestOptions.Type.RECOVERY)
.withTimeout(recoverySettings.internalActionTimeout())
.build();
cancellableThreads.execute(() -> {
// Pause using the rate limiter, if desired, to throttle the recovery
final long throttleTimeInNanos;

@ -581,7 +584,7 @@ public class RecoverySourceHandler {
* see how many translog ops we accumulate while copying files across the network. A future optimization
* would be to restart file copy again (new deltas) if too many translog ops are piling up.
*/
throttleTimeInNanos), requestOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
throttleTimeInNanos), chunkSendOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
});
if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us
throw new IndexShardClosedException(request.shardId());

@ -674,9 +677,10 @@ public class RecoverySourceHandler {
pool = recoverySettings.concurrentSmallFileStreamPool();
}
Future<Void> future = pool.submit(() -> {
try (final OutputStream outputStream = outputStreamFactory.apply(md);
final IndexInput indexInput = store.directory().openInput(md.name(), IOContext.READONCE)) {
Streams.copy(new InputStreamIndexInput(indexInput, md.length()), outputStream);
try (final IndexInput indexInput = store.directory().openInput(md.name(), IOContext.READONCE)) {
// it's fine that we only have the indexInput in the try/with block. The copy method handles
// exceptions during close correctly and doesn't hide the original exception.
Streams.copy(new InputStreamIndexInput(indexInput, md.length()), outputStreamFactory.apply(md));
}
return null;
});

@ -33,6 +33,7 @@ import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Injector;

@ -42,11 +43,14 @@ import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoveryService;

@ -59,6 +63,7 @@ import org.elasticsearch.gateway.GatewayModule;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.http.HttpServer;
import org.elasticsearch.http.HttpServerModule;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.analysis.AnalysisModule;

@ -97,7 +102,16 @@ import org.elasticsearch.tribe.TribeService;
import org.elasticsearch.watcher.ResourceWatcherModule;
import org.elasticsearch.watcher.ResourceWatcherService;

import java.io.BufferedWriter;
import java.io.IOException;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.charset.Charset;
import java.nio.file.CopyOption;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;

@ -274,6 +288,15 @@ public class Node implements Releasable {
injector.getInstance(ResourceWatcherService.class).start();
injector.getInstance(TribeService.class).start();

if (System.getProperty("es.tests.portsfile", "false").equals("true")) {
if (settings.getAsBoolean("http.enabled", true)) {
HttpServerTransport http = injector.getInstance(HttpServerTransport.class);
writePortsFile("http", http.boundAddress());
}
TransportService transport = injector.getInstance(TransportService.class);
writePortsFile("transport", transport.boundAddress());
}

logger.info("started");

return this;

@ -425,4 +448,27 @@ public class Node implements Releasable {
public Injector injector() {
return this.injector;
}

/** Writes a file to the logs dir containing the ports for the given transport type */
private void writePortsFile(String type, BoundTransportAddress boundAddress) {
Path tmpPortsFile = environment.logsFile().resolve(type + ".ports.tmp");
try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) {
for (TransportAddress address : boundAddress.boundAddresses()) {
InetAddress inetAddress = InetAddress.getByName(address.getAddress());
if (inetAddress instanceof Inet6Address && inetAddress.isLinkLocalAddress()) {
// no link local, just causes problems
continue;
}
writer.write(NetworkAddress.formatAddress(new InetSocketAddress(inetAddress, address.getPort())) + "\n");
}
} catch (IOException e) {
throw new RuntimeException("Failed to write ports file", e);
}
Path portsFile = environment.logsFile().resolve(type + ".ports");
try {
Files.move(tmpPortsFile, portsFile, StandardCopyOption.ATOMIC_MOVE);
} catch (IOException e) {
throw new RuntimeException("Failed to rename ports file", e);
}
}
}

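The ports files written above (http.ports and transport.ports in the logs directory, gated by the es.tests.portsfile system property) are what external tooling such as the Gradle cluster-formation tasks can read to discover the node's bound addresses instead of relying on fixed base ports. A minimal sketch of a reader, assuming the logs directory is known; the helper name is illustrative and not part of this commit (java.nio.file.*, java.nio.charset.StandardCharsets and java.util.List imports assumed):

// illustrative reader for the "<type>.ports" files produced by writePortsFile
static String firstBoundAddress(Path logsDir, String type) throws IOException {
    List<String> lines = Files.readAllLines(logsDir.resolve(type + ".ports"), StandardCharsets.UTF_8);
    return lines.get(0); // one "host:port" entry per line, as formatted by NetworkAddress.formatAddress
}
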
@ -22,6 +22,7 @@ package org.elasticsearch.plugins;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.*;
import org.elasticsearch.bootstrap.JarHell;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.collect.Tuple;

@ -66,7 +67,7 @@ public class PluginManager {
"plugin",
"plugin.bat",
"service.bat"));

static final Set<String> MODULES = unmodifiableSet(newHashSet(
"lang-expression",
"lang-groovy"));

@ -89,6 +90,7 @@ public class PluginManager {
"mapper-murmur3",
"mapper-size",
"repository-azure",
"repository-hdfs",
"repository-s3",
"store-smb"));

@ -124,7 +126,7 @@ public class PluginManager {
checkForForbiddenName(pluginHandle.name);
} else {
// if we have no name but url, use temporary name that will be overwritten later
pluginHandle = new PluginHandle("temp_name" + new Random().nextInt(), null, null);
pluginHandle = new PluginHandle("temp_name" + Randomness.get().nextInt(), null, null);
}

Path pluginFile = download(pluginHandle, terminal);

@ -224,7 +226,7 @@ public class PluginManager {
PluginInfo info = PluginInfo.readFromProperties(root);
terminal.println(VERBOSE, "%s", info);

// don't let luser install plugin as a module...
// don't let luser install plugin as a module...
// they might be unavoidably in maven central and are packaged up the same way)
if (MODULES.contains(info.getName())) {
throw new IOException("plugin '" + info.getName() + "' cannot be installed like this, it is a system module");

@ -26,7 +26,6 @@ import org.elasticsearch.client.Client;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.rest.*;
import org.elasticsearch.rest.action.support.RestResponseListener;
import org.elasticsearch.search.fetch.source.FetchSourceContext;

@ -77,7 +76,7 @@ public class RestGetSourceAction extends BaseRestHandler {
if (!response.isExists()) {
return new BytesRestResponse(NOT_FOUND, builder);
} else {
XContentHelper.writeDirect(response.getSourceInternal(), builder, request);
builder.rawValue(response.getSourceInternal());
return new BytesRestResponse(OK, builder);
}
}

@ -83,12 +83,14 @@ import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext;
import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext.FieldDataField;
import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsFetchSubPhase;
import org.elasticsearch.search.fetch.script.ScriptFieldsContext.ScriptField;
import org.elasticsearch.search.highlight.HighlightBuilder;
import org.elasticsearch.search.internal.*;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import org.elasticsearch.search.query.*;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;

@ -656,7 +658,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
}
}

private void parseSource(SearchContext context, SearchSourceBuilder source) throws SearchParseException {
private void parseSource(SearchContext context, SearchSourceBuilder source) throws SearchContextException {
// nothing to parse...
if (source == null) {
return;

@ -807,19 +809,11 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
fieldDataFieldsContext.setHitExecutionNeeded(true);
}
if (source.highlighter() != null) {
XContentParser highlighterParser = null;
HighlightBuilder highlightBuilder = source.highlighter();
try {
highlighterParser = XContentFactory.xContent(source.highlighter()).createParser(source.highlighter());
this.elementParsers.get("highlight").parse(highlighterParser, context);
} catch (Exception e) {
String sSource = "_na_";
try {
sSource = source.toString();
} catch (Throwable e1) {
// ignore
}
XContentLocation location = highlighterParser != null ? highlighterParser.getTokenLocation() : null;
throw new SearchParseException(context, "failed to parse suggest source [" + sSource + "]", location, e);
context.highlight(highlightBuilder.build(context.indexShard().getQueryShardContext()));
} catch (IOException e) {
throw new SearchContextException(context, "failed to create SearchContextHighlighter", e);
}
}
if (source.innerHits() != null) {

@ -91,7 +91,7 @@ public class LongTerms extends InternalTerms<LongTerms, LongTerms.Bucket> {

@Override
public String getKeyAsString() {
return String.valueOf(term);
return formatter.format(term);
}

@Override

@ -19,7 +19,6 @@
package org.elasticsearch.search.aggregations.metrics.tophits;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;

@ -194,7 +193,7 @@ public class TopHitsBuilder extends AbstractAggregationBuilder {
return sourceBuilder;
}

public BytesReference highlighter() {
public HighlightBuilder highlighter() {
return sourceBuilder().highlighter();
}

@ -144,7 +144,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ

private List<BytesReference> aggregations;

private BytesReference highlightBuilder;
private HighlightBuilder highlightBuilder;

private BytesReference suggestBuilder;

@ -405,22 +405,14 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
* Adds highlight to perform as part of the search.
*/
public SearchSourceBuilder highlighter(HighlightBuilder highlightBuilder) {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject();
highlightBuilder.innerXContent(builder);
builder.endObject();
this.highlightBuilder = builder.bytes();
return this;
} catch (IOException e) {
throw new RuntimeException(e);
}
this.highlightBuilder = highlightBuilder;
return this;
}

/**
* Gets the bytes representing the highlighter builder for this request.
* Gets the highlighter builder for this request.
*/
public BytesReference highlighter() {
public HighlightBuilder highlighter() {
return highlightBuilder;
}

@ -813,8 +805,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
builder.aggregations = aggregations;
} else if (context.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) {
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
builder.highlightBuilder = xContentBuilder.bytes();
builder.highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
} else if (context.parseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) {
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
builder.innerHitsBuilder = xContentBuilder.bytes();

@ -1012,10 +1003,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}

if (highlightBuilder != null) {
builder.field(HIGHLIGHT_FIELD.getPreferredName());
XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(highlightBuilder);
parser.nextToken();
builder.copyCurrentStructure(parser);
this.highlightBuilder.toXContent(builder, params);
}

if (innerHitsBuilder != null) {

@ -1158,7 +1146,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
builder.from = in.readVInt();
if (in.readBoolean()) {
builder.highlightBuilder = in.readBytesReference();
builder.highlightBuilder = HighlightBuilder.PROTOTYPE.readFrom(in);
}
boolean hasIndexBoost = in.readBoolean();
if (hasIndexBoost) {

@ -1259,7 +1247,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
boolean hasHighlightBuilder = highlightBuilder != null;
out.writeBoolean(hasHighlightBuilder);
if (hasHighlightBuilder) {
out.writeBytesReference(highlightBuilder);
highlightBuilder.writeTo(out);
}
boolean hasIndexBoost = indexBoost != null;
out.writeBoolean(hasIndexBoost);

@ -20,7 +20,6 @@
package org.elasticsearch.search.fetch.innerhits;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.QueryBuilder;

@ -266,7 +265,7 @@ public class InnerHitsBuilder implements ToXContent {
return this;
}

public BytesReference highlighter() {
public HighlightBuilder highlighter() {
return sourceBuilder().highlighter();
}

@ -22,13 +22,19 @@ package org.elasticsearch.search.highlight;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.search.highlight.HighlightBuilder.Order;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;

@ -74,7 +80,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB

protected QueryBuilder<?> highlightQuery;

protected String order;
protected Order order;

protected Boolean highlightFilter;

@ -213,18 +219,26 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
/**
* The order of fragments per field. By default, ordered by the order in the
* highlighted text. Can be <tt>score</tt>, in which case it will be ordered
* by score of the fragments.
* by score of the fragments, or <tt>none</tt>.
*/
public HB order(String order) {
return order(Order.fromString(order));
}

/**
* By default, fragments of a field are ordered by the order in the highlighted text.
* If set to {@link Order#SCORE}, this changes order to score of the fragments.
*/
@SuppressWarnings("unchecked")
public HB order(String order) {
this.order = order;
public HB order(Order scoreOrdered) {
this.order = scoreOrdered;
return (HB) this;
}

/**
* @return the value set by {@link #order(String)}
* @return the value set by {@link #order(Order)}
*/
public String order() {
public Order order() {
return this.order;
}

@ -391,7 +405,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
builder.field(HIGHLIGHT_QUERY_FIELD.getPreferredName(), highlightQuery);
}
if (order != null) {
builder.field(ORDER_FIELD.getPreferredName(), order);
builder.field(ORDER_FIELD.getPreferredName(), order.toString());
}
if (highlightFilter != null) {
builder.field(HIGHLIGHT_FILTER_FIELD.getPreferredName(), highlightFilter);

@ -419,6 +433,100 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
}
}

/**
* Creates a new {@link HighlightBuilder} from the highlighter held by the {@link QueryParseContext}
* in {@link org.elasticsearch.common.xcontent.XContent} format
*
* @param parseContext containing the parser positioned at the structure to be parsed from.
* the state on the parser contained in this context will be changed as a side effect of this
* method call
* @return the new {@link AbstractHighlighterBuilder}
*/
public HB fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
XContentParser.Token token = parser.currentToken();
String currentFieldName = null;
HB highlightBuilder = createInstance(parser);
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if (parseContext.parseFieldMatcher().match(currentFieldName, PRE_TAGS_FIELD)) {
List<String> preTagsList = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
preTagsList.add(parser.text());
}
highlightBuilder.preTags(preTagsList.toArray(new String[preTagsList.size()]));
} else if (parseContext.parseFieldMatcher().match(currentFieldName, POST_TAGS_FIELD)) {
List<String> postTagsList = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
postTagsList.add(parser.text());
}
highlightBuilder.postTags(postTagsList.toArray(new String[postTagsList.size()]));
} else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) {
throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", currentFieldName);
}
} else if (token.isValue()) {
if (parseContext.parseFieldMatcher().match(currentFieldName, ORDER_FIELD)) {
highlightBuilder.order(Order.fromString(parser.text()));
} else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FILTER_FIELD)) {
highlightBuilder.highlightFilter(parser.booleanValue());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_SIZE_FIELD)) {
highlightBuilder.fragmentSize(parser.intValue());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, NUMBER_OF_FRAGMENTS_FIELD)) {
highlightBuilder.numOfFragments(parser.intValue());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, REQUIRE_FIELD_MATCH_FIELD)) {
highlightBuilder.requireFieldMatch(parser.booleanValue());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_MAX_SCAN_FIELD)) {
highlightBuilder.boundaryMaxScan(parser.intValue());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_CHARS_FIELD)) {
highlightBuilder.boundaryChars(parser.text().toCharArray());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, TYPE_FIELD)) {
highlightBuilder.highlighterType(parser.text());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENTER_FIELD)) {
highlightBuilder.fragmenter(parser.text());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, NO_MATCH_SIZE_FIELD)) {
highlightBuilder.noMatchSize(parser.intValue());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, FORCE_SOURCE_FIELD)) {
highlightBuilder.forceSource(parser.booleanValue());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, PHRASE_LIMIT_FIELD)) {
highlightBuilder.phraseLimit(parser.intValue());
} else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) {
throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", currentFieldName);
}
} else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) {
if (parseContext.parseFieldMatcher().match(currentFieldName, OPTIONS_FIELD)) {
highlightBuilder.options(parser.map());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_QUERY_FIELD)) {
highlightBuilder.highlightQuery(parseContext.parseInnerQueryBuilder());
} else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) {
throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", currentFieldName);
}
} else if (currentFieldName != null) {
throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, currentFieldName);
}
}

if (highlightBuilder.preTags() != null && highlightBuilder.postTags() == null) {
throw new ParsingException(parser.getTokenLocation(), "Highlighter global preTags are set, but global postTags are not set");
}
return highlightBuilder;
}

/**
* @param parser the input parser. Implementing classes might advance the parser depending on the
* information they need to instantiate a new instance
* @return a new instance
*/
protected abstract HB createInstance(XContentParser parser) throws IOException;

/**
* Implementing subclasses can handle parsing special options depending on the
* current token, field name and the parse context.
* @return <tt>true</tt> if an option was found and successfully parsed, otherwise <tt>false</tt>
*/
protected abstract boolean doFromXContent(QueryParseContext parseContext, String currentFieldName, XContentParser.Token endMarkerToken) throws IOException;

@Override
public final int hashCode() {
return Objects.hash(getClass(), Arrays.hashCode(preTags), Arrays.hashCode(postTags), fragmentSize,

@ -480,7 +588,9 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
if (in.readBoolean()) {
highlightQuery(in.readQuery());
}
order(in.readOptionalString());
if (in.readBoolean()) {
order(Order.PROTOTYPE.readFrom(in));
}
highlightFilter(in.readOptionalBoolean());
forceSource(in.readOptionalBoolean());
boundaryMaxScan(in.readOptionalVInt());

@ -511,7 +621,11 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
if (hasQuery) {
out.writeQuery(highlightQuery);
}
out.writeOptionalString(order);
boolean hasSetOrder = order != null;
out.writeBoolean(hasSetOrder);
if (hasSetOrder) {
order.writeTo(out);
}
out.writeOptionalBoolean(highlightFilter);
out.writeOptionalBoolean(forceSource);
out.writeOptionalVInt(boundaryMaxScan);