Merge branch 'master' into feature/aggs-refactoring
commit ed3f7903f4
@@ -223,6 +223,10 @@ tasks.idea.dependsOn(buildSrcIdea)

// eclipse configuration
allprojects {
apply plugin: 'eclipse'
// Name all the non-root projects after their path so that paths get grouped together when imported into eclipse.
if (path != ':') {
eclipse.project.name = path
}

plugins.withType(JavaBasePlugin) {
File eclipseBuild = project.file('build-eclipse')
@@ -649,8 +649,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analysis[/\\]PreBuiltCacheFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analysis[/\\]PreBuiltTokenFilters.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]breaker[/\\]HierarchyCircuitBreakerService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]cache[/\\]query[/\\]terms[/\\]TermsLookup.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]cache[/\\]request[/\\]IndicesRequestCache.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]cluster[/\\]IndicesClusterStateService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]fielddata[/\\]cache[/\\]IndicesFieldDataCache.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]fielddata[/\\]cache[/\\]IndicesFieldDataCacheListener.java" checks="LineLength" />

@@ -662,9 +660,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoverySource.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoverySourceHandler.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoveryState.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoveryStatus.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoveryTarget.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]SharedFSRecoverySourceHandler.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]StartRecoveryRequest.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]store[/\\]IndicesStore.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]store[/\\]TransportNodesListShardStoreMetaData.java" checks="LineLength" />

@@ -1309,7 +1304,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analysis[/\\]PreBuiltAnalyzerIntegrationIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analyze[/\\]AnalyzeActionIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analyze[/\\]HunspellServiceIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]cache[/\\]query[/\\]IndicesRequestCacheIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]exists[/\\]indices[/\\]IndicesExistsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]exists[/\\]types[/\\]TypesExistsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]flush[/\\]FlushIT.java" checks="LineLength" />
@@ -1,5 +1,5 @@
elasticsearch = 3.0.0-SNAPSHOT
lucene = 5.5.0-snapshot-4de5f1d
lucene = 5.5.0-snapshot-850c6c2

# optional dependencies
spatial4j = 0.5
@@ -35,7 +35,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -98,7 +97,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
}
if (request.requestCache()) {
clearedAtLeastOne = true;
indicesService.getIndicesRequestCache().clear(shard);
indicesService.clearRequestCache(shard);
}
if (request.recycler()) {
logger.debug("Clear CacheRecycler on index [{}]", service.index());

@@ -114,7 +113,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
} else {
service.cache().clear("api");
service.fieldData().clear();
indicesService.getIndicesRequestCache().clear(shard);
indicesService.clearRequestCache(shard);
}
}
}
@@ -26,7 +26,6 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.cache.request.RequestCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;

@@ -41,13 +40,11 @@ import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.index.suggest.stats.SuggestStats;
import org.elasticsearch.index.translog.TranslogStats;
import org.elasticsearch.index.warmer.WarmerStats;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.IndicesQueryCache;
import org.elasticsearch.search.suggest.completion.CompletionStats;

import java.io.IOException;
@@ -175,7 +175,7 @@ final class Bootstrap {
JarHell.checkJarHell();

// install SM after natives, shutdown hooks, etc.
setupSecurity(settings, environment);
Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));

// We do not need to reload system properties here as we have already applied them in building the settings and
// reloading could cause multiple prompts to the user for values if a system property was specified with a prompt

@@ -188,14 +188,6 @@ final class Bootstrap {
node = new Node(nodeSettings);
}

private void setupSecurity(Settings settings, Environment environment) throws Exception {
if (BootstrapSettings.SECURITY_MANAGER_ENABLED_SETTING.get(settings)) {
Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));
}
}

@SuppressForbidden(reason = "Exception#printStackTrace()")
private static void setupLogging(Settings settings, Environment environment) {
try {
@@ -27,14 +27,6 @@ public final class BootstrapSettings {
private BootstrapSettings() {
}

// TODO: remove this: http://www.openbsd.org/papers/hackfest2015-pledge/mgp00005.jpg
/**
* option to turn off our security manager completely, for example
* if you want to have your own configuration or just disable
*/
public static final Setting<Boolean> SECURITY_MANAGER_ENABLED_SETTING =
Setting.boolSetting("security.manager.enabled", true, false, Scope.CLUSTER);

// TODO: remove this hack when insecure defaults are removed from java
public static final Setting<Boolean> SECURITY_FILTER_BAD_DEFAULTS_SETTING =
Setting.boolSetting("security.manager.filter_bad_defaults", true, false, Scope.CLUSTER);
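Note on the two hunks above: setupSecurity() and the security.manager.enabled escape hatch are removed, so Bootstrap now installs the security manager unconditionally and only the filter_bad_defaults flag remains. A minimal sketch of how that remaining flag is read and applied, using only calls that appear in the diff (local variable names are illustrative):

    // Read the remaining bootstrap flag; defaults to true when not set.
    boolean filterBadDefaults = BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings);
    // Install the security manager unconditionally, as Bootstrap now does.
    Security.configure(environment, filterBadDefaults);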
@@ -20,8 +20,11 @@
package org.elasticsearch.common.geo;

import org.apache.lucene.util.BitUtil;
import org.apache.lucene.util.GeoHashUtils;
import org.apache.lucene.util.GeoUtils;

import static org.apache.lucene.spatial.util.GeoHashUtils.mortonEncode;
import static org.apache.lucene.spatial.util.GeoHashUtils.stringEncode;
import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonUnhashLat;
import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonUnhashLon;

/**
 *

@@ -81,14 +84,14 @@ public final class GeoPoint {
}

public GeoPoint resetFromIndexHash(long hash) {
lon = GeoUtils.mortonUnhashLon(hash);
lat = GeoUtils.mortonUnhashLat(hash);
lon = mortonUnhashLon(hash);
lat = mortonUnhashLat(hash);
return this;
}

public GeoPoint resetFromGeoHash(String geohash) {
final long hash = GeoHashUtils.mortonEncode(geohash);
return this.reset(GeoUtils.mortonUnhashLat(hash), GeoUtils.mortonUnhashLon(hash));
final long hash = mortonEncode(geohash);
return this.reset(mortonUnhashLat(hash), mortonUnhashLon(hash));
}

public GeoPoint resetFromGeoHash(long geohashLong) {

@@ -113,11 +116,11 @@ public final class GeoPoint {
}

public final String geohash() {
return GeoHashUtils.stringEncode(lon, lat);
return stringEncode(lon, lat);
}

public final String getGeohash() {
return GeoHashUtils.stringEncode(lon, lat);
return stringEncode(lon, lat);
}

@Override
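Note: GeoPoint now statically imports the hashing helpers from org.apache.lucene.spatial.util (GeoHashUtils/GeoEncodingUtils) instead of calling the removed org.apache.lucene.util variants. A small usage sketch of the resulting API; the coordinates and the constructors used here are illustrative assumptions, not part of the diff:

    GeoPoint point = new GeoPoint(48.8584, 2.2945);                // lat, lon
    String geohash = point.geohash();                              // delegates to stringEncode(lon, lat)
    GeoPoint fromHash = new GeoPoint().resetFromGeoHash(geohash);  // round-trips via mortonEncode/mortonUnhash*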
@@ -21,7 +21,6 @@ package org.elasticsearch.common.geo;

import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
import org.apache.lucene.util.GeoDistanceUtils;
import org.apache.lucene.util.SloppyMath;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.unit.DistanceUnit;

@@ -29,6 +28,8 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;

import static org.apache.lucene.spatial.util.GeoDistanceUtils.maxRadialDistanceMeters;

import java.io.IOException;

/**

@@ -70,7 +71,7 @@ public class GeoUtils {
* maximum distance/radius from the point 'center' before overlapping
**/
public static double maxRadialDistance(GeoPoint center, double initialRadius) {
final double maxRadius = GeoDistanceUtils.maxRadialDistanceMeters(center.lon(), center.lat());
final double maxRadius = maxRadialDistanceMeters(center.lon(), center.lat());
return Math.min(initialRadius, maxRadius);
}
@@ -65,8 +65,8 @@ import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.analysis.HunspellService;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.indices.IndicesQueryCache;
import org.elasticsearch.indices.IndicesRequestCache;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.store.IndicesStore;

@@ -307,11 +307,10 @@ public final class ClusterSettings extends AbstractScopedSettings {
ScriptService.SCRIPT_CACHE_SIZE_SETTING,
ScriptService.SCRIPT_CACHE_EXPIRE_SETTING,
ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING,
IndicesService.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL,
HunspellService.HUNSPELL_LAZY_LOAD,
HunspellService.HUNSPELL_IGNORE_CASE,
HunspellService.HUNSPELL_DICTIONARY_OPTIONS,

@@ -395,7 +394,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
PageCacheRecycler.WEIGHT_OBJECTS_SETTING,
PageCacheRecycler.TYPE_SETTING,
PluginsService.MANDATORY_SETTING,
BootstrapSettings.SECURITY_MANAGER_ENABLED_SETTING,
BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING,
BootstrapSettings.MLOCKALL_SETTING,
BootstrapSettings.SECCOMP_SETTING,
@@ -39,7 +39,7 @@ import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.IndexWarmer;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.indices.IndicesRequestCache;

import java.util.Arrays;
import java.util.Collections;
@@ -20,7 +20,6 @@
package org.elasticsearch.common.settings;

import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.tribe.TribeService;

import java.util.HashMap;

@@ -90,7 +89,7 @@ public class SettingsModule extends AbstractModule {

/**
* Registers a settings filter pattern that allows to filter out certain settings that for instance contain sensitive information
* or if a setting is for internal purposes only. The given patter must either be a valid settings key or a simple regesp pattern.
* or if a setting is for internal purposes only. The given pattern must either be a valid settings key or a simple regexp pattern.
*/
public void registerSettingsFilter(String filter) {
if (SettingsFilter.isValidPattern(filter) == false) {

@@ -103,11 +102,23 @@ public class SettingsModule extends AbstractModule {
}

public void registerSettingsFilterIfMissing(String filter) {
if (settingsFilterPattern.contains(filter)) {
if (settingsFilterPattern.contains(filter) == false) {
registerSettingsFilter(filter);
}
}

/**
* Check if a setting has already been registered
*/
public boolean exists(Setting<?> setting) {
switch (setting.getScope()) {
case CLUSTER:
return clusterSettings.containsKey(setting.getKey());
case INDEX:
return indexSettings.containsKey(setting.getKey());
}
throw new IllegalArgumentException("setting scope is unknown. This should never happen!");
}

private void validateTribeSettings(Settings settings, ClusterSettings clusterSettings) {
Map<String, Settings> groups = settings.filter(TRIBE_CLIENT_NODE_SETTINGS_PREDICATE).getGroups("tribe.", true);
@@ -80,14 +80,32 @@ public class CancellableThreads {
* @param interruptable code to run
*/
public void execute(Interruptable interruptable) {
try {
executeIO(interruptable);
} catch (IOException e) {
assert false : "the passed interruptable can not result in an IOException";
throw new RuntimeException("unexpected IO exception", e);
}
}
/**
* run the Interruptable, capturing the executing thread. Concurrent calls to {@link #cancel(String)} will interrupt this thread
* causing the call to prematurely return.
*
* @param interruptable code to run
*/
public void executeIO(IOInterruptable interruptable) throws IOException {
boolean wasInterrupted = add();
RuntimeException throwable = null;
RuntimeException runtimeException = null;
IOException ioException = null;

try {
interruptable.run();
} catch (InterruptedException | ThreadInterruptedException e) {
// assume this is us and ignore
} catch (RuntimeException t) {
throwable = t;
runtimeException = t;
} catch (IOException e) {
ioException = e;
} finally {
remove();
}

@@ -101,10 +119,14 @@ public class CancellableThreads {
}
synchronized (this) {
if (isCancelled()) {
onCancel(reason, throwable);
} else if (throwable != null) {
onCancel(reason, ioException != null ? ioException : runtimeException);
} else if (ioException != null) {
// if we're not canceling, we throw the original exception
throw throwable;
throw ioException;
}
if (runtimeException != null) {
// if we're not canceling, we throw the original exception
throw runtimeException;
}
}
}

@@ -131,10 +153,14 @@ public class CancellableThreads {
}

public interface Interruptable {
public interface Interruptable extends IOInterruptable {
void run() throws InterruptedException;
}

public interface IOInterruptable {
void run() throws IOException, InterruptedException;
}

public static class ExecutionCancelledException extends ElasticsearchException {

public ExecutionCancelledException(String msg) {
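Note: the hunks above split the old Interruptable contract into Interruptable (InterruptedException only) and IOInterruptable (may also throw IOException), with execute() delegating to the new executeIO(). A minimal usage sketch of the two entry points, based only on the signatures shown above; the lambda bodies are illustrative assumptions:

    CancellableThreads cancellableThreads = new CancellableThreads();

    // Interruptable can only throw InterruptedException, so execute() needs no IOException handling.
    cancellableThreads.execute(() -> Thread.sleep(100));

    // IOInterruptable may throw IOException; executeIO() rethrows it if the operation was not cancelled.
    cancellableThreads.executeIO(() -> {
        throw new IOException("simulated I/O failure");   // illustrative only
    });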
@@ -37,7 +37,7 @@ import org.elasticsearch.index.similarity.SimilarityProvider;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.IndicesQueryCache;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.mapper.MapperRegistry;
@@ -57,7 +57,7 @@ public final class AnalysisRegistry implements Closeable {
private final Map<String, Analyzer> cachedAnalyzer = new ConcurrentHashMap<>();
private final PrebuiltAnalysis prebuiltAnalysis;
private final HunspellService hunspellService;
private final Environment environemnt;
private final Environment environment;

public AnalysisRegistry(HunspellService hunspellService, Environment environment) {
this(hunspellService, environment, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());

@@ -70,7 +70,7 @@ public final class AnalysisRegistry implements Closeable {
Map<String, AnalysisModule.AnalysisProvider<AnalyzerProvider>> analyzers) {
prebuiltAnalysis = new PrebuiltAnalysis();
this.hunspellService = hunspellService;
this.environemnt = environment;
this.environment = environment;
final Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> charFilterBuilder = new HashMap<>(charFilters);
final Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilterBuilder = new HashMap<>(tokenFilters);
final Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> tokenizerBuilder = new HashMap<>(tokenizers);

@@ -115,13 +115,13 @@ public final class AnalysisRegistry implements Closeable {
AnalysisModule.AnalysisProvider<AnalyzerProvider> provider = analyzers.get(analyzer);
return provider == null ? null : cachedAnalyzer.computeIfAbsent(analyzer, (key) -> {
try {
return provider.get(environemnt, key).get();
return provider.get(environment, key).get();
} catch (IOException ex) {
throw new ElasticsearchException("failed to load analyzer for name " + key, ex);
}}
);
}
return analyzerProvider.get(environemnt, analyzer).get();
return analyzerProvider.get(environment, analyzer).get();
}

@Override

@@ -324,7 +324,7 @@ public final class AnalysisRegistry implements Closeable {
if (type == null) {
throw new IllegalArgumentException("Unknown " + toBuild + " type [" + typeName + "] for [" + name + "]");
}
factory = type.get(settings, environemnt, name, currentSettings);
factory = type.get(settings, environment, name, currentSettings);
}
factories.put(name, factory);
} else {

@@ -335,7 +335,7 @@ public final class AnalysisRegistry implements Closeable {
if (type == null) {
throw new IllegalArgumentException("Unknown " + toBuild + " type [" + typeName + "] for [" + name + "]");
}
final T factory = type.get(settings, environemnt, name, currentSettings);
final T factory = type.get(settings, environment, name, currentSettings);
factories.put(name, factory);
}

@@ -355,9 +355,9 @@ public final class AnalysisRegistry implements Closeable {
AnalysisModule.AnalysisProvider<T> defaultProvider = defaultInstance.get(name);
final T instance;
if (defaultProvider == null) {
instance = provider.get(settings, environemnt, name, defaultSettings);
instance = provider.get(settings, environment, name, defaultSettings);
} else {
instance = defaultProvider.get(settings, environemnt, name, defaultSettings);
instance = defaultProvider.get(settings, environment, name, defaultSettings);
}
factories.put(name, instance);
String camelCase = Strings.toCamelCase(name);

@@ -371,7 +371,7 @@ public final class AnalysisRegistry implements Closeable {
final AnalysisModule.AnalysisProvider<T> provider = entry.getValue();
final String camelCase = Strings.toCamelCase(name);
if (factories.containsKey(name) == false || (defaultInstance.containsKey(camelCase) == false && factories.containsKey(camelCase) == false)) {
final T instance = provider.get(settings, environemnt, name, defaultSettings);
final T instance = provider.get(settings, environment, name, defaultSettings);
if (factories.containsKey(name) == false) {
factories.put(name, instance);
}
@@ -25,7 +25,7 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.IndicesQueryCache;

/**
* The index-level query cache. This class mostly delegates to the node-level
@@ -19,27 +19,24 @@

package org.elasticsearch.index.cache.request;

import org.apache.lucene.util.Accountable;
import org.elasticsearch.common.cache.RemovalListener;
import org.elasticsearch.common.cache.RemovalNotification;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.indices.IndicesRequestCache;

/**
*/
public class ShardRequestCache extends AbstractIndexShardComponent implements RemovalListener<IndicesRequestCache.Key, IndicesRequestCache.Value> {
public final class ShardRequestCache {

final CounterMetric evictionsMetric = new CounterMetric();
final CounterMetric totalMetric = new CounterMetric();
final CounterMetric hitCount = new CounterMetric();
final CounterMetric missCount = new CounterMetric();

public ShardRequestCache(ShardId shardId, IndexSettings indexSettings) {
super(shardId, indexSettings);
}

public RequestCacheStats stats() {
return new RequestCacheStats(totalMetric.count(), evictionsMetric.count(), hitCount.count(), missCount.count());
}

@@ -52,21 +49,20 @@ public class ShardRequestCache extends AbstractIndexShardComponent implements Re
missCount.inc();
}

public void onCached(IndicesRequestCache.Key key, IndicesRequestCache.Value value) {
public void onCached(Accountable key, Accountable value) {
totalMetric.inc(key.ramBytesUsed() + value.ramBytesUsed());
}

@Override
public void onRemoval(RemovalNotification<IndicesRequestCache.Key, IndicesRequestCache.Value> removalNotification) {
if (removalNotification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED) {
public void onRemoval(Accountable key, Accountable value, boolean evicted) {
if (evicted) {
evictionsMetric.inc();
}
long dec = 0;
if (removalNotification.getKey() != null) {
dec += removalNotification.getKey().ramBytesUsed();
if (key != null) {
dec += key.ramBytesUsed();
}
if (removalNotification.getValue() != null) {
dec += removalNotification.getValue().ramBytesUsed();
if (value != null) {
dec += value.ramBytesUsed();
}
totalMetric.dec(dec);
}
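Note: ShardRequestCache no longer implements RemovalListener over IndicesRequestCache.Key/Value; it now tracks plain Accountable keys and values, so the node-level cache can report events without exposing its internal types. A minimal sketch of how a caller would feed the new methods, using only the signatures shown above (the wrapper method and the key/value objects are illustrative assumptions):

    // Sketch: how the node-level request cache reports events to the per-shard stats.
    // The key/value objects just need to implement org.apache.lucene.util.Accountable.
    void onCacheEvents(ShardRequestCache requestCache, Accountable key, Accountable value) {
        requestCache.onCached(key, value);              // adds key+value ram usage to the total metric
        requestCache.onRemoval(key, value, true);       // 'true' = evicted, so the eviction counter is bumped
        RequestCacheStats stats = requestCache.stats(); // exposes total size, evictions, hits and misses
    }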
@@ -36,6 +36,7 @@ import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.core.BooleanFieldMapper;
import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.shard.ShardId;

@@ -94,6 +95,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
static {
Map<String, IndexFieldData.Builder> buildersByTypeBuilder = new HashMap<>();
buildersByTypeBuilder.put("string", new PagedBytesIndexFieldData.Builder());
buildersByTypeBuilder.put(KeywordFieldMapper.CONTENT_TYPE, MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("float", MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("double", MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("byte", MISSING_DOC_VALUES_BUILDER);

@@ -110,6 +112,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo

docValuesBuildersByType = MapBuilder.<String, IndexFieldData.Builder>newMapBuilder()
.put("string", new DocValuesIndexFieldData.Builder())
.put(KeywordFieldMapper.CONTENT_TYPE, new DocValuesIndexFieldData.Builder())
.put("float", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.FLOAT))
.put("double", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.DOUBLE))
.put("byte", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BYTE))

@@ -126,6 +129,9 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
.put(Tuple.tuple("string", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder())
.put(Tuple.tuple("string", DISABLED_FORMAT), DISABLED_BUILDER)

.put(Tuple.tuple(KeywordFieldMapper.CONTENT_TYPE, DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder())
.put(Tuple.tuple(KeywordFieldMapper.CONTENT_TYPE, DISABLED_FORMAT), DISABLED_BUILDER)

.put(Tuple.tuple("float", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.FLOAT))
.put(Tuple.tuple("float", DISABLED_FORMAT), DISABLED_BUILDER)
@@ -19,10 +19,12 @@

package org.elasticsearch.index.fielddata.plain;

import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator;
import org.apache.lucene.util.CharsRefBuilder;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.index.IndexSettings;

@@ -45,7 +47,7 @@ abstract class AbstractIndexGeoPointFieldData extends AbstractIndexFieldData<Ato
}

protected static class GeoPointTermsEnum extends BaseGeoPointTermsEnum {
protected GeoPointTermsEnum(BytesRefIterator termsEnum) {
protected GeoPointTermsEnum(BytesRefIterator termsEnum, GeoPointField.TermEncoding termEncoding) {
super(termsEnum);
}
@@ -23,6 +23,7 @@ import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.Terms;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BitSet;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -48,25 +49,20 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService;
*/
public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData {
private final CircuitBreakerService breakerService;
private final boolean indexCreatedBefore22;

public static class Builder implements IndexFieldData.Builder {
@Override
public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
CircuitBreakerService breakerService, MapperService mapperService) {
return new GeoPointArrayIndexFieldData(indexSettings, fieldType.name(), fieldType.fieldDataType(), cache,
breakerService, fieldType.fieldDataType().getSettings()
.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).before(Version.V_2_2_0) ||
indexSettings.getIndexVersionCreated().before(Version.V_2_2_0));
breakerService);
}
}

public GeoPointArrayIndexFieldData(IndexSettings indexSettings, String fieldName,
FieldDataType fieldDataType, IndexFieldDataCache cache, CircuitBreakerService breakerService,
final boolean indexCreatedBefore22) {
FieldDataType fieldDataType, IndexFieldDataCache cache, CircuitBreakerService breakerService) {
super(indexSettings, fieldName, fieldDataType, cache);
this.breakerService = breakerService;
this.indexCreatedBefore22 = indexCreatedBefore22;
}

@Override

@@ -82,7 +78,8 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData
estimator.afterLoad(null, data.ramBytesUsed());
return data;
}
return (indexCreatedBefore22 == true) ? loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data);
return (indexSettings.getIndexVersionCreated().before(Version.V_2_2_0) == true) ?
loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data);
}

/**

@@ -95,7 +92,9 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData
OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
boolean success = false;
try (OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc(), acceptableTransientOverheadRatio)) {
final GeoPointTermsEnum iter = new GeoPointTermsEnum(builder.buildFromTerms(OrdinalsBuilder.wrapNumeric64Bit(terms.iterator())));
final GeoPointField.TermEncoding termEncoding = indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_3_0) ?
GeoPointField.TermEncoding.PREFIX : GeoPointField.TermEncoding.NUMERIC;
final GeoPointTermsEnum iter = new GeoPointTermsEnum(builder.buildFromTerms(OrdinalsBuilder.wrapNumeric64Bit(terms.iterator())), termEncoding);
Long hashedPoint;
long numTerms = 0;
while ((hashedPoint = iter.next()) != null) {

@@ -181,4 +180,4 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData
}
}
}
}
}
@@ -43,7 +43,6 @@ import java.util.Map;
import java.util.function.Supplier;

import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.index.mapper.MapperBuilders.doc;

public class DocumentMapperParser {

@@ -111,7 +110,7 @@ public class DocumentMapperParser {

Mapper.TypeParser.ParserContext parserContext = parserContext(type);
// parse RootObjectMapper
DocumentMapper.Builder docBuilder = doc((RootObjectMapper.Builder) rootObjectTypeParser.parse(type, mapping, parserContext), mapperService);
DocumentMapper.Builder docBuilder = new DocumentMapper.Builder((RootObjectMapper.Builder) rootObjectTypeParser.parse(type, mapping, parserContext), mapperService);
Iterator<Map.Entry<String, Object>> iterator = mapping.entrySet().iterator();
// parse DocumentMapper
while(iterator.hasNext()) {
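Note: this hunk is part of a wider cleanup in the commit that deletes the MapperBuilders static factory class (see the removed file further below); call sites now construct the mapper builders directly. A before/after sketch using only signatures that appear in the diff (rootObjectBuilder is a hypothetical RootObjectMapper.Builder, named here only for illustration):

    // Before: DocumentMapper.Builder docBuilder = doc(rootObjectBuilder, mapperService);
    // After: the constructor is called directly, no static helper involved.
    DocumentMapper.Builder docBuilder = new DocumentMapper.Builder(rootObjectBuilder, mapperService);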
@ -28,7 +28,15 @@ import org.elasticsearch.common.joda.FormatDateTimeFormatter;
|
|||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.mapper.core.BinaryFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.BooleanFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.DateFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.DateFieldMapper.DateFieldType;
|
||||
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.FloatFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.IntegerFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.LongFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.StringFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.StringFieldMapper.StringFieldType;
|
||||
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
|
||||
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
|
||||
|
@ -323,7 +331,7 @@ class DocumentParser implements Closeable {
|
|||
context.path().remove();
|
||||
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object");
|
||||
if (builder == null) {
|
||||
builder = MapperBuilders.object(currentFieldName).enabled(true);
|
||||
builder = new ObjectMapper.Builder(currentFieldName).enabled(true);
|
||||
// if this is a non root object, then explicitly set the dynamic behavior if set
|
||||
if (!(mapper instanceof RootObjectMapper) && mapper.dynamic() != ObjectMapper.Defaults.DYNAMIC) {
|
||||
((ObjectMapper.Builder) builder).dynamic(mapper.dynamic());
|
||||
|
@ -442,37 +450,37 @@ class DocumentParser implements Closeable {
|
|||
if (fieldType instanceof StringFieldType) {
|
||||
builder = context.root().findTemplateBuilder(context, currentFieldName, "string");
|
||||
if (builder == null) {
|
||||
builder = MapperBuilders.stringField(currentFieldName);
|
||||
builder = new StringFieldMapper.Builder(currentFieldName);
|
||||
}
|
||||
} else if (fieldType instanceof DateFieldType) {
|
||||
builder = context.root().findTemplateBuilder(context, currentFieldName, "date");
|
||||
if (builder == null) {
|
||||
builder = MapperBuilders.dateField(currentFieldName);
|
||||
builder = new DateFieldMapper.Builder(currentFieldName);
|
||||
}
|
||||
} else if (fieldType.numericType() != null) {
|
||||
switch (fieldType.numericType()) {
|
||||
case LONG:
|
||||
builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
|
||||
if (builder == null) {
|
||||
builder = MapperBuilders.longField(currentFieldName);
|
||||
builder = new LongFieldMapper.Builder(currentFieldName);
|
||||
}
|
||||
break;
|
||||
case DOUBLE:
|
||||
builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
|
||||
if (builder == null) {
|
||||
builder = MapperBuilders.doubleField(currentFieldName);
|
||||
builder = new DoubleFieldMapper.Builder(currentFieldName);
|
||||
}
|
||||
break;
|
||||
case INT:
|
||||
builder = context.root().findTemplateBuilder(context, currentFieldName, "integer");
|
||||
if (builder == null) {
|
||||
builder = MapperBuilders.integerField(currentFieldName);
|
||||
builder = new IntegerFieldMapper.Builder(currentFieldName);
|
||||
}
|
||||
break;
|
||||
case FLOAT:
|
||||
builder = context.root().findTemplateBuilder(context, currentFieldName, "float");
|
||||
if (builder == null) {
|
||||
builder = MapperBuilders.floatField(currentFieldName);
|
||||
builder = new FloatFieldMapper.Builder(currentFieldName);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
|
@ -503,7 +511,7 @@ class DocumentParser implements Closeable {
|
|||
dateTimeFormatter.parser().parseMillis(text);
|
||||
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "date");
|
||||
if (builder == null) {
|
||||
builder = MapperBuilders.dateField(currentFieldName).dateTimeFormatter(dateTimeFormatter);
|
||||
builder = new DateFieldMapper.Builder(currentFieldName).dateTimeFormatter(dateTimeFormatter);
|
||||
}
|
||||
return builder;
|
||||
} catch (Exception e) {
|
||||
|
@ -518,7 +526,7 @@ class DocumentParser implements Closeable {
|
|||
Long.parseLong(text);
|
||||
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
|
||||
if (builder == null) {
|
||||
builder = MapperBuilders.longField(currentFieldName);
|
||||
builder = new LongFieldMapper.Builder(currentFieldName);
|
||||
}
|
||||
return builder;
|
||||
} catch (NumberFormatException e) {
|
||||
|
@ -528,7 +536,7 @@ class DocumentParser implements Closeable {
|
|||
Double.parseDouble(text);
|
||||
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
|
||||
if (builder == null) {
|
||||
builder = MapperBuilders.doubleField(currentFieldName);
|
||||
builder = new DoubleFieldMapper.Builder(currentFieldName);
|
||||
}
|
||||
return builder;
|
||||
} catch (NumberFormatException e) {
|
||||
|
@ -537,7 +545,7 @@ class DocumentParser implements Closeable {
|
|||
}
|
||||
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string");
|
||||
if (builder == null) {
|
||||
builder = MapperBuilders.stringField(currentFieldName);
|
||||
builder = new StringFieldMapper.Builder(currentFieldName);
|
||||
}
|
||||
return builder;
|
||||
} else if (token == XContentParser.Token.VALUE_NUMBER) {
|
||||
|
@ -545,7 +553,7 @@ class DocumentParser implements Closeable {
|
|||
if (numberType == XContentParser.NumberType.INT || numberType == XContentParser.NumberType.LONG) {
|
||||
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
|
||||
if (builder == null) {
|
||||
builder = MapperBuilders.longField(currentFieldName);
|
||||
builder = new LongFieldMapper.Builder(currentFieldName);
|
||||
}
|
||||
return builder;
|
||||
} else if (numberType == XContentParser.NumberType.FLOAT || numberType == XContentParser.NumberType.DOUBLE) {
|
||||
|
@ -554,20 +562,20 @@ class DocumentParser implements Closeable {
|
|||
// no templates are defined, we use float by default instead of double
|
||||
// since this is much more space-efficient and should be enough most of
|
||||
// the time
|
||||
builder = MapperBuilders.floatField(currentFieldName);
|
||||
builder = new FloatFieldMapper.Builder(currentFieldName);
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
} else if (token == XContentParser.Token.VALUE_BOOLEAN) {
|
||||
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "boolean");
|
||||
if (builder == null) {
|
||||
builder = MapperBuilders.booleanField(currentFieldName);
|
||||
builder = new BooleanFieldMapper.Builder(currentFieldName);
|
||||
}
|
||||
return builder;
|
||||
} else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
|
||||
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "binary");
|
||||
if (builder == null) {
|
||||
builder = MapperBuilders.binaryField(currentFieldName);
|
||||
builder = new BinaryFieldMapper.Builder(currentFieldName);
|
||||
}
|
||||
return builder;
|
||||
} else {
|
||||
|
@ -677,7 +685,7 @@ class DocumentParser implements Closeable {
|
|||
if (!(parent instanceof RootObjectMapper) && parent.dynamic() != ObjectMapper.Defaults.DYNAMIC) {
|
||||
((ObjectMapper.Builder) builder).dynamic(parent.dynamic());
|
||||
}
|
||||
builder = MapperBuilders.object(paths[i]).enabled(true);
|
||||
builder = new ObjectMapper.Builder(paths[i]).enabled(true);
|
||||
}
|
||||
Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
|
||||
mapper = (ObjectMapper) builder.build(builderContext);
|
||||
|
|
|
@ -1,110 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.mapper;
|
||||
|
||||
import org.elasticsearch.index.mapper.core.BinaryFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.BooleanFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.ByteFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.DateFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.FloatFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.IntegerFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.LongFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.ShortFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.StringFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.TokenCountFieldMapper;
|
||||
import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper;
|
||||
import org.elasticsearch.index.mapper.ip.IpFieldMapper;
|
||||
import org.elasticsearch.index.mapper.object.ObjectMapper;
|
||||
import org.elasticsearch.index.mapper.object.RootObjectMapper;
|
||||
|
||||
public final class MapperBuilders {
|
||||
|
||||
private MapperBuilders() {}
|
||||
|
||||
public static DocumentMapper.Builder doc(RootObjectMapper.Builder objectBuilder, MapperService mapperService) {
|
||||
return new DocumentMapper.Builder(objectBuilder, mapperService);
|
||||
}
|
||||
|
||||
public static RootObjectMapper.Builder rootObject(String name) {
|
||||
return new RootObjectMapper.Builder(name);
|
||||
}
|
||||
|
||||
public static ObjectMapper.Builder object(String name) {
|
||||
return new ObjectMapper.Builder(name);
|
||||
}
|
||||
|
||||
public static BooleanFieldMapper.Builder booleanField(String name) {
|
||||
return new BooleanFieldMapper.Builder(name);
|
||||
}
|
||||
|
||||
public static StringFieldMapper.Builder stringField(String name) {
|
||||
return new StringFieldMapper.Builder(name);
|
||||
}
|
||||
|
||||
public static BinaryFieldMapper.Builder binaryField(String name) {
|
||||
return new BinaryFieldMapper.Builder(name);
|
||||
}
|
||||
|
||||
public static DateFieldMapper.Builder dateField(String name) {
|
||||
return new DateFieldMapper.Builder(name);
|
||||
}
|
||||
|
||||
public static IpFieldMapper.Builder ipField(String name) {
|
||||
return new IpFieldMapper.Builder(name);
|
||||
}
|
||||
|
||||
public static ShortFieldMapper.Builder shortField(String name) {
|
||||
return new ShortFieldMapper.Builder(name);
|
||||
}
|
||||
|
||||
public static ByteFieldMapper.Builder byteField(String name) {
|
||||
return new ByteFieldMapper.Builder(name);
|
||||
}
|
||||
|
||||
public static IntegerFieldMapper.Builder integerField(String name) {
|
||||
return new IntegerFieldMapper.Builder(name);
|
||||
}
|
||||
|
||||
public static TokenCountFieldMapper.Builder tokenCountField(String name) {
|
||||
return new TokenCountFieldMapper.Builder(name);
|
||||
}
|
||||
|
||||
public static LongFieldMapper.Builder longField(String name) {
|
||||
return new LongFieldMapper.Builder(name);
|
||||
}
|
||||
|
||||
public static FloatFieldMapper.Builder floatField(String name) {
|
||||
return new FloatFieldMapper.Builder(name);
|
||||
}
|
||||
|
||||
public static DoubleFieldMapper.Builder doubleField(String name) {
|
||||
return new DoubleFieldMapper.Builder(name);
|
||||
}
|
||||
|
||||
public static GeoShapeFieldMapper.Builder geoShapeField(String name) {
|
||||
return new GeoShapeFieldMapper.Builder(name);
|
||||
}
|
||||
|
||||
public static CompletionFieldMapper.Builder completionField(String name) {
|
||||
return new CompletionFieldMapper.Builder(name);
|
||||
}
|
||||
}
|
|
@ -42,7 +42,6 @@ import java.io.IOException;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.index.mapper.MapperBuilders.binaryField;
|
||||
import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
|
||||
|
||||
/**
|
||||
|
@ -79,7 +78,7 @@ public class BinaryFieldMapper extends FieldMapper {
|
|||
public static class TypeParser implements Mapper.TypeParser {
|
||||
@Override
|
||||
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
BinaryFieldMapper.Builder builder = binaryField(name);
|
||||
BinaryFieldMapper.Builder builder = new BinaryFieldMapper.Builder(name);
|
||||
parseField(builder, name, node, parserContext);
|
||||
return builder;
|
||||
}
|
||||
|
|
|
@ -41,7 +41,6 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
|
||||
import static org.elasticsearch.index.mapper.MapperBuilders.booleanField;
|
||||
import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
|
||||
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
|
||||
|
||||
|
@ -96,7 +95,7 @@ public class BooleanFieldMapper extends FieldMapper {
|
|||
public static class TypeParser implements Mapper.TypeParser {
|
||||
@Override
|
||||
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
BooleanFieldMapper.Builder builder = booleanField(name);
|
||||
BooleanFieldMapper.Builder builder = new BooleanFieldMapper.Builder(name);
|
||||
parseField(builder, name, node, parserContext);
|
||||
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
|
||||
Map.Entry<String, Object> entry = iterator.next();
|
||||
|
|
|
@ -49,7 +49,6 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeByteValue;
|
||||
import static org.elasticsearch.index.mapper.MapperBuilders.byteField;
|
||||
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
|
||||
|
||||
/**
|
||||
|
@ -97,7 +96,7 @@ public class ByteFieldMapper extends NumberFieldMapper {
|
|||
public static class TypeParser implements Mapper.TypeParser {
|
||||
@Override
|
||||
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
ByteFieldMapper.Builder builder = byteField(name);
|
||||
ByteFieldMapper.Builder builder = new ByteFieldMapper.Builder(name);
|
||||
parseNumberField(builder, name, node, parserContext);
|
||||
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
|
||||
Map.Entry<String, Object> entry = iterator.next();
|
||||
|
|
|
@ -58,7 +58,6 @@ import java.util.Map;
|
|||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.index.mapper.MapperBuilders.completionField;
|
||||
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
|
||||
|
||||
/**
|
||||
|
@ -119,7 +118,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
|
|||
|
||||
@Override
|
||||
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
CompletionFieldMapper.Builder builder = completionField(name);
|
||||
CompletionFieldMapper.Builder builder = new CompletionFieldMapper.Builder(name);
|
||||
NamedAnalyzer indexAnalyzer = null;
|
||||
NamedAnalyzer searchAnalyzer = null;
|
||||
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
|
||||
|
|
|
@ -63,7 +63,6 @@ import java.util.Objects;
|
|||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.elasticsearch.index.mapper.MapperBuilders.dateField;
|
||||
import static org.elasticsearch.index.mapper.core.TypeParsers.parseDateTimeFormatter;
|
||||
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
|
||||
|
||||
|
@ -153,7 +152,7 @@ public class DateFieldMapper extends NumberFieldMapper {
|
|||
public static class TypeParser implements Mapper.TypeParser {
|
||||
@Override
|
||||
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
DateFieldMapper.Builder builder = dateField(name);
|
||||
DateFieldMapper.Builder builder = new DateFieldMapper.Builder(name);
|
||||
parseNumberField(builder, name, node, parserContext);
|
||||
boolean configuredFormat = false;
|
||||
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
|
||||
|
|
|
@ -51,7 +51,6 @@ import java.util.Map;
|
|||
|
||||
import static org.apache.lucene.util.NumericUtils.doubleToSortableLong;
|
||||
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeDoubleValue;
|
||||
import static org.elasticsearch.index.mapper.MapperBuilders.doubleField;
|
||||
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
|
||||
|
||||
/**
|
||||
|
@ -98,7 +97,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
|
|||
public static class TypeParser implements Mapper.TypeParser {
|
||||
@Override
|
||||
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
DoubleFieldMapper.Builder builder = doubleField(name);
|
||||
DoubleFieldMapper.Builder builder = new DoubleFieldMapper.Builder(name);
|
||||
parseNumberField(builder, name, node, parserContext);
|
||||
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
|
||||
Map.Entry<String, Object> entry = iterator.next();
|
||||
|
|
|
@ -52,7 +52,6 @@ import java.util.Map;
|
|||
|
||||
import static org.apache.lucene.util.NumericUtils.floatToSortableInt;
|
||||
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeFloatValue;
|
||||
import static org.elasticsearch.index.mapper.MapperBuilders.floatField;
|
||||
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
|
||||
|
||||
/**
|
||||
|
@ -99,7 +98,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
|
|||
public static class TypeParser implements Mapper.TypeParser {
|
||||
@Override
|
||||
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
FloatFieldMapper.Builder builder = floatField(name);
|
||||
FloatFieldMapper.Builder builder = new FloatFieldMapper.Builder(name);
|
||||
parseNumberField(builder, name, node, parserContext);
|
||||
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
|
||||
Map.Entry<String, Object> entry = iterator.next();
|
||||
|
|
|
@ -51,7 +51,6 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue;
|
||||
import static org.elasticsearch.index.mapper.MapperBuilders.integerField;
|
||||
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
|
||||
|
||||
/**
|
||||
|
@ -104,7 +103,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {
|
|||
public static class TypeParser implements Mapper.TypeParser {
|
||||
@Override
|
||||
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
IntegerFieldMapper.Builder builder = integerField(name);
|
||||
IntegerFieldMapper.Builder builder = new IntegerFieldMapper.Builder(name);
|
||||
parseNumberField(builder, name, node, parserContext);
|
||||
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
|
||||
Map.Entry<String, Object> entry = iterator.next();
|
||||
|
|
|
@@ -0,0 +1,274 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.mapper.core;

import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;

import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;

/**
 * A field mapper for keywords. This mapper accepts strings and indexes them as-is.
 */
public final class KeywordFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll {

    public static final String CONTENT_TYPE = "keyword";

    public static class Defaults {
        public static final MappedFieldType FIELD_TYPE = new KeywordFieldType();

        static {
            FIELD_TYPE.setTokenized(false);
            FIELD_TYPE.setOmitNorms(true);
            FIELD_TYPE.setIndexOptions(IndexOptions.DOCS);
            FIELD_TYPE.freeze();
        }

        public static final String NULL_VALUE = null;
        public static final int IGNORE_ABOVE = Integer.MAX_VALUE;
    }

    public static class Builder extends FieldMapper.Builder<Builder, KeywordFieldMapper> {

        protected String nullValue = Defaults.NULL_VALUE;
        protected int ignoreAbove = Defaults.IGNORE_ABOVE;

        public Builder(String name) {
            super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
            builder = this;
        }

        public Builder ignoreAbove(int ignoreAbove) {
            if (ignoreAbove < 0) {
                throw new IllegalArgumentException("[ignore_above] must be positive, got " + ignoreAbove);
            }
            this.ignoreAbove = ignoreAbove;
            return this;
        }

        @Override
        public Builder indexOptions(IndexOptions indexOptions) {
            if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) > 0) {
                throw new IllegalArgumentException("The [keyword] field does not support positions, got [index_options]="
                        + indexOptionToString(fieldType.indexOptions()));
            }
            return super.indexOptions(indexOptions);
        }

        @Override
        public KeywordFieldMapper build(BuilderContext context) {
            setupFieldType(context);
            KeywordFieldMapper fieldMapper = new KeywordFieldMapper(
                    name, fieldType, defaultFieldType, ignoreAbove,
                    context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
            return fieldMapper.includeInAll(includeInAll);
        }
    }

    public static class TypeParser implements Mapper.TypeParser {
        @Override
        public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
            KeywordFieldMapper.Builder builder = new KeywordFieldMapper.Builder(name);
            parseField(builder, name, node, parserContext);
            for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                Map.Entry<String, Object> entry = iterator.next();
                String propName = Strings.toUnderscoreCase(entry.getKey());
                Object propNode = entry.getValue();
                if (propName.equals("null_value")) {
                    if (propNode == null) {
                        throw new MapperParsingException("Property [null_value] cannot be null.");
                    }
                    builder.nullValue(propNode.toString());
                    iterator.remove();
                } else if (propName.equals("ignore_above")) {
                    builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1));
                    iterator.remove();
                } else if (parseMultiField(builder, name, parserContext, propName, propNode)) {
                    iterator.remove();
                }
            }
            return builder;
        }
    }

    public static final class KeywordFieldType extends MappedFieldType {

        public KeywordFieldType() {}

        protected KeywordFieldType(KeywordFieldType ref) {
            super(ref);
        }

        public KeywordFieldType clone() {
            return new KeywordFieldType(this);
        }

        @Override
        public String typeName() {
            return CONTENT_TYPE;
        }

        @Override
        public String value(Object value) {
            if (value == null) {
                return null;
            }
            return value.toString();
        }

        @Override
        public Query nullValueQuery() {
            if (nullValue() == null) {
                return null;
            }
            return termQuery(nullValue(), null);
        }
    }

    private Boolean includeInAll;
    private int ignoreAbove;

    protected KeywordFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
                                 int ignoreAbove, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
        super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
        assert fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) <= 0;
        this.ignoreAbove = ignoreAbove;
    }

    @Override
    protected KeywordFieldMapper clone() {
        return (KeywordFieldMapper) super.clone();
    }

    @Override
    public KeywordFieldMapper includeInAll(Boolean includeInAll) {
        if (includeInAll != null) {
            KeywordFieldMapper clone = clone();
            clone.includeInAll = includeInAll;
            return clone;
        } else {
            return this;
        }
    }

    @Override
    public KeywordFieldMapper includeInAllIfNotSet(Boolean includeInAll) {
        if (includeInAll != null && this.includeInAll == null) {
            KeywordFieldMapper clone = clone();
            clone.includeInAll = includeInAll;
            return clone;
        } else {
            return this;
        }
    }

    @Override
    public KeywordFieldMapper unsetIncludeInAll() {
        if (includeInAll != null) {
            KeywordFieldMapper clone = clone();
            clone.includeInAll = null;
            return clone;
        } else {
            return this;
        }
    }

    @Override
    protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
        final String value;
        if (context.externalValueSet()) {
            value = context.externalValue().toString();
        } else {
            XContentParser parser = context.parser();
            if (parser.currentToken() == XContentParser.Token.VALUE_NULL) {
                value = fieldType().nullValueAsString();
            } else {
                value = parser.textOrNull();
            }
        }

        if (value == null || value.length() > ignoreAbove) {
            return;
        }

        if (context.includeInAll(includeInAll, this)) {
            context.allEntries().addText(fieldType().name(), value, fieldType().boost());
        }

        if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
            Field field = new Field(fieldType().name(), value, fieldType());
            fields.add(field);
        }
        if (fieldType().hasDocValues()) {
            fields.add(new SortedSetDocValuesField(fieldType().name(), new BytesRef(value)));
        }
    }

    @Override
    protected String contentType() {
        return CONTENT_TYPE;
    }

    @Override
    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
        super.doMerge(mergeWith, updateAllTypes);
        this.includeInAll = ((KeywordFieldMapper) mergeWith).includeInAll;
        this.ignoreAbove = ((KeywordFieldMapper) mergeWith).ignoreAbove;
    }

    @Override
    protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
        super.doXContentBody(builder, includeDefaults, params);

        if (includeDefaults || fieldType().nullValue() != null) {
            builder.field("null_value", fieldType().nullValue());
        }

        if (includeInAll != null) {
            builder.field("include_in_all", includeInAll);
        } else if (includeDefaults) {
            builder.field("include_in_all", true);
        }

        if (includeDefaults || ignoreAbove != Defaults.IGNORE_ABOVE) {
            builder.field("ignore_above", ignoreAbove);
        }
    }
}
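For orientation, a minimal sketch of how this new builder is meant to be driven (assumed usage, not part of this commit; the field name and parameter values are made up, and only the Builder API shown above is relied on):

// Configure a keyword field named "tag" the way TypeParser.parse would after reading
// "ignore_above" and "null_value" from a mapping.
KeywordFieldMapper.Builder builder = new KeywordFieldMapper.Builder("tag");
builder.ignoreAbove(256);     // values longer than 256 characters are silently skipped at parse time
builder.nullValue("N/A");     // indexed in place of explicit nulls; nullValueQuery() then matches them
// builder.build(context) -- with a Mapper.BuilderContext for the target index -- yields the mapper
// whose parseCreateField() emits an indexed Field plus a SortedSetDocValuesField per value.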
@@ -51,7 +51,6 @@ import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeLongValue;
import static org.elasticsearch.index.mapper.MapperBuilders.longField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

/**

@@ -103,7 +102,7 @@ public class LongFieldMapper extends NumberFieldMapper {
    public static class TypeParser implements Mapper.TypeParser {
        @Override
        public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
            LongFieldMapper.Builder builder = longField(name);
            LongFieldMapper.Builder builder = new LongFieldMapper.Builder(name);
            parseNumberField(builder, name, node, parserContext);
            for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                Map.Entry<String, Object> entry = iterator.next();
@@ -51,7 +51,6 @@ import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeShortValue;
import static org.elasticsearch.index.mapper.MapperBuilders.shortField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

/**

@@ -101,7 +100,7 @@ public class ShortFieldMapper extends NumberFieldMapper {
    public static class TypeParser implements Mapper.TypeParser {
        @Override
        public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
            ShortFieldMapper.Builder builder = shortField(name);
            ShortFieldMapper.Builder builder = new ShortFieldMapper.Builder(name);
            parseNumberField(builder, name, node, parserContext);
            for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                Map.Entry<String, Object> entry = iterator.next();
@@ -45,7 +45,6 @@ import java.util.List;
import java.util.Map;

import static org.apache.lucene.index.IndexOptions.NONE;
import static org.elasticsearch.index.mapper.MapperBuilders.stringField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseTextField;


@@ -146,7 +145,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
    public static class TypeParser implements Mapper.TypeParser {
        @Override
        public Mapper.Builder parse(String fieldName, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
            StringFieldMapper.Builder builder = stringField(fieldName);
            StringFieldMapper.Builder builder = new StringFieldMapper.Builder(fieldName);
            // hack for the fact that string can't just accept true/false for
            // the index property and still accepts no/not_analyzed/analyzed
            final Object index = node.remove("index");
@@ -43,7 +43,6 @@ import java.util.Map;

import static org.apache.lucene.index.IndexOptions.NONE;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue;
import static org.elasticsearch.index.mapper.MapperBuilders.tokenCountField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

/**

@@ -98,7 +97,7 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
        @Override
        @SuppressWarnings("unchecked")
        public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
            TokenCountFieldMapper.Builder builder = tokenCountField(name);
            TokenCountFieldMapper.Builder builder = new TokenCountFieldMapper.Builder(name);
            for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                Map.Entry<String, Object> entry = iterator.next();
                String propName = Strings.toUnderscoreCase(entry.getKey());
@@ -21,7 +21,8 @@ package org.elasticsearch.index.mapper.geo;

import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.util.GeoHashUtils;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.util.GeoHashUtils;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.Version;
import org.elasticsearch.common.Explicit;

@@ -29,7 +30,6 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

@@ -42,6 +42,7 @@ import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.core.TokenCountFieldMapper;
import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;

import java.io.IOException;

@@ -50,8 +51,6 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.mapper.MapperBuilders.doubleField;
import static org.elasticsearch.index.mapper.MapperBuilders.stringField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;


@@ -159,8 +158,8 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr

            context.path().add(name);
            if (enableLatLon) {
                NumberFieldMapper.Builder<?, ?> latMapperBuilder = doubleField(Names.LAT).includeInAll(false);
                NumberFieldMapper.Builder<?, ?> lonMapperBuilder = doubleField(Names.LON).includeInAll(false);
                NumberFieldMapper.Builder<?, ?> latMapperBuilder = new DoubleFieldMapper.Builder(Names.LAT).includeInAll(false);
                NumberFieldMapper.Builder<?, ?> lonMapperBuilder = new DoubleFieldMapper.Builder(Names.LON).includeInAll(false);
                if (precisionStep != null) {
                    latMapperBuilder.precisionStep(precisionStep);
                    lonMapperBuilder.precisionStep(precisionStep);

@@ -172,7 +171,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
            StringFieldMapper geoHashMapper = null;
            if (enableGeoHash || enableGeoHashPrefix) {
                // TODO: possible also implicitly enable geohash if geohash precision is set
                geoHashMapper = stringField(Names.GEOHASH).index(true).tokenized(false).includeInAll(false).store(fieldType.stored())
                geoHashMapper = new StringFieldMapper.Builder(Names.GEOHASH).index(true).tokenized(false).includeInAll(false).store(fieldType.stored())
                        .omitNorms(true).indexOptions(IndexOptions.DOCS).build(context);
                geoPointFieldType.setGeoHashEnabled(geoHashMapper.fieldType(), geoHashPrecision, enableGeoHashPrefix);
            }
@@ -20,9 +20,10 @@
package org.elasticsearch.index.mapper.geo;

import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.GeoPointField;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.elasticsearch.Version;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;

@@ -59,8 +60,6 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
            FIELD_TYPE.setIndexOptions(IndexOptions.DOCS);
            FIELD_TYPE.setTokenized(false);
            FIELD_TYPE.setOmitNorms(true);
            FIELD_TYPE.setNumericType(FieldType.NumericType.LONG);
            FIELD_TYPE.setNumericPrecisionStep(GeoPointField.PRECISION_STEP);
            FIELD_TYPE.setDocValuesType(DocValuesType.SORTED_NUMERIC);
            FIELD_TYPE.setHasDocValues(true);
            FIELD_TYPE.freeze();

@@ -83,6 +82,10 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
                DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
                CopyTo copyTo) {
            fieldType.setTokenized(false);
            if (context.indexCreatedVersion().before(Version.V_2_3_0)) {
                fieldType.setNumericPrecisionStep(GeoPointField.PRECISION_STEP);
                fieldType.setNumericType(FieldType.NumericType.LONG);
            }
            setupFieldType(context);
            return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper,
                    geoHashMapper, multiFields, ignoreMalformed, copyTo);

@@ -90,6 +93,10 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {

        @Override
        public GeoPointFieldMapper build(BuilderContext context) {
            if (context.indexCreatedVersion().before(Version.V_2_3_0)) {
                fieldType.setNumericPrecisionStep(GeoPointField.PRECISION_STEP);
                fieldType.setNumericType(FieldType.NumericType.LONG);
            }
            return super.build(context);
        }
    }
@@ -45,6 +45,7 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;

import java.io.IOException;
import java.util.Iterator;

@@ -53,7 +54,6 @@ import java.util.Map;
import java.util.Objects;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
import static org.elasticsearch.index.mapper.MapperBuilders.geoShapeField;


/**

@@ -160,7 +160,7 @@ public class GeoShapeFieldMapper extends FieldMapper {

        @Override
        public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
            Builder builder = geoShapeField(name);
            Builder builder = new Builder(name);
            for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                Map.Entry<String, Object> entry = iterator.next();
                String fieldName = Strings.toUnderscoreCase(entry.getKey());
@@ -38,12 +38,11 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperBuilders;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;

import java.io.IOException;

@@ -132,15 +131,15 @@ public class ParentFieldMapper extends MetadataFieldMapper {

        @Override
        public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
            StringFieldMapper parentJoinField = createParentJoinFieldMapper(typeName, new BuilderContext(indexSettings, new ContentPath(0)));
            KeywordFieldMapper parentJoinField = createParentJoinFieldMapper(typeName, new BuilderContext(indexSettings, new ContentPath(0)));
            MappedFieldType childJoinFieldType = Defaults.FIELD_TYPE.clone();
            childJoinFieldType.setName(joinField(null));
            return new ParentFieldMapper(parentJoinField, childJoinFieldType, null, indexSettings);
        }
    }

    static StringFieldMapper createParentJoinFieldMapper(String docType, BuilderContext context) {
        StringFieldMapper.Builder parentJoinField = MapperBuilders.stringField(joinField(docType));
    static KeywordFieldMapper createParentJoinFieldMapper(String docType, BuilderContext context) {
        KeywordFieldMapper.Builder parentJoinField = new KeywordFieldMapper.Builder(joinField(docType));
        parentJoinField.indexOptions(IndexOptions.NONE);
        parentJoinField.docValues(true);
        parentJoinField.fieldType().setDocValuesType(DocValuesType.SORTED);

@@ -206,9 +205,9 @@ public class ParentFieldMapper extends MetadataFieldMapper {
    private final String parentType;
    // has no impact of field data settings, is just here for creating a join field,
    // the parent field mapper in the child type pointing to this type determines the field data settings for this join field
    private final StringFieldMapper parentJoinField;
    private final KeywordFieldMapper parentJoinField;

    private ParentFieldMapper(StringFieldMapper parentJoinField, MappedFieldType childJoinFieldType, String parentType, Settings indexSettings) {
    private ParentFieldMapper(KeywordFieldMapper parentJoinField, MappedFieldType childJoinFieldType, String parentType, Settings indexSettings) {
        super(NAME, childJoinFieldType, Defaults.FIELD_TYPE, indexSettings);
        this.parentType = parentType;
        this.parentJoinField = parentJoinField;
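A hedged sketch of what the join field now amounts to (this simply mirrors createParentJoinFieldMapper above; the field name below is a placeholder standing in for joinField(docType), and `context` is the index's BuilderContext):

// Hypothetical: the per-type join field becomes a doc-values-only keyword field.
KeywordFieldMapper.Builder joinFieldBuilder = new KeywordFieldMapper.Builder("placeholder_join_field");
joinFieldBuilder.indexOptions(IndexOptions.NONE);   // the join value is never searched via the inverted index
joinFieldBuilder.docValues(true);                   // it is read back only through sorted doc values
joinFieldBuilder.fieldType().setDocValuesType(DocValuesType.SORTED);
// joinFieldBuilder.build(context) then yields the KeywordFieldMapper stored in parentJoinField.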
@@ -57,7 +57,6 @@ import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

import static org.elasticsearch.index.mapper.MapperBuilders.ipField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

/**

@@ -139,7 +138,7 @@ public class IpFieldMapper extends NumberFieldMapper {
    public static class TypeParser implements Mapper.TypeParser {
        @Override
        public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
            IpFieldMapper.Builder builder = ipField(name);
            IpFieldMapper.Builder builder = new Builder(name);
            parseNumberField(builder, name, node, parserContext);
            for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                Map.Entry<String, Object> entry = iterator.next();
@@ -50,7 +50,6 @@ import java.util.Locale;
import java.util.Map;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
import static org.elasticsearch.index.mapper.MapperBuilders.object;

/**
 *

@@ -300,7 +299,7 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
        }

        protected Builder createBuilder(String name) {
            return object(name);
            return new Builder(name);
        }
    }

@@ -26,10 +26,9 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperBuilders;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;

import java.io.IOException;

@@ -61,17 +60,16 @@ public class PercolatorFieldMapper extends FieldMapper {
        @Override
        public PercolatorFieldMapper build(BuilderContext context) {
            context.path().add(name);
            StringFieldMapper extractedTermsField = createStringFieldBuilder(EXTRACTED_TERMS_FIELD_NAME).build(context);
            StringFieldMapper unknownQueryField = createStringFieldBuilder(UNKNOWN_QUERY_FIELD_NAME).build(context);
            KeywordFieldMapper extractedTermsField = createStringFieldBuilder(EXTRACTED_TERMS_FIELD_NAME).build(context);
            KeywordFieldMapper unknownQueryField = createStringFieldBuilder(UNKNOWN_QUERY_FIELD_NAME).build(context);
            context.path().remove();
            return new PercolatorFieldMapper(name(), fieldType, defaultFieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo, queryShardContext, extractedTermsField, unknownQueryField);
        }

        static StringFieldMapper.Builder createStringFieldBuilder(String name) {
            StringFieldMapper.Builder queryMetaDataFieldBuilder = MapperBuilders.stringField(name);
        static KeywordFieldMapper.Builder createStringFieldBuilder(String name) {
            KeywordFieldMapper.Builder queryMetaDataFieldBuilder = new KeywordFieldMapper.Builder(name);
            queryMetaDataFieldBuilder.docValues(false);
            queryMetaDataFieldBuilder.store(false);
            queryMetaDataFieldBuilder.tokenized(false);
            queryMetaDataFieldBuilder.indexOptions(IndexOptions.DOCS);
            return queryMetaDataFieldBuilder;
        }

@@ -111,10 +109,10 @@ public class PercolatorFieldMapper extends FieldMapper {

    private final boolean mapUnmappedFieldAsString;
    private final QueryShardContext queryShardContext;
    private final StringFieldMapper queryTermsField;
    private final StringFieldMapper unknownQueryField;
    private final KeywordFieldMapper queryTermsField;
    private final KeywordFieldMapper unknownQueryField;

    public PercolatorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo, QueryShardContext queryShardContext, StringFieldMapper queryTermsField, StringFieldMapper unknownQueryField) {
    public PercolatorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo, QueryShardContext queryShardContext, KeywordFieldMapper queryTermsField, KeywordFieldMapper unknownQueryField) {
        super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
        this.queryShardContext = queryShardContext;
        this.queryTermsField = queryTermsField;
@@ -19,7 +19,8 @@

package org.elasticsearch.index.query;

import org.apache.lucene.search.GeoPointInBBoxQuery;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.geopoint.search.GeoPointInBBoxQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.Version;
import org.elasticsearch.common.Numbers;

@@ -105,7 +106,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding

            // we do not check longitudes as the query generation code can deal with flipped left/right values
        }

        topLeft.reset(top, left);
        bottomRight.reset(bottom, right);
        return this;

@@ -133,7 +134,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
    public GeoPoint topLeft() {
        return topLeft;
    }

    /** Returns the bottom right corner of the bounding box. */
    public GeoPoint bottomRight() {
        return bottomRight;

@@ -168,7 +169,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
        this.validationMethod = method;
        return this;
    }

    /**
     * Returns geo coordinate validation method to use.
     * */

@@ -264,8 +265,13 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
            }
        }

        if (context.indexVersionCreated().onOrAfter(Version.V_2_2_0)) {
            return new GeoPointInBBoxQuery(fieldType.name(), luceneTopLeft.lon(), luceneBottomRight.lat(),
        final Version indexVersionCreated = context.indexVersionCreated();
        if (indexVersionCreated.onOrAfter(Version.V_2_2_0)) {
            // if index created V_2_2 use (soon to be legacy) numeric encoding postings format
            // if index created V_2_3 > use prefix encoded postings format
            final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ?
                    GeoPointField.TermEncoding.NUMERIC : GeoPointField.TermEncoding.PREFIX;
            return new GeoPointInBBoxQuery(fieldType.name(), encoding, luceneTopLeft.lon(), luceneBottomRight.lat(),
                    luceneBottomRight.lon(), luceneTopLeft.lat());
        }
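The same version check recurs in the distance, distance-range, and polygon builders below; a compact sketch of the shared idea (a hypothetical helper, not part of this commit, using only the Version and GeoPointField.TermEncoding symbols that appear in the diff):

// Pick the GeoPointField term encoding from the index creation version. Indices created before
// 2.2 keep the legacy field-data query path entirely; 2.2 indices use the numeric postings
// encoding; indices created on or after 2.3 use the prefix-encoded postings format.
static GeoPointField.TermEncoding termEncoding(Version indexVersionCreated) {
    return indexVersionCreated.before(Version.V_2_3_0)
            ? GeoPointField.TermEncoding.NUMERIC
            : GeoPointField.TermEncoding.PREFIX;
}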
@@ -19,7 +19,8 @@

package org.elasticsearch.index.query;

import org.apache.lucene.search.GeoPointDistanceQuery;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;

@@ -229,14 +230,19 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue

        double normDistance = geoDistance.normalize(this.distance, DistanceUnit.DEFAULT);

        if (shardContext.indexVersionCreated().before(Version.V_2_2_0)) {
        final Version indexVersionCreated = shardContext.indexVersionCreated();
        if (indexVersionCreated.before(Version.V_2_2_0)) {
            GeoPointFieldMapperLegacy.GeoPointFieldType geoFieldType = ((GeoPointFieldMapperLegacy.GeoPointFieldType) fieldType);
            IndexGeoPointFieldData indexFieldData = shardContext.getForField(fieldType);
            return new GeoDistanceRangeQuery(center, null, normDistance, true, false, geoDistance, geoFieldType, indexFieldData, optimizeBbox);
        }

        // if index created V_2_2 use (soon to be legacy) numeric encoding postings format
        // if index created V_2_3 > use prefix encoded postings format
        final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ?
                GeoPointField.TermEncoding.NUMERIC : GeoPointField.TermEncoding.PREFIX;
        normDistance = GeoUtils.maxRadialDistance(center, normDistance);
        return new GeoPointDistanceQuery(fieldType.name(), center.lon(), center.lat(), normDistance);
        return new GeoPointDistanceQuery(fieldType.name(), encoding, center.lon(), center.lat(), normDistance);
    }

    @Override
@@ -19,9 +19,10 @@

package org.elasticsearch.index.query;

import org.apache.lucene.search.GeoPointDistanceRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.GeoDistanceUtils;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceRangeQuery;
import org.apache.lucene.spatial.util.GeoDistanceUtils;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.geo.GeoDistance;

@@ -41,7 +42,7 @@ import java.io.IOException;
import java.util.Locale;
import java.util.Objects;

import static org.apache.lucene.util.GeoUtils.TOLERANCE;
import static org.apache.lucene.spatial.util.GeoEncodingUtils.TOLERANCE;

public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistanceRangeQueryBuilder> {

@@ -267,16 +268,22 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
            toValue = GeoDistanceUtils.maxRadialDistanceMeters(point.lon(), point.lat());
        }

        if (indexCreatedBeforeV2_2 == true) {
        final Version indexVersionCreated = context.indexVersionCreated();
        if (indexVersionCreated.before(Version.V_2_2_0)) {
            GeoPointFieldMapperLegacy.GeoPointFieldType geoFieldType = ((GeoPointFieldMapperLegacy.GeoPointFieldType) fieldType);
            IndexGeoPointFieldData indexFieldData = context.getForField(fieldType);
            return new GeoDistanceRangeQuery(point, fromValue, toValue, includeLower, includeUpper, geoDistance, geoFieldType,
                indexFieldData, optimizeBbox);
                    indexFieldData, optimizeBbox);
        }

        return new GeoPointDistanceRangeQuery(fieldType.name(), point.lon(), point.lat(),
                (includeLower) ? fromValue : fromValue + TOLERANCE,
                (includeUpper) ? toValue : toValue - TOLERANCE);
        // if index created V_2_2 use (soon to be legacy) numeric encoding postings format
        // if index created V_2_3 > use prefix encoded postings format
        final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ?
                GeoPointField.TermEncoding.NUMERIC : GeoPointField.TermEncoding.PREFIX;

        return new GeoPointDistanceRangeQuery(fieldType.name(), encoding, point.lon(), point.lat(),
                (includeLower) ? fromValue : fromValue + TOLERANCE,
                (includeUpper) ? toValue : toValue - TOLERANCE);
    }

    @Override
@@ -19,7 +19,8 @@

package org.elasticsearch.index.query;

import org.apache.lucene.search.GeoPointInPolygonQuery;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.geopoint.search.GeoPointInPolygonQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;

@@ -136,7 +137,8 @@ public class GeoPolygonQueryBuilder extends AbstractQueryBuilder<GeoPolygonQuery
            }
        }

        if (context.indexVersionCreated().before(Version.V_2_2_0)) {
        final Version indexVersionCreated = context.indexVersionCreated();
        if (indexVersionCreated.before(Version.V_2_2_0)) {
            IndexGeoPointFieldData indexFieldData = context.getForField(fieldType);
            return new GeoPolygonQuery(indexFieldData, shell.toArray(new GeoPoint[shellSize]));
        }

@@ -149,7 +151,11 @@ public class GeoPolygonQueryBuilder extends AbstractQueryBuilder<GeoPolygonQuery
            lats[i] = p.lat();
            lons[i] = p.lon();
        }
        return new GeoPointInPolygonQuery(fieldType.name(), lons, lats);
        // if index created V_2_2 use (soon to be legacy) numeric encoding postings format
        // if index created V_2_3 > use prefix encoded postings format
        final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ?
                GeoPointField.TermEncoding.NUMERIC : GeoPointField.TermEncoding.PREFIX;
        return new GeoPointInPolygonQuery(fieldType.name(), encoding, lons, lats);
    }

    @Override
@@ -20,7 +20,7 @@
package org.elasticsearch.index.query;

import org.apache.lucene.search.Query;
import org.apache.lucene.util.GeoHashUtils;
import org.apache.lucene.spatial.util.GeoHashUtils;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
@@ -27,7 +27,7 @@ import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.index.search.MatchQuery;
import org.elasticsearch.indices.cache.query.terms.TermsLookup;
import org.elasticsearch.indices.TermsLookup;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.Template;
@@ -44,7 +44,6 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperBuilders;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;

@@ -278,7 +277,7 @@ public class QueryShardContext {
        if (fieldMapping != null || allowUnmappedFields) {
            return fieldMapping;
        } else if (mapUnmappedFieldAsString) {
            StringFieldMapper.Builder builder = MapperBuilders.stringField(name);
            StringFieldMapper.Builder builder = new StringFieldMapper.Builder(name);
            return builder.build(new Mapper.BuilderContext(indexSettings.getSettings(), new ContentPath(1))).fieldType();
        } else {
            throw new QueryShardException(this, "No field mapping can be found for the field with name [{}]", name);
@@ -37,8 +37,7 @@ import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.indices.cache.query.terms.TermsLookup;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.indices.TermsLookup;

import java.io.IOException;
import java.util.ArrayList;
@@ -21,7 +21,7 @@ package org.elasticsearch.index.query;

import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.indices.cache.query.terms.TermsLookup;
import org.elasticsearch.indices.TermsLookup;

import java.io.IOException;
import java.util.ArrayList;
@@ -224,7 +224,7 @@ public class IndexShard extends AbstractIndexShardComponent {
        this.getService = new ShardGetService(indexSettings, this, mapperService);
        this.searchService = new ShardSearchStats(slowLog);
        this.shardWarmerService = new ShardIndexWarmerService(shardId, indexSettings);
        this.shardQueryCache = new ShardRequestCache(shardId, indexSettings);
        this.shardQueryCache = new ShardRequestCache();
        this.shardFieldData = new ShardFieldData();
        this.indexFieldDataService = indexFieldDataService;
        this.shardBitsetFilterCache = new ShardBitsetFilterCache(shardId, indexSettings);
@@ -44,6 +44,7 @@ import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.shard.IndexShard;

@@ -158,7 +159,8 @@ public class TermVectorsService {

    private static boolean isValidField(MappedFieldType fieldType) {
        // must be a string
        if (!(fieldType instanceof StringFieldMapper.StringFieldType)) {
        if (fieldType instanceof StringFieldMapper.StringFieldType == false
                && fieldType instanceof KeywordFieldMapper.KeywordFieldType == false) {
            return false;
        }
        // and must be indexed
@@ -34,6 +34,7 @@ import org.elasticsearch.index.mapper.core.DateFieldMapper;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
import org.elasticsearch.index.mapper.core.FloatFieldMapper;
import org.elasticsearch.index.mapper.core.IntegerFieldMapper;
import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.mapper.core.LongFieldMapper;
import org.elasticsearch.index.mapper.core.ShortFieldMapper;
import org.elasticsearch.index.mapper.core.StringFieldMapper;

@@ -60,7 +61,7 @@ import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.recovery.RecoverySource;
import org.elasticsearch.indices.recovery.RecoveryTarget;
import org.elasticsearch.indices.recovery.RecoveryTargetService;
import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
import org.elasticsearch.indices.ttl.IndicesTTLService;

@@ -96,6 +97,7 @@ public class IndicesModule extends AbstractModule {
        registerMapper(DateFieldMapper.CONTENT_TYPE, new DateFieldMapper.TypeParser());
        registerMapper(IpFieldMapper.CONTENT_TYPE, new IpFieldMapper.TypeParser());
        registerMapper(StringFieldMapper.CONTENT_TYPE, new StringFieldMapper.TypeParser());
        registerMapper(KeywordFieldMapper.CONTENT_TYPE, new KeywordFieldMapper.TypeParser());
        registerMapper(TokenCountFieldMapper.CONTENT_TYPE, new TokenCountFieldMapper.TypeParser());
        registerMapper(ObjectMapper.CONTENT_TYPE, new ObjectMapper.TypeParser());
        registerMapper(ObjectMapper.NESTED_CONTENT_TYPE, new ObjectMapper.TypeParser());

@@ -153,7 +155,7 @@ public class IndicesModule extends AbstractModule {

        bind(IndicesService.class).asEagerSingleton();
        bind(RecoverySettings.class).asEagerSingleton();
        bind(RecoveryTarget.class).asEagerSingleton();
        bind(RecoveryTargetService.class).asEagerSingleton();
        bind(RecoverySource.class).asEagerSingleton();
        bind(IndicesStore.class).asEagerSingleton();
        bind(IndicesClusterStateService.class).asEagerSingleton();
@@ -17,7 +17,7 @@
 * under the License.
 */

package org.elasticsearch.indices.cache.query;
package org.elasticsearch.indices;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
@@ -0,0 +1,337 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.indices;

import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.ObjectSet;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;
import org.elasticsearch.common.cache.CacheLoader;
import org.elasticsearch.common.cache.RemovalListener;
import org.elasticsearch.common.cache.RemovalNotification;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;

import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;

/**
 * The indices request cache allows to cache a shard level request stage responses, helping with improving
 * similar requests that are potentially expensive (because of aggs for example). The cache is fully coherent
 * with the semantics of NRT (the index reader version is part of the cache key), and relies on size based
 * eviction to evict old reader associated cache entries as well as scheduler reaper to clean readers that
 * are no longer used or closed shards.
 * <p>
 * Currently, the cache is only enabled for count requests, and can only be opted in on an index
 * level setting that can be dynamically changed and defaults to false.
 * <p>
 * There are still several TODOs left in this class, some easily addressable, some more complex, but the support
 * is functional.
 */
public final class IndicesRequestCache extends AbstractComponent implements RemovalListener<IndicesRequestCache.Key,
        IndicesRequestCache.Value>, Closeable {

    /**
     * A setting to enable or disable request caching on an index level. Its dynamic by default
     * since we are checking on the cluster state IndexMetaData always.
     */
    public static final Setting<Boolean> INDEX_CACHE_REQUEST_ENABLED_SETTING = Setting.boolSetting("index.requests.cache.enable",
            false, true, Setting.Scope.INDEX);
    public static final Setting<ByteSizeValue> INDICES_CACHE_QUERY_SIZE = Setting.byteSizeSetting("indices.requests.cache.size", "1%",
            false, Setting.Scope.CLUSTER);
    public static final Setting<TimeValue> INDICES_CACHE_QUERY_EXPIRE = Setting.positiveTimeSetting("indices.requests.cache.expire",
            new TimeValue(0), false, Setting.Scope.CLUSTER);

    private final ConcurrentMap<CleanupKey, Boolean> registeredClosedListeners = ConcurrentCollections.newConcurrentMap();
    private final Set<CleanupKey> keysToClean = ConcurrentCollections.newConcurrentSet();
    private final ByteSizeValue size;
    private final TimeValue expire;
    private final Cache<Key, Value> cache;

    IndicesRequestCache(Settings settings) {
        super(settings);
        this.size = INDICES_CACHE_QUERY_SIZE.get(settings);
        this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? INDICES_CACHE_QUERY_EXPIRE.get(settings) : null;
        long sizeInBytes = size.bytes();
        CacheBuilder<Key, Value> cacheBuilder = CacheBuilder.<Key, Value>builder()
                .setMaximumWeight(sizeInBytes).weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()).removalListener(this);
        if (expire != null) {
            cacheBuilder.setExpireAfterAccess(TimeUnit.MILLISECONDS.toNanos(expire.millis()));
        }
        cache = cacheBuilder.build();
    }

    @Override
    public void close() {
        cache.invalidateAll();
    }

    void clear(CacheEntity entity) {
        keysToClean.add(new CleanupKey(entity, -1));
        cleanCache();
    }

    @Override
    public void onRemoval(RemovalNotification<Key, Value> notification) {
        notification.getKey().entity.onRemoval(notification);
    }

    BytesReference getOrCompute(CacheEntity cacheEntity, DirectoryReader reader, BytesReference cacheKey) throws Exception {
        final Key key = new Key(cacheEntity, reader.getVersion(), cacheKey);
        Loader loader = new Loader(cacheEntity);
        Value value = cache.computeIfAbsent(key, loader);
        if (loader.isLoaded()) {
            key.entity.onMiss();
            // see if its the first time we see this reader, and make sure to register a cleanup key
            CleanupKey cleanupKey = new CleanupKey(cacheEntity, reader.getVersion());
            if (!registeredClosedListeners.containsKey(cleanupKey)) {
                Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE);
                if (previous == null) {
                    ElasticsearchDirectoryReader.addReaderCloseListener(reader, cleanupKey);
                }
            }
        } else {
            key.entity.onHit();
        }
        return value.reference;
    }

    private static class Loader implements CacheLoader<Key, Value> {

        private final CacheEntity entity;
        private boolean loaded;

        Loader(CacheEntity entity) {
            this.entity = entity;
        }

        public boolean isLoaded() {
            return this.loaded;
        }

        @Override
        public Value load(Key key) throws Exception {
            Value value = entity.loadValue();
            entity.onCached(key, value);
            loaded = true;
            return value;
        }
    }

    /**
     * Basic interface to make this cache testable.
     */
    interface CacheEntity {
        /**
         * Loads the actual cache value. this is the heavy lifting part.
         */
        Value loadValue() throws IOException;

        /**
         * Called after the value was loaded via {@link #loadValue()}
         */
        void onCached(Key key, Value value);

        /**
         * Returns <code>true</code> iff the resource behind this entity is still open ie.
         * entities assiciated with it can remain in the cache. ie. IndexShard is still open.
         */
        boolean isOpen();

        /**
         * Returns the cache identity. this is, similar to {@link #isOpen()} the resource identity behind this cache entity.
         * For instance IndexShard is the identity while a CacheEntity is per DirectoryReader. Yet, we group by IndexShard instance.
         */
        Object getCacheIdentity();

        /**
         * Called each time this entity has a cache hit.
         */
        void onHit();

        /**
         * Called each time this entity has a cache miss.
         */
        void onMiss();

        /**
         * Called when this entity instance is removed
         */
        void onRemoval(RemovalNotification<Key, Value> notification);
    }


    static class Value implements Accountable {
        final BytesReference reference;
        final long ramBytesUsed;

        Value(BytesReference reference, long ramBytesUsed) {
            this.reference = reference;
            this.ramBytesUsed = ramBytesUsed;
        }

        @Override
        public long ramBytesUsed() {
            return ramBytesUsed;
        }

        @Override
        public Collection<Accountable> getChildResources() {
            return Collections.emptyList();
        }
    }

    static class Key implements Accountable {
        public final CacheEntity entity; // use as identity equality
        public final long readerVersion; // use the reader version to now keep a reference to a "short" lived reader until its reaped
        public final BytesReference value;

        Key(CacheEntity entity, long readerVersion, BytesReference value) {
            this.entity = entity;
            this.readerVersion = readerVersion;
            this.value = value;
        }

        @Override
        public long ramBytesUsed() {
            return RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_LONG + value.length();
        }

        @Override
        public Collection<Accountable> getChildResources() {
            // TODO: more detailed ram usage?
            return Collections.emptyList();
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            Key key = (Key) o;
            if (readerVersion != key.readerVersion) return false;
            if (!entity.getCacheIdentity().equals(key.entity.getCacheIdentity())) return false;
            if (!value.equals(key.value)) return false;
            return true;
        }

        @Override
        public int hashCode() {
            int result = entity.getCacheIdentity().hashCode();
            result = 31 * result + Long.hashCode(readerVersion);
            result = 31 * result + value.hashCode();
            return result;
        }
    }

    private class CleanupKey implements IndexReader.ReaderClosedListener {
        final CacheEntity entity;
        final long readerVersion; // use the reader version to now keep a reference to a "short" lived reader until its reaped

        private CleanupKey(CacheEntity entity, long readerVersion) {
            this.entity = entity;
            this.readerVersion = readerVersion;
        }

        @Override
        public void onClose(IndexReader reader) {
            Boolean remove = registeredClosedListeners.remove(this);
            if (remove != null) {
                keysToClean.add(this);
            }
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            CleanupKey that = (CleanupKey) o;
            if (readerVersion != that.readerVersion) return false;
            if (!entity.getCacheIdentity().equals(that.entity.getCacheIdentity())) return false;
            return true;
        }

        @Override
        public int hashCode() {
            int result = entity.getCacheIdentity().hashCode();
            result = 31 * result + Long.hashCode(readerVersion);
            return result;
        }
    }


    synchronized void cleanCache() {
        final ObjectSet<CleanupKey> currentKeysToClean = new ObjectHashSet<>();
        final ObjectSet<Object> currentFullClean = new ObjectHashSet<>();
        currentKeysToClean.clear();
        currentFullClean.clear();
        for (Iterator<CleanupKey> iterator = keysToClean.iterator(); iterator.hasNext(); ) {
            CleanupKey cleanupKey = iterator.next();
            iterator.remove();
            if (cleanupKey.readerVersion == -1 || cleanupKey.entity.isOpen() == false) {
                // -1 indicates full cleanup, as does a closed shard
                currentFullClean.add(cleanupKey.entity.getCacheIdentity());
            } else {
                currentKeysToClean.add(cleanupKey);
            }
        }
        if (!currentKeysToClean.isEmpty() || !currentFullClean.isEmpty()) {
            for (Iterator<Key> iterator = cache.keys().iterator(); iterator.hasNext(); ) {
                Key key = iterator.next();
                if (currentFullClean.contains(key.entity.getCacheIdentity())) {
                    iterator.remove();
                } else {
                    if (currentKeysToClean.contains(new CleanupKey(key.entity, key.readerVersion))) {
                        iterator.remove();
                    }
                }
            }
        }

        cache.refresh();
    }


    /**
     * Returns the current size of the cache
     */
    final int count() {
        return cache.count();
    }

    final int numRegisteredCloseListeners() { // for testing
        return registeredClosedListeners.size();
    }
}
@ -19,8 +19,8 @@
|
|||
|
||||
package org.elasticsearch.indices;
|
||||
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.store.LockObtainFailedException;
|
||||
import org.apache.lucene.util.Accountable;
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
|
@ -29,15 +29,19 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
|
|||
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag;
|
||||
import org.elasticsearch.action.admin.indices.stats.IndexShardStats;
|
||||
import org.elasticsearch.action.admin.indices.stats.ShardStats;
|
||||
import org.elasticsearch.action.search.SearchType;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.breaker.CircuitBreaker;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.cache.RemovalNotification;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
|
@ -56,6 +60,7 @@ import org.elasticsearch.index.IndexService;
|
|||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.NodeServicesProvider;
|
||||
import org.elasticsearch.index.analysis.AnalysisRegistry;
|
||||
import org.elasticsearch.index.cache.request.ShardRequestCache;
|
||||
import org.elasticsearch.index.fielddata.FieldDataType;
|
||||
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
|
||||
import org.elasticsearch.index.flush.FlushStats;
|
||||
|
@ -67,21 +72,25 @@ import org.elasticsearch.index.search.stats.SearchStats;
|
|||
import org.elasticsearch.index.shard.IllegalIndexShardStateException;
|
||||
import org.elasticsearch.index.shard.IndexEventListener;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.IndexShardState;
|
||||
import org.elasticsearch.index.shard.IndexingStats;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.store.IndexStoreConfig;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
|
||||
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
|
||||
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
|
||||
import org.elasticsearch.indices.mapper.MapperRegistry;
|
||||
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
|
||||
import org.elasticsearch.plugins.PluginsService;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.search.internal.ShardSearchRequest;
|
||||
import org.elasticsearch.search.query.QueryPhase;
|
||||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.util.ArrayList;
|
||||
import java.util.EnumSet;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
|
@ -106,7 +115,7 @@ import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList;
|
|||
public class IndicesService extends AbstractLifecycleComponent<IndicesService> implements Iterable<IndexService>, IndexService.ShardStoreDeleter {
|
||||
|
||||
public static final String INDICES_SHARDS_CLOSED_TIMEOUT = "indices.shards_closed_timeout";
|
||||
public static final Setting<TimeValue> INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.fielddata.cache.cleanup_interval", TimeValue.timeValueMinutes(1), false, Setting.Scope.CLUSTER);
|
||||
public static final Setting<TimeValue> INDICES_CACHE_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.cache.cleanup_interval", TimeValue.timeValueMinutes(1), false, Setting.Scope.CLUSTER);
|
||||
private final PluginsService pluginsService;
|
||||
private final NodeEnvironment nodeEnv;
|
||||
private final TimeValue shardsClosedTimeout;
|
||||
|
@ -116,7 +125,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
private final IndexNameExpressionResolver indexNameExpressionResolver;
|
||||
private final IndexScopedSettings indexScopeSetting;
|
||||
private final IndicesFieldDataCache indicesFieldDataCache;
|
||||
private final FieldDataCacheCleaner fieldDataCacheCleaner;
|
||||
private final CacheCleaner cacheCleaner;
|
||||
private final ThreadPool threadPool;
|
||||
private final CircuitBreakerService circuitBreakerService;
|
||||
private volatile Map<String, IndexService> indices = emptyMap();
|
||||
|
@ -132,7 +141,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
@Override
|
||||
protected void doStart() {
|
||||
// Start thread that will manage cleaning the field data cache periodically
|
||||
threadPool.schedule(this.cleanInterval, ThreadPool.Names.SAME, this.fieldDataCacheCleaner);
|
||||
threadPool.schedule(this.cleanInterval, ThreadPool.Names.SAME, this.cacheCleaner);
|
||||
}
|
||||
|
||||
@Inject
|
||||
|
@ -150,7 +159,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
this.indicesQueriesRegistry = indicesQueriesRegistry;
|
||||
this.clusterService = clusterService;
|
||||
this.indexNameExpressionResolver = indexNameExpressionResolver;
|
||||
this.indicesRequestCache = new IndicesRequestCache(settings, threadPool);
|
||||
this.indicesRequestCache = new IndicesRequestCache(settings);
|
||||
this.indicesQueryCache = new IndicesQueryCache(settings);
|
||||
this.mapperRegistry = mapperRegistry;
|
||||
clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, indexStoreConfig::setRateLimitingType);
|
||||
|
@ -165,8 +174,8 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
circuitBreakerService.getBreaker(CircuitBreaker.FIELDDATA).addWithoutBreaking(-sizeInBytes);
|
||||
}
|
||||
});
|
||||
this.cleanInterval = INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING.get(settings);
|
||||
this.fieldDataCacheCleaner = new FieldDataCacheCleaner(indicesFieldDataCache, logger, threadPool, this.cleanInterval);
|
||||
this.cleanInterval = INDICES_CACHE_CLEAN_INTERVAL_SETTING.get(settings);
|
||||
this.cacheCleaner = new CacheCleaner(indicesFieldDataCache, indicesRequestCache, logger, threadPool, this.cleanInterval);
|
||||
|
||||
|
||||
}
|
||||
|
@ -202,7 +211,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
|
||||
@Override
|
||||
protected void doClose() {
|
||||
IOUtils.closeWhileHandlingException(analysisRegistry, indexingMemoryController, indicesFieldDataCache, fieldDataCacheCleaner, indicesRequestCache, indicesQueryCache);
|
||||
IOUtils.closeWhileHandlingException(analysisRegistry, indexingMemoryController, indicesFieldDataCache, cacheCleaner, indicesRequestCache, indicesQueryCache);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -433,10 +442,6 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
return circuitBreakerService;
|
||||
}
|
||||
|
||||
public IndicesRequestCache getIndicesRequestCache() {
|
||||
return indicesRequestCache;
|
||||
}
|
||||
|
||||
public IndicesQueryCache getIndicesQueryCache() {
|
||||
return indicesQueryCache;
|
||||
}
|
||||
|
@ -827,16 +832,18 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
* has an entry invalidated may not clean up the entry if it is not read from
|
||||
* or written to after invalidation.
|
||||
*/
|
||||
private final static class FieldDataCacheCleaner implements Runnable, Releasable {
|
||||
private final static class CacheCleaner implements Runnable, Releasable {
|
||||
|
||||
private final IndicesFieldDataCache cache;
|
||||
private final ESLogger logger;
|
||||
private final ThreadPool threadPool;
|
||||
private final TimeValue interval;
|
||||
private final AtomicBoolean closed = new AtomicBoolean(false);
|
||||
private final IndicesRequestCache requestCache;
|
||||
|
||||
public FieldDataCacheCleaner(IndicesFieldDataCache cache, ESLogger logger, ThreadPool threadPool, TimeValue interval) {
|
||||
public CacheCleaner(IndicesFieldDataCache cache, IndicesRequestCache requestCache, ESLogger logger, ThreadPool threadPool, TimeValue interval) {
|
||||
this.cache = cache;
|
||||
this.requestCache = requestCache;
|
||||
this.logger = logger;
|
||||
this.threadPool = threadPool;
|
||||
this.interval = interval;
|
||||
|
@ -856,6 +863,12 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("periodic field data cache cleanup finished in {} milliseconds", TimeValue.nsecToMSec(System.nanoTime() - startTimeNS));
|
||||
}
|
||||
|
||||
try {
|
||||
this.requestCache.cleanCache();
|
||||
} catch (Exception e) {
|
||||
logger.warn("Exception during periodic request cache cleanup:", e);
|
||||
}
|
||||
// Reschedule itself to run again if not closed
|
||||
if (closed.get() == false) {
|
||||
threadPool.schedule(interval, ThreadPool.Names.SAME, this);
|
||||
|
@ -867,4 +880,148 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
closed.compareAndSet(false, true);
|
||||
}
|
||||
}
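// --- Editor's sketch (not part of the diff): the self-rescheduling cleanup loop used by CacheCleaner,
// --- expressed with a plain ScheduledExecutorService instead of the Elasticsearch ThreadPool.
// --- A failing cleanup run is logged and must not break the loop; close() stops rescheduling.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

final class SketchPeriodicCleaner implements Runnable {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final AtomicBoolean closed = new AtomicBoolean(false);
    private final long intervalMillis;
    private final Runnable cleanup;

    SketchPeriodicCleaner(long intervalMillis, Runnable cleanup) {
        this.intervalMillis = intervalMillis;
        this.cleanup = cleanup;
    }

    void start() {
        scheduler.schedule(this, intervalMillis, TimeUnit.MILLISECONDS);
    }

    @Override
    public void run() {
        try {
            cleanup.run();                  // best effort: one failing run must not kill the schedule
        } catch (Exception e) {
            System.err.println("periodic cleanup failed: " + e);
        }
        if (closed.get() == false) {        // reschedule ourselves until closed
            scheduler.schedule(this, intervalMillis, TimeUnit.MILLISECONDS);
        }
    }

    void close() {
        if (closed.compareAndSet(false, true)) {
            scheduler.shutdown();
        }
    }
}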
|
||||
|
||||
|
||||
private static final Set<SearchType> CACHEABLE_SEARCH_TYPES = EnumSet.of(SearchType.QUERY_THEN_FETCH, SearchType.QUERY_AND_FETCH);
|
||||
|
||||
/**
|
||||
* Can the shard request be cached at all?
|
||||
*/
|
||||
public boolean canCache(ShardSearchRequest request, SearchContext context) {
|
||||
if (request.template() != null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// for now, only enable it for requests with no hits
|
||||
if (context.size() != 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// We cannot cache with DFS because results depend not only on the content of the index but also
|
||||
// on the overridden statistics. So if you ran two queries on the same index with different stats
|
||||
// (because another shard was updated) you would get wrong results because of the scores
|
||||
// (think about top_hits aggs or scripts using the score)
|
||||
if (!CACHEABLE_SEARCH_TYPES.contains(context.searchType())) {
|
||||
return false;
|
||||
}
|
||||
IndexSettings settings = context.indexShard().getIndexSettings();
|
||||
// if the request does not set the cache flag explicitly, fall back to the index setting; otherwise honor the request
|
||||
if (request.requestCache() == null) {
|
||||
if (settings.getValue(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING) == false) {
|
||||
return false;
|
||||
}
|
||||
} else if (request.requestCache() == false) {
|
||||
return false;
|
||||
}
|
||||
// if the reader is not a directory reader, we can't get the version from it
|
||||
if ((context.searcher().getIndexReader() instanceof DirectoryReader) == false) {
|
||||
return false;
|
||||
}
|
||||
// if "now" in millis is used (or, in the future, a more generic "isDeterministic" flag)
|
||||
// then we can't cache based on "now" key within the search request, as it is not deterministic
|
||||
if (context.nowInMillisUsed()) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
|
||||
}
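// --- Editor's sketch (not part of the diff): the request-cache eligibility rules of canCache() reduced
// --- to a standalone function with plain, hypothetical parameters (not the real Elasticsearch API).
// --- An explicit request-level flag wins; otherwise the index-level setting decides; hit-producing or
// --- non-deterministic requests are never cached.
final class SketchCacheDecision {
    static boolean shouldCache(Boolean requestFlag, boolean indexSettingEnabled, int hitsRequested, boolean deterministic) {
        if (hitsRequested != 0 || deterministic == false) {
            return false;                      // only hit-less, deterministic requests are cacheable
        }
        if (requestFlag != null) {
            return requestFlag;                // explicit request flag takes precedence
        }
        return indexSettingEnabled;            // otherwise fall back to the index setting
    }

    public static void main(String[] args) {
        System.out.println(shouldCache(null, true, 0, true));   // true: enabled by the index setting
        System.out.println(shouldCache(false, true, 0, true));  // false: the request opts out
        System.out.println(shouldCache(null, false, 0, true));  // false: disabled by default
    }
}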
|
||||
|
||||
public void clearRequestCache(IndexShard shard) {
|
||||
if (shard == null) {
|
||||
return;
|
||||
}
|
||||
indicesRequestCache.clear(new IndexShardCacheEntity(shard));
|
||||
logger.trace("{} explicit cache clear", shard.shardId());
|
||||
}
|
||||
/**
|
||||
* Loads the cache result, computing it if needed by executing the query phase and otherwise deserializing the cached
|
||||
* value into the {@link SearchContext#queryResult() context's query result}. The combination of load + compute allows
|
||||
* to have a single load operation that will cause other requests with the same key to wait until it is loaded and then reuse
|
||||
* the same cached value.
|
||||
*/
|
||||
public void loadIntoContext(ShardSearchRequest request, SearchContext context, QueryPhase queryPhase) throws Exception {
|
||||
assert canCache(request, context);
|
||||
final IndexShardCacheEntity entity = new IndexShardCacheEntity(context.indexShard(), queryPhase, context);
|
||||
final DirectoryReader directoryReader = context.searcher().getDirectoryReader();
|
||||
final BytesReference bytesReference = indicesRequestCache.getOrCompute(entity, directoryReader, request.cacheKey());
|
||||
if (entity.loaded == false) { // if we have loaded this we don't need to do anything
|
||||
// restore the cached query result into the context
|
||||
final QuerySearchResult result = context.queryResult();
|
||||
result.readFromWithId(context.id(), bytesReference.streamInput());
|
||||
result.shardTarget(context.shardTarget());
|
||||
}
|
||||
}
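// --- Editor's sketch (not part of the diff): the "compute once, concurrent callers wait and reuse"
// --- load semantics described in the javadoc above, reduced to ConcurrentHashMap.computeIfAbsent.
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Supplier;

final class SketchSingleFlightCache<K, V> {
    private final ConcurrentMap<K, V> cache = new ConcurrentHashMap<>();

    V getOrCompute(K key, Supplier<V> loader) {
        // computeIfAbsent runs the loader at most once per key; other threads asking for the
        // same key block until the value is present and then reuse the cached result.
        return cache.computeIfAbsent(key, k -> loader.get());
    }
}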
|
||||
|
||||
static final class IndexShardCacheEntity implements IndicesRequestCache.CacheEntity {
|
||||
private final QueryPhase queryPhase;
|
||||
private final SearchContext context;
|
||||
private final IndexShard indexShard;
|
||||
private final ShardRequestCache requestCache;
|
||||
private boolean loaded = false;
|
||||
|
||||
IndexShardCacheEntity(IndexShard indexShard) {
|
||||
this(indexShard, null, null);
|
||||
}
|
||||
|
||||
public IndexShardCacheEntity(IndexShard indexShard, QueryPhase queryPhase, SearchContext context) {
|
||||
this.queryPhase = queryPhase;
|
||||
this.context = context;
|
||||
this.indexShard = indexShard;
|
||||
this.requestCache = indexShard.requestCache();
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndicesRequestCache.Value loadValue() throws IOException {
|
||||
queryPhase.execute(context);
|
||||
/* BytesStreamOutput allows to pass the expected size but by default uses
|
||||
* BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result, i.e.
|
||||
* a date histogram with 3 buckets, is ~100 bytes, so 16k might be very wasteful
|
||||
* since we don't shrink to the actual size once we are done serializing.
|
||||
* By passing 512 as the expected size we will resize the byte array in the stream
|
||||
* slowly until we hit the page size and don't waste too much memory for small query
|
||||
* results.*/
|
||||
final int expectedSizeInBytes = 512;
|
||||
try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) {
|
||||
context.queryResult().writeToNoId(out);
|
||||
// for now, keep the paged data structure, which might have unused bytes to fill a page, but better to keep
|
||||
// the memory properly paged instead of having varied sized bytes
|
||||
final BytesReference reference = out.bytes();
|
||||
loaded = true;
|
||||
return new IndicesRequestCache.Value(reference, out.ramBytesUsed());
|
||||
}
|
||||
}
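// --- Editor's sketch (not part of the diff): the "start with a small expected size" serialization idea
// --- from the comment above, shown with a plain ByteArrayOutputStream and a 512-byte hint.
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

final class SketchSmallBuffer {
    public static void main(String[] args) {
        ByteArrayOutputStream out = new ByteArrayOutputStream(512);      // expected size hint, not a hard limit
        byte[] payload = "a small aggregation result".getBytes(StandardCharsets.UTF_8);
        out.write(payload, 0, payload.length);
        System.out.println("serialized " + out.size() + " bytes");       // stays far below a 16k page
    }
}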
|
||||
|
||||
@Override
|
||||
public void onCached(IndicesRequestCache.Key key, IndicesRequestCache.Value value) {
|
||||
requestCache.onCached(key, value);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public boolean isOpen() {
|
||||
return indexShard.state() != IndexShardState.CLOSED;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object getCacheIdentity() {
|
||||
return indexShard;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onHit() {
|
||||
requestCache.onHit();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onMiss() {
|
||||
requestCache.onMiss();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onRemoval(RemovalNotification<IndicesRequestCache.Key, IndicesRequestCache.Value> notification) {
|
||||
requestCache.onRemoval(notification.getKey(), notification.getValue(), notification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED);
|
||||
}
|
||||
|
||||
}
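// --- Editor's sketch (not part of the diff): the callback-adapter role IndexShardCacheEntity plays,
// --- reduced to a per-shard hit/miss counter. The real entity also forwards onCached/onRemoval events
// --- to the shard's request-cache statistics.
import java.util.concurrent.atomic.LongAdder;

final class SketchShardCacheStats {
    private final LongAdder hits = new LongAdder();
    private final LongAdder misses = new LongAdder();

    void onHit()  { hits.increment(); }     // called when a cached value is reused
    void onMiss() { misses.increment(); }   // called when the loader had to run

    @Override
    public String toString() {
        return "hits=" + hits.sum() + ", misses=" + misses.sum();
    }
}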
|
||||
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.indices.cache.query.terms;
|
||||
package org.elasticsearch.indices;
|
||||
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -123,11 +123,12 @@ public class TermsLookup implements Writeable<TermsLookup>, ToXContent {
|
|||
path = parser.text();
|
||||
break;
|
||||
default:
|
||||
throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + "] query does not support [" + currentFieldName
|
||||
+ "] within lookup element");
|
||||
throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME +
|
||||
"] query does not support [" + currentFieldName + "] within lookup element");
|
||||
}
|
||||
} else {
|
||||
throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
|
||||
throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + "] unknown token ["
|
||||
+ token + "] after [" + currentFieldName + "]");
|
||||
}
|
||||
}
|
||||
return new TermsLookup(index, type, id, path).routing(routing);
|
|
@ -1,443 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.indices.cache.request;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectHashSet;
|
||||
import com.carrotsearch.hppc.ObjectSet;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.util.Accountable;
|
||||
import org.apache.lucene.util.RamUsageEstimator;
|
||||
import org.elasticsearch.action.search.SearchType;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.cache.Cache;
|
||||
import org.elasticsearch.common.cache.CacheBuilder;
|
||||
import org.elasticsearch.common.cache.CacheLoader;
|
||||
import org.elasticsearch.common.cache.RemovalListener;
|
||||
import org.elasticsearch.common.cache.RemovalNotification;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.unit.MemorySizeValue;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.IndexShardState;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.search.internal.ShardSearchRequest;
|
||||
import org.elasticsearch.search.query.QueryPhase;
|
||||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.EnumSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* The indices request cache allows caching shard-level request stage responses, helping to speed up
|
||||
* similar requests that are potentially expensive (because of aggs for example). The cache is fully coherent
|
||||
* with the semantics of NRT (the index reader version is part of the cache key), and relies on size based
|
||||
* eviction to evict old reader-associated cache entries, as well as a scheduled reaper to clean entries for readers that
|
||||
* are no longer used or that belong to closed shards.
|
||||
* <p>
|
||||
* Currently, the cache is only enabled for count requests, and can only be opted into through an index
|
||||
* level setting that can be dynamically changed and defaults to false.
|
||||
* <p>
|
||||
* There are still several TODOs left in this class, some easily addressable, some more complex, but the support
|
||||
* is functional.
|
||||
*/
|
||||
public class IndicesRequestCache extends AbstractComponent implements RemovalListener<IndicesRequestCache.Key, IndicesRequestCache.Value>, Closeable {
|
||||
|
||||
/**
|
||||
* A setting to enable or disable request caching on an index level. It is dynamic by default
|
||||
* since we always check the cluster state IndexMetaData.
|
||||
*/
|
||||
public static final Setting<Boolean> INDEX_CACHE_REQUEST_ENABLED_SETTING = Setting.boolSetting("index.requests.cache.enable", false, true, Setting.Scope.INDEX);
|
||||
public static final Setting<TimeValue> INDICES_CACHE_REQUEST_CLEAN_INTERVAL = Setting.positiveTimeSetting("indices.requests.cache.clean_interval", TimeValue.timeValueSeconds(60), false, Setting.Scope.CLUSTER);
|
||||
|
||||
public static final Setting<ByteSizeValue> INDICES_CACHE_QUERY_SIZE = Setting.byteSizeSetting("indices.requests.cache.size", "1%", false, Setting.Scope.CLUSTER);
|
||||
public static final Setting<TimeValue> INDICES_CACHE_QUERY_EXPIRE = Setting.positiveTimeSetting("indices.requests.cache.expire", new TimeValue(0), false, Setting.Scope.CLUSTER);
|
||||
|
||||
private static final Set<SearchType> CACHEABLE_SEARCH_TYPES = EnumSet.of(SearchType.QUERY_THEN_FETCH, SearchType.QUERY_AND_FETCH);
|
||||
|
||||
private final ThreadPool threadPool;
|
||||
|
||||
private final TimeValue cleanInterval;
|
||||
private final Reaper reaper;
|
||||
|
||||
final ConcurrentMap<CleanupKey, Boolean> registeredClosedListeners = ConcurrentCollections.newConcurrentMap();
|
||||
final Set<CleanupKey> keysToClean = ConcurrentCollections.newConcurrentSet();
|
||||
|
||||
|
||||
//TODO make these changes configurable on the cluster level
|
||||
private final ByteSizeValue size;
|
||||
private final TimeValue expire;
|
||||
|
||||
private volatile Cache<Key, Value> cache;
|
||||
|
||||
public IndicesRequestCache(Settings settings, ThreadPool threadPool) {
|
||||
super(settings);
|
||||
this.threadPool = threadPool;
|
||||
this.cleanInterval = INDICES_CACHE_REQUEST_CLEAN_INTERVAL.get(settings);
|
||||
this.size = INDICES_CACHE_QUERY_SIZE.get(settings);
|
||||
this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? INDICES_CACHE_QUERY_EXPIRE.get(settings) : null;
|
||||
buildCache();
|
||||
this.reaper = new Reaper();
|
||||
threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, reaper);
|
||||
}
|
||||
|
||||
|
||||
private void buildCache() {
|
||||
long sizeInBytes = size.bytes();
|
||||
CacheBuilder<Key, Value> cacheBuilder = CacheBuilder.<Key, Value>builder()
|
||||
.setMaximumWeight(sizeInBytes).weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()).removalListener(this);
|
||||
|
||||
if (expire != null) {
|
||||
cacheBuilder.setExpireAfterAccess(TimeUnit.MILLISECONDS.toNanos(expire.millis()));
|
||||
}
|
||||
|
||||
cache = cacheBuilder.build();
|
||||
}
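// --- Editor's sketch (not part of the diff): the same size-bounded, weight-based cache shape built
// --- here, expressed with Guava's CacheBuilder (assumption: Guava on the classpath; not the internal
// --- Elasticsearch cache). Entry weight is key bytes plus value bytes, with optional expire-after-access.
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.TimeUnit;

final class SketchWeightedCache {
    static Cache<String, byte[]> build(long maxBytes, long expireAfterAccessMillis) {
        CacheBuilder<String, byte[]> builder = CacheBuilder.newBuilder()
                .maximumWeight(maxBytes)
                .weigher((String key, byte[] value) -> key.length() + value.length);
        if (expireAfterAccessMillis > 0) {
            builder = builder.expireAfterAccess(expireAfterAccessMillis, TimeUnit.MILLISECONDS);
        }
        return builder.build();
    }
}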
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
reaper.close();
|
||||
cache.invalidateAll();
|
||||
}
|
||||
|
||||
public void clear(IndexShard shard) {
|
||||
if (shard == null) {
|
||||
return;
|
||||
}
|
||||
keysToClean.add(new CleanupKey(shard, -1));
|
||||
logger.trace("{} explicit cache clear", shard.shardId());
|
||||
reaper.reap();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onRemoval(RemovalNotification<Key, Value> notification) {
|
||||
notification.getKey().shard.requestCache().onRemoval(notification);
|
||||
}
|
||||
|
||||
/**
|
||||
* Can the shard request be cached at all?
|
||||
*/
|
||||
public boolean canCache(ShardSearchRequest request, SearchContext context) {
|
||||
if (request.template() != null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// for now, only enable it for requests with no hits
|
||||
if (context.size() != 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// We cannot cache with DFS because results depend not only on the content of the index but also
|
||||
// on the overridden statistics. So if you ran two queries on the same index with different stats
|
||||
// (because another shard was updated) you would get wrong results because of the scores
|
||||
// (think about top_hits aggs or scripts using the score)
|
||||
if (!CACHEABLE_SEARCH_TYPES.contains(context.searchType())) {
|
||||
return false;
|
||||
}
|
||||
IndexSettings settings = context.indexShard().getIndexSettings();
|
||||
// if the request does not set the cache flag explicitly, fall back to the index setting; otherwise honor the request
|
||||
if (request.requestCache() == null) {
|
||||
if (settings.getValue(INDEX_CACHE_REQUEST_ENABLED_SETTING) == false) {
|
||||
return false;
|
||||
}
|
||||
} else if (request.requestCache() == false) {
|
||||
return false;
|
||||
}
|
||||
// if the reader is not a directory reader, we can't get the version from it
|
||||
if ((context.searcher().getIndexReader() instanceof DirectoryReader) == false) {
|
||||
return false;
|
||||
}
|
||||
// if "now" in millis is used (or, in the future, a more generic "isDeterministic" flag)
|
||||
// then we can't cache based on "now" key within the search request, as it is not deterministic
|
||||
if (context.nowInMillisUsed()) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads the cache result, computing it if needed by executing the query phase and otherwise deserializing the cached
|
||||
* value into the {@link SearchContext#queryResult() context's query result}. The combination of load + compute allows
|
||||
* to have a single load operation that will cause other requests with the same key to wait until it is loaded and then reuse
|
||||
* the same cached value.
|
||||
*/
|
||||
public void loadIntoContext(final ShardSearchRequest request, final SearchContext context, final QueryPhase queryPhase) throws Exception {
|
||||
assert canCache(request, context);
|
||||
Key key = buildKey(request, context);
|
||||
Loader loader = new Loader(queryPhase, context);
|
||||
Value value = cache.computeIfAbsent(key, loader);
|
||||
if (loader.isLoaded()) {
|
||||
key.shard.requestCache().onMiss();
|
||||
// see if it is the first time we see this reader, and make sure to register a cleanup key
|
||||
CleanupKey cleanupKey = new CleanupKey(context.indexShard(), ((DirectoryReader) context.searcher().getIndexReader()).getVersion());
|
||||
if (!registeredClosedListeners.containsKey(cleanupKey)) {
|
||||
Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE);
|
||||
if (previous == null) {
|
||||
ElasticsearchDirectoryReader.addReaderCloseListener(context.searcher().getDirectoryReader(), cleanupKey);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
key.shard.requestCache().onHit();
|
||||
// restore the cached query result into the context
|
||||
final QuerySearchResult result = context.queryResult();
|
||||
result.readFromWithId(context.id(), value.reference.streamInput());
|
||||
result.shardTarget(context.shardTarget());
|
||||
}
|
||||
}
|
||||
|
||||
private static class Loader implements CacheLoader<Key, Value> {
|
||||
|
||||
private final QueryPhase queryPhase;
|
||||
private final SearchContext context;
|
||||
private boolean loaded;
|
||||
|
||||
Loader(QueryPhase queryPhase, SearchContext context) {
|
||||
this.queryPhase = queryPhase;
|
||||
this.context = context;
|
||||
}
|
||||
|
||||
public boolean isLoaded() {
|
||||
return this.loaded;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Value load(Key key) throws Exception {
|
||||
queryPhase.execute(context);
|
||||
|
||||
/* BytesStreamOutput allows to pass the expected size but by default uses
|
||||
* BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result, i.e.
|
||||
* a date histogram with 3 buckets, is ~100 bytes, so 16k might be very wasteful
|
||||
* since we don't shrink to the actual size once we are done serializing.
|
||||
* By passing 512 as the expected size we will resize the byte array in the stream
|
||||
* slowly until we hit the page size and don't waste too much memory for small query
|
||||
* results.*/
|
||||
final int expectedSizeInBytes = 512;
|
||||
try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) {
|
||||
context.queryResult().writeToNoId(out);
|
||||
// for now, keep the paged data structure, which might have unused bytes to fill a page, but better to keep
|
||||
// the memory properly paged instead of having varied sized bytes
|
||||
final BytesReference reference = out.bytes();
|
||||
loaded = true;
|
||||
Value value = new Value(reference, out.ramBytesUsed());
|
||||
key.shard.requestCache().onCached(key, value);
|
||||
return value;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static class Value implements Accountable {
|
||||
final BytesReference reference;
|
||||
final long ramBytesUsed;
|
||||
|
||||
public Value(BytesReference reference, long ramBytesUsed) {
|
||||
this.reference = reference;
|
||||
this.ramBytesUsed = ramBytesUsed;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long ramBytesUsed() {
|
||||
return ramBytesUsed;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<Accountable> getChildResources() {
|
||||
return Collections.emptyList();
|
||||
}
|
||||
}
|
||||
|
||||
public static class Key implements Accountable {
|
||||
public final IndexShard shard; // use as identity equality
|
||||
public final long readerVersion; // use the reader version so we do not keep a reference to a "short" lived reader until it is reaped
|
||||
public final BytesReference value;
|
||||
|
||||
Key(IndexShard shard, long readerVersion, BytesReference value) {
|
||||
this.shard = shard;
|
||||
this.readerVersion = readerVersion;
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long ramBytesUsed() {
|
||||
return RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_LONG + value.length();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<Accountable> getChildResources() {
|
||||
// TODO: more detailed ram usage?
|
||||
return Collections.emptyList();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
Key key = (Key) o;
|
||||
if (readerVersion != key.readerVersion) return false;
|
||||
if (!shard.equals(key.shard)) return false;
|
||||
if (!value.equals(key.value)) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = shard.hashCode();
|
||||
result = 31 * result + Long.hashCode(readerVersion);
|
||||
result = 31 * result + value.hashCode();
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
private class CleanupKey implements IndexReader.ReaderClosedListener {
|
||||
IndexShard indexShard;
|
||||
long readerVersion; // use the reader version so we do not keep a reference to a "short" lived reader until it is reaped
|
||||
|
||||
private CleanupKey(IndexShard indexShard, long readerVersion) {
|
||||
this.indexShard = indexShard;
|
||||
this.readerVersion = readerVersion;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onClose(IndexReader reader) {
|
||||
Boolean remove = registeredClosedListeners.remove(this);
|
||||
if (remove != null) {
|
||||
keysToClean.add(this);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
CleanupKey that = (CleanupKey) o;
|
||||
if (readerVersion != that.readerVersion) return false;
|
||||
if (!indexShard.equals(that.indexShard)) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = indexShard.hashCode();
|
||||
result = 31 * result + Long.hashCode(readerVersion);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
private class Reaper implements Runnable {
|
||||
|
||||
private final ObjectSet<CleanupKey> currentKeysToClean = new ObjectHashSet<>();
|
||||
private final ObjectSet<IndexShard> currentFullClean = new ObjectHashSet<>();
|
||||
|
||||
private volatile boolean closed;
|
||||
|
||||
void close() {
|
||||
closed = true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
if (closed) {
|
||||
return;
|
||||
}
|
||||
if (keysToClean.isEmpty()) {
|
||||
schedule();
|
||||
return;
|
||||
}
|
||||
try {
|
||||
threadPool.executor(ThreadPool.Names.GENERIC).execute(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
reap();
|
||||
schedule();
|
||||
}
|
||||
});
|
||||
} catch (EsRejectedExecutionException ex) {
|
||||
logger.debug("Can not run ReaderCleaner - execution rejected", ex);
|
||||
}
|
||||
}
|
||||
|
||||
private void schedule() {
|
||||
try {
|
||||
threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, this);
|
||||
} catch (EsRejectedExecutionException ex) {
|
||||
logger.debug("Can not schedule ReaderCleaner - execution rejected", ex);
|
||||
}
|
||||
}
|
||||
|
||||
synchronized void reap() {
|
||||
currentKeysToClean.clear();
|
||||
currentFullClean.clear();
|
||||
for (Iterator<CleanupKey> iterator = keysToClean.iterator(); iterator.hasNext(); ) {
|
||||
CleanupKey cleanupKey = iterator.next();
|
||||
iterator.remove();
|
||||
if (cleanupKey.readerVersion == -1 || cleanupKey.indexShard.state() == IndexShardState.CLOSED) {
|
||||
// -1 indicates full cleanup, as does a closed shard
|
||||
currentFullClean.add(cleanupKey.indexShard);
|
||||
} else {
|
||||
currentKeysToClean.add(cleanupKey);
|
||||
}
|
||||
}
|
||||
|
||||
if (!currentKeysToClean.isEmpty() || !currentFullClean.isEmpty()) {
|
||||
CleanupKey lookupKey = new CleanupKey(null, -1);
|
||||
for (Iterator<Key> iterator = cache.keys().iterator(); iterator.hasNext(); ) {
|
||||
Key key = iterator.next();
|
||||
if (currentFullClean.contains(key.shard)) {
|
||||
iterator.remove();
|
||||
} else {
|
||||
lookupKey.indexShard = key.shard;
|
||||
lookupKey.readerVersion = key.readerVersion;
|
||||
if (currentKeysToClean.contains(lookupKey)) {
|
||||
iterator.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cache.refresh();
|
||||
currentKeysToClean.clear();
|
||||
currentFullClean.clear();
|
||||
}
|
||||
}
|
||||
|
||||
private static Key buildKey(ShardSearchRequest request, SearchContext context) throws Exception {
|
||||
// TODO: for now, this will create different keys for different JSON order
|
||||
// TODO: tricky to get around this, need to parse and order all, which can be expensive
|
||||
return new Key(context.indexShard(),
|
||||
((DirectoryReader) context.searcher().getIndexReader()).getVersion(),
|
||||
request.cacheKey());
|
||||
}
|
||||
}
|
|
@ -63,7 +63,7 @@ import org.elasticsearch.indices.flush.SyncedFlushService;
|
|||
import org.elasticsearch.indices.recovery.RecoveryFailedException;
|
||||
import org.elasticsearch.indices.recovery.RecoverySource;
|
||||
import org.elasticsearch.indices.recovery.RecoveryState;
|
||||
import org.elasticsearch.indices.recovery.RecoveryTarget;
|
||||
import org.elasticsearch.indices.recovery.RecoveryTargetService;
|
||||
import org.elasticsearch.repositories.RepositoriesService;
|
||||
import org.elasticsearch.search.SearchService;
|
||||
import org.elasticsearch.snapshots.RestoreService;
|
||||
|
@ -83,7 +83,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
private final IndicesService indicesService;
|
||||
private final ClusterService clusterService;
|
||||
private final ThreadPool threadPool;
|
||||
private final RecoveryTarget recoveryTarget;
|
||||
private final RecoveryTargetService recoveryTargetService;
|
||||
private final ShardStateAction shardStateAction;
|
||||
private final NodeIndexDeletedAction nodeIndexDeletedAction;
|
||||
private final NodeMappingRefreshAction nodeMappingRefreshAction;
|
||||
|
@ -105,7 +105,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
|
||||
@Inject
|
||||
public IndicesClusterStateService(Settings settings, IndicesService indicesService, ClusterService clusterService,
|
||||
ThreadPool threadPool, RecoveryTarget recoveryTarget,
|
||||
ThreadPool threadPool, RecoveryTargetService recoveryTargetService,
|
||||
ShardStateAction shardStateAction,
|
||||
NodeIndexDeletedAction nodeIndexDeletedAction,
|
||||
NodeMappingRefreshAction nodeMappingRefreshAction,
|
||||
|
@ -113,11 +113,11 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
SearchService searchService, SyncedFlushService syncedFlushService,
|
||||
RecoverySource recoverySource, NodeServicesProvider nodeServicesProvider) {
|
||||
super(settings);
|
||||
this.buildInIndexListener = Arrays.asList(recoverySource, recoveryTarget, searchService, syncedFlushService);
|
||||
this.buildInIndexListener = Arrays.asList(recoverySource, recoveryTargetService, searchService, syncedFlushService);
|
||||
this.indicesService = indicesService;
|
||||
this.clusterService = clusterService;
|
||||
this.threadPool = threadPool;
|
||||
this.recoveryTarget = recoveryTarget;
|
||||
this.recoveryTargetService = recoveryTargetService;
|
||||
this.shardStateAction = shardStateAction;
|
||||
this.nodeIndexDeletedAction = nodeIndexDeletedAction;
|
||||
this.nodeMappingRefreshAction = nodeMappingRefreshAction;
|
||||
|
@ -466,7 +466,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
} else if (isPeerRecovery(shardRouting)) {
|
||||
final DiscoveryNode sourceNode = findSourceNodeForPeerRecovery(routingTable, nodes, shardRouting);
|
||||
// check if there is an existing recovery going, and if so, and the source node is not the same, cancel the recovery to restart it
|
||||
if (recoveryTarget.cancelRecoveriesForShard(indexShard.shardId(), "recovery source node changed", status -> !status.sourceNode().equals(sourceNode))) {
|
||||
if (recoveryTargetService.cancelRecoveriesForShard(indexShard.shardId(), "recovery source node changed", status -> !status.sourceNode().equals(sourceNode))) {
|
||||
logger.debug("[{}][{}] removing shard (recovery source changed), current [{}], global [{}])", shardRouting.index(), shardRouting.id(), currentRoutingEntry, shardRouting);
|
||||
// closing the shard will also cancel any ongoing recovery.
|
||||
indexService.removeShard(shardRouting.id(), "removing shard (recovery source node changed)");
|
||||
|
@ -609,7 +609,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
RecoveryState.Type type = shardRouting.primary() ? RecoveryState.Type.PRIMARY_RELOCATION : RecoveryState.Type.REPLICA;
|
||||
RecoveryState recoveryState = new RecoveryState(indexShard.shardId(), shardRouting.primary(), type, sourceNode, nodes.localNode());
|
||||
indexShard.markAsRecovering("from " + sourceNode, recoveryState);
|
||||
recoveryTarget.startRecovery(indexShard, type, sourceNode, new PeerRecoveryListener(shardRouting, indexService, indexMetaData));
|
||||
recoveryTargetService.startRecovery(indexShard, type, sourceNode, new PeerRecoveryListener(shardRouting, indexService, indexMetaData));
|
||||
} catch (Throwable e) {
|
||||
indexShard.failShard("corrupted preexisting index", e);
|
||||
handleRecoveryFailure(indexService, shardRouting, true, e);
|
||||
|
@ -698,7 +698,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
return !shardRouting.primary() || shardRouting.relocatingNodeId() != null;
|
||||
}
|
||||
|
||||
private class PeerRecoveryListener implements RecoveryTarget.RecoveryListener {
|
||||
private class PeerRecoveryListener implements RecoveryTargetService.RecoveryListener {
|
||||
|
||||
private final ShardRouting shardRouting;
|
||||
private final IndexService indexService;
|
||||
|
|
|
@ -37,13 +37,13 @@ import java.util.function.Predicate;
|
|||
/**
|
||||
* This class holds a collection of all ongoing recoveries on the current node (i.e., the node is the target node
|
||||
* of those recoveries). The class is used to guarantee concurrent semantics such that once a recovery is done/cancelled/failed,
|
||||
* no other thread will be able to find it. Last, the {@link StatusRef} inner class verifies that recovery temporary files
|
||||
* no other thread will be able to find it. Last, the {@link RecoveryRef} inner class verifies that recovery temporary files
|
||||
* and store will only be cleared once ongoing usage is finished.
|
||||
*/
|
||||
public class RecoveriesCollection {
|
||||
|
||||
/** This is the single source of truth for ongoing recoveries. If it's not here, it was canceled or done */
|
||||
private final ConcurrentMap<Long, RecoveryStatus> onGoingRecoveries = ConcurrentCollections.newConcurrentMap();
|
||||
private final ConcurrentMap<Long, RecoveryTarget> onGoingRecoveries = ConcurrentCollections.newConcurrentMap();
|
||||
|
||||
final private ESLogger logger;
|
||||
final private ThreadPool threadPool;
|
||||
|
@ -59,9 +59,9 @@ public class RecoveriesCollection {
|
|||
* @return the id of the new recovery.
|
||||
*/
|
||||
public long startRecovery(IndexShard indexShard, DiscoveryNode sourceNode,
|
||||
RecoveryTarget.RecoveryListener listener, TimeValue activityTimeout) {
|
||||
RecoveryStatus status = new RecoveryStatus(indexShard, sourceNode, listener);
|
||||
RecoveryStatus existingStatus = onGoingRecoveries.putIfAbsent(status.recoveryId(), status);
|
||||
RecoveryTargetService.RecoveryListener listener, TimeValue activityTimeout) {
|
||||
RecoveryTarget status = new RecoveryTarget(indexShard, sourceNode, listener);
|
||||
RecoveryTarget existingStatus = onGoingRecoveries.putIfAbsent(status.recoveryId(), status);
|
||||
assert existingStatus == null : "found two RecoveryStatus instances with the same id";
|
||||
logger.trace("{} started recovery from {}, id [{}]", indexShard.shardId(), sourceNode, status.recoveryId());
|
||||
threadPool.schedule(activityTimeout, ThreadPool.Names.GENERIC,
|
||||
|
@ -70,33 +70,33 @@ public class RecoveriesCollection {
|
|||
}
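// --- Editor's sketch (not part of the diff): the "single source of truth" semantics of the
// --- onGoingRecoveries map used by startRecovery() above, with plain JDK types: putIfAbsent on start,
// --- remove on done/cancel/fail, so a finished recovery can never be found again by another thread.
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

final class SketchOngoingRecoveries {
    private final ConcurrentMap<Long, String> onGoing = new ConcurrentHashMap<>();
    private final AtomicLong ids = new AtomicLong();

    long start(String description) {
        long id = ids.incrementAndGet();
        String existing = onGoing.putIfAbsent(id, description);
        assert existing == null : "two recoveries with the same id";
        return id;
    }

    boolean finish(long id) {
        return onGoing.remove(id) != null; // whoever removes it first "owns" the completion
    }
}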
|
||||
|
||||
/**
|
||||
* gets the {@link RecoveryStatus } for a given id. The RecoveryStatus returned has it's ref count already incremented
|
||||
* to make sure it's safe to use. However, you must call {@link RecoveryStatus#decRef()} when you are done with it, typically
|
||||
* gets the {@link RecoveryTarget} for a given id. The RecoveryTarget returned has its ref count already incremented
|
||||
* to make sure it's safe to use. However, you must call {@link RecoveryTarget#decRef()} when you are done with it, typically
|
||||
* by using this method in a try-with-resources clause.
|
||||
* <p>
|
||||
* Returns null if recovery is not found
|
||||
*/
|
||||
public StatusRef getStatus(long id) {
|
||||
RecoveryStatus status = onGoingRecoveries.get(id);
|
||||
public RecoveryRef getRecovery(long id) {
|
||||
RecoveryTarget status = onGoingRecoveries.get(id);
|
||||
if (status != null && status.tryIncRef()) {
|
||||
return new StatusRef(status);
|
||||
return new RecoveryRef(status);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/** Similar to {@link #getStatus(long)} but throws an exception if no recovery is found */
|
||||
public StatusRef getStatusSafe(long id, ShardId shardId) {
|
||||
StatusRef statusRef = getStatus(id);
|
||||
if (statusRef == null) {
|
||||
/** Similar to {@link #getRecovery(long)} but throws an exception if no recovery is found */
|
||||
public RecoveryRef getRecoverySafe(long id, ShardId shardId) {
|
||||
RecoveryRef recoveryRef = getRecovery(id);
|
||||
if (recoveryRef == null) {
|
||||
throw new IndexShardClosedException(shardId);
|
||||
}
|
||||
assert statusRef.status().shardId().equals(shardId);
|
||||
return statusRef;
|
||||
assert recoveryRef.status().shardId().equals(shardId);
|
||||
return recoveryRef;
|
||||
}
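// --- Editor's sketch (not part of the diff): the ref-count + try-with-resources pattern described above,
// --- with plain JDK types. tryIncRef() only succeeds while the resource is live, and the AutoCloseable
// --- reference releases exactly once, so cleanup happens only after the last holder lets go.
// --- usage: try (SketchRefCounted.Ref ref = resource.acquire()) { /* work with the resource */ }
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

final class SketchRefCounted {
    private final AtomicInteger refs = new AtomicInteger(1);  // the owner's initial reference

    boolean tryIncRef() {
        while (true) {
            int current = refs.get();
            if (current == 0) {
                return false;                                 // already fully released: unsafe to use
            }
            if (refs.compareAndSet(current, current + 1)) {
                return true;
            }
        }
    }

    void decRef() {
        if (refs.decrementAndGet() == 0) {
            System.out.println("freeing underlying resources");
        }
    }

    Ref acquire() {
        return tryIncRef() ? new Ref() : null;                // null means the resource is already gone
    }

    final class Ref implements AutoCloseable {
        private final AtomicBoolean closed = new AtomicBoolean(false);

        @Override
        public void close() {
            if (closed.compareAndSet(false, true)) {
                decRef();                                     // release exactly once per reference
            }
        }
    }
}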
|
||||
|
||||
/** cancel the recovery with the given id (if found) and remove it from the recovery collection */
|
||||
public boolean cancelRecovery(long id, String reason) {
|
||||
RecoveryStatus removed = onGoingRecoveries.remove(id);
|
||||
RecoveryTarget removed = onGoingRecoveries.remove(id);
|
||||
boolean cancelled = false;
|
||||
if (removed != null) {
|
||||
logger.trace("{} canceled recovery from {}, id [{}] (reason [{}])",
|
||||
|
@ -115,7 +115,7 @@ public class RecoveriesCollection {
|
|||
* @param sendShardFailure true a shard failed message should be sent to the master
|
||||
*/
|
||||
public void failRecovery(long id, RecoveryFailedException e, boolean sendShardFailure) {
|
||||
RecoveryStatus removed = onGoingRecoveries.remove(id);
|
||||
RecoveryTarget removed = onGoingRecoveries.remove(id);
|
||||
if (removed != null) {
|
||||
logger.trace("{} failing recovery from {}, id [{}]. Send shard failure: [{}]", removed.shardId(), removed.sourceNode(), removed.recoveryId(), sendShardFailure);
|
||||
removed.fail(e, sendShardFailure);
|
||||
|
@ -124,7 +124,7 @@ public class RecoveriesCollection {
|
|||
|
||||
/** mark the recovery with the given id as done (if found) */
|
||||
public void markRecoveryAsDone(long id) {
|
||||
RecoveryStatus removed = onGoingRecoveries.remove(id);
|
||||
RecoveryTarget removed = onGoingRecoveries.remove(id);
|
||||
if (removed != null) {
|
||||
logger.trace("{} marking recovery from {} as done, id [{}]", removed.shardId(), removed.sourceNode(), removed.recoveryId());
|
||||
removed.markAsDone();
|
||||
|
@ -151,9 +151,9 @@ public class RecoveriesCollection {
|
|||
* already issued outstanding references.
|
||||
* @return true if a recovery was cancelled
|
||||
*/
|
||||
public boolean cancelRecoveriesForShard(ShardId shardId, String reason, Predicate<RecoveryStatus> shouldCancel) {
|
||||
public boolean cancelRecoveriesForShard(ShardId shardId, String reason, Predicate<RecoveryTarget> shouldCancel) {
|
||||
boolean cancelled = false;
|
||||
for (RecoveryStatus status : onGoingRecoveries.values()) {
|
||||
for (RecoveryTarget status : onGoingRecoveries.values()) {
|
||||
if (status.shardId().equals(shardId)) {
|
||||
boolean cancel = false;
|
||||
// if we can't increment the status, the recovery is not there any more.
|
||||
|
@ -174,20 +174,20 @@ public class RecoveriesCollection {
|
|||
|
||||
|
||||
/**
|
||||
* a reference to {@link RecoveryStatus}, which implements {@link AutoCloseable}. closing the reference
|
||||
* causes {@link RecoveryStatus#decRef()} to be called. This makes sure that the underlying resources
|
||||
* will not be freed until {@link RecoveriesCollection.StatusRef#close()} is called.
|
||||
* a reference to {@link RecoveryTarget}, which implements {@link AutoCloseable}. Closing the reference
|
||||
* causes {@link RecoveryTarget#decRef()} to be called. This makes sure that the underlying resources
|
||||
* will not be freed until {@link RecoveryRef#close()} is called.
|
||||
*/
|
||||
public static class StatusRef implements AutoCloseable {
|
||||
public static class RecoveryRef implements AutoCloseable {
|
||||
|
||||
private final RecoveryStatus status;
|
||||
private final RecoveryTarget status;
|
||||
private final AtomicBoolean closed = new AtomicBoolean(false);
|
||||
|
||||
/**
|
||||
* Important: {@link org.elasticsearch.indices.recovery.RecoveryStatus#tryIncRef()} should
|
||||
* Important: {@link RecoveryTarget#tryIncRef()} should
|
||||
* be *successfully* called on status before
|
||||
*/
|
||||
public StatusRef(RecoveryStatus status) {
|
||||
public RecoveryRef(RecoveryTarget status) {
|
||||
this.status = status;
|
||||
this.status.setLastAccessTime();
|
||||
}
|
||||
|
@ -199,7 +199,7 @@ public class RecoveriesCollection {
|
|||
}
|
||||
}
|
||||
|
||||
public RecoveryStatus status() {
|
||||
public RecoveryTarget status() {
|
||||
return status;
|
||||
}
|
||||
}
|
||||
|
@ -223,7 +223,7 @@ public class RecoveriesCollection {
|
|||
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
RecoveryStatus status = onGoingRecoveries.get(recoveryId);
|
||||
RecoveryTarget status = onGoingRecoveries.get(recoveryId);
|
||||
if (status == null) {
|
||||
logger.trace("[monitor] no status found for [{}], shutting down", recoveryId);
|
||||
return;
|
||||
|
|
|
@ -120,10 +120,13 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
|
|||
|
||||
logger.trace("[{}][{}] starting recovery to {}", request.shardId().getIndex().getName(), request.shardId().id(), request.targetNode());
|
||||
final RecoverySourceHandler handler;
|
||||
final RemoteRecoveryTargetHandler recoveryTarget =
|
||||
new RemoteRecoveryTargetHandler(request.recoveryId(), request.shardId(), transportService, request.targetNode(),
|
||||
recoverySettings, throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime));
|
||||
if (shard.indexSettings().isOnSharedFilesystem()) {
|
||||
handler = new SharedFSRecoverySourceHandler(shard, request, recoverySettings, transportService, logger);
|
||||
handler = new SharedFSRecoverySourceHandler(shard, recoveryTarget, request, logger);
|
||||
} else {
|
||||
handler = new RecoverySourceHandler(shard, request, recoverySettings, transportService, logger);
|
||||
handler = new RecoverySourceHandler(shard, recoveryTarget, request, recoverySettings.getChunkSize().bytesAsInt(), logger);
|
||||
}
|
||||
ongoingRecoveries.add(shard, handler);
|
||||
try {
|
||||
|
|
|
@ -38,7 +38,6 @@ import org.elasticsearch.common.logging.ESLogger;
|
|||
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.util.CancellableThreads;
|
||||
import org.elasticsearch.common.util.CancellableThreads.Interruptable;
|
||||
import org.elasticsearch.index.engine.RecoveryEngineException;
|
||||
import org.elasticsearch.index.shard.IllegalIndexShardStateException;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
|
@ -47,18 +46,13 @@ import org.elasticsearch.index.shard.IndexShardState;
|
|||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.index.store.StoreFileMetaData;
|
||||
import org.elasticsearch.index.translog.Translog;
|
||||
import org.elasticsearch.transport.EmptyTransportResponseHandler;
|
||||
import org.elasticsearch.transport.RemoteTransportException;
|
||||
import org.elasticsearch.transport.TransportRequestOptions;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.BufferedOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.StreamSupport;
|
||||
|
||||
|
@ -82,9 +76,8 @@ public class RecoverySourceHandler {
|
|||
private final int shardId;
|
||||
// Request containing source and target node information
|
||||
private final StartRecoveryRequest request;
|
||||
private final RecoverySettings recoverySettings;
|
||||
private final TransportService transportService;
|
||||
private final int chunkSizeInBytes;
|
||||
private final RecoveryTargetHandler recoveryTarget;
|
||||
|
||||
protected final RecoveryResponse response;
|
||||
|
||||
|
@ -104,16 +97,17 @@ public class RecoverySourceHandler {
|
|||
}
|
||||
};
|
||||
|
||||
public RecoverySourceHandler(final IndexShard shard, final StartRecoveryRequest request, final RecoverySettings recoverySettings,
|
||||
final TransportService transportService, final ESLogger logger) {
|
||||
public RecoverySourceHandler(final IndexShard shard, RecoveryTargetHandler recoveryTarget,
|
||||
final StartRecoveryRequest request,
|
||||
final int fileChunkSizeInBytes,
|
||||
final ESLogger logger) {
|
||||
this.shard = shard;
|
||||
this.recoveryTarget = recoveryTarget;
|
||||
this.request = request;
|
||||
this.recoverySettings = recoverySettings;
|
||||
this.logger = logger;
|
||||
this.transportService = transportService;
|
||||
this.indexName = this.request.shardId().getIndex().getName();
|
||||
this.shardId = this.request.shardId().id();
|
||||
this.chunkSizeInBytes = recoverySettings.getChunkSize().bytesAsInt();
|
||||
this.chunkSizeInBytes = fileChunkSizeInBytes;
|
||||
this.response = new RecoveryResponse();
|
||||
}
|
||||
|
||||
|
@ -200,11 +194,14 @@ public class RecoverySourceHandler {
|
|||
final long numDocsTarget = request.metadataSnapshot().getNumDocs();
|
||||
final long numDocsSource = recoverySourceMetadata.getNumDocs();
|
||||
if (numDocsTarget != numDocsSource) {
|
||||
throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number of docs differ: " + numDocsTarget + " (" + request.sourceNode().getName() + ", primary) vs " + numDocsSource + "(" + request.targetNode().getName() + ")");
|
||||
throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number " +
|
||||
"of docs differ: " + numDocsTarget + " (" + request.sourceNode().getName() + ", primary) vs " + numDocsSource
|
||||
+ "(" + request.targetNode().getName() + ")");
|
||||
}
|
||||
// we shortcut recovery here because we have nothing to copy. but we must still start the engine on the target.
|
||||
// so we don't return here
|
||||
logger.trace("[{}][{}] skipping [phase1] to {} - identical sync id [{}] found on both source and target", indexName, shardId,
|
||||
logger.trace("[{}][{}] skipping [phase1] to {} - identical sync id [{}] found on both source and target", indexName,
|
||||
shardId,
|
||||
request.targetNode(), recoverySourceSyncId);
|
||||
} else {
|
||||
final Store.RecoveryDiff diff = recoverySourceMetadata.recoveryDiff(request.metadataSnapshot());
|
||||
|
@ -213,7 +210,8 @@ public class RecoverySourceHandler {
|
|||
response.phase1ExistingFileSizes.add(md.length());
|
||||
existingTotalSize += md.length();
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]",
|
||||
logger.trace("[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}]," +
|
||||
" size [{}]",
|
||||
indexName, shardId, request.targetNode(), md.name(), md.checksum(), md.length());
|
||||
}
|
||||
totalSize += md.length();
|
||||
|
@ -223,7 +221,8 @@ public class RecoverySourceHandler {
|
|||
phase1Files.addAll(diff.missing);
|
||||
for (StoreFileMetaData md : phase1Files) {
|
||||
if (request.metadataSnapshot().asMap().containsKey(md.name())) {
|
||||
logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]",
|
||||
logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote " +
|
||||
"[{}], local [{}]",
|
||||
indexName, shardId, request.targetNode(), md.name(), request.metadataSnapshot().asMap().get(md.name()), md);
|
||||
} else {
|
||||
logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], does not exists in remote",
|
||||
|
@ -237,20 +236,16 @@ public class RecoverySourceHandler {
response.phase1TotalSize = totalSize;
response.phase1ExistingTotalSize = existingTotalSize;

logger.trace("[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]",
logger.trace("[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with " +
"total_size [{}]",
indexName, shardId, request.targetNode(), response.phase1FileNames.size(),
new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));
cancellableThreads.execute(() -> {
RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.recoveryId(), request.shardId(),
response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, response.phase1ExistingFileSizes,
translogView.totalOperations());
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest,
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(),
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
});
cancellableThreads.execute(() ->
recoveryTarget.receiveFileInfo(response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames,
response.phase1ExistingFileSizes, translogView.totalOperations()));
// How many bytes we've copied since we last called RateLimiter.pause
final AtomicLong bytesSinceLastPause = new AtomicLong();
final Function<StoreFileMetaData, OutputStream> outputStreamFactories = (md) -> new BufferedOutputStream(new RecoveryOutputStream(md, bytesSinceLastPause, translogView), chunkSizeInBytes);
final Function<StoreFileMetaData, OutputStream> outputStreamFactories =
md -> new BufferedOutputStream(new RecoveryOutputStream(md, translogView), chunkSizeInBytes);
sendFiles(store, phase1Files.toArray(new StoreFileMetaData[phase1Files.size()]), outputStreamFactories);
// Send the CLEAN_FILES request, which takes all of the files that
// were transferred and renames them from their temporary file
@ -261,23 +256,19 @@ public class RecoverySourceHandler {
// related to this recovery (out of date segments, for example)
// are deleted
try {
cancellableThreads.execute(() -> {
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES,
new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), recoverySourceMetadata, translogView.totalOperations()),
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(),
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
});
} catch (RemoteTransportException remoteException) {
cancellableThreads.executeIO(() -> recoveryTarget.cleanFiles(translogView.totalOperations(), recoverySourceMetadata));
} catch (RemoteTransportException | IOException targetException) {
final IOException corruptIndexException;
// we realized that after the index was copied and we wanted to finalize the recovery
// the index was corrupted:
// - maybe due to a broken segments file on an empty index (transferred with no checksum)
// - maybe due to old segments without checksums or length only checks
if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(remoteException)) != null) {
if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(targetException)) != null) {
try {
final Store.MetadataSnapshot recoverySourceMetadata1 = store.getMetadata(snapshot);
StoreFileMetaData[] metadata =
StreamSupport.stream(recoverySourceMetadata1.spliterator(), false).toArray(size -> new StoreFileMetaData[size]);
StreamSupport.stream(recoverySourceMetadata1.spliterator(), false).toArray(size -> new
StoreFileMetaData[size]);
ArrayUtil.timSort(metadata, (o1, o2) -> {
return Long.compare(o1.length(), o2.length()); // check small files first
});
@ -291,17 +282,18 @@ public class RecoverySourceHandler {
}
}
} catch (IOException ex) {
remoteException.addSuppressed(ex);
throw remoteException;
targetException.addSuppressed(ex);
throw targetException;
}
// corruption has happened on the way to replica
RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but checksums are ok", null);
exception.addSuppressed(remoteException);
RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " +
"checksums are ok", null);
exception.addSuppressed(targetException);
logger.warn("{} Remote file corruption during finalization on node {}, recovering {}. local checksum OK",
corruptIndexException, shard.shardId(), request.targetNode());
throw exception;
} else {
throw remoteException;
throw targetException;
}
}
}
@ -318,22 +310,14 @@ public class RecoverySourceHandler {
}


protected void prepareTargetForTranslog(final int totalTranslogOps) {
protected void prepareTargetForTranslog(final int totalTranslogOps) throws IOException {
StopWatch stopWatch = new StopWatch().start();
logger.trace("{} recovery [phase1] to {}: prepare remote engine for translog", request.shardId(), request.targetNode());
final long startEngineStart = stopWatch.totalTime().millis();
cancellableThreads.execute(new Interruptable() {
@Override
public void run() throws InterruptedException {
// Send a request preparing the new shard's translog to receive
// operations. This ensures the shard engine is started and disables
// garbage collection (not the JVM's GC!) of tombstone deletes
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG,
new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId(), totalTranslogOps),
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
}
});

// Send a request preparing the new shard's translog to receive
// operations. This ensures the shard engine is started and disables
// garbage collection (not the JVM's GC!) of tombstone deletes
cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(totalTranslogOps));
stopWatch.stop();

response.startTime = stopWatch.totalTime().millis() - startEngineStart;
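The recurring change in these hunks swaps a hand-rolled transport round trip for a call on a target abstraction. A sketch of the shape of that interface, with signatures inferred from the call sites visible above (the exact declarations are not shown in this diff):

// Sketch: the source handler now talks to an interface instead of issuing
// transport requests itself, so the same code path can be backed by the
// transport layer in production or by a local implementation in tests.
interface RecoveryTargetHandlerSketch {
    void prepareForTranslogOperations(int totalTranslogOps) throws IOException;
    void finalizeRecovery();
    void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps);
    void receiveFileInfo(List<String> phase1FileNames, List<Long> phase1FileSizes,
                         List<String> phase1ExistingFileNames, List<Long> phase1ExistingFileSizes,
                         int totalTranslogOps);
    void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException;
    void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesReference content,
                        boolean lastChunk, int totalTranslogOps) throws IOException;
}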
@ -378,20 +362,7 @@ public class RecoverySourceHandler {
logger.trace("[{}][{}] finalizing recovery to {}", indexName, shardId, request.targetNode());


cancellableThreads.execute(new Interruptable() {
@Override
public void run() throws InterruptedException {
// Send the FINALIZE request to the target node. The finalize request
// clears unreferenced translog files, refreshes the engine now that
// new segments are available, and enables garbage collection of
// tombstone files. The shard is also moved to the POST_RECOVERY phase
// during this time
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FINALIZE,
new RecoveryFinalizeRecoveryRequest(request.recoveryId(), request.shardId()),
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionLongTimeout()).build(),
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
}
});
cancellableThreads.execute(recoveryTarget::finalizeRecovery);

if (isPrimaryRelocation()) {
/**
@ -408,7 +379,7 @@ public class RecoverySourceHandler {
}
stopWatch.stop();
logger.trace("[{}][{}] finalizing recovery to {}: took [{}]",
indexName, shardId, request.targetNode(), stopWatch.totalTime());
indexName, shardId, request.targetNode(), stopWatch.totalTime());
}

protected boolean isPrimaryRelocation() {
@ -435,12 +406,6 @@ public class RecoverySourceHandler {
throw new ElasticsearchException("failed to get next operation from translog", ex);
}

final TransportRequestOptions recoveryOptions = TransportRequestOptions.builder()
.withCompress(true)
.withType(TransportRequestOptions.Type.RECOVERY)
.withTimeout(recoverySettings.internalActionLongTimeout())
.build();

if (operation == null) {
logger.trace("[{}][{}] no translog operations to send to {}",
indexName, shardId, request.targetNode());
@ -464,12 +429,7 @@ public class RecoverySourceHandler {
// index docs to replicas while the index files are recovered
// the lock can potentially be removed, in which case, it might
// make sense to re-enable throttling in this phase
cancellableThreads.execute(() -> {
final RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(
request.recoveryId(), request.shardId(), operations, snapshot.totalOperations());
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest,
recoveryOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
});
cancellableThreads.execute(() -> recoveryTarget.indexTranslogOperations(operations, snapshot.totalOperations()));
if (logger.isTraceEnabled()) {
logger.trace("[{}][{}] sent batch of [{}][{}] (total: [{}]) translog operations to {}",
indexName, shardId, ops, new ByteSizeValue(size),
@ -489,12 +449,7 @@ public class RecoverySourceHandler {
}
// send the leftover
if (!operations.isEmpty()) {
cancellableThreads.execute(() -> {
RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(
request.recoveryId(), request.shardId(), operations, snapshot.totalOperations());
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest,
recoveryOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
});
cancellableThreads.execute(() -> recoveryTarget.indexTranslogOperations(operations, snapshot.totalOperations()));

}
if (logger.isTraceEnabled()) {
@ -525,13 +480,11 @@ public class RecoverySourceHandler {

final class RecoveryOutputStream extends OutputStream {
private final StoreFileMetaData md;
private final AtomicLong bytesSinceLastPause;
private final Translog.View translogView;
private long position = 0;

RecoveryOutputStream(StoreFileMetaData md, AtomicLong bytesSinceLastPause, Translog.View translogView) {
RecoveryOutputStream(StoreFileMetaData md, Translog.View translogView) {
this.md = md;
this.bytesSinceLastPause = bytesSinceLastPause;
this.translogView = translogView;
}

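The remainder of this class is elided from the hunk; an illustrative sketch of the write path it implies, where the running position decides the offset of each forwarded chunk and whether it is the last one (not a verbatim copy of the commit):

@Override
public void write(byte[] b, int offset, int length) throws IOException {
    // forward one buffered chunk; position + length == md.length() marks the last chunk
    sendNextChunk(position, new BytesArray(b, offset, length), md.length() == position + length);
    position += length;
    assert md.length() >= position : "length: " + md.length() + " but position was: " + position;
}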
@ -548,43 +501,10 @@ public class RecoverySourceHandler {
}

private void sendNextChunk(long position, BytesArray content, boolean lastChunk) throws IOException {
final TransportRequestOptions chunkSendOptions = TransportRequestOptions.builder()
.withCompress(false) // lucene files are already compressed and therefore compressing this won't really help much so we are safing the cpu for other things
.withType(TransportRequestOptions.Type.RECOVERY)
.withTimeout(recoverySettings.internalActionTimeout())
.build();
cancellableThreads.execute(() -> {
// Pause using the rate limiter, if desired, to throttle the recovery
final long throttleTimeInNanos;
// always fetch the ratelimiter - it might be updated in real-time on the recovery settings
final RateLimiter rl = recoverySettings.rateLimiter();
if (rl != null) {
long bytes = bytesSinceLastPause.addAndGet(content.length());
if (bytes > rl.getMinPauseCheckBytes()) {
// Time to pause
bytesSinceLastPause.addAndGet(-bytes);
try {
throttleTimeInNanos = rl.pause(bytes);
shard.recoveryStats().addThrottleTime(throttleTimeInNanos);
} catch (IOException e) {
throw new ElasticsearchException("failed to pause recovery", e);
}
} else {
throttleTimeInNanos = 0;
}
} else {
throttleTimeInNanos = 0;
}
// Actually send the file chunk to the target node, waiting for it to complete
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILE_CHUNK,
new RecoveryFileChunkRequest(request.recoveryId(), request.shardId(), md, position, content, lastChunk,
translogView.totalOperations(),
/* we send totalOperations with every request since we collect stats on the target and that way we can
* see how many translog ops we accumulate while copying files across the network. A future optimization
* would be in to restart file copy again (new deltas) if we have too many translog ops are piling up.
*/
throttleTimeInNanos), chunkSendOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
});
// Actually send the file chunk to the target node, waiting for it to complete
cancellableThreads.executeIO(() ->
recoveryTarget.writeFileChunk(md, position, content, lastChunk, translogView.totalOperations())
);
if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us
throw new IndexShardClosedException(request.shardId());
}
@ -594,7 +514,7 @@ public class RecoverySourceHandler {
void sendFiles(Store store, StoreFileMetaData[] files, Function<StoreFileMetaData, OutputStream> outputStreamFactory) throws Throwable {
store.incRef();
try {
ArrayUtil.timSort(files, (a,b) -> Long.compare(a.length(), b.length())); // send smallest first
ArrayUtil.timSort(files, (a, b) -> Long.compare(a.length(), b.length())); // send smallest first
for (int i = 0; i < files.length; i++) {
final StoreFileMetaData md = files[i];
try (final IndexInput indexInput = store.directory().openInput(md.name(), IOContext.READONCE)) {
@ -609,10 +529,11 @@ public class RecoverySourceHandler {
failEngine(corruptIndexException);
throw corruptIndexException;
} else { // corruption has happened on the way to replica
RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but checksums are ok", null);
RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " +
"checksums are ok", null);
exception.addSuppressed(t);
logger.warn("{} Remote file corruption on node {}, recovering {}. local checksum OK",
corruptIndexException, shardId, request.targetNode(), md);
corruptIndexException, shardId, request.targetNode(), md);
throw exception;
}
} else {
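A condensed sketch of the decision visible in the two corruption hunks above; the helper method and its parameters are illustrative, not part of the commit:

// If the local copy verifies, the bytes were damaged in transit, so the failure
// is surfaced as a remote problem instead of failing the primary's engine.
void handleCorruption(CorruptIndexException corruption, boolean localChecksumOk, Throwable cause) throws IOException {
    if (localChecksumOk == false) {
        failEngine(corruption);          // source files are broken: fail the shard locally
        throw corruption;
    }
    RemoteTransportException wrapped =
        new RemoteTransportException("File corruption occurred on recovery but checksums are ok", null);
    wrapped.addSuppressed(cause);        // keep the original transport/IO failure attached
    throw wrapped;
}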
@ -1,288 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.indices.recovery;
|
||||
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.util.CancellableThreads;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.index.store.StoreFileMetaData;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
public class RecoveryStatus extends AbstractRefCounted {
|
||||
|
||||
private final ESLogger logger;
|
||||
|
||||
private final static AtomicLong idGenerator = new AtomicLong();
|
||||
|
||||
private final String RECOVERY_PREFIX = "recovery.";
|
||||
|
||||
private final ShardId shardId;
|
||||
private final long recoveryId;
|
||||
private final IndexShard indexShard;
|
||||
private final DiscoveryNode sourceNode;
|
||||
private final String tempFilePrefix;
|
||||
private final Store store;
|
||||
private final RecoveryTarget.RecoveryListener listener;
|
||||
|
||||
private final AtomicBoolean finished = new AtomicBoolean();
|
||||
|
||||
private final ConcurrentMap<String, IndexOutput> openIndexOutputs = ConcurrentCollections.newConcurrentMap();
|
||||
private final Store.LegacyChecksums legacyChecksums = new Store.LegacyChecksums();
|
||||
|
||||
private final CancellableThreads cancellableThreads = new CancellableThreads();
|
||||
|
||||
// last time this status was accessed
|
||||
private volatile long lastAccessTime = System.nanoTime();
|
||||
|
||||
public RecoveryStatus(IndexShard indexShard, DiscoveryNode sourceNode, RecoveryTarget.RecoveryListener listener) {
|
||||
|
||||
super("recovery_status");
|
||||
this.recoveryId = idGenerator.incrementAndGet();
|
||||
this.listener = listener;
|
||||
this.logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());
|
||||
this.indexShard = indexShard;
|
||||
this.sourceNode = sourceNode;
|
||||
this.shardId = indexShard.shardId();
|
||||
this.tempFilePrefix = RECOVERY_PREFIX + indexShard.recoveryState().getTimer().startTime() + ".";
|
||||
this.store = indexShard.store();
|
||||
// make sure the store is not released until we are done.
|
||||
store.incRef();
|
||||
indexShard.recoveryStats().incCurrentAsTarget();
|
||||
}
|
||||
|
||||
private final Map<String, String> tempFileNames = ConcurrentCollections.newConcurrentMap();
|
||||
|
||||
public long recoveryId() {
|
||||
return recoveryId;
|
||||
}
|
||||
|
||||
public ShardId shardId() {
|
||||
return shardId;
|
||||
}
|
||||
|
||||
public IndexShard indexShard() {
|
||||
ensureRefCount();
|
||||
return indexShard;
|
||||
}
|
||||
|
||||
public DiscoveryNode sourceNode() {
|
||||
return this.sourceNode;
|
||||
}
|
||||
|
||||
public RecoveryState state() {
|
||||
return indexShard.recoveryState();
|
||||
}
|
||||
|
||||
public CancellableThreads CancellableThreads() {
|
||||
return cancellableThreads;
|
||||
}
|
||||
|
||||
/** return the last time this RecoveryStatus was used (based on System.nanoTime() */
|
||||
public long lastAccessTime() {
|
||||
return lastAccessTime;
|
||||
}
|
||||
|
||||
/** sets the lasAccessTime flag to now */
|
||||
public void setLastAccessTime() {
|
||||
lastAccessTime = System.nanoTime();
|
||||
}
|
||||
|
||||
public Store store() {
|
||||
ensureRefCount();
|
||||
return store;
|
||||
}
|
||||
|
||||
public RecoveryState.Stage stage() {
|
||||
return state().getStage();
|
||||
}
|
||||
|
||||
public Store.LegacyChecksums legacyChecksums() {
|
||||
return legacyChecksums;
|
||||
}
|
||||
|
||||
/** renames all temporary files to their true name, potentially overriding existing files */
|
||||
public void renameAllTempFiles() throws IOException {
|
||||
ensureRefCount();
|
||||
store.renameTempFilesSafe(tempFileNames);
|
||||
}
|
||||
|
||||
/**
|
||||
* cancel the recovery. calling this method will clean temporary files and release the store
|
||||
* unless this object is in use (in which case it will be cleaned once all ongoing users call
|
||||
* {@link #decRef()}
|
||||
* <p>
|
||||
* if {@link #CancellableThreads()} was used, the threads will be interrupted.
|
||||
*/
|
||||
public void cancel(String reason) {
|
||||
if (finished.compareAndSet(false, true)) {
|
||||
try {
|
||||
logger.debug("recovery canceled (reason: [{}])", reason);
|
||||
cancellableThreads.cancel(reason);
|
||||
} finally {
|
||||
// release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now
|
||||
decRef();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* fail the recovery and call listener
|
||||
*
|
||||
* @param e exception that encapsulating the failure
|
||||
* @param sendShardFailure indicates whether to notify the master of the shard failure
|
||||
*/
|
||||
public void fail(RecoveryFailedException e, boolean sendShardFailure) {
|
||||
if (finished.compareAndSet(false, true)) {
|
||||
try {
|
||||
listener.onRecoveryFailure(state(), e, sendShardFailure);
|
||||
} finally {
|
||||
try {
|
||||
cancellableThreads.cancel("failed recovery [" + ExceptionsHelper.stackTrace(e) + "]");
|
||||
} finally {
|
||||
// release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now
|
||||
decRef();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** mark the current recovery as done */
|
||||
public void markAsDone() {
|
||||
if (finished.compareAndSet(false, true)) {
|
||||
assert tempFileNames.isEmpty() : "not all temporary files are renamed";
|
||||
try {
|
||||
// this might still throw an exception ie. if the shard is CLOSED due to some other event.
|
||||
// it's safer to decrement the reference in a try finally here.
|
||||
indexShard.postRecovery("peer recovery done");
|
||||
} finally {
|
||||
// release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now
|
||||
decRef();
|
||||
}
|
||||
listener.onRecoveryDone(state());
|
||||
}
|
||||
}
|
||||
|
||||
/** Get a temporary name for the provided file name. */
|
||||
public String getTempNameForFile(String origFile) {
|
||||
return tempFilePrefix + origFile;
|
||||
}
|
||||
|
||||
public IndexOutput getOpenIndexOutput(String key) {
|
||||
ensureRefCount();
|
||||
return openIndexOutputs.get(key);
|
||||
}
|
||||
|
||||
/** remove and {@link org.apache.lucene.store.IndexOutput} for a given file. It is the caller's responsibility to close it */
|
||||
public IndexOutput removeOpenIndexOutputs(String name) {
|
||||
ensureRefCount();
|
||||
return openIndexOutputs.remove(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an {@link org.apache.lucene.store.IndexOutput} for the given file name. Note that the
|
||||
* IndexOutput actually point at a temporary file.
|
||||
* <p>
|
||||
* Note: You can use {@link #getOpenIndexOutput(String)} with the same filename to retrieve the same IndexOutput
|
||||
* at a later stage
|
||||
*/
|
||||
public IndexOutput openAndPutIndexOutput(String fileName, StoreFileMetaData metaData, Store store) throws IOException {
|
||||
ensureRefCount();
|
||||
String tempFileName = getTempNameForFile(fileName);
|
||||
if (tempFileNames.containsKey(tempFileName)) {
|
||||
throw new IllegalStateException("output for file [" + fileName + "] has already been created");
|
||||
}
|
||||
// add first, before it's created
|
||||
tempFileNames.put(tempFileName, fileName);
|
||||
IndexOutput indexOutput = store.createVerifyingOutput(tempFileName, metaData, IOContext.DEFAULT);
|
||||
openIndexOutputs.put(fileName, indexOutput);
|
||||
return indexOutput;
|
||||
}
|
||||
|
||||
public void resetRecovery() throws IOException {
|
||||
cleanOpenFiles();
|
||||
indexShard().performRecoveryRestart();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void closeInternal() {
|
||||
try {
|
||||
cleanOpenFiles();
|
||||
} finally {
|
||||
// free store. increment happens in constructor
|
||||
store.decRef();
|
||||
indexShard.recoveryStats().decCurrentAsTarget();
|
||||
}
|
||||
}
|
||||
|
||||
protected void cleanOpenFiles() {
|
||||
// clean open index outputs
|
||||
Iterator<Entry<String, IndexOutput>> iterator = openIndexOutputs.entrySet().iterator();
|
||||
while (iterator.hasNext()) {
|
||||
Map.Entry<String, IndexOutput> entry = iterator.next();
|
||||
logger.trace("closing IndexOutput file [{}]", entry.getValue());
|
||||
try {
|
||||
entry.getValue().close();
|
||||
} catch (Throwable t) {
|
||||
logger.debug("error while closing recovery output [{}]", t, entry.getValue());
|
||||
}
|
||||
iterator.remove();
|
||||
}
|
||||
// trash temporary files
|
||||
for (String file : tempFileNames.keySet()) {
|
||||
logger.trace("cleaning temporary file [{}]", file);
|
||||
store.deleteQuiet(file);
|
||||
}
|
||||
legacyChecksums.clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return shardId + " [" + recoveryId + "]";
|
||||
}
|
||||
|
||||
private void ensureRefCount() {
|
||||
if (refCount() <= 0) {
|
||||
throw new ElasticsearchException("RecoveryStatus is used but it's refcount is 0. Probably a mismatch between incRef/decRef calls");
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
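Both the deleted RecoveryStatus above and the RecoveryTarget that replaces it below are AbstractRefCounted: the constructor takes the initial reference, markAsDone/fail/cancel release it, and closeInternal() only runs once every user has finished. A hedged sketch of the usage pattern this implies (the helper method is illustrative):

// Sketch of the ref-count discipline: every user brackets access with
// tryIncRef()/decRef(); once the count hits zero, closeInternal() cleans up
// open outputs, temp files and releases the store reference taken in the constructor.
void useRecoveryTarget(RecoveryTarget status) {
    if (status.tryIncRef() == false) {
        return; // recovery already finished; its store must not be touched
    }
    try {
        status.store().directory(); // any work that needs the underlying store
    } finally {
        status.decRef(); // cleanup runs here if this was the last reference
    }
}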
@ -22,505 +22,392 @@ package org.elasticsearch.indices.recovery;
|
|||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.apache.lucene.index.IndexFormatTooNewException;
|
||||
import org.apache.lucene.index.IndexFormatTooOldException;
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.lucene.store.RateLimiter;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ElasticsearchTimeoutException;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateObserver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.CancellableThreads;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.engine.RecoveryEngineException;
|
||||
import org.elasticsearch.index.mapper.MapperException;
|
||||
import org.elasticsearch.index.shard.IllegalIndexShardStateException;
|
||||
import org.elasticsearch.index.shard.IndexEventListener;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.IndexShardClosedException;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.shard.ShardNotFoundException;
|
||||
import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
|
||||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.ConnectTransportException;
|
||||
import org.elasticsearch.transport.FutureTransportResponseHandler;
|
||||
import org.elasticsearch.transport.TransportChannel;
|
||||
import org.elasticsearch.transport.TransportRequestHandler;
|
||||
import org.elasticsearch.transport.TransportResponse;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.elasticsearch.index.store.StoreFileMetaData;
|
||||
import org.elasticsearch.index.translog.Translog;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
|
||||
|
||||
/**
|
||||
* The recovery target handles recoveries of peer shards of the shard+node to recover to.
|
||||
* <p>
|
||||
* Note, it can be safely assumed that there will only be a single recovery per shard (index+id) and
|
||||
* not several of them (since we don't allocate several shard replicas to the same node).
|
||||
*
|
||||
*/
|
||||
public class RecoveryTarget extends AbstractComponent implements IndexEventListener {
|
||||
|
||||
public static class Actions {
|
||||
public static final String FILES_INFO = "internal:index/shard/recovery/filesInfo";
|
||||
public static final String FILE_CHUNK = "internal:index/shard/recovery/file_chunk";
|
||||
public static final String CLEAN_FILES = "internal:index/shard/recovery/clean_files";
|
||||
public static final String TRANSLOG_OPS = "internal:index/shard/recovery/translog_ops";
|
||||
public static final String PREPARE_TRANSLOG = "internal:index/shard/recovery/prepare_translog";
|
||||
public static final String FINALIZE = "internal:index/shard/recovery/finalize";
|
||||
|
||||
public class RecoveryTarget extends AbstractRefCounted implements RecoveryTargetHandler {
|
||||
|
||||
private final ESLogger logger;
|
||||
|
||||
private final static AtomicLong idGenerator = new AtomicLong();
|
||||
|
||||
private final String RECOVERY_PREFIX = "recovery.";
|
||||
|
||||
private final ShardId shardId;
|
||||
private final long recoveryId;
|
||||
private final IndexShard indexShard;
|
||||
private final DiscoveryNode sourceNode;
|
||||
private final String tempFilePrefix;
|
||||
private final Store store;
|
||||
private final RecoveryTargetService.RecoveryListener listener;
|
||||
|
||||
private final AtomicBoolean finished = new AtomicBoolean();
|
||||
|
||||
private final ConcurrentMap<String, IndexOutput> openIndexOutputs = ConcurrentCollections.newConcurrentMap();
|
||||
private final Store.LegacyChecksums legacyChecksums = new Store.LegacyChecksums();
|
||||
|
||||
private final CancellableThreads cancellableThreads = new CancellableThreads();
|
||||
|
||||
// last time this status was accessed
|
||||
private volatile long lastAccessTime = System.nanoTime();
|
||||
|
||||
private final Map<String, String> tempFileNames = ConcurrentCollections.newConcurrentMap();
|
||||
|
||||
public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, RecoveryTargetService.RecoveryListener listener) {
|
||||
|
||||
super("recovery_status");
|
||||
this.recoveryId = idGenerator.incrementAndGet();
|
||||
this.listener = listener;
|
||||
this.logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());
|
||||
this.indexShard = indexShard;
|
||||
this.sourceNode = sourceNode;
|
||||
this.shardId = indexShard.shardId();
|
||||
this.tempFilePrefix = RECOVERY_PREFIX + indexShard.recoveryState().getTimer().startTime() + ".";
|
||||
this.store = indexShard.store();
|
||||
indexShard.recoveryStats().incCurrentAsTarget();
|
||||
// make sure the store is not released until we are done.
|
||||
store.incRef();
|
||||
}
|
||||
|
||||
private final ThreadPool threadPool;
|
||||
|
||||
private final TransportService transportService;
|
||||
|
||||
private final RecoverySettings recoverySettings;
|
||||
private final ClusterService clusterService;
|
||||
|
||||
private final RecoveriesCollection onGoingRecoveries;
|
||||
|
||||
@Inject
|
||||
public RecoveryTarget(Settings settings, ThreadPool threadPool, TransportService transportService, RecoverySettings recoverySettings, ClusterService clusterService) {
|
||||
super(settings);
|
||||
this.threadPool = threadPool;
|
||||
this.transportService = transportService;
|
||||
this.recoverySettings = recoverySettings;
|
||||
this.clusterService = clusterService;
|
||||
this.onGoingRecoveries = new RecoveriesCollection(logger, threadPool);
|
||||
|
||||
transportService.registerRequestHandler(Actions.FILES_INFO, RecoveryFilesInfoRequest::new, ThreadPool.Names.GENERIC, new FilesInfoRequestHandler());
|
||||
transportService.registerRequestHandler(Actions.FILE_CHUNK, RecoveryFileChunkRequest::new, ThreadPool.Names.GENERIC, new FileChunkTransportRequestHandler());
|
||||
transportService.registerRequestHandler(Actions.CLEAN_FILES, RecoveryCleanFilesRequest::new, ThreadPool.Names.GENERIC, new CleanFilesRequestHandler());
|
||||
transportService.registerRequestHandler(Actions.PREPARE_TRANSLOG, RecoveryPrepareForTranslogOperationsRequest::new, ThreadPool.Names.GENERIC, new PrepareForTranslogOperationsRequestHandler());
|
||||
transportService.registerRequestHandler(Actions.TRANSLOG_OPS, RecoveryTranslogOperationsRequest::new, ThreadPool.Names.GENERIC, new TranslogOperationsRequestHandler());
|
||||
transportService.registerRequestHandler(Actions.FINALIZE, RecoveryFinalizeRecoveryRequest::new, ThreadPool.Names.GENERIC, new FinalizeRecoveryRequestHandler());
|
||||
public long recoveryId() {
|
||||
return recoveryId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) {
|
||||
if (indexShard != null) {
|
||||
onGoingRecoveries.cancelRecoveriesForShard(shardId, "shard closed");
|
||||
public ShardId shardId() {
|
||||
return shardId;
|
||||
}
|
||||
|
||||
public IndexShard indexShard() {
|
||||
ensureRefCount();
|
||||
return indexShard;
|
||||
}
|
||||
|
||||
public DiscoveryNode sourceNode() {
|
||||
return this.sourceNode;
|
||||
}
|
||||
|
||||
public RecoveryState state() {
|
||||
return indexShard.recoveryState();
|
||||
}
|
||||
|
||||
public CancellableThreads CancellableThreads() {
|
||||
return cancellableThreads;
|
||||
}
|
||||
|
||||
/** return the last time this RecoveryStatus was used (based on System.nanoTime()) */
|
||||
public long lastAccessTime() {
|
||||
return lastAccessTime;
|
||||
}
|
||||
|
||||
/** sets the lastAccessTime flag to now */
|
||||
public void setLastAccessTime() {
|
||||
lastAccessTime = System.nanoTime();
|
||||
}
|
||||
|
||||
public Store store() {
|
||||
ensureRefCount();
|
||||
return store;
|
||||
}
|
||||
|
||||
public RecoveryState.Stage stage() {
|
||||
return state().getStage();
|
||||
}
|
||||
|
||||
public Store.LegacyChecksums legacyChecksums() {
|
||||
return legacyChecksums;
|
||||
}
|
||||
|
||||
/** renames all temporary files to their true name, potentially overriding existing files */
|
||||
public void renameAllTempFiles() throws IOException {
|
||||
ensureRefCount();
|
||||
store.renameTempFilesSafe(tempFileNames);
|
||||
}
|
||||
|
||||
/**
|
||||
* cancel the recovery. calling this method will clean temporary files and release the store
|
||||
* unless this object is in use (in which case it will be cleaned once all ongoing users call
|
||||
* {@link #decRef()}
|
||||
* <p>
|
||||
* if {@link #CancellableThreads()} was used, the threads will be interrupted.
|
||||
*/
|
||||
public void cancel(String reason) {
|
||||
if (finished.compareAndSet(false, true)) {
|
||||
try {
|
||||
logger.debug("recovery canceled (reason: [{}])", reason);
|
||||
cancellableThreads.cancel(reason);
|
||||
} finally {
|
||||
// release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now
|
||||
decRef();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* cancel all ongoing recoveries for the given shard, if their status match a predicate
|
||||
* fail the recovery and call listener
|
||||
*
|
||||
* @param reason reason for cancellation
|
||||
* @param shardId shardId for which to cancel recoveries
|
||||
* @param shouldCancel a predicate to check if a recovery should be cancelled or not. Null means cancel without an extra check.
|
||||
* note that the recovery state can change after this check, but before it is being cancelled via other
|
||||
* already issued outstanding references.
|
||||
* @return true if a recovery was cancelled
|
||||
* @param e the exception that encapsulates the failure
|
||||
* @param sendShardFailure indicates whether to notify the master of the shard failure
|
||||
*/
|
||||
public boolean cancelRecoveriesForShard(ShardId shardId, String reason, @Nullable Predicate<RecoveryStatus> shouldCancel) {
|
||||
return onGoingRecoveries.cancelRecoveriesForShard(shardId, reason, shouldCancel);
|
||||
}
|
||||
|
||||
public void startRecovery(final IndexShard indexShard, final RecoveryState.Type recoveryType, final DiscoveryNode sourceNode, final RecoveryListener listener) {
|
||||
// create a new recovery status, and process...
|
||||
final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout());
|
||||
threadPool.generic().execute(new RecoveryRunner(recoveryId));
|
||||
}
|
||||
|
||||
protected void retryRecovery(final RecoveryStatus recoveryStatus, final Throwable reason, TimeValue retryAfter, final StartRecoveryRequest currentRequest) {
|
||||
logger.trace("will retry recovery with id [{}] in [{}]", reason, recoveryStatus.recoveryId(), retryAfter);
|
||||
retryRecovery(recoveryStatus, retryAfter, currentRequest);
|
||||
}
|
||||
|
||||
protected void retryRecovery(final RecoveryStatus recoveryStatus, final String reason, TimeValue retryAfter, final StartRecoveryRequest currentRequest) {
|
||||
logger.trace("will retry recovery with id [{}] in [{}] (reason [{}])", recoveryStatus.recoveryId(), retryAfter, reason);
|
||||
retryRecovery(recoveryStatus, retryAfter, currentRequest);
|
||||
}
|
||||
|
||||
private void retryRecovery(final RecoveryStatus recoveryStatus, TimeValue retryAfter, final StartRecoveryRequest currentRequest) {
|
||||
try {
|
||||
recoveryStatus.resetRecovery();
|
||||
} catch (Throwable e) {
|
||||
onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(currentRequest, e), true);
|
||||
}
|
||||
threadPool.schedule(retryAfter, ThreadPool.Names.GENERIC, new RecoveryRunner(recoveryStatus.recoveryId()));
|
||||
}
|
||||
|
||||
private void doRecovery(final RecoveryStatus recoveryStatus) {
|
||||
assert recoveryStatus.sourceNode() != null : "can't do a recovery without a source node";
|
||||
|
||||
logger.trace("collecting local files for {}", recoveryStatus);
|
||||
Store.MetadataSnapshot metadataSnapshot = null;
|
||||
try {
|
||||
metadataSnapshot = recoveryStatus.store().getMetadataOrEmpty();
|
||||
} catch (IOException e) {
|
||||
logger.warn("error while listing local files, recover as if there are none", e);
|
||||
metadataSnapshot = Store.MetadataSnapshot.EMPTY;
|
||||
} catch (Exception e) {
|
||||
// this will be logged as warning later on...
|
||||
logger.trace("unexpected error while listing local files, failing recovery", e);
|
||||
onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(),
|
||||
new RecoveryFailedException(recoveryStatus.state(), "failed to list local files", e), true);
|
||||
return;
|
||||
}
|
||||
final StartRecoveryRequest request = new StartRecoveryRequest(recoveryStatus.shardId(), recoveryStatus.sourceNode(), clusterService.localNode(),
|
||||
metadataSnapshot, recoveryStatus.state().getType(), recoveryStatus.recoveryId());
|
||||
|
||||
final AtomicReference<RecoveryResponse> responseHolder = new AtomicReference<>();
|
||||
try {
|
||||
logger.trace("[{}][{}] starting recovery from {}", request.shardId().getIndex().getName(), request.shardId().id(), request.sourceNode());
|
||||
recoveryStatus.indexShard().prepareForIndexRecovery();
|
||||
recoveryStatus.CancellableThreads().execute(new CancellableThreads.Interruptable() {
|
||||
@Override
|
||||
public void run() throws InterruptedException {
|
||||
responseHolder.set(transportService.submitRequest(request.sourceNode(), RecoverySource.Actions.START_RECOVERY, request, new FutureTransportResponseHandler<RecoveryResponse>() {
|
||||
@Override
|
||||
public RecoveryResponse newInstance() {
|
||||
return new RecoveryResponse();
|
||||
}
|
||||
}).txGet());
|
||||
}
|
||||
});
|
||||
final RecoveryResponse recoveryResponse = responseHolder.get();
|
||||
assert responseHolder != null;
|
||||
final TimeValue recoveryTime = new TimeValue(recoveryStatus.state().getTimer().time());
|
||||
// do this through ongoing recoveries to remove it from the collection
|
||||
onGoingRecoveries.markRecoveryAsDone(recoveryStatus.recoveryId());
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append('[').append(request.shardId().getIndex().getName()).append(']').append('[').append(request.shardId().id()).append("] ");
|
||||
sb.append("recovery completed from ").append(request.sourceNode()).append(", took[").append(recoveryTime).append("]\n");
|
||||
sb.append(" phase1: recovered_files [").append(recoveryResponse.phase1FileNames.size()).append("]").append(" with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1TotalSize)).append("]")
|
||||
.append(", took [").append(timeValueMillis(recoveryResponse.phase1Time)).append("], throttling_wait [").append(timeValueMillis(recoveryResponse.phase1ThrottlingWaitTime)).append(']')
|
||||
.append("\n");
|
||||
sb.append(" : reusing_files [").append(recoveryResponse.phase1ExistingFileNames.size()).append("] with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1ExistingTotalSize)).append("]\n");
|
||||
sb.append(" phase2: start took [").append(timeValueMillis(recoveryResponse.startTime)).append("]\n");
|
||||
sb.append(" : recovered [").append(recoveryResponse.phase2Operations).append("]").append(" transaction log operations")
|
||||
.append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]")
|
||||
.append("\n");
|
||||
logger.trace(sb.toString());
|
||||
} else {
|
||||
logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), recoveryStatus.sourceNode(), recoveryTime);
|
||||
}
|
||||
} catch (CancellableThreads.ExecutionCancelledException e) {
|
||||
logger.trace("recovery cancelled", e);
|
||||
} catch (Throwable e) {
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("[{}][{}] Got exception on recovery", e, request.shardId().getIndex().getName(), request.shardId().id());
|
||||
}
|
||||
Throwable cause = ExceptionsHelper.unwrapCause(e);
|
||||
if (cause instanceof CancellableThreads.ExecutionCancelledException) {
|
||||
// this can also come from the source wrapped in a RemoteTransportException
|
||||
onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, "source has canceled the recovery", cause), false);
|
||||
return;
|
||||
}
|
||||
if (cause instanceof RecoveryEngineException) {
|
||||
// unwrap an exception that was thrown as part of the recovery
|
||||
cause = cause.getCause();
|
||||
}
|
||||
// do it twice, in case we have double transport exception
|
||||
cause = ExceptionsHelper.unwrapCause(cause);
|
||||
if (cause instanceof RecoveryEngineException) {
|
||||
// unwrap an exception that was thrown as part of the recovery
|
||||
cause = cause.getCause();
|
||||
}
|
||||
|
||||
// here, we would add checks against exception that need to be retried (and not removeAndClean in this case)
|
||||
|
||||
if (cause instanceof IllegalIndexShardStateException || cause instanceof IndexNotFoundException || cause instanceof ShardNotFoundException) {
|
||||
// if the target is not ready yet, retry
|
||||
retryRecovery(recoveryStatus, "remote shard not ready", recoverySettings.retryDelayStateSync(), request);
|
||||
return;
|
||||
}
|
||||
|
||||
if (cause instanceof DelayRecoveryException) {
|
||||
retryRecovery(recoveryStatus, cause, recoverySettings.retryDelayStateSync(), request);
|
||||
return;
|
||||
}
|
||||
|
||||
if (cause instanceof ConnectTransportException) {
|
||||
logger.debug("delaying recovery of {} for [{}] due to networking error [{}]", recoveryStatus.shardId(), recoverySettings.retryDelayNetwork(), cause.getMessage());
|
||||
retryRecovery(recoveryStatus, cause.getMessage(), recoverySettings.retryDelayNetwork(), request);
|
||||
return;
|
||||
}
|
||||
|
||||
if (cause instanceof IndexShardClosedException) {
|
||||
onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, "source shard is closed", cause), false);
|
||||
return;
|
||||
}
|
||||
|
||||
if (cause instanceof AlreadyClosedException) {
|
||||
onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, "source shard is closed", cause), false);
|
||||
return;
|
||||
}
|
||||
onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, e), true);
|
||||
}
|
||||
}
|
||||
|
||||
public interface RecoveryListener {
|
||||
void onRecoveryDone(RecoveryState state);
|
||||
|
||||
void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure);
|
||||
}
|
||||
|
||||
class PrepareForTranslogOperationsRequestHandler implements TransportRequestHandler<RecoveryPrepareForTranslogOperationsRequest> {
|
||||
|
||||
@Override
|
||||
public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception {
|
||||
try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
|
||||
final RecoveryStatus recoveryStatus = statusRef.status();
|
||||
recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps());
|
||||
recoveryStatus.indexShard().skipTranslogRecovery();
|
||||
}
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
}
|
||||
|
||||
class FinalizeRecoveryRequestHandler implements TransportRequestHandler<RecoveryFinalizeRecoveryRequest> {
|
||||
|
||||
@Override
|
||||
public void messageReceived(RecoveryFinalizeRecoveryRequest request, TransportChannel channel) throws Exception {
|
||||
try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
|
||||
final RecoveryStatus recoveryStatus = statusRef.status();
|
||||
recoveryStatus.indexShard().finalizeRecovery();
|
||||
}
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
}
|
||||
|
||||
class TranslogOperationsRequestHandler implements TransportRequestHandler<RecoveryTranslogOperationsRequest> {
|
||||
|
||||
@Override
|
||||
public void messageReceived(final RecoveryTranslogOperationsRequest request, final TransportChannel channel) throws Exception {
|
||||
try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
|
||||
final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
|
||||
final RecoveryStatus recoveryStatus = statusRef.status();
|
||||
final RecoveryState.Translog translog = recoveryStatus.state().getTranslog();
|
||||
translog.totalOperations(request.totalTranslogOps());
|
||||
assert recoveryStatus.indexShard().recoveryState() == recoveryStatus.state();
|
||||
try {
|
||||
recoveryStatus.indexShard().performBatchRecovery(request.operations());
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
} catch (TranslogRecoveryPerformer.BatchOperationException exception) {
|
||||
MapperException mapperException = (MapperException) ExceptionsHelper.unwrap(exception, MapperException.class);
|
||||
if (mapperException == null) {
|
||||
throw exception;
|
||||
}
|
||||
// in very rare cases a translog replay from primary is processed before a mapping update on this node
|
||||
// which causes local mapping changes. we want to wait until these mappings are processed.
|
||||
logger.trace("delaying recovery due to missing mapping changes (rolling back stats for [{}] ops)", exception, exception.completedOperations());
|
||||
translog.decrementRecoveredOperations(exception.completedOperations());
|
||||
// we do not need to use a timeout here since the entire recovery mechanism has an inactivity protection (it will be
|
||||
// canceled)
|
||||
observer.waitForNextChange(new ClusterStateObserver.Listener() {
|
||||
@Override
|
||||
public void onNewClusterState(ClusterState state) {
|
||||
try {
|
||||
messageReceived(request, channel);
|
||||
} catch (Exception e) {
|
||||
onFailure(e);
|
||||
}
|
||||
}
|
||||
|
||||
protected void onFailure(Exception e) {
|
||||
try {
|
||||
channel.sendResponse(e);
|
||||
} catch (IOException e1) {
|
||||
logger.warn("failed to send error back to recovery source", e1);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onClusterServiceClose() {
|
||||
onFailure(new ElasticsearchException("cluster service was closed while waiting for mapping updates"));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onTimeout(TimeValue timeout) {
|
||||
// note that we do not use a timeout (see comment above)
|
||||
onFailure(new ElasticsearchTimeoutException("timed out waiting for mapping updates (timeout [" + timeout + "])"));
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class FilesInfoRequestHandler implements TransportRequestHandler<RecoveryFilesInfoRequest> {
|
||||
|
||||
@Override
|
||||
public void messageReceived(RecoveryFilesInfoRequest request, TransportChannel channel) throws Exception {
|
||||
try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
|
||||
final RecoveryStatus recoveryStatus = statusRef.status();
|
||||
final RecoveryState.Index index = recoveryStatus.state().getIndex();
|
||||
for (int i = 0; i < request.phase1ExistingFileNames.size(); i++) {
|
||||
index.addFileDetail(request.phase1ExistingFileNames.get(i), request.phase1ExistingFileSizes.get(i), true);
|
||||
}
|
||||
for (int i = 0; i < request.phase1FileNames.size(); i++) {
|
||||
index.addFileDetail(request.phase1FileNames.get(i), request.phase1FileSizes.get(i), false);
|
||||
}
|
||||
recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps);
|
||||
recoveryStatus.state().getTranslog().totalOperationsOnStart(request.totalTranslogOps);
|
||||
// recoveryBytesCount / recoveryFileCount will be set as we go...
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class CleanFilesRequestHandler implements TransportRequestHandler<RecoveryCleanFilesRequest> {
|
||||
|
||||
@Override
|
||||
public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel) throws Exception {
|
||||
try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
|
||||
final RecoveryStatus recoveryStatus = statusRef.status();
|
||||
recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps());
|
||||
// first, we go and move files that were created with the recovery id suffix to
|
||||
// the actual names, its ok if we have a corrupted index here, since we have replicas
|
||||
// to recover from in case of a full cluster shutdown just when this code executes...
|
||||
recoveryStatus.indexShard().deleteShardState(); // we have to delete it first since even if we fail to rename the shard might be invalid
|
||||
recoveryStatus.renameAllTempFiles();
|
||||
final Store store = recoveryStatus.store();
|
||||
// now write checksums
|
||||
recoveryStatus.legacyChecksums().write(store);
|
||||
Store.MetadataSnapshot sourceMetaData = request.sourceMetaSnapshot();
|
||||
try {
|
||||
store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData);
|
||||
} catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) {
|
||||
// this is a fatal exception at this stage.
|
||||
// this means we transferred files from the remote that have not be checksummed and they are
|
||||
// broken. We have to clean up this shard entirely, remove all files and bubble it up to the
|
||||
// source shard since this index might be broken there as well? The Source can handle this and checks
|
||||
// its content on disk if possible.
|
||||
try {
|
||||
try {
|
||||
store.removeCorruptionMarker();
|
||||
} finally {
|
||||
Lucene.cleanLuceneIndex(store.directory()); // clean up and delete all files
|
||||
}
|
||||
} catch (Throwable e) {
|
||||
logger.debug("Failed to clean lucene index", e);
|
||||
ex.addSuppressed(e);
|
||||
}
|
||||
RecoveryFailedException rfe = new RecoveryFailedException(recoveryStatus.state(), "failed to clean after recovery", ex);
|
||||
recoveryStatus.fail(rfe, true);
|
||||
throw rfe;
|
||||
} catch (Exception ex) {
|
||||
RecoveryFailedException rfe = new RecoveryFailedException(recoveryStatus.state(), "failed to clean after recovery", ex);
|
||||
recoveryStatus.fail(rfe, true);
|
||||
throw rfe;
|
||||
}
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class FileChunkTransportRequestHandler implements TransportRequestHandler<RecoveryFileChunkRequest> {
|
||||
|
||||
// How many bytes we've copied since we last called RateLimiter.pause
|
||||
final AtomicLong bytesSinceLastPause = new AtomicLong();
|
||||
|
||||
@Override
|
||||
public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel) throws Exception {
|
||||
try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
|
||||
final RecoveryStatus recoveryStatus = statusRef.status();
|
||||
final Store store = recoveryStatus.store();
|
||||
recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps());
|
||||
final RecoveryState.Index indexState = recoveryStatus.state().getIndex();
|
||||
if (request.sourceThrottleTimeInNanos() != RecoveryState.Index.UNKNOWN) {
|
||||
indexState.addSourceThrottling(request.sourceThrottleTimeInNanos());
|
||||
}
|
||||
IndexOutput indexOutput;
|
||||
if (request.position() == 0) {
|
||||
indexOutput = recoveryStatus.openAndPutIndexOutput(request.name(), request.metadata(), store);
|
||||
} else {
|
||||
indexOutput = recoveryStatus.getOpenIndexOutput(request.name());
|
||||
}
|
||||
BytesReference content = request.content();
|
||||
if (!content.hasArray()) {
|
||||
content = content.toBytesArray();
|
||||
}
|
||||
RateLimiter rl = recoverySettings.rateLimiter();
|
||||
if (rl != null) {
|
||||
long bytes = bytesSinceLastPause.addAndGet(content.length());
|
||||
if (bytes > rl.getMinPauseCheckBytes()) {
|
||||
// Time to pause
|
||||
bytesSinceLastPause.addAndGet(-bytes);
|
||||
long throttleTimeInNanos = rl.pause(bytes);
|
||||
indexState.addTargetThrottling(throttleTimeInNanos);
|
||||
recoveryStatus.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos);
|
||||
}
|
||||
}
|
||||
indexOutput.writeBytes(content.array(), content.arrayOffset(), content.length());
|
||||
indexState.addRecoveredBytesToFile(request.name(), content.length());
|
||||
if (indexOutput.getFilePointer() >= request.length() || request.lastChunk()) {
|
||||
try {
|
||||
Store.verify(indexOutput);
|
||||
} finally {
|
||||
// we are done
|
||||
indexOutput.close();
|
||||
}
|
||||
// write the checksum
|
||||
recoveryStatus.legacyChecksums().add(request.metadata());
|
||||
final String temporaryFileName = recoveryStatus.getTempNameForFile(request.name());
|
||||
assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName);
|
||||
store.directory().sync(Collections.singleton(temporaryFileName));
|
||||
IndexOutput remove = recoveryStatus.removeOpenIndexOutputs(request.name());
|
||||
assert remove == null || remove == indexOutput; // remove may be null if we have already finished
|
||||
}
|
||||
}
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
}
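The chunk handler above only consults the rate limiter once enough bytes have accumulated, keeping a shared counter that is credited back whenever a pause is taken. A minimal sketch of that accounting pattern, lifted out of the handler for illustration (the ChunkThrottle class name is made up; only RateLimiter and AtomicLong come from the code above):

import org.apache.lucene.store.RateLimiter;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

// Illustrative only: accumulate chunk sizes and pause once the limiter's
// minimum check threshold is crossed, crediting back the counted bytes.
class ChunkThrottle {
    private final AtomicLong bytesSinceLastPause = new AtomicLong();

    long maybePause(RateLimiter rateLimiter, int chunkLength) throws IOException {
        long bytes = bytesSinceLastPause.addAndGet(chunkLength);
        if (bytes > rateLimiter.getMinPauseCheckBytes()) {
            bytesSinceLastPause.addAndGet(-bytes); // reset the window we just accounted for
            return rateLimiter.pause(bytes);       // nanoseconds actually slept
        }
        return 0L;
    }
}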
|
||||
|
||||
class RecoveryRunner extends AbstractRunnable {
|
||||
|
||||
final long recoveryId;
|
||||
|
||||
RecoveryRunner(long recoveryId) {
|
||||
this.recoveryId = recoveryId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatus(recoveryId)) {
|
||||
if (statusRef != null) {
|
||||
logger.error("unexpected error during recovery [{}], failing shard", t, recoveryId);
|
||||
onGoingRecoveries.failRecovery(recoveryId,
|
||||
new RecoveryFailedException(statusRef.status().state(), "unexpected error", t),
|
||||
true // be safe
|
||||
);
|
||||
} else {
|
||||
logger.debug("unexpected error during recovery, but recovery id [{}] is finished", t, recoveryId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void doRun() {
|
||||
RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatus(recoveryId);
|
||||
if (statusRef == null) {
|
||||
logger.trace("not running recovery with id [{}] - can't find it (probably finished)", recoveryId);
|
||||
return;
|
||||
}
|
||||
try {
    doRecovery(statusRef.status());
} finally {
    statusRef.close();
}
}
}

public void fail(RecoveryFailedException e, boolean sendShardFailure) {
    if (finished.compareAndSet(false, true)) {
        try {
            listener.onRecoveryFailure(state(), e, sendShardFailure);
        } finally {
            try {
                cancellableThreads.cancel("failed recovery [" + ExceptionsHelper.stackTrace(e) + "]");
            } finally {
                // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now
                decRef();
            }
        }
    }
}
|
||||
|
||||
/** mark the current recovery as done */
|
||||
public void markAsDone() {
|
||||
if (finished.compareAndSet(false, true)) {
|
||||
assert tempFileNames.isEmpty() : "not all temporary files are renamed";
|
||||
try {
|
||||
// this might still throw an exception, e.g. if the shard is CLOSED due to some other event.
|
||||
// it's safer to decrement the reference in a try finally here.
|
||||
indexShard.postRecovery("peer recovery done");
|
||||
} finally {
|
||||
// release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now
|
||||
decRef();
|
||||
}
|
||||
listener.onRecoveryDone(state());
|
||||
}
|
||||
}
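Both fail(..) and markAsDone() above gate their work on finished.compareAndSet(false, true), so a recovery is resolved exactly once even if success and failure race. A stripped-down sketch of that one-shot idiom (OneShotResult is an illustrative name, not part of the codebase):

import java.util.concurrent.atomic.AtomicBoolean;

// Illustrative only: the first caller to resolve() wins; later calls are no-ops.
class OneShotResult {
    private final AtomicBoolean finished = new AtomicBoolean(false);

    boolean resolve(Runnable onFirstResolution) {
        if (finished.compareAndSet(false, true)) {
            onFirstResolution.run();
            return true;
        }
        return false; // already resolved by a competing success/failure path
    }
}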
|
||||
|
||||
/** Get a temporary name for the provided file name. */
|
||||
public String getTempNameForFile(String origFile) {
|
||||
return tempFilePrefix + origFile;
|
||||
}
|
||||
|
||||
public IndexOutput getOpenIndexOutput(String key) {
|
||||
ensureRefCount();
|
||||
return openIndexOutputs.get(key);
|
||||
}
|
||||
|
||||
/** remove an {@link org.apache.lucene.store.IndexOutput} for a given file. It is the caller's responsibility to close it. */
|
||||
public IndexOutput removeOpenIndexOutputs(String name) {
|
||||
ensureRefCount();
|
||||
return openIndexOutputs.remove(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an {@link org.apache.lucene.store.IndexOutput} for the given file name. Note that the
|
||||
* IndexOutput actually points at a temporary file.
|
||||
* <p>
|
||||
* Note: You can use {@link #getOpenIndexOutput(String)} with the same filename to retrieve the same IndexOutput
|
||||
* at a later stage
|
||||
*/
|
||||
public IndexOutput openAndPutIndexOutput(String fileName, StoreFileMetaData metaData, Store store) throws IOException {
|
||||
ensureRefCount();
|
||||
String tempFileName = getTempNameForFile(fileName);
|
||||
if (tempFileNames.containsKey(tempFileName)) {
|
||||
throw new IllegalStateException("output for file [" + fileName + "] has already been created");
|
||||
}
|
||||
// add first, before it's created
|
||||
tempFileNames.put(tempFileName, fileName);
|
||||
IndexOutput indexOutput = store.createVerifyingOutput(tempFileName, metaData, IOContext.DEFAULT);
|
||||
openIndexOutputs.put(fileName, indexOutput);
|
||||
return indexOutput;
|
||||
}
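openAndPutIndexOutput registers the temporary name before the verifying output is created, which is what lets cleanOpenFiles() later delete every partially written file even if creation itself failed. A simplified sketch of that register-then-create ordering (TempFileRegistry and its map are illustrative; the real class also tracks the open IndexOutput):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative only: record the temp name first so cleanup can always find it,
// even if creating the output fails halfway through.
class TempFileRegistry {
    private final String tempFilePrefix;
    private final Map<String, String> tempFileNames = new ConcurrentHashMap<>();

    TempFileRegistry(String tempFilePrefix) {
        this.tempFilePrefix = tempFilePrefix;
    }

    String register(String fileName) {
        String tempFileName = tempFilePrefix + fileName;
        if (tempFileNames.putIfAbsent(tempFileName, fileName) != null) {
            throw new IllegalStateException("output for file [" + fileName + "] has already been created");
        }
        return tempFileName; // caller now creates the actual output under this name
    }
}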
|
||||
|
||||
public void resetRecovery() throws IOException {
|
||||
cleanOpenFiles();
|
||||
indexShard().performRecoveryRestart();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void closeInternal() {
|
||||
try {
|
||||
cleanOpenFiles();
|
||||
} finally {
|
||||
// free store. increment happens in constructor
|
||||
store.decRef();
|
||||
indexShard.recoveryStats().decCurrentAsTarget();
|
||||
}
|
||||
}
|
||||
|
||||
protected void cleanOpenFiles() {
|
||||
// clean open index outputs
|
||||
Iterator<Entry<String, IndexOutput>> iterator = openIndexOutputs.entrySet().iterator();
|
||||
while (iterator.hasNext()) {
|
||||
Map.Entry<String, IndexOutput> entry = iterator.next();
|
||||
logger.trace("closing IndexOutput file [{}]", entry.getValue());
|
||||
try {
|
||||
entry.getValue().close();
|
||||
} catch (Throwable t) {
|
||||
logger.debug("error while closing recovery output [{}]", t, entry.getValue());
|
||||
}
|
||||
iterator.remove();
|
||||
}
|
||||
// trash temporary files
|
||||
for (String file : tempFileNames.keySet()) {
|
||||
logger.trace("cleaning temporary file [{}]", file);
|
||||
store.deleteQuiet(file);
|
||||
}
|
||||
legacyChecksums.clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return shardId + " [" + recoveryId + "]";
|
||||
}
|
||||
|
||||
private void ensureRefCount() {
|
||||
if (refCount() <= 0) {
|
||||
throw new ElasticsearchException("RecoveryStatus is used but its ref count is 0. Probably a mismatch between incRef/decRef " +
    "calls");
|
||||
}
|
||||
}
|
||||
|
||||
/** Implementation of {@link RecoveryTargetHandler} */
|
||||
|
||||
@Override
|
||||
public void prepareForTranslogOperations(int totalTranslogOps) throws IOException {
|
||||
state().getTranslog().totalOperations(totalTranslogOps);
|
||||
indexShard().skipTranslogRecovery();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void finalizeRecovery() {
|
||||
indexShard().finalizeRecovery();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) throws TranslogRecoveryPerformer
|
||||
.BatchOperationException {
|
||||
final RecoveryState.Translog translog = state().getTranslog();
|
||||
translog.totalOperations(totalTranslogOps);
|
||||
assert indexShard().recoveryState() == state();
|
||||
indexShard().performBatchRecovery(operations);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void receiveFileInfo(List<String> phase1FileNames,
|
||||
List<Long> phase1FileSizes,
|
||||
List<String> phase1ExistingFileNames,
|
||||
List<Long> phase1ExistingFileSizes,
|
||||
int totalTranslogOps) {
|
||||
final RecoveryState.Index index = state().getIndex();
|
||||
for (int i = 0; i < phase1ExistingFileNames.size(); i++) {
|
||||
index.addFileDetail(phase1ExistingFileNames.get(i), phase1ExistingFileSizes.get(i), true);
|
||||
}
|
||||
for (int i = 0; i < phase1FileNames.size(); i++) {
|
||||
index.addFileDetail(phase1FileNames.get(i), phase1FileSizes.get(i), false);
|
||||
}
|
||||
state().getTranslog().totalOperations(totalTranslogOps);
|
||||
state().getTranslog().totalOperationsOnStart(totalTranslogOps);
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException {
|
||||
state().getTranslog().totalOperations(totalTranslogOps);
|
||||
// first, we go and move files that were created with the recovery id suffix to
// the actual names, it's ok if we have a corrupted index here, since we have replicas
// to recover from in case of a full cluster shutdown just when this code executes...
indexShard().deleteShardState(); // we have to delete it first since, even if we fail to rename, the shard
// might be invalid
|
||||
renameAllTempFiles();
|
||||
final Store store = store();
|
||||
// now write checksums
|
||||
legacyChecksums().write(store);
|
||||
try {
|
||||
store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData);
|
||||
} catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) {
|
||||
// this is a fatal exception at this stage.
// this means we transferred files from the remote that have not been checksummed and they are
// broken. We have to clean up this shard entirely, remove all files and bubble it up to the
// source shard since this index might be broken there as well. The source can handle this and checks
// its content on disk if possible.
|
||||
try {
|
||||
try {
|
||||
store.removeCorruptionMarker();
|
||||
} finally {
|
||||
Lucene.cleanLuceneIndex(store.directory()); // clean up and delete all files
|
||||
}
|
||||
} catch (Throwable e) {
|
||||
logger.debug("Failed to clean lucene index", e);
|
||||
ex.addSuppressed(e);
|
||||
}
|
||||
RecoveryFailedException rfe = new RecoveryFailedException(state(), "failed to clean after recovery", ex);
|
||||
fail(rfe, true);
|
||||
throw rfe;
|
||||
} catch (Exception ex) {
|
||||
RecoveryFailedException rfe = new RecoveryFailedException(state(), "failed to clean after recovery", ex);
|
||||
fail(rfe, true);
|
||||
throw rfe;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesReference content,
|
||||
boolean lastChunk, int totalTranslogOps) throws IOException {
|
||||
final Store store = store();
|
||||
final String name = fileMetaData.name();
|
||||
state().getTranslog().totalOperations(totalTranslogOps);
|
||||
final RecoveryState.Index indexState = state().getIndex();
|
||||
IndexOutput indexOutput;
|
||||
if (position == 0) {
|
||||
indexOutput = openAndPutIndexOutput(name, fileMetaData, store);
|
||||
} else {
|
||||
indexOutput = getOpenIndexOutput(name);
|
||||
}
|
||||
if (content.hasArray() == false) {
|
||||
content = content.toBytesArray();
|
||||
}
|
||||
indexOutput.writeBytes(content.array(), content.arrayOffset(), content.length());
|
||||
indexState.addRecoveredBytesToFile(name, content.length());
|
||||
if (indexOutput.getFilePointer() >= fileMetaData.length() || lastChunk) {
|
||||
try {
|
||||
Store.verify(indexOutput);
|
||||
} finally {
|
||||
// we are done
|
||||
indexOutput.close();
|
||||
}
|
||||
// write the checksum
|
||||
legacyChecksums().add(fileMetaData);
|
||||
final String temporaryFileName = getTempNameForFile(name);
|
||||
assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName);
|
||||
store.directory().sync(Collections.singleton(temporaryFileName));
|
||||
IndexOutput remove = removeOpenIndexOutputs(name);
|
||||
assert remove == null || remove == indexOutput; // remove may be null if we have already finished
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,74 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.indices.recovery;
|
||||
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.index.store.StoreFileMetaData;
|
||||
import org.elasticsearch.index.translog.Translog;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
|
||||
|
||||
public interface RecoveryTargetHandler {
|
||||
|
||||
/**
|
||||
* Prepares the target to receive translog operations, after all files have been copied
|
||||
*
|
||||
* @param totalTranslogOps total translog operations expected to be sent
|
||||
*/
|
||||
void prepareForTranslogOperations(int totalTranslogOps) throws IOException;
|
||||
|
||||
/**
|
||||
* The finalize request clears unreferenced translog files, refreshes the engine now that
|
||||
* new segments are available, and enables garbage collection of
|
||||
* tombstone files. The shard is also moved to the POST_RECOVERY phase during this time
|
||||
**/
|
||||
void finalizeRecovery();
|
||||
|
||||
/**
|
||||
* Index a set of translog operations on the target
|
||||
* @param operations operations to index
|
||||
* @param totalTranslogOps current number of total operations expected to be indexed
|
||||
*/
|
||||
void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps);
|
||||
|
||||
/**
|
||||
* Notifies the target of the files it is going to receive
|
||||
*/
|
||||
void receiveFileInfo(List<String> phase1FileNames,
|
||||
List<Long> phase1FileSizes,
|
||||
List<String> phase1ExistingFileNames,
|
||||
List<Long> phase1ExistingFileSizes,
|
||||
int totalTranslogOps);
|
||||
|
||||
/**
|
||||
* After all source files have been sent over, this command is sent to the target so it can clean any local
* files that are not part of the source store.
* @param totalTranslogOps an updated number of translog operations that will be replayed later on
|
||||
* @param sourceMetaData meta data of the source store
|
||||
*/
|
||||
void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException;
|
||||
|
||||
/** writes a partial file chunk to the target store */
|
||||
void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesReference content,
|
||||
boolean lastChunk, int totalTranslogOps) throws IOException;
|
||||
|
||||
}
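The methods above are invoked by the recovery source in a fixed sequence: file metadata, file chunks, cleanup, translog preparation, translog replay, and finally finalize. A hedged driver sketch showing that order against any RecoveryTargetHandler (the class and the empty file lists are placeholders; a real source streams chunks and batches operations):

import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;

import java.io.IOException;
import java.util.Collections;
import java.util.List;

// Illustrative driver only; a real recovery source streams file chunks between
// receiveFileInfo and cleanFiles, and sends operations in multiple batches.
final class RecoveryCallOrder {
    static void run(RecoveryTargetHandler target, Store.MetadataSnapshot sourceMetaData,
                    List<Translog.Operation> operations, int totalTranslogOps) throws IOException {
        target.receiveFileInfo(Collections.emptyList(), Collections.emptyList(),
                Collections.emptyList(), Collections.emptyList(), totalTranslogOps);
        // ... target.writeFileChunk(fileMetaData, position, content, lastChunk, totalTranslogOps) per chunk ...
        target.cleanFiles(totalTranslogOps, sourceMetaData);
        target.prepareForTranslogOperations(totalTranslogOps);
        target.indexTranslogOperations(operations, totalTranslogOps);
        target.finalizeRecovery();
    }
}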
|
|
@@ -0,0 +1,470 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.indices.recovery;
|
||||
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
import org.apache.lucene.store.RateLimiter;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ElasticsearchTimeoutException;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateObserver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.CancellableThreads;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.engine.RecoveryEngineException;
|
||||
import org.elasticsearch.index.mapper.MapperException;
|
||||
import org.elasticsearch.index.shard.IllegalIndexShardStateException;
|
||||
import org.elasticsearch.index.shard.IndexEventListener;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.IndexShardClosedException;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.shard.ShardNotFoundException;
|
||||
import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
|
||||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.ConnectTransportException;
|
||||
import org.elasticsearch.transport.FutureTransportResponseHandler;
|
||||
import org.elasticsearch.transport.TransportChannel;
|
||||
import org.elasticsearch.transport.TransportRequestHandler;
|
||||
import org.elasticsearch.transport.TransportResponse;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
|
||||
|
||||
/**
|
||||
* The recovery target service handles peer recoveries of shards on the node they are being recovered to.
|
||||
* <p>
|
||||
* Note, it can be safely assumed that there will only be a single recovery per shard (index+id) and
|
||||
* not several of them (since we don't allocate several shard replicas to the same node).
|
||||
*/
|
||||
public class RecoveryTargetService extends AbstractComponent implements IndexEventListener {
|
||||
|
||||
public static class Actions {
|
||||
public static final String FILES_INFO = "internal:index/shard/recovery/filesInfo";
|
||||
public static final String FILE_CHUNK = "internal:index/shard/recovery/file_chunk";
|
||||
public static final String CLEAN_FILES = "internal:index/shard/recovery/clean_files";
|
||||
public static final String TRANSLOG_OPS = "internal:index/shard/recovery/translog_ops";
|
||||
public static final String PREPARE_TRANSLOG = "internal:index/shard/recovery/prepare_translog";
|
||||
public static final String FINALIZE = "internal:index/shard/recovery/finalize";
|
||||
}
|
||||
|
||||
private final ThreadPool threadPool;
|
||||
|
||||
private final TransportService transportService;
|
||||
|
||||
private final RecoverySettings recoverySettings;
|
||||
private final ClusterService clusterService;
|
||||
|
||||
private final RecoveriesCollection onGoingRecoveries;
|
||||
|
||||
@Inject
|
||||
public RecoveryTargetService(Settings settings, ThreadPool threadPool, TransportService transportService, RecoverySettings
|
||||
recoverySettings,
|
||||
ClusterService clusterService) {
|
||||
super(settings);
|
||||
this.threadPool = threadPool;
|
||||
this.transportService = transportService;
|
||||
this.recoverySettings = recoverySettings;
|
||||
this.clusterService = clusterService;
|
||||
this.onGoingRecoveries = new RecoveriesCollection(logger, threadPool);
|
||||
|
||||
transportService.registerRequestHandler(Actions.FILES_INFO, RecoveryFilesInfoRequest::new, ThreadPool.Names.GENERIC, new
|
||||
FilesInfoRequestHandler());
|
||||
transportService.registerRequestHandler(Actions.FILE_CHUNK, RecoveryFileChunkRequest::new, ThreadPool.Names.GENERIC, new
|
||||
FileChunkTransportRequestHandler());
|
||||
transportService.registerRequestHandler(Actions.CLEAN_FILES, RecoveryCleanFilesRequest::new, ThreadPool.Names.GENERIC, new
|
||||
CleanFilesRequestHandler());
|
||||
transportService.registerRequestHandler(Actions.PREPARE_TRANSLOG, RecoveryPrepareForTranslogOperationsRequest::new, ThreadPool
|
||||
.Names.GENERIC, new PrepareForTranslogOperationsRequestHandler());
|
||||
transportService.registerRequestHandler(Actions.TRANSLOG_OPS, RecoveryTranslogOperationsRequest::new, ThreadPool.Names.GENERIC,
|
||||
new TranslogOperationsRequestHandler());
|
||||
transportService.registerRequestHandler(Actions.FINALIZE, RecoveryFinalizeRecoveryRequest::new, ThreadPool.Names.GENERIC, new
|
||||
FinalizeRecoveryRequestHandler());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) {
|
||||
if (indexShard != null) {
|
||||
onGoingRecoveries.cancelRecoveriesForShard(shardId, "shard closed");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* cancel all ongoing recoveries for the given shard, if their status matches the given predicate
|
||||
*
|
||||
* @param reason reason for cancellation
|
||||
* @param shardId shardId for which to cancel recoveries
|
||||
* @param shouldCancel a predicate to check if a recovery should be cancelled or not. Null means cancel without an extra check.
|
||||
* note that the recovery state can change after this check, but before it is being cancelled via other
|
||||
* already issued outstanding references.
|
||||
* @return true if a recovery was cancelled
|
||||
*/
|
||||
public boolean cancelRecoveriesForShard(ShardId shardId, String reason, @Nullable Predicate<RecoveryTarget> shouldCancel) {
|
||||
return onGoingRecoveries.cancelRecoveriesForShard(shardId, reason, shouldCancel);
|
||||
}
|
||||
|
||||
public void startRecovery(final IndexShard indexShard, final RecoveryState.Type recoveryType, final DiscoveryNode sourceNode, final
|
||||
RecoveryListener listener) {
|
||||
// create a new recovery status, and process...
|
||||
final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout());
|
||||
threadPool.generic().execute(new RecoveryRunner(recoveryId));
|
||||
}
|
||||
|
||||
protected void retryRecovery(final RecoveryTarget recoveryTarget, final Throwable reason, TimeValue retryAfter, final
|
||||
StartRecoveryRequest currentRequest) {
|
||||
logger.trace("will retry recovery with id [{}] in [{}]", reason, recoveryTarget.recoveryId(), retryAfter);
|
||||
retryRecovery(recoveryTarget, retryAfter, currentRequest);
|
||||
}
|
||||
|
||||
protected void retryRecovery(final RecoveryTarget recoveryTarget, final String reason, TimeValue retryAfter, final
|
||||
StartRecoveryRequest currentRequest) {
|
||||
logger.trace("will retry recovery with id [{}] in [{}] (reason [{}])", recoveryTarget.recoveryId(), retryAfter, reason);
|
||||
retryRecovery(recoveryTarget, retryAfter, currentRequest);
|
||||
}
|
||||
|
||||
private void retryRecovery(final RecoveryTarget recoveryTarget, TimeValue retryAfter, final StartRecoveryRequest currentRequest) {
|
||||
try {
|
||||
recoveryTarget.resetRecovery();
|
||||
} catch (Throwable e) {
|
||||
onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), new RecoveryFailedException(currentRequest, e), true);
|
||||
}
|
||||
threadPool.schedule(retryAfter, ThreadPool.Names.GENERIC, new RecoveryRunner(recoveryTarget.recoveryId()));
|
||||
}
|
||||
|
||||
private void doRecovery(final RecoveryTarget recoveryTarget) {
|
||||
assert recoveryTarget.sourceNode() != null : "can't do a recovery without a source node";
|
||||
|
||||
logger.trace("collecting local files for {}", recoveryTarget);
|
||||
Store.MetadataSnapshot metadataSnapshot = null;
|
||||
try {
|
||||
metadataSnapshot = recoveryTarget.store().getMetadataOrEmpty();
|
||||
} catch (IOException e) {
|
||||
logger.warn("error while listing local files, recover as if there are none", e);
|
||||
metadataSnapshot = Store.MetadataSnapshot.EMPTY;
|
||||
} catch (Exception e) {
|
||||
// this will be logged as warning later on...
|
||||
logger.trace("unexpected error while listing local files, failing recovery", e);
|
||||
onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(),
|
||||
new RecoveryFailedException(recoveryTarget.state(), "failed to list local files", e), true);
|
||||
return;
|
||||
}
|
||||
final StartRecoveryRequest request = new StartRecoveryRequest(recoveryTarget.shardId(), recoveryTarget.sourceNode(),
|
||||
clusterService.localNode(),
|
||||
metadataSnapshot, recoveryTarget.state().getType(), recoveryTarget.recoveryId());
|
||||
|
||||
final AtomicReference<RecoveryResponse> responseHolder = new AtomicReference<>();
|
||||
try {
|
||||
logger.trace("[{}][{}] starting recovery from {}", request.shardId().getIndex().getName(), request.shardId().id(), request
|
||||
.sourceNode());
|
||||
recoveryTarget.indexShard().prepareForIndexRecovery();
|
||||
recoveryTarget.CancellableThreads().execute(() -> responseHolder.set(
|
||||
transportService.submitRequest(request.sourceNode(), RecoverySource.Actions.START_RECOVERY, request,
|
||||
new FutureTransportResponseHandler<RecoveryResponse>() {
|
||||
@Override
|
||||
public RecoveryResponse newInstance() {
|
||||
return new RecoveryResponse();
|
||||
}
|
||||
}).txGet()));
|
||||
final RecoveryResponse recoveryResponse = responseHolder.get();
|
||||
assert responseHolder != null;
|
||||
final TimeValue recoveryTime = new TimeValue(recoveryTarget.state().getTimer().time());
|
||||
// do this through ongoing recoveries to remove it from the collection
|
||||
onGoingRecoveries.markRecoveryAsDone(recoveryTarget.recoveryId());
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append('[').append(request.shardId().getIndex().getName()).append(']').append('[').append(request.shardId().id())
|
||||
.append("] ");
|
||||
sb.append("recovery completed from ").append(request.sourceNode()).append(", took[").append(recoveryTime).append("]\n");
|
||||
sb.append(" phase1: recovered_files [").append(recoveryResponse.phase1FileNames.size()).append("]").append(" with " +
|
||||
"total_size of [").append(new ByteSizeValue(recoveryResponse.phase1TotalSize)).append("]")
|
||||
.append(", took [").append(timeValueMillis(recoveryResponse.phase1Time)).append("], throttling_wait [").append
|
||||
(timeValueMillis(recoveryResponse.phase1ThrottlingWaitTime)).append(']')
|
||||
.append("\n");
|
||||
sb.append(" : reusing_files [").append(recoveryResponse.phase1ExistingFileNames.size()).append("] with " +
|
||||
"total_size of [").append(new ByteSizeValue(recoveryResponse.phase1ExistingTotalSize)).append("]\n");
|
||||
sb.append(" phase2: start took [").append(timeValueMillis(recoveryResponse.startTime)).append("]\n");
|
||||
sb.append(" : recovered [").append(recoveryResponse.phase2Operations).append("]").append(" transaction log " +
|
||||
"operations")
|
||||
.append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]")
|
||||
.append("\n");
|
||||
logger.trace(sb.toString());
|
||||
} else {
|
||||
logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), recoveryTarget.sourceNode(), recoveryTime);
|
||||
}
|
||||
} catch (CancellableThreads.ExecutionCancelledException e) {
|
||||
logger.trace("recovery cancelled", e);
|
||||
} catch (Throwable e) {
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("[{}][{}] Got exception on recovery", e, request.shardId().getIndex().getName(), request.shardId().id());
|
||||
}
|
||||
Throwable cause = ExceptionsHelper.unwrapCause(e);
|
||||
if (cause instanceof CancellableThreads.ExecutionCancelledException) {
|
||||
// this can also come from the source wrapped in a RemoteTransportException
|
||||
onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), new RecoveryFailedException(request, "source has canceled the" +
|
||||
" recovery", cause), false);
|
||||
return;
|
||||
}
|
||||
if (cause instanceof RecoveryEngineException) {
|
||||
// unwrap an exception that was thrown as part of the recovery
|
||||
cause = cause.getCause();
|
||||
}
|
||||
// do it twice, in case we have double transport exception
|
||||
cause = ExceptionsHelper.unwrapCause(cause);
|
||||
if (cause instanceof RecoveryEngineException) {
|
||||
// unwrap an exception that was thrown as part of the recovery
|
||||
cause = cause.getCause();
|
||||
}
|
||||
|
||||
// here, we would add checks against exception that need to be retried (and not removeAndClean in this case)
|
||||
|
||||
if (cause instanceof IllegalIndexShardStateException || cause instanceof IndexNotFoundException || cause instanceof
|
||||
ShardNotFoundException) {
|
||||
// if the target is not ready yet, retry
|
||||
retryRecovery(recoveryTarget, "remote shard not ready", recoverySettings.retryDelayStateSync(), request);
|
||||
return;
|
||||
}
|
||||
|
||||
if (cause instanceof DelayRecoveryException) {
|
||||
retryRecovery(recoveryTarget, cause, recoverySettings.retryDelayStateSync(), request);
|
||||
return;
|
||||
}
|
||||
|
||||
if (cause instanceof ConnectTransportException) {
|
||||
logger.debug("delaying recovery of {} for [{}] due to networking error [{}]", recoveryTarget.shardId(), recoverySettings
|
||||
.retryDelayNetwork(), cause.getMessage());
|
||||
retryRecovery(recoveryTarget, cause.getMessage(), recoverySettings.retryDelayNetwork(), request);
|
||||
return;
|
||||
}
|
||||
|
||||
if (cause instanceof IndexShardClosedException) {
|
||||
onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), new RecoveryFailedException(request, "source shard is " +
|
||||
"closed", cause), false);
|
||||
return;
|
||||
}
|
||||
|
||||
if (cause instanceof AlreadyClosedException) {
|
||||
onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), new RecoveryFailedException(request, "source shard is " +
|
||||
"closed", cause), false);
|
||||
return;
|
||||
}
|
||||
onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), new RecoveryFailedException(request, e), true);
|
||||
}
|
||||
}
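The catch block above decides between retrying and failing by unwrapping the (possibly doubly wrapped) transport exception and checking its type. The retryable causes reduce to a small predicate, sketched here for clarity (the helper name is illustrative and not part of the service):

import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.IllegalIndexShardStateException;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.transport.ConnectTransportException;

// Illustrative only: these are the causes doRecovery retries with a delay
// instead of failing the recovery outright.
final class RetryableRecoveryFailures {
    static boolean isRetryable(Throwable cause) {
        return cause instanceof IllegalIndexShardStateException
                || cause instanceof IndexNotFoundException
                || cause instanceof ShardNotFoundException
                || cause instanceof DelayRecoveryException
                || cause instanceof ConnectTransportException;
    }
}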
|
||||
|
||||
public interface RecoveryListener {
|
||||
void onRecoveryDone(RecoveryState state);
|
||||
|
||||
void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure);
|
||||
}
|
||||
|
||||
class PrepareForTranslogOperationsRequestHandler implements TransportRequestHandler<RecoveryPrepareForTranslogOperationsRequest> {
|
||||
|
||||
@Override
|
||||
public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception {
|
||||
try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId()
|
||||
)) {
|
||||
recoveryRef.status().prepareForTranslogOperations(request.totalTranslogOps());
|
||||
}
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
}
|
||||
|
||||
class FinalizeRecoveryRequestHandler implements TransportRequestHandler<RecoveryFinalizeRecoveryRequest> {
|
||||
|
||||
@Override
|
||||
public void messageReceived(RecoveryFinalizeRecoveryRequest request, TransportChannel channel) throws Exception {
|
||||
try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId()
|
||||
)) {
|
||||
recoveryRef.status().finalizeRecovery();
|
||||
}
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
}
|
||||
|
||||
class TranslogOperationsRequestHandler implements TransportRequestHandler<RecoveryTranslogOperationsRequest> {
|
||||
|
||||
@Override
|
||||
public void messageReceived(final RecoveryTranslogOperationsRequest request, final TransportChannel channel) throws IOException {
|
||||
try (RecoveriesCollection.RecoveryRef recoveryRef =
|
||||
onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) {
|
||||
final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
|
||||
final RecoveryTarget recoveryTarget = recoveryRef.status();
|
||||
try {
|
||||
recoveryTarget.indexTranslogOperations(request.operations(), request.totalTranslogOps());
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
} catch (TranslogRecoveryPerformer.BatchOperationException exception) {
|
||||
MapperException mapperException = (MapperException) ExceptionsHelper.unwrap(exception, MapperException.class);
|
||||
if (mapperException == null) {
|
||||
throw exception;
|
||||
}
|
||||
// in very rare cases a translog replay from primary is processed before a mapping update on this node
|
||||
// which causes local mapping changes. we want to wait until these mappings are processed.
|
||||
logger.trace("delaying recovery due to missing mapping changes (rolling back stats for [{}] ops)", exception, exception
|
||||
.completedOperations());
|
||||
// we do not need to use a timeout here since the entire recovery mechanism has an inactivity protection (it will be
|
||||
// canceled)
|
||||
observer.waitForNextChange(new ClusterStateObserver.Listener() {
|
||||
@Override
|
||||
public void onNewClusterState(ClusterState state) {
|
||||
try {
|
||||
messageReceived(request, channel);
|
||||
} catch (Exception e) {
|
||||
onFailure(e);
|
||||
}
|
||||
}
|
||||
|
||||
protected void onFailure(Exception e) {
|
||||
try {
|
||||
channel.sendResponse(e);
|
||||
} catch (IOException e1) {
|
||||
logger.warn("failed to send error back to recovery source", e1);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onClusterServiceClose() {
|
||||
onFailure(new ElasticsearchException("cluster service was closed while waiting for mapping updates"));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onTimeout(TimeValue timeout) {
|
||||
// note that we do not use a timeout (see comment above)
|
||||
onFailure(new ElasticsearchTimeoutException("timed out waiting for mapping updates (timeout [" + timeout +
|
||||
"])"));
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class FilesInfoRequestHandler implements TransportRequestHandler<RecoveryFilesInfoRequest> {
|
||||
|
||||
@Override
|
||||
public void messageReceived(RecoveryFilesInfoRequest request, TransportChannel channel) throws Exception {
|
||||
try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId()
|
||||
)) {
|
||||
recoveryRef.status().receiveFileInfo(request.phase1FileNames, request.phase1FileSizes, request.phase1ExistingFileNames,
|
||||
request.phase1ExistingFileSizes, request.totalTranslogOps);
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class CleanFilesRequestHandler implements TransportRequestHandler<RecoveryCleanFilesRequest> {
|
||||
|
||||
@Override
|
||||
public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel) throws Exception {
|
||||
try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId()
|
||||
)) {
|
||||
recoveryRef.status().cleanFiles(request.totalTranslogOps(), request.sourceMetaSnapshot());
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class FileChunkTransportRequestHandler implements TransportRequestHandler<RecoveryFileChunkRequest> {
|
||||
|
||||
// How many bytes we've copied since we last called RateLimiter.pause
|
||||
final AtomicLong bytesSinceLastPause = new AtomicLong();
|
||||
|
||||
@Override
|
||||
public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel) throws Exception {
|
||||
try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId()
|
||||
)) {
|
||||
final RecoveryTarget status = recoveryRef.status();
|
||||
final RecoveryState.Index indexState = status.state().getIndex();
|
||||
if (request.sourceThrottleTimeInNanos() != RecoveryState.Index.UNKNOWN) {
|
||||
indexState.addSourceThrottling(request.sourceThrottleTimeInNanos());
|
||||
}
|
||||
|
||||
RateLimiter rateLimiter = recoverySettings.rateLimiter();
|
||||
if (rateLimiter != null) {
|
||||
long bytes = bytesSinceLastPause.addAndGet(request.content().length());
|
||||
if (bytes > rateLimiter.getMinPauseCheckBytes()) {
|
||||
// Time to pause
|
||||
bytesSinceLastPause.addAndGet(-bytes);
|
||||
long throttleTimeInNanos = rateLimiter.pause(bytes);
|
||||
indexState.addTargetThrottling(throttleTimeInNanos);
|
||||
status.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos);
|
||||
}
|
||||
}
|
||||
|
||||
status.writeFileChunk(request.metadata(), request.position(), request.content(),
|
||||
request.lastChunk(), request.totalTranslogOps()
|
||||
);
|
||||
}
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
}
|
||||
|
||||
class RecoveryRunner extends AbstractRunnable {
|
||||
|
||||
final long recoveryId;
|
||||
|
||||
RecoveryRunner(long recoveryId) {
|
||||
this.recoveryId = recoveryId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) {
|
||||
if (recoveryRef != null) {
|
||||
logger.error("unexpected error during recovery [{}], failing shard", t, recoveryId);
|
||||
onGoingRecoveries.failRecovery(recoveryId,
|
||||
new RecoveryFailedException(recoveryRef.status().state(), "unexpected error", t),
|
||||
true // be safe
|
||||
);
|
||||
} else {
|
||||
logger.debug("unexpected error during recovery, but recovery id [{}] is finished", t, recoveryId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void doRun() {
|
||||
RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId);
|
||||
if (recoveryRef == null) {
|
||||
logger.trace("not running recovery with id [{}] - can't find it (probably finished)", recoveryId);
|
||||
return;
|
||||
}
|
||||
try {
|
||||
doRecovery(recoveryRef.status());
|
||||
} finally {
|
||||
recoveryRef.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,154 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.indices.recovery;
|
||||
|
||||
import org.apache.lucene.store.RateLimiter;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.index.store.StoreFileMetaData;
|
||||
import org.elasticsearch.index.translog.Translog;
|
||||
import org.elasticsearch.transport.EmptyTransportResponseHandler;
|
||||
import org.elasticsearch.transport.TransportRequestOptions;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler {
|
||||
private final TransportService transportService;
|
||||
private final long recoveryId;
|
||||
private final ShardId shardId;
|
||||
private final DiscoveryNode targetNode;
|
||||
private final RecoverySettings recoverySettings;
|
||||
|
||||
private final TransportRequestOptions translogOpsRequestOptions;
|
||||
private final TransportRequestOptions fileChunkRequestOptions;
|
||||
|
||||
private final AtomicLong bytesSinceLastPause = new AtomicLong();
|
||||
|
||||
private final Consumer<Long> onSourceThrottle;
|
||||
|
||||
public RemoteRecoveryTargetHandler(long recoveryId, ShardId shardId, TransportService transportService, DiscoveryNode targetNode,
|
||||
RecoverySettings recoverySettings, Consumer<Long> onSourceThrottle) {
|
||||
this.transportService = transportService;
|
||||
|
||||
|
||||
this.recoveryId = recoveryId;
|
||||
this.shardId = shardId;
|
||||
this.targetNode = targetNode;
|
||||
this.recoverySettings = recoverySettings;
|
||||
this.onSourceThrottle = onSourceThrottle;
|
||||
this.translogOpsRequestOptions = TransportRequestOptions.builder()
|
||||
.withCompress(true)
|
||||
.withType(TransportRequestOptions.Type.RECOVERY)
|
||||
.withTimeout(recoverySettings.internalActionLongTimeout())
|
||||
.build();
|
||||
this.fileChunkRequestOptions = TransportRequestOptions.builder()
|
||||
.withCompress(false) // lucene files are already compressed and therefore compressing this won't really help much so
|
||||
// we are saving the cpu for other things
|
||||
.withType(TransportRequestOptions.Type.RECOVERY)
|
||||
.withTimeout(recoverySettings.internalActionTimeout())
|
||||
.build();
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void prepareForTranslogOperations(int totalTranslogOps) throws IOException {
|
||||
transportService.submitRequest(targetNode, RecoveryTargetService.Actions.PREPARE_TRANSLOG,
|
||||
new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps),
|
||||
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(),
|
||||
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void finalizeRecovery() {
|
||||
transportService.submitRequest(targetNode, RecoveryTargetService.Actions.FINALIZE,
|
||||
new RecoveryFinalizeRecoveryRequest(recoveryId, shardId),
|
||||
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionLongTimeout()).build(),
|
||||
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) {
|
||||
final RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(
|
||||
recoveryId, shardId, operations, totalTranslogOps);
|
||||
transportService.submitRequest(targetNode, RecoveryTargetService.Actions.TRANSLOG_OPS, translogOperationsRequest,
|
||||
translogOpsRequestOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void receiveFileInfo(List<String> phase1FileNames, List<Long> phase1FileSizes, List<String> phase1ExistingFileNames,
|
||||
List<Long> phase1ExistingFileSizes, int totalTranslogOps) {
|
||||
|
||||
RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(recoveryId, shardId,
|
||||
phase1FileNames, phase1FileSizes, phase1ExistingFileNames, phase1ExistingFileSizes, totalTranslogOps);
|
||||
transportService.submitRequest(targetNode, RecoveryTargetService.Actions.FILES_INFO, recoveryInfoFilesRequest,
|
||||
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(),
|
||||
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException {
|
||||
transportService.submitRequest(targetNode, RecoveryTargetService.Actions.CLEAN_FILES,
|
||||
new RecoveryCleanFilesRequest(recoveryId, shardId, sourceMetaData, totalTranslogOps),
|
||||
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(),
|
||||
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesReference content, boolean
|
||||
lastChunk, int totalTranslogOps) throws IOException {
|
||||
// Pause using the rate limiter, if desired, to throttle the recovery
|
||||
final long throttleTimeInNanos;
|
||||
// always fetch the ratelimiter - it might be updated in real-time on the recovery settings
|
||||
final RateLimiter rl = recoverySettings.rateLimiter();
|
||||
if (rl != null) {
|
||||
long bytes = bytesSinceLastPause.addAndGet(content.length());
|
||||
if (bytes > rl.getMinPauseCheckBytes()) {
|
||||
// Time to pause
|
||||
bytesSinceLastPause.addAndGet(-bytes);
|
||||
try {
|
||||
throttleTimeInNanos = rl.pause(bytes);
|
||||
onSourceThrottle.accept(throttleTimeInNanos);
|
||||
} catch (IOException e) {
|
||||
throw new ElasticsearchException("failed to pause recovery", e);
|
||||
}
|
||||
} else {
|
||||
throttleTimeInNanos = 0;
|
||||
}
|
||||
} else {
|
||||
throttleTimeInNanos = 0;
|
||||
}
|
||||
|
||||
transportService.submitRequest(targetNode, RecoveryTargetService.Actions.FILE_CHUNK,
|
||||
new RecoveryFileChunkRequest(recoveryId, shardId, fileMetaData, position, content, lastChunk,
|
||||
totalTranslogOps,
|
||||
/* we send totalOperations with every request since we collect stats on the target and that way we can
 * see how many translog ops we accumulate while copying files across the network. A future optimization
 * would be to restart the file copy (new deltas) if too many translog ops pile up.
 */
|
||||
throttleTimeInNanos), fileChunkRequestOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
|
||||
}
|
||||
}
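RemoteRecoveryTargetHandler reports source-side throttle time through the injected Consumer<Long> instead of reaching into shard statistics itself. A hedged wiring sketch (the RemoteTargetWiring class is illustrative and the actual call site in the recovery source may differ; it only reuses calls that appear in this diff):

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.transport.TransportService;

// Illustrative wiring only: throttle time observed while sending to the target
// is fed back into the local shard's recovery stats.
final class RemoteTargetWiring {
    static RecoveryTargetHandler wire(long recoveryId, ShardId shardId, TransportService transportService,
                                      DiscoveryNode targetNode, RecoverySettings recoverySettings, IndexShard shard) {
        return new RemoteRecoveryTargetHandler(recoveryId, shardId, transportService, targetNode, recoverySettings,
                throttleTimeInNanos -> shard.recoveryStats().addThrottleTime(throttleTimeInNanos));
    }
}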
|
|
@@ -22,7 +22,6 @@ package org.elasticsearch.indices.recovery;
|
|||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.translog.Translog;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
|
@@ -35,15 +34,16 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
|
|||
private final IndexShard shard;
|
||||
private final StartRecoveryRequest request;
|
||||
|
||||
public SharedFSRecoverySourceHandler(IndexShard shard, StartRecoveryRequest request, RecoverySettings recoverySettings, TransportService transportService, ESLogger logger) {
|
||||
super(shard, request, recoverySettings, transportService, logger);
|
||||
public SharedFSRecoverySourceHandler(IndexShard shard, RecoveryTargetHandler recoveryTarget, StartRecoveryRequest request, ESLogger
|
||||
logger) {
|
||||
super(shard, recoveryTarget, request, -1, logger);
|
||||
this.shard = shard;
|
||||
this.request = request;
|
||||
}
|
||||
|
||||
@Override
|
||||
public RecoveryResponse recoverToTarget() {
|
||||
boolean engineClosed = false;
|
||||
public RecoveryResponse recoverToTarget() throws IOException {
|
||||
boolean engineClosed = false;
|
||||
try {
|
||||
logger.trace("{} recovery [phase1] to {}: skipping phase 1 for shared filesystem", request.shardId(), request.targetNode());
|
||||
if (isPrimaryRelocation()) {
|
||||
|
@@ -83,5 +83,4 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
|
|||
shard.shardId(), request.targetNode());
|
||||
return 0;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -54,7 +54,7 @@ import org.elasticsearch.index.search.stats.StatsGroupsParseElement;
|
|||
import org.elasticsearch.index.shard.IndexEventListener;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
|
||||
import org.elasticsearch.indices.IndicesRequestCache;
|
||||
import org.elasticsearch.script.ExecutableScript;
|
||||
import org.elasticsearch.script.ScriptContext;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
|
@@ -136,8 +136,6 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
|
|||
|
||||
private final FetchPhase fetchPhase;
|
||||
|
||||
private final IndicesRequestCache indicesQueryCache;
|
||||
|
||||
private final long defaultKeepAlive;
|
||||
|
||||
private volatile TimeValue defaultSearchTimeout;
|
||||
|
@@ -169,7 +167,6 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
|
|||
this.dfsPhase = dfsPhase;
|
||||
this.queryPhase = queryPhase;
|
||||
this.fetchPhase = fetchPhase;
|
||||
this.indicesQueryCache = indicesService.getIndicesRequestCache();
|
||||
|
||||
TimeValue keepAliveInterval = KEEPALIVE_INTERVAL_SETTING.get(settings);
|
||||
this.defaultKeepAlive = DEFAULT_KEEPALIVE_SETTING.get(settings).millis();
|
||||
|
@@ -258,9 +255,9 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
|
|||
*/
|
||||
private void loadOrExecuteQueryPhase(final ShardSearchRequest request, final SearchContext context,
|
||||
final QueryPhase queryPhase) throws Exception {
|
||||
final boolean canCache = indicesQueryCache.canCache(request, context);
|
||||
final boolean canCache = indicesService.canCache(request, context);
|
||||
if (canCache) {
|
||||
indicesQueryCache.loadIntoContext(request, context, queryPhase);
|
||||
indicesService.loadIntoContext(request, context, queryPhase);
|
||||
} else {
|
||||
queryPhase.execute(context);
|
||||
}
|
||||
|
|
|
@@ -218,13 +218,13 @@ public class AggregatorParsers {
|
|||
parser.getTokenLocation());
|
||||
} else if (aggFactory != null) {
|
||||
assert pipelineAggregatorFactory == null;
|
||||
if (metaData != null) {
|
||||
if (metaData != null) {
|
||||
aggFactory.setMetaData(metaData);
|
||||
}
|
||||
}
|
||||
|
||||
if (subFactories != null) {
|
||||
if (subFactories != null) {
|
||||
aggFactory.subAggregations(subFactories);
|
||||
}
|
||||
}
|
||||
|
||||
factories.addAggregator(aggFactory);
|
||||
} else {
|
||||
|
@@ -234,6 +234,9 @@ public class AggregatorParsers {
|
|||
"Aggregation [" + aggregationName + "] cannot define sub-aggregations",
|
||||
parser.getTokenLocation());
|
||||
}
|
||||
if (metaData != null) {
|
||||
pipelineAggregatorFactory.setMetaData(metaData);
|
||||
}
|
||||
factories.addPipelineAggregator(pipelineAggregatorFactory);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -20,7 +20,7 @@ package org.elasticsearch.search.aggregations.bucket.geogrid;
|
|||
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.index.SortedNumericDocValues;
|
||||
import org.apache.lucene.util.GeoHashUtils;
|
||||
import org.apache.lucene.spatial.util.GeoHashUtils;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.geo.GeoPoint;
|
||||
|
@@ -150,10 +150,10 @@ public class GeoHashGridParser extends GeoPointValuesSourceParser {
|
|||
if (shardSize < -1) {
|
||||
throw new IllegalArgumentException(
|
||||
"[shardSize] must be greater than or equal to 0. Found [" + shardSize + "] in [" + name + "]");
|
||||
}
|
||||
}
|
||||
this.shardSize = shardSize;
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
public int shardSize() {
|
||||
return shardSize;
|
||||
|
@@ -164,23 +164,23 @@ public class GeoHashGridParser extends GeoPointValuesSourceParser {
|
|||
ValuesSourceConfig<ValuesSource.GeoPoint> config, AggregatorFactory<?> parent, Builder subFactoriesBuilder)
|
||||
throws IOException {
|
||||
int shardSize = this.shardSize;
|
||||
if (shardSize == 0) {
|
||||
shardSize = Integer.MAX_VALUE;
|
||||
}
|
||||
if (shardSize == 0) {
|
||||
shardSize = Integer.MAX_VALUE;
|
||||
}
|
||||
|
||||
int requiredSize = this.requiredSize;
|
||||
if (requiredSize == 0) {
|
||||
requiredSize = Integer.MAX_VALUE;
|
||||
}
|
||||
if (requiredSize == 0) {
|
||||
requiredSize = Integer.MAX_VALUE;
|
||||
}
|
||||
|
||||
if (shardSize < 0) {
|
||||
if (shardSize < 0) {
|
||||
// Use default heuristic to avoid any wrong-ranking caused by distributed counting
|
||||
shardSize = BucketUtils.suggestShardSideQueueSize(requiredSize, context.searchContext().numberOfShards());
|
||||
}
|
||||
}
|
||||
|
||||
if (shardSize < requiredSize) {
|
||||
shardSize = requiredSize;
|
||||
}
|
||||
if (shardSize < requiredSize) {
|
||||
shardSize = requiredSize;
|
||||
}
|
||||
return new GeoHashGridAggregatorFactory(name, type, config, precision, requiredSize, shardSize, context, parent,
|
||||
subFactoriesBuilder, metaData);
|
||||
}
|
||||
|
@@ -201,7 +201,7 @@ public class GeoHashGridParser extends GeoPointValuesSourceParser {
|
|||
out.writeVInt(precision);
|
||||
out.writeVInt(requiredSize);
|
||||
out.writeVInt(shardSize);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
|
@@ -222,7 +222,7 @@ public class GeoHashGridParser extends GeoPointValuesSourceParser {
|
|||
}
|
||||
if (shardSize != other.shardSize) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@@ -288,4 +288,4 @@ public class GeoHashGridParser extends GeoPointValuesSourceParser {
|
|||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -18,7 +18,7 @@
|
|||
*/
|
||||
package org.elasticsearch.search.aggregations.bucket.geogrid;
|
||||
|
||||
import org.apache.lucene.util.GeoHashUtils;
|
||||
import org.apache.lucene.spatial.util.GeoHashUtils;
|
||||
import org.apache.lucene.util.PriorityQueue;
|
||||
import org.elasticsearch.common.geo.GeoPoint;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
|
|
@@ -20,7 +20,7 @@
|
|||
package org.elasticsearch.search.aggregations.metrics.geocentroid;
|
||||
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.util.GeoUtils;
|
||||
import org.apache.lucene.spatial.util.GeoEncodingUtils;
|
||||
import org.elasticsearch.common.geo.GeoPoint;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
@@ -100,7 +100,7 @@ public final class GeoCentroidAggregator extends MetricsAggregator {
|
|||
pt[0] = pt[0] + (value.getLon() - pt[0]) / ++prevCounts;
|
||||
pt[1] = pt[1] + (value.getLat() - pt[1]) / prevCounts;
|
||||
}
|
||||
centroids.set(bucket, GeoUtils.mortonHash(pt[0], pt[1]));
|
||||
centroids.set(bucket, GeoEncodingUtils.mortonHash(pt[0], pt[1]));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
|
|
@@ -19,7 +19,7 @@
|
|||
|
||||
package org.elasticsearch.search.aggregations.metrics.geocentroid;
|
||||
|
||||
import org.apache.lucene.util.GeoUtils;
|
||||
import org.apache.lucene.spatial.util.GeoEncodingUtils;
|
||||
import org.elasticsearch.common.geo.GeoPoint;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
@@ -140,7 +140,7 @@ public class InternalGeoCentroid extends InternalMetricsAggregation implements G
|
|||
out.writeVLong(count);
|
||||
if (centroid != null) {
|
||||
out.writeBoolean(true);
|
||||
out.writeLong(GeoUtils.mortonHash(centroid.lon(), centroid.lat()));
|
||||
out.writeLong(GeoEncodingUtils.mortonHash(centroid.lon(), centroid.lat()));
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
|
|
|
@@ -18,7 +18,7 @@
|
|||
*/
|
||||
package org.elasticsearch.search.aggregations.support.format;
|
||||
|
||||
import org.apache.lucene.util.GeoHashUtils;
|
||||
import org.apache.lucene.spatial.util.GeoHashUtils;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Streamable;
|
||||
|
|
|
@@ -22,7 +22,6 @@ package org.elasticsearch.search.suggest.completion.context;
|
|||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.index.DocValuesType;
|
||||
import org.apache.lucene.index.IndexableField;
|
||||
import org.apache.lucene.util.GeoHashUtils;
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.common.geo.GeoPoint;
|
||||
import org.elasticsearch.common.geo.GeoUtils;
|
||||
|
@@ -44,6 +43,9 @@ import java.util.Map;
|
|||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.apache.lucene.spatial.util.GeoHashUtils.addNeighbors;
|
||||
import static org.apache.lucene.spatial.util.GeoHashUtils.stringEncode;
|
||||
|
||||
/**
|
||||
* A {@link ContextMapping} that uses a geo location/area as a
|
||||
* criteria.
|
||||
|
@ -150,7 +152,7 @@ public class GeoContextMapping extends ContextMapping {
|
|||
if (parser.nextToken() == Token.VALUE_NUMBER) {
|
||||
double lat = parser.doubleValue();
|
||||
if (parser.nextToken() == Token.END_ARRAY) {
|
||||
contexts.add(GeoHashUtils.stringEncode(lon, lat, precision));
|
||||
contexts.add(stringEncode(lon, lat, precision));
|
||||
} else {
|
||||
throw new ElasticsearchParseException("only two values [lon, lat] expected");
|
||||
}
|
||||
|
@ -160,7 +162,7 @@ public class GeoContextMapping extends ContextMapping {
|
|||
} else {
|
||||
while (token != Token.END_ARRAY) {
|
||||
GeoPoint point = GeoUtils.parseGeoPoint(parser);
|
||||
contexts.add(GeoHashUtils.stringEncode(point.getLon(), point.getLat(), precision));
|
||||
contexts.add(stringEncode(point.getLon(), point.getLat(), precision));
|
||||
token = parser.nextToken();
|
||||
}
|
||||
}
|
||||
|
@ -171,7 +173,7 @@ public class GeoContextMapping extends ContextMapping {
|
|||
} else {
|
||||
// or a single location
|
||||
GeoPoint point = GeoUtils.parseGeoPoint(parser);
|
||||
contexts.add(GeoHashUtils.stringEncode(point.getLon(), point.getLat(), precision));
|
||||
contexts.add(stringEncode(point.getLon(), point.getLat(), precision));
|
||||
}
|
||||
return contexts;
|
||||
}
|
||||
|
@ -194,7 +196,7 @@ public class GeoContextMapping extends ContextMapping {
|
|||
// we write doc values fields differently: one field for all values, so we need to only care about indexed fields
|
||||
if (lonField.fieldType().docValuesType() == DocValuesType.NONE) {
|
||||
spare.reset(latField.numericValue().doubleValue(), lonField.numericValue().doubleValue());
|
||||
geohashes.add(GeoHashUtils.stringEncode(spare.getLon(), spare.getLat(), precision));
|
||||
geohashes.add(stringEncode(spare.getLon(), spare.getLat(), precision));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -261,16 +263,16 @@ public class GeoContextMapping extends ContextMapping {
|
|||
}
|
||||
GeoPoint point = queryContext.getGeoPoint();
|
||||
final Collection<String> locations = new HashSet<>();
|
||||
String geoHash = GeoHashUtils.stringEncode(point.getLon(), point.getLat(), minPrecision);
|
||||
String geoHash = stringEncode(point.getLon(), point.getLat(), minPrecision);
|
||||
locations.add(geoHash);
|
||||
if (queryContext.getNeighbours().isEmpty() && geoHash.length() == this.precision) {
|
||||
GeoHashUtils.addNeighbors(geoHash, locations);
|
||||
addNeighbors(geoHash, locations);
|
||||
} else if (queryContext.getNeighbours().isEmpty() == false) {
|
||||
for (Integer neighbourPrecision : queryContext.getNeighbours()) {
|
||||
if (neighbourPrecision < geoHash.length()) {
|
||||
String truncatedGeoHash = geoHash.substring(0, neighbourPrecision);
|
||||
locations.add(truncatedGeoHash);
|
||||
GeoHashUtils.addNeighbors(truncatedGeoHash, locations);
|
||||
addNeighbors(truncatedGeoHash, locations);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.0.jar}" {
|
|||
//// Very special jar permissions:
|
||||
//// These are dangerous permissions that we don't want to grant to everything.
|
||||
|
||||
grant codeBase "${codebase.lucene-core-5.5.0-snapshot-4de5f1d.jar}" {
|
||||
grant codeBase "${codebase.lucene-core-5.5.0-snapshot-850c6c2.jar}" {
|
||||
// needed to allow MMapDirectory's "unmap hack" (die unmap hack, die)
|
||||
permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";
|
||||
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
|
||||
|
|
|
@ -31,7 +31,7 @@ grant codeBase "${codebase.securemock-1.2.jar}" {
|
|||
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
|
||||
};
|
||||
|
||||
grant codeBase "${codebase.lucene-test-framework-5.5.0-snapshot-4de5f1d.jar}" {
|
||||
grant codeBase "${codebase.lucene-test-framework-5.5.0-snapshot-850c6c2.jar}" {
|
||||
// needed by RamUsageTester
|
||||
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
|
||||
};
|
||||
|
|
|
@ -50,10 +50,12 @@ public class IndicesStatsTests extends ESSingleNodeTestCase {
|
|||
.startObject("doc")
|
||||
.startObject("properties")
|
||||
.startObject("foo")
|
||||
.field("type", "string")
|
||||
.field("index", "not_analyzed")
|
||||
.field("type", "keyword")
|
||||
.field("doc_values", true)
|
||||
.field("store", true)
|
||||
.endObject()
|
||||
.startObject("bar")
|
||||
.field("type", "string")
|
||||
.field("term_vector", "with_positions_offsets_payloads")
|
||||
.endObject()
|
||||
.endObject()
|
||||
|
@ -61,7 +63,7 @@ public class IndicesStatsTests extends ESSingleNodeTestCase {
|
|||
.endObject();
|
||||
assertAcked(client().admin().indices().prepareCreate("test").addMapping("doc", mapping));
|
||||
ensureGreen("test");
|
||||
client().prepareIndex("test", "doc", "1").setSource("foo", "bar").get();
|
||||
client().prepareIndex("test", "doc", "1").setSource("foo", "bar", "bar", "baz").get();
|
||||
client().admin().indices().prepareRefresh("test").get();
|
||||
|
||||
IndicesStatsResponse rsp = client().admin().indices().prepareStats("test").get();
|
||||
|
@ -73,7 +75,7 @@ public class IndicesStatsTests extends ESSingleNodeTestCase {
|
|||
assertThat(stats.getDocValuesMemoryInBytes(), greaterThan(0L));
|
||||
|
||||
// now check multiple segments stats are merged together
|
||||
client().prepareIndex("test", "doc", "2").setSource("foo", "bar").get();
|
||||
client().prepareIndex("test", "doc", "2").setSource("foo", "bar", "bar", "baz").get();
|
||||
client().admin().indices().prepareRefresh("test").get();
|
||||
|
||||
rsp = client().admin().indices().prepareStats("test").get();
|
||||
|
|
|
@ -153,7 +153,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
|
|||
"field1", "type=string,index=no", // no tvs
|
||||
"field2", "type=string,index=no,store=true", // no tvs
|
||||
"field3", "type=string,index=no,term_vector=yes", // no tvs
|
||||
"field4", "type=string,index=not_analyzed", // yes tvs
|
||||
"field4", "type=keyword", // yes tvs
|
||||
"field5", "type=string,index=analyzed")); // yes tvs
|
||||
|
||||
ensureYellow();
|
||||
|
|
|
@ -25,7 +25,6 @@ import org.elasticsearch.test.ESTestCase;
|
|||
public class BootstrapSettingsTests extends ESTestCase {
|
||||
|
||||
public void testDefaultSettings() {
|
||||
assertTrue(BootstrapSettings.SECURITY_MANAGER_ENABLED_SETTING.get(Settings.EMPTY));
|
||||
assertTrue(BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(Settings.EMPTY));
|
||||
assertFalse(BootstrapSettings.MLOCKALL_SETTING.get(Settings.EMPTY));
|
||||
assertTrue(BootstrapSettings.SECCOMP_SETTING.get(Settings.EMPTY));
|
||||
|
|
|
@ -91,7 +91,7 @@ public class SimpleClusterStateIT extends ESIntegTestCase {
|
|||
.setOrder(0)
|
||||
.addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
|
||||
.startObject("field1").field("type", "string").field("store", true).endObject()
|
||||
.startObject("field2").field("type", "string").field("store", true).field("index", "not_analyzed").endObject()
|
||||
.startObject("field2").field("type", "keyword").field("store", true).endObject()
|
||||
.endObject().endObject().endObject())
|
||||
.get();
|
||||
|
||||
|
|
|
@ -270,7 +270,7 @@ public class AckIT extends ESIntegTestCase {
|
|||
createIndex("test");
|
||||
ensureGreen();
|
||||
|
||||
assertAcked(client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=string,index=not_analyzed"));
|
||||
assertAcked(client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=keyword"));
|
||||
|
||||
for (Client client : clients()) {
|
||||
assertThat(getLocalClusterState(client).metaData().indices().get("test").mapping("test"), notNullValue());
|
||||
|
@ -281,7 +281,7 @@ public class AckIT extends ESIntegTestCase {
|
|||
createIndex("test");
|
||||
ensureGreen();
|
||||
|
||||
PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=string,index=not_analyzed").setTimeout("0s").get();
|
||||
PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=keyword").setTimeout("0s").get();
|
||||
assertThat(putMappingResponse.isAcknowledged(), equalTo(false));
|
||||
}
|
||||
|
||||
|
|
|
@ -18,13 +18,11 @@
|
|||
*/
|
||||
package org.elasticsearch.common.geo;
|
||||
|
||||
import org.apache.lucene.util.GeoHashUtils;
|
||||
import org.apache.lucene.spatial.util.GeoHashUtils;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Tests for {@link org.apache.lucene.util.GeoHashUtils}
|
||||
* Tests for {@link org.apache.lucene.spatial.util.GeoHashUtils}
|
||||
*/
|
||||
public class GeoHashTests extends ESTestCase {
|
||||
public void testGeohashAsLongRoutines() {
|
||||
|
@ -60,4 +58,4 @@ public class GeoHashTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -127,4 +127,26 @@ public class SettingsModuleTests extends ModuleTestCase {
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
public void testRegisterSettingsFilter() {
|
||||
Settings settings = Settings.builder().put("foo.bar", "false").put("bar.foo", false).put("bar.baz", false).build();
|
||||
SettingsModule module = new SettingsModule(settings);
|
||||
module.registerSetting(Setting.boolSetting("foo.bar", true, false, Setting.Scope.CLUSTER));
|
||||
module.registerSetting(Setting.boolSetting("bar.foo", true, false, Setting.Scope.CLUSTER));
|
||||
module.registerSetting(Setting.boolSetting("bar.baz", true, false, Setting.Scope.CLUSTER));
|
||||
|
||||
module.registerSettingsFilter("foo.*");
|
||||
module.registerSettingsFilterIfMissing("bar.foo");
|
||||
try {
|
||||
module.registerSettingsFilter("bar.foo");
|
||||
fail();
|
||||
} catch (IllegalArgumentException ex) {
|
||||
assertEquals("filter [bar.foo] has already been registered", ex.getMessage());
|
||||
}
|
||||
assertInstanceBinding(module, Settings.class, (s) -> s == settings);
|
||||
assertInstanceBinding(module, SettingsFilter.class, (s) -> s.filter(settings).getAsMap().size() == 1);
|
||||
assertInstanceBinding(module, SettingsFilter.class, (s) -> s.filter(settings).getAsMap().containsKey("bar.baz"));
|
||||
assertInstanceBinding(module, SettingsFilter.class, (s) -> s.filter(settings).getAsMap().get("bar.baz").equals("false"));
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,10 +18,12 @@
|
|||
*/
|
||||
package org.elasticsearch.common.util;
|
||||
|
||||
import org.elasticsearch.common.util.CancellableThreads.IOInterruptable;
|
||||
import org.elasticsearch.common.util.CancellableThreads.Interruptable;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.hamcrest.Matchers;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
|
||||
public class CancellableThreadsTests extends ESTestCase {
|
||||
|
@ -31,6 +33,13 @@ public class CancellableThreadsTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public static class IOCustomException extends IOException {
|
||||
public IOCustomException(String msg) {
|
||||
super(msg);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private class TestPlan {
|
||||
public final int id;
|
||||
public final boolean busySpin;
|
||||
|
@ -38,6 +47,8 @@ public class CancellableThreadsTests extends ESTestCase {
|
|||
public final boolean exitBeforeCancel;
|
||||
public final boolean exceptAfterCancel;
|
||||
public final boolean presetInterrupt;
|
||||
public final boolean ioOp;
|
||||
private final boolean ioException;
|
||||
|
||||
private TestPlan(int id) {
|
||||
this.id = id;
|
||||
|
@ -46,9 +57,77 @@ public class CancellableThreadsTests extends ESTestCase {
|
|||
this.exitBeforeCancel = randomBoolean();
|
||||
this.exceptAfterCancel = randomBoolean();
|
||||
this.presetInterrupt = randomBoolean();
|
||||
this.ioOp = randomBoolean();
|
||||
this.ioException = ioOp && randomBoolean();
|
||||
}
|
||||
}
|
||||
|
||||
static class TestRunnable implements Interruptable {
|
||||
final TestPlan plan;
|
||||
final CountDownLatch readyForCancel;
|
||||
|
||||
TestRunnable(TestPlan plan, CountDownLatch readyForCancel) {
|
||||
this.plan = plan;
|
||||
this.readyForCancel = readyForCancel;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() throws InterruptedException {
|
||||
assertFalse("interrupt thread should have been clear", Thread.currentThread().isInterrupted());
|
||||
if (plan.exceptBeforeCancel) {
|
||||
throw new CustomException("thread [" + plan.id + "] pre-cancel exception");
|
||||
} else if (plan.exitBeforeCancel) {
|
||||
return;
|
||||
}
|
||||
readyForCancel.countDown();
|
||||
try {
|
||||
if (plan.busySpin) {
|
||||
while (!Thread.currentThread().isInterrupted()) {
|
||||
}
|
||||
} else {
|
||||
Thread.sleep(50000);
|
||||
}
|
||||
} finally {
|
||||
if (plan.exceptAfterCancel) {
|
||||
throw new CustomException("thread [" + plan.id + "] post-cancel exception");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static class TestIORunnable implements IOInterruptable {
|
||||
final TestPlan plan;
|
||||
final CountDownLatch readyForCancel;
|
||||
|
||||
TestIORunnable(TestPlan plan, CountDownLatch readyForCancel) {
|
||||
this.plan = plan;
|
||||
this.readyForCancel = readyForCancel;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() throws IOException, InterruptedException {
|
||||
assertFalse("interrupt thread should have been clear", Thread.currentThread().isInterrupted());
|
||||
if (plan.exceptBeforeCancel) {
|
||||
throw new IOCustomException("thread [" + plan.id + "] pre-cancel exception");
|
||||
} else if (plan.exitBeforeCancel) {
|
||||
return;
|
||||
}
|
||||
readyForCancel.countDown();
|
||||
try {
|
||||
if (plan.busySpin) {
|
||||
while (!Thread.currentThread().isInterrupted()) {
|
||||
}
|
||||
} else {
|
||||
Thread.sleep(50000);
|
||||
}
|
||||
} finally {
|
||||
if (plan.exceptAfterCancel) {
|
||||
throw new IOCustomException("thread [" + plan.id + "] post-cancel exception");
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
public void testCancellableThreads() throws InterruptedException {
|
||||
Thread[] threads = new Thread[randomIntBetween(3, 10)];
|
||||
|
@ -60,47 +139,28 @@ public class CancellableThreadsTests extends ESTestCase {
|
|||
for (int i = 0; i < threads.length; i++) {
|
||||
final TestPlan plan = new TestPlan(i);
|
||||
plans[i] = plan;
|
||||
threads[i] = new Thread(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
if (plan.presetInterrupt) {
|
||||
Thread.currentThread().interrupt();
|
||||
threads[i] = new Thread(() -> {
|
||||
try {
|
||||
if (plan.presetInterrupt) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
if (plan.ioOp) {
|
||||
if (plan.ioException) {
|
||||
cancellableThreads.executeIO(new TestIORunnable(plan, readyForCancel));
|
||||
} else {
|
||||
cancellableThreads.executeIO(new TestRunnable(plan, readyForCancel));
|
||||
}
|
||||
cancellableThreads.execute(new Interruptable() {
|
||||
@Override
|
||||
public void run() throws InterruptedException {
|
||||
assertFalse("interrupt thread should have been clear", Thread.currentThread().isInterrupted());
|
||||
if (plan.exceptBeforeCancel) {
|
||||
throw new CustomException("thread [" + plan.id + "] pre-cancel exception");
|
||||
} else if (plan.exitBeforeCancel) {
|
||||
return;
|
||||
}
|
||||
readyForCancel.countDown();
|
||||
try {
|
||||
if (plan.busySpin) {
|
||||
while (!Thread.currentThread().isInterrupted()) {
|
||||
}
|
||||
} else {
|
||||
Thread.sleep(50000);
|
||||
}
|
||||
} finally {
|
||||
if (plan.exceptAfterCancel) {
|
||||
throw new CustomException("thread [" + plan.id + "] post-cancel exception");
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
} catch (Throwable t) {
|
||||
throwables[plan.id] = t;
|
||||
} else {
|
||||
cancellableThreads.execute(new TestRunnable(plan, readyForCancel));
|
||||
}
|
||||
if (plan.exceptBeforeCancel || plan.exitBeforeCancel) {
|
||||
// we have to mark we're ready now (actually done).
|
||||
readyForCancel.countDown();
|
||||
}
|
||||
interrupted[plan.id] = Thread.currentThread().isInterrupted();
|
||||
|
||||
} catch (Throwable t) {
|
||||
throwables[plan.id] = t;
|
||||
}
|
||||
if (plan.exceptBeforeCancel || plan.exitBeforeCancel) {
|
||||
// we have to mark we're ready now (actually done).
|
||||
readyForCancel.countDown();
|
||||
}
|
||||
interrupted[plan.id] = Thread.currentThread().isInterrupted();
|
||||
});
|
||||
threads[i].setDaemon(true);
|
||||
threads[i].start();
|
||||
|
@ -114,8 +174,9 @@ public class CancellableThreadsTests extends ESTestCase {
|
|||
}
|
||||
for (int i = 0; i < threads.length; i++) {
|
||||
TestPlan plan = plans[i];
|
||||
final Class<?> exceptionClass = plan.ioException ? IOCustomException.class : CustomException.class;
|
||||
if (plan.exceptBeforeCancel) {
|
||||
assertThat(throwables[i], Matchers.instanceOf(CustomException.class));
|
||||
assertThat(throwables[i], Matchers.instanceOf(exceptionClass));
|
||||
} else if (plan.exitBeforeCancel) {
|
||||
assertNull(throwables[i]);
|
||||
} else {
|
||||
|
@ -124,7 +185,7 @@ public class CancellableThreadsTests extends ESTestCase {
|
|||
if (plan.exceptAfterCancel) {
|
||||
assertThat(throwables[i].getSuppressed(),
|
||||
Matchers.arrayContaining(
|
||||
Matchers.instanceOf(CustomException.class)
|
||||
Matchers.instanceOf(exceptionClass)
|
||||
));
|
||||
} else {
|
||||
assertThat(throwables[i].getSuppressed(), Matchers.emptyArray());
|
||||
|
|
|
@ -301,8 +301,8 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
|
|||
.setTemplate("te*")
|
||||
.setOrder(0)
|
||||
.addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
|
||||
.startObject("field1").field("type", "string").field("store", "yes").endObject()
|
||||
.startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
|
||||
.startObject("field1").field("type", "string").field("store", true).endObject()
|
||||
.startObject("field2").field("type", "keyword").field("store", true).endObject()
|
||||
.endObject().endObject().endObject())
|
||||
.execute().actionGet();
|
||||
client.admin().indices().prepareAliases().addAlias("test", "test_alias", QueryBuilders.termQuery("field", "value")).execute().actionGet();
|
||||
|
|
|
@ -55,9 +55,8 @@ import org.elasticsearch.index.store.IndexStoreConfig;
|
|||
import org.elasticsearch.indices.IndicesModule;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
|
||||
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
|
||||
import org.elasticsearch.indices.IndicesQueryCache;
|
||||
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
|
||||
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCacheListener;
|
||||
import org.elasticsearch.indices.mapper.MapperRegistry;
|
||||
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
|
||||
import org.elasticsearch.script.ScriptContextRegistry;
|
||||
|
|
|
@ -41,7 +41,7 @@ import org.elasticsearch.index.shard.IndexShard;
|
|||
import org.elasticsearch.index.shard.ShadowIndexShard;
|
||||
import org.elasticsearch.index.translog.TranslogStats;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.recovery.RecoveryTarget;
|
||||
import org.elasticsearch.indices.recovery.RecoveryTargetService;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.search.sort.SortOrder;
|
||||
|
@ -485,7 +485,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
|
|||
public void sendRequest(DiscoveryNode node, long requestId, String action,
|
||||
TransportRequest request, TransportRequestOptions options)
|
||||
throws IOException, TransportException {
|
||||
if (keepFailing.get() && action.equals(RecoveryTarget.Actions.TRANSLOG_OPS)) {
|
||||
if (keepFailing.get() && action.equals(RecoveryTargetService.Actions.TRANSLOG_OPS)) {
|
||||
logger.info("--> failing translog ops");
|
||||
throw new ElasticsearchException("failing on purpose");
|
||||
}
|
||||
|
@ -643,7 +643,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
|
|||
.put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
|
||||
.build();
|
||||
|
||||
prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string,index=not_analyzed").get();
|
||||
prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=keyword").get();
|
||||
ensureGreen(IDX);
|
||||
|
||||
client().prepareIndex(IDX, "doc", "1").setSource("foo", "foo").get();
|
||||
|
@ -725,7 +725,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
|
|||
.build();
|
||||
|
||||
// only one node, so all primaries will end up on node1
|
||||
prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string,index=not_analyzed").get();
|
||||
prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=keyword").get();
|
||||
ensureGreen(IDX);
|
||||
|
||||
// Index some documents
|
||||
|
|
|
@ -73,11 +73,11 @@ import org.elasticsearch.index.mapper.ContentPath;
|
|||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.DocumentMapperForType;
|
||||
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
|
||||
import org.elasticsearch.index.mapper.MapperBuilders;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.Mapping;
|
||||
import org.elasticsearch.index.mapper.MetadataFieldMapper;
|
||||
import org.elasticsearch.index.mapper.ParseContext.Document;
|
||||
import org.elasticsearch.index.mapper.core.StringFieldMapper;
|
||||
import org.elasticsearch.index.mapper.ParsedDocument;
|
||||
import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
|
||||
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
|
||||
|
@ -1687,7 +1687,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
|
||||
private Mapping dynamicUpdate() {
|
||||
BuilderContext context = new BuilderContext(Settings.EMPTY, new ContentPath());
|
||||
final RootObjectMapper root = MapperBuilders.rootObject("some_type").build(context);
|
||||
final RootObjectMapper root = new RootObjectMapper.Builder("some_type").build(context);
|
||||
return new Mapping(Version.CURRENT, root, new MetadataFieldMapper[0], emptyMap());
|
||||
}
|
||||
|
||||
|
|
|
@ -42,7 +42,14 @@ import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
|
|||
import org.elasticsearch.index.mapper.ContentPath;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
|
||||
import org.elasticsearch.index.mapper.MapperBuilders;
|
||||
import org.elasticsearch.index.mapper.core.BinaryFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.ByteFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.FloatFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.IntegerFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.LongFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.ShortFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.StringFieldMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
|
||||
import org.elasticsearch.index.mapper.geo.GeoPointFieldMapperLegacy;
|
||||
|
@ -97,19 +104,19 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase {
|
|||
final MappedFieldType fieldType;
|
||||
final BuilderContext context = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1));
|
||||
if (type.getType().equals("string")) {
|
||||
fieldType = MapperBuilders.stringField(fieldName).tokenized(false).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
fieldType = new StringFieldMapper.Builder(fieldName).tokenized(false).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
} else if (type.getType().equals("float")) {
|
||||
fieldType = MapperBuilders.floatField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
fieldType = new FloatFieldMapper.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
} else if (type.getType().equals("double")) {
|
||||
fieldType = MapperBuilders.doubleField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
fieldType = new DoubleFieldMapper.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
} else if (type.getType().equals("long")) {
|
||||
fieldType = MapperBuilders.longField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
fieldType = new LongFieldMapper.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
} else if (type.getType().equals("int")) {
|
||||
fieldType = MapperBuilders.integerField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
fieldType = new IntegerFieldMapper.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
} else if (type.getType().equals("short")) {
|
||||
fieldType = MapperBuilders.shortField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
fieldType = new ShortFieldMapper.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
} else if (type.getType().equals("byte")) {
|
||||
fieldType = MapperBuilders.byteField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
fieldType = new ByteFieldMapper.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
} else if (type.getType().equals("geo_point")) {
|
||||
if (indexService.getIndexSettings().getIndexVersionCreated().before(Version.V_2_2_0)) {
|
||||
fieldType = new GeoPointFieldMapperLegacy.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
|
@ -119,7 +126,7 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase {
|
|||
} else if (type.getType().equals("_parent")) {
|
||||
fieldType = new ParentFieldMapper.Builder("_type").type(fieldName).build(context).fieldType();
|
||||
} else if (type.getType().equals("binary")) {
|
||||
fieldType = MapperBuilders.binaryField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
fieldType = new BinaryFieldMapper.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
|
||||
} else {
|
||||
throw new UnsupportedOperationException(type.getType());
|
||||
}
|
||||
|
|
|
@ -20,9 +20,9 @@ package org.elasticsearch.index.fielddata;
|
|||
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.GeoPointField;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.util.GeoUtils;
|
||||
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
|
||||
import org.apache.lucene.spatial.util.GeoUtils;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.geo.GeoPoint;
|
||||
|
||||
|
@ -45,7 +45,27 @@ public abstract class AbstractGeoFieldDataTestCase extends AbstractFieldDataImpl
|
|||
if (indexService.getIndexSettings().getIndexVersionCreated().before(Version.V_2_2_0)) {
|
||||
return new StringField(fieldName, point.lat()+","+point.lon(), store);
|
||||
}
|
||||
return new GeoPointField(fieldName, point.lon(), point.lat(), store);
|
||||
final GeoPointField.TermEncoding termEncoding;
|
||||
termEncoding = indexService.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_2_3_0) ?
|
||||
GeoPointField.TermEncoding.PREFIX : GeoPointField.TermEncoding.NUMERIC;
|
||||
return new GeoPointField(fieldName, point.lon(), point.lat(), termEncoding, store);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean hasDocValues() {
|
||||
// prior to 22 docValues were not required
|
||||
if (indexService.getIndexSettings().getIndexVersionCreated().before(Version.V_2_2_0)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected long minRamBytesUsed() {
|
||||
if (indexService.getIndexSettings().getIndexVersionCreated().before(Version.V_2_2_0)) {
|
||||
return super.minRamBytesUsed();
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -41,7 +41,6 @@ import org.elasticsearch.index.fielddata.plain.SortedSetDVOrdinalsIndexFieldData
|
|||
import org.elasticsearch.index.mapper.ContentPath;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
|
||||
import org.elasticsearch.index.mapper.MapperBuilders;
|
||||
import org.elasticsearch.index.mapper.core.BooleanFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.ByteFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
|
||||
|
@ -104,7 +103,7 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase {
|
|||
final IndexService indexService = createIndex("test");
|
||||
final IndexFieldDataService ifdService = indexService.fieldData();
|
||||
final BuilderContext ctx = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1));
|
||||
final MappedFieldType mapper1 = MapperBuilders.stringField("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "paged_bytes").build()).build(ctx).fieldType();
|
||||
final MappedFieldType mapper1 = new StringFieldMapper.Builder("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "paged_bytes").build()).build(ctx).fieldType();
|
||||
final IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
|
||||
Document doc = new Document();
|
||||
doc.add(new StringField("s", "thisisastring", Store.NO));
|
||||
|
@ -121,7 +120,7 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase {
|
|||
// write new segment
|
||||
writer.addDocument(doc);
|
||||
final IndexReader reader2 = DirectoryReader.open(writer, true);
|
||||
final MappedFieldType mapper2 = MapperBuilders.stringField("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "doc_values").build()).build(ctx).fieldType();
|
||||
final MappedFieldType mapper2 = new StringFieldMapper.Builder("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "doc_values").build()).build(ctx).fieldType();
|
||||
ifd = ifdService.getForField(mapper2);
|
||||
assertThat(ifd, instanceOf(SortedSetDVOrdinalsIndexFieldData.class));
|
||||
reader1.close();
|
||||
|
@ -138,7 +137,7 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase {
|
|||
indicesService.getIndicesFieldDataCache(), indicesService.getCircuitBreakerService(), indexService.mapperService());
|
||||
|
||||
final BuilderContext ctx = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1));
|
||||
final MappedFieldType mapper1 = MapperBuilders.stringField("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "paged_bytes").build()).build(ctx).fieldType();
|
||||
final MappedFieldType mapper1 = new StringFieldMapper.Builder("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "paged_bytes").build()).build(ctx).fieldType();
|
||||
final IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
|
||||
Document doc = new Document();
|
||||
doc.add(new StringField("s", "thisisastring", Store.NO));
|
||||
|
|
|
@ -473,8 +473,7 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
|
|||
.field("type", "string")
|
||||
.startObject("fields")
|
||||
.startObject("raw")
|
||||
.field("type", "string")
|
||||
.field("index", "not_analyzed")
|
||||
.field("type", "keyword")
|
||||
.endObject()
|
||||
.endObject()
|
||||
.endObject()
|
||||
|
|
|
@ -96,7 +96,7 @@ public class CopyToMapperIntegrationIT extends ESIntegTestCase {
|
|||
.startObject().startObject("template_raw")
|
||||
.field("match", "*_raw")
|
||||
.field("match_mapping_type", "string")
|
||||
.startObject("mapping").field("type", "string").field("index", "not_analyzed").endObject()
|
||||
.startObject("mapping").field("type", "keyword").endObject()
|
||||
.endObject().endObject()
|
||||
|
||||
.startObject().startObject("template_all")
|
||||
|
|
|
@ -0,0 +1,203 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.mapper.core;
|
||||
|
||||
import org.apache.lucene.index.DocValuesType;
|
||||
import org.apache.lucene.index.IndexOptions;
|
||||
import org.apache.lucene.index.IndexableField;
|
||||
import org.apache.lucene.index.IndexableFieldType;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.DocumentMapperParser;
|
||||
import org.elasticsearch.index.mapper.ParsedDocument;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
public class KeywordFieldMapperTests extends ESSingleNodeTestCase {
|
||||
|
||||
IndexService indexService;
|
||||
DocumentMapperParser parser;
|
||||
|
||||
@Before
|
||||
public void before() {
|
||||
indexService = createIndex("test");
|
||||
parser = indexService.mapperService().documentMapperParser();
|
||||
}
|
||||
|
||||
public void testDefaults() throws Exception {
|
||||
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("field").field("type", "keyword").endObject().endObject()
|
||||
.endObject().endObject().string();
|
||||
|
||||
DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
|
||||
|
||||
assertEquals(mapping, mapper.mappingSource().toString());
|
||||
|
||||
ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.field("field", "1234")
|
||||
.endObject()
|
||||
.bytes());
|
||||
|
||||
IndexableField[] fields = doc.rootDoc().getFields("field");
|
||||
assertEquals(2, fields.length);
|
||||
|
||||
assertEquals("1234", fields[0].stringValue());
|
||||
IndexableFieldType fieldType = fields[0].fieldType();
|
||||
assertThat(fieldType.omitNorms(), equalTo(true));
|
||||
assertFalse(fieldType.tokenized());
|
||||
assertFalse(fieldType.stored());
|
||||
assertThat(fieldType.indexOptions(), equalTo(IndexOptions.DOCS));
|
||||
assertThat(fieldType.storeTermVectors(), equalTo(false));
|
||||
assertThat(fieldType.storeTermVectorOffsets(), equalTo(false));
|
||||
assertThat(fieldType.storeTermVectorPositions(), equalTo(false));
|
||||
assertThat(fieldType.storeTermVectorPayloads(), equalTo(false));
|
||||
assertEquals(DocValuesType.NONE, fieldType.docValuesType());
|
||||
|
||||
assertEquals(new BytesRef("1234"), fields[1].binaryValue());
|
||||
fieldType = fields[1].fieldType();
|
||||
assertThat(fieldType.indexOptions(), equalTo(IndexOptions.NONE));
|
||||
assertEquals(DocValuesType.SORTED_SET, fieldType.docValuesType());
|
||||
}
|
||||
|
||||
public void testIgnoreAbove() throws IOException {
|
||||
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("field").field("type", "keyword").field("ignore_above", 5).endObject().endObject()
|
||||
.endObject().endObject().string();
|
||||
|
||||
DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
|
||||
|
||||
assertEquals(mapping, mapper.mappingSource().toString());
|
||||
|
||||
ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.field("field", "elk")
|
||||
.endObject()
|
||||
.bytes());
|
||||
|
||||
IndexableField[] fields = doc.rootDoc().getFields("field");
|
||||
assertEquals(2, fields.length);
|
||||
|
||||
doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.field("field", "elasticsearch")
|
||||
.endObject()
|
||||
.bytes());
|
||||
|
||||
fields = doc.rootDoc().getFields("field");
|
||||
assertEquals(0, fields.length);
|
||||
}
|
||||
|
||||
public void testNullValue() throws IOException {
|
||||
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("field").field("type", "keyword").field("null_value", "uri").endObject().endObject()
|
||||
.endObject().endObject().string();
|
||||
|
||||
DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
|
||||
|
||||
assertEquals(mapping, mapper.mappingSource().toString());
|
||||
|
||||
ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.endObject()
|
||||
.bytes());
|
||||
|
||||
IndexableField[] fields = doc.rootDoc().getFields("field");
|
||||
assertEquals(0, fields.length);
|
||||
|
||||
doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.nullField("field")
|
||||
.endObject()
|
||||
.bytes());
|
||||
|
||||
fields = doc.rootDoc().getFields("field");
|
||||
assertEquals(2, fields.length);
|
||||
assertEquals("uri", fields[0].stringValue());
|
||||
}
|
||||
|
||||
public void testEnableStore() throws IOException {
|
||||
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("field").field("type", "keyword").field("store", true).endObject().endObject()
|
||||
.endObject().endObject().string();
|
||||
|
||||
DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
|
||||
|
||||
assertEquals(mapping, mapper.mappingSource().toString());
|
||||
|
||||
ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.field("field", "1234")
|
||||
.endObject()
|
||||
.bytes());
|
||||
|
||||
IndexableField[] fields = doc.rootDoc().getFields("field");
|
||||
assertEquals(2, fields.length);
|
||||
assertTrue(fields[0].fieldType().stored());
|
||||
}
|
||||
|
||||
public void testDisableIndex() throws IOException {
|
||||
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("field").field("type", "keyword").field("index", false).endObject().endObject()
|
||||
.endObject().endObject().string();
|
||||
|
||||
DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
|
||||
|
||||
assertEquals(mapping, mapper.mappingSource().toString());
|
||||
|
||||
ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.field("field", "1234")
|
||||
.endObject()
|
||||
.bytes());
|
||||
|
||||
IndexableField[] fields = doc.rootDoc().getFields("field");
|
||||
assertEquals(1, fields.length);
|
||||
assertEquals(IndexOptions.NONE, fields[0].fieldType().indexOptions());
|
||||
assertEquals(DocValuesType.SORTED_SET, fields[0].fieldType().docValuesType());
|
||||
}
|
||||
|
||||
public void testDisableDocValues() throws IOException {
|
||||
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("field").field("type", "keyword").field("doc_values", false).endObject().endObject()
|
||||
.endObject().endObject().string();
|
||||
|
||||
DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
|
||||
|
||||
assertEquals(mapping, mapper.mappingSource().toString());
|
||||
|
||||
ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.field("field", "1234")
|
||||
.endObject()
|
||||
.bytes());
|
||||
|
||||
IndexableField[] fields = doc.rootDoc().getFields("field");
|
||||
assertEquals(1, fields.length);
|
||||
assertEquals(DocValuesType.NONE, fields[0].fieldType().docValuesType());
|
||||
}
|
||||
}
|
|
@ -0,0 +1,29 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.index.mapper.core;
|
||||
|
||||
import org.elasticsearch.index.mapper.FieldTypeTestCase;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
|
||||
public class KeywordFieldTypeTests extends FieldTypeTestCase {
|
||||
@Override
|
||||
protected MappedFieldType createDefaultFieldType() {
|
||||
return new KeywordFieldMapper.KeywordFieldType();
|
||||
}
|
||||
}
|
|
@ -48,7 +48,6 @@ import java.util.Iterator;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.index.mapper.MapperBuilders.stringField;
|
||||
import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
|
||||
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
|
||||
|
||||
|
@ -82,7 +81,7 @@ public class ExternalMapper extends FieldMapper {
|
|||
public Builder(String name, String generatedValue, String mapperName) {
|
||||
super(name, new ExternalFieldType(), new ExternalFieldType());
|
||||
this.builder = this;
|
||||
this.stringBuilder = stringField(name).store(false);
|
||||
this.stringBuilder = new StringFieldMapper.Builder(name).store(false);
|
||||
this.generatedValue = generatedValue;
|
||||
this.mapperName = mapperName;
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue