Merge branch 'master' into ingest_plugin_api

commit f1376262fe
@@ -62,6 +62,7 @@ public class RestIntegTestTask extends RandomizedTestingTask {
project.gradle.projectsEvaluated {
NodeInfo node = ClusterFormationTasks.setup(project, this, clusterConfig)
systemProperty('tests.rest.cluster', "${-> node.httpUri()}")
systemProperty('tests.config.dir', "${-> node.confDir}")
// TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
// that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
// both as separate sysprops
@@ -323,7 +323,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]recycler[/\\]Recyclers.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]rounding[/\\]Rounding.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]ByteSizeValue.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]TimeValue.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]BigArrays.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]CancellableThreads.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]CollectionUtils.java" checks="LineLength" />
@@ -89,15 +89,14 @@ public final class ExceptionsHelper {
return result;
}

/**
* @deprecated Don't swallow exceptions, allow them to propagate.
*/
@Deprecated
public static String detailedMessage(Throwable t) {
return detailedMessage(t, false, 0);
}

public static String detailedMessage(Throwable t, boolean newLines, int initialCounter) {
if (t == null) {
return "Unknown";
}
int counter = initialCounter + 1;
if (t.getCause() != null) {
StringBuilder sb = new StringBuilder();
while (t != null) {

@@ -107,21 +106,11 @@ public final class ExceptionsHelper {
sb.append(t.getMessage());
sb.append("]");
}
if (!newLines) {
sb.append("; ");
}
sb.append("; ");
t = t.getCause();
if (t != null) {
if (newLines) {
sb.append("\n");
for (int i = 0; i < counter; i++) {
sb.append("\t");
}
} else {
sb.append("nested: ");
}
sb.append("nested: ");
}
counter++;
}
return sb.toString();
} else {
@@ -64,6 +64,12 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
import org.elasticsearch.action.admin.cluster.state.TransportClusterStateAction;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction;
import org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction;
import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.TransportGetStoredScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction;
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksAction;
import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction;

@@ -151,18 +157,12 @@ import org.elasticsearch.action.get.TransportMultiGetAction;
import org.elasticsearch.action.get.TransportShardMultiGetAction;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.TransportIndexAction;
import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.TransportGetStoredScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction;
import org.elasticsearch.action.ingest.IngestActionFilter;
import org.elasticsearch.action.ingest.IngestProxyActionFilter;
import org.elasticsearch.action.ingest.DeletePipelineAction;
import org.elasticsearch.action.ingest.DeletePipelineTransportAction;
import org.elasticsearch.action.ingest.GetPipelineAction;
import org.elasticsearch.action.ingest.GetPipelineTransportAction;
import org.elasticsearch.action.ingest.IngestActionFilter;
import org.elasticsearch.action.ingest.IngestProxyActionFilter;
import org.elasticsearch.action.ingest.PutPipelineAction;
import org.elasticsearch.action.ingest.PutPipelineTransportAction;
import org.elasticsearch.action.ingest.SimulatePipelineAction;
@@ -189,186 +189,204 @@ import org.elasticsearch.action.termvectors.TransportShardMultiTermsVectorAction
import org.elasticsearch.action.termvectors.TransportTermVectorsAction;
import org.elasticsearch.action.update.TransportUpdateAction;
import org.elasticsearch.action.update.UpdateAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.NamedRegistry;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.MapBinder;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.ActionPlugin.ActionHandler;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static java.util.Collections.unmodifiableList;
import static java.util.Collections.unmodifiableMap;

/**
*
* Builds and binds the generic action map, all {@link TransportAction}s, and {@link ActionFilters}.
*/
public class ActionModule extends AbstractModule {

private final Map<String, ActionEntry> actions = new HashMap<>();
private final List<Class<? extends ActionFilter>> actionFilters = new ArrayList<>();
private final boolean transportClient;
private final Map<String, ActionHandler<?, ?>> actions;
private final List<Class<? extends ActionFilter>> actionFilters;
private final AutoCreateIndex autoCreateIndex;
private final DestructiveOperations destructiveOperations;

static class ActionEntry<Request extends ActionRequest<Request>, Response extends ActionResponse> {
public final GenericAction<Request, Response> action;
public final Class<? extends TransportAction<Request, Response>> transportAction;
public final Class[] supportTransportActions;
public ActionModule(boolean ingestEnabled, boolean transportClient, Settings settings, IndexNameExpressionResolver resolver,
ClusterSettings clusterSettings, List<ActionPlugin> actionPlugins) {
this.transportClient = transportClient;
actions = setupActions(actionPlugins);
actionFilters = setupActionFilters(actionPlugins, ingestEnabled);
autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, resolver);
destructiveOperations = new DestructiveOperations(settings, clusterSettings);
}

ActionEntry(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
this.action = action;
this.transportAction = transportAction;
this.supportTransportActions = supportTransportActions;
private Map<String, ActionHandler<?, ?>> setupActions(List<ActionPlugin> actionPlugins) {
// Subclass NamedRegistry for easy registration
class ActionRegistry extends NamedRegistry<ActionHandler<?, ?>> {
public ActionRegistry() {
super("action");
}

public void register(ActionHandler<?, ?> handler) {
register(handler.getAction().name(), handler);
}

public <Request extends ActionRequest<Request>, Response extends ActionResponse> void register(
GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction,
Class<?>... supportTransportActions) {
register(new ActionHandler<>(action, transportAction, supportTransportActions));
}
}
ActionRegistry actions = new ActionRegistry();

actions.register(MainAction.INSTANCE, TransportMainAction.class);
actions.register(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class);
actions.register(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
actions.register(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
actions.register(ListTasksAction.INSTANCE, TransportListTasksAction.class);
actions.register(GetTaskAction.INSTANCE, TransportGetTaskAction.class);
actions.register(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class);

actions.register(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class);
actions.register(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class);
actions.register(ClusterStateAction.INSTANCE, TransportClusterStateAction.class);
actions.register(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class);
actions.register(ClusterUpdateSettingsAction.INSTANCE, TransportClusterUpdateSettingsAction.class);
actions.register(ClusterRerouteAction.INSTANCE, TransportClusterRerouteAction.class);
actions.register(ClusterSearchShardsAction.INSTANCE, TransportClusterSearchShardsAction.class);
actions.register(PendingClusterTasksAction.INSTANCE, TransportPendingClusterTasksAction.class);
actions.register(PutRepositoryAction.INSTANCE, TransportPutRepositoryAction.class);
actions.register(GetRepositoriesAction.INSTANCE, TransportGetRepositoriesAction.class);
actions.register(DeleteRepositoryAction.INSTANCE, TransportDeleteRepositoryAction.class);
actions.register(VerifyRepositoryAction.INSTANCE, TransportVerifyRepositoryAction.class);
actions.register(GetSnapshotsAction.INSTANCE, TransportGetSnapshotsAction.class);
actions.register(DeleteSnapshotAction.INSTANCE, TransportDeleteSnapshotAction.class);
actions.register(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class);
actions.register(RestoreSnapshotAction.INSTANCE, TransportRestoreSnapshotAction.class);
actions.register(SnapshotsStatusAction.INSTANCE, TransportSnapshotsStatusAction.class);

actions.register(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class);
actions.register(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class);
actions.register(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class);
actions.register(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class);
actions.register(ShrinkAction.INSTANCE, TransportShrinkAction.class);
actions.register(RolloverAction.INSTANCE, TransportRolloverAction.class);
actions.register(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class);
actions.register(GetIndexAction.INSTANCE, TransportGetIndexAction.class);
actions.register(OpenIndexAction.INSTANCE, TransportOpenIndexAction.class);
actions.register(CloseIndexAction.INSTANCE, TransportCloseIndexAction.class);
actions.register(IndicesExistsAction.INSTANCE, TransportIndicesExistsAction.class);
actions.register(TypesExistsAction.INSTANCE, TransportTypesExistsAction.class);
actions.register(GetMappingsAction.INSTANCE, TransportGetMappingsAction.class);
actions.register(GetFieldMappingsAction.INSTANCE, TransportGetFieldMappingsAction.class,
TransportGetFieldMappingsIndexAction.class);
actions.register(PutMappingAction.INSTANCE, TransportPutMappingAction.class);
actions.register(IndicesAliasesAction.INSTANCE, TransportIndicesAliasesAction.class);
actions.register(UpdateSettingsAction.INSTANCE, TransportUpdateSettingsAction.class);
actions.register(AnalyzeAction.INSTANCE, TransportAnalyzeAction.class);
actions.register(PutIndexTemplateAction.INSTANCE, TransportPutIndexTemplateAction.class);
actions.register(GetIndexTemplatesAction.INSTANCE, TransportGetIndexTemplatesAction.class);
actions.register(DeleteIndexTemplateAction.INSTANCE, TransportDeleteIndexTemplateAction.class);
actions.register(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
actions.register(RefreshAction.INSTANCE, TransportRefreshAction.class);
actions.register(FlushAction.INSTANCE, TransportFlushAction.class);
actions.register(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class);
actions.register(ForceMergeAction.INSTANCE, TransportForceMergeAction.class);
actions.register(UpgradeAction.INSTANCE, TransportUpgradeAction.class);
actions.register(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);
actions.register(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class);
actions.register(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class);
actions.register(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class);
actions.register(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class);
actions.register(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class);

actions.register(IndexAction.INSTANCE, TransportIndexAction.class);
actions.register(GetAction.INSTANCE, TransportGetAction.class);
actions.register(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class);
actions.register(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class,
TransportShardMultiTermsVectorAction.class);
actions.register(DeleteAction.INSTANCE, TransportDeleteAction.class);
actions.register(UpdateAction.INSTANCE, TransportUpdateAction.class);
actions.register(MultiGetAction.INSTANCE, TransportMultiGetAction.class,
TransportShardMultiGetAction.class);
actions.register(BulkAction.INSTANCE, TransportBulkAction.class,
TransportShardBulkAction.class);
actions.register(SearchAction.INSTANCE, TransportSearchAction.class);
actions.register(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class);
actions.register(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class);
actions.register(ExplainAction.INSTANCE, TransportExplainAction.class);
actions.register(ClearScrollAction.INSTANCE, TransportClearScrollAction.class);
actions.register(RecoveryAction.INSTANCE, TransportRecoveryAction.class);

//Indexed scripts
actions.register(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class);
actions.register(GetStoredScriptAction.INSTANCE, TransportGetStoredScriptAction.class);
actions.register(DeleteStoredScriptAction.INSTANCE, TransportDeleteStoredScriptAction.class);

actions.register(FieldStatsAction.INSTANCE, TransportFieldStatsAction.class);

actions.register(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class);
actions.register(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class);
actions.register(DeletePipelineAction.INSTANCE, DeletePipelineTransportAction.class);
actions.register(SimulatePipelineAction.INSTANCE, SimulatePipelineTransportAction.class);

actionPlugins.stream().flatMap(p -> p.getActions().stream()).forEach(actions::register);

return unmodifiableMap(actions.getRegistry());
}

private final boolean ingestEnabled;
private final boolean proxy;
private List<Class<? extends ActionFilter>> setupActionFilters(List<ActionPlugin> actionPlugins, boolean ingestEnabled) {
List<Class<? extends ActionFilter>> filters = new ArrayList<>();
if (transportClient == false) {
if (ingestEnabled) {
filters.add(IngestActionFilter.class);
} else {
filters.add(IngestProxyActionFilter.class);
}
}

public ActionModule(boolean ingestEnabled, boolean proxy) {
this.ingestEnabled = ingestEnabled;
this.proxy = proxy;
}

/**
* Registers an action.
*
* @param action The action type.
* @param transportAction The transport action implementing the actual action.
* @param supportTransportActions Any support actions that are needed by the transport action.
* @param <Request> The request type.
* @param <Response> The response type.
*/
public <Request extends ActionRequest<Request>, Response extends ActionResponse> void registerAction(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
actions.put(action.name(), new ActionEntry<>(action, transportAction, supportTransportActions));
}

public ActionModule registerFilter(Class<? extends ActionFilter> actionFilter) {
actionFilters.add(actionFilter);
return this;
for (ActionPlugin plugin : actionPlugins) {
filters.addAll(plugin.getActionFilters());
}
return unmodifiableList(filters);
}

@Override
protected void configure() {
if (proxy == false) {
if (ingestEnabled) {
registerFilter(IngestActionFilter.class);
} else {
registerFilter(IngestProxyActionFilter.class);
}
}

Multibinder<ActionFilter> actionFilterMultibinder = Multibinder.newSetBinder(binder(), ActionFilter.class);
for (Class<? extends ActionFilter> actionFilter : actionFilters) {
actionFilterMultibinder.addBinding().to(actionFilter);
}
bind(ActionFilters.class).asEagerSingleton();
bind(AutoCreateIndex.class).asEagerSingleton();
bind(DestructiveOperations.class).asEagerSingleton();
registerAction(MainAction.INSTANCE, TransportMainAction.class);
registerAction(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class);
registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
registerAction(ListTasksAction.INSTANCE, TransportListTasksAction.class);
registerAction(GetTaskAction.INSTANCE, TransportGetTaskAction.class);
registerAction(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class);

registerAction(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class);
registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class);
registerAction(ClusterStateAction.INSTANCE, TransportClusterStateAction.class);
registerAction(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class);
registerAction(ClusterUpdateSettingsAction.INSTANCE, TransportClusterUpdateSettingsAction.class);
registerAction(ClusterRerouteAction.INSTANCE, TransportClusterRerouteAction.class);
registerAction(ClusterSearchShardsAction.INSTANCE, TransportClusterSearchShardsAction.class);
registerAction(PendingClusterTasksAction.INSTANCE, TransportPendingClusterTasksAction.class);
registerAction(PutRepositoryAction.INSTANCE, TransportPutRepositoryAction.class);
registerAction(GetRepositoriesAction.INSTANCE, TransportGetRepositoriesAction.class);
registerAction(DeleteRepositoryAction.INSTANCE, TransportDeleteRepositoryAction.class);
registerAction(VerifyRepositoryAction.INSTANCE, TransportVerifyRepositoryAction.class);
registerAction(GetSnapshotsAction.INSTANCE, TransportGetSnapshotsAction.class);
registerAction(DeleteSnapshotAction.INSTANCE, TransportDeleteSnapshotAction.class);
registerAction(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class);
registerAction(RestoreSnapshotAction.INSTANCE, TransportRestoreSnapshotAction.class);
registerAction(SnapshotsStatusAction.INSTANCE, TransportSnapshotsStatusAction.class);

registerAction(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class);
registerAction(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class);
registerAction(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class);
registerAction(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class);
registerAction(ShrinkAction.INSTANCE, TransportShrinkAction.class);
registerAction(RolloverAction.INSTANCE, TransportRolloverAction.class);
registerAction(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class);
registerAction(GetIndexAction.INSTANCE, TransportGetIndexAction.class);
registerAction(OpenIndexAction.INSTANCE, TransportOpenIndexAction.class);
registerAction(CloseIndexAction.INSTANCE, TransportCloseIndexAction.class);
registerAction(IndicesExistsAction.INSTANCE, TransportIndicesExistsAction.class);
registerAction(TypesExistsAction.INSTANCE, TransportTypesExistsAction.class);
registerAction(GetMappingsAction.INSTANCE, TransportGetMappingsAction.class);
registerAction(GetFieldMappingsAction.INSTANCE, TransportGetFieldMappingsAction.class, TransportGetFieldMappingsIndexAction.class);
registerAction(PutMappingAction.INSTANCE, TransportPutMappingAction.class);
registerAction(IndicesAliasesAction.INSTANCE, TransportIndicesAliasesAction.class);
registerAction(UpdateSettingsAction.INSTANCE, TransportUpdateSettingsAction.class);
registerAction(AnalyzeAction.INSTANCE, TransportAnalyzeAction.class);
registerAction(PutIndexTemplateAction.INSTANCE, TransportPutIndexTemplateAction.class);
registerAction(GetIndexTemplatesAction.INSTANCE, TransportGetIndexTemplatesAction.class);
registerAction(DeleteIndexTemplateAction.INSTANCE, TransportDeleteIndexTemplateAction.class);
registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class);
registerAction(FlushAction.INSTANCE, TransportFlushAction.class);
registerAction(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class);
registerAction(ForceMergeAction.INSTANCE, TransportForceMergeAction.class);
registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class);
registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);
registerAction(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class);
registerAction(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class);
registerAction(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class);
registerAction(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class);
registerAction(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class);

registerAction(IndexAction.INSTANCE, TransportIndexAction.class);
registerAction(GetAction.INSTANCE, TransportGetAction.class);
registerAction(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class);
registerAction(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class,
TransportShardMultiTermsVectorAction.class);
registerAction(DeleteAction.INSTANCE, TransportDeleteAction.class);
registerAction(UpdateAction.INSTANCE, TransportUpdateAction.class);
registerAction(MultiGetAction.INSTANCE, TransportMultiGetAction.class,
TransportShardMultiGetAction.class);
registerAction(BulkAction.INSTANCE, TransportBulkAction.class,
TransportShardBulkAction.class);
registerAction(SearchAction.INSTANCE, TransportSearchAction.class);
registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class);
registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class);
registerAction(ExplainAction.INSTANCE, TransportExplainAction.class);
registerAction(ClearScrollAction.INSTANCE, TransportClearScrollAction.class);
registerAction(RecoveryAction.INSTANCE, TransportRecoveryAction.class);

//Indexed scripts
registerAction(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class);
registerAction(GetStoredScriptAction.INSTANCE, TransportGetStoredScriptAction.class);
registerAction(DeleteStoredScriptAction.INSTANCE, TransportDeleteStoredScriptAction.class);

registerAction(FieldStatsAction.INSTANCE, TransportFieldStatsAction.class);

registerAction(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class);
registerAction(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class);
registerAction(DeletePipelineAction.INSTANCE, DeletePipelineTransportAction.class);
registerAction(SimulatePipelineAction.INSTANCE, SimulatePipelineTransportAction.class);
bind(DestructiveOperations.class).toInstance(destructiveOperations);

// register Name -> GenericAction Map that can be injected to instances.
@SuppressWarnings("rawtypes")
MapBinder<String, GenericAction> actionsBinder
= MapBinder.newMapBinder(binder(), String.class, GenericAction.class);

for (Map.Entry<String, ActionEntry> entry : actions.entrySet()) {
actionsBinder.addBinding(entry.getKey()).toInstance(entry.getValue().action);
for (Map.Entry<String, ActionHandler<?, ?>> entry : actions.entrySet()) {
actionsBinder.addBinding(entry.getKey()).toInstance(entry.getValue().getAction());
}
// register GenericAction -> transportAction Map that can be injected to instances.
// also register any supporting classes
if (!proxy) {
if (false == transportClient) {
bind(AutoCreateIndex.class).toInstance(autoCreateIndex);
bind(TransportLivenessAction.class).asEagerSingleton();
@SuppressWarnings("rawtypes")
MapBinder<GenericAction, TransportAction> transportActionsBinder
= MapBinder.newMapBinder(binder(), GenericAction.class, TransportAction.class);
for (Map.Entry<String, ActionEntry> entry : actions.entrySet()) {
for (ActionHandler<?, ?> action : actions.values()) {
// bind the action as eager singleton, so the map binder one will reuse it
bind(entry.getValue().transportAction).asEagerSingleton();
transportActionsBinder.addBinding(entry.getValue().action).to(entry.getValue().transportAction).asEagerSingleton();
for (Class supportAction : entry.getValue().supportTransportActions) {
bind(action.getTransportAction()).asEagerSingleton();
transportActionsBinder.addBinding(action.getAction()).to(action.getTransportAction()).asEagerSingleton();
for (Class<?> supportAction : action.getSupportTransportActions()) {
bind(supportAction).asEagerSingleton();
}
}
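The net effect of this hunk is that action registration moves from imperative registerAction(...) calls inside configure() to a map assembled up front in setupActions(...), which also folds in whatever an ActionPlugin contributes via getActions() and getActionFilters(). A minimal sketch of the plugin side is below; MyAction, TransportMyAction and MyActionFilter are hypothetical placeholders, and the exact generic bounds on the real ActionPlugin interface may differ from this simplified form.

```java
import java.util.Collections;
import java.util.List;

import org.elasticsearch.action.support.ActionFilter;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.ActionPlugin.ActionHandler;
import org.elasticsearch.plugins.Plugin;

// Hypothetical plugin: illustrates the two ActionPlugin hooks the new ActionModule consumes.
public class MyIngestLikePlugin extends Plugin implements ActionPlugin {

    @Override
    public List<ActionHandler<?, ?>> getActions() {
        // Each handler pairs a GenericAction instance with its TransportAction class,
        // mirroring what ActionRegistry.register(...) does for the core actions above.
        return Collections.singletonList(new ActionHandler<>(MyAction.INSTANCE, TransportMyAction.class));
    }

    @Override
    public List<Class<? extends ActionFilter>> getActionFilters() {
        // Filters contributed here end up in setupActionFilters(...) next to the ingest filters.
        return Collections.singletonList(MyActionFilter.class);
    }
}
```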
@@ -190,7 +190,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
innerMoveToSecondPhase();
} catch (Throwable e) {
if (logger.isDebugEnabled()) {
logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "] while moving to second phase", e);
logger.debug("{}: Failed to execute [{}] while moving to second phase", e, shardIt.shardId(), request);
}
raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
}
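This and the following hunks switch exception logging from string concatenation to the parameterized form: the message template comes first, the Throwable second, and the placeholder arguments last, which is the argument order the internal logger used here expects. A rough before/after sketch of the pattern, taken from the hunk above rather than tied to any new API:

```java
// Before: the message string is built eagerly, even when debug logging is disabled.
logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "] while moving to second phase", e);

// After: the "{}" placeholders are only rendered if the debug level is enabled,
// and the Throwable is passed right after the template.
logger.debug("{}: Failed to execute [{}] while moving to second phase", e, shardIt.shardId(), request);
```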
@@ -210,11 +210,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
if (totalOps.incrementAndGet() == expectedTotalOps) {
if (logger.isDebugEnabled()) {
if (t != null && !TransportActions.isShardNotAvailableException(t)) {
if (shard != null) {
logger.debug(shard.shortSummary() + ": Failed to execute [" + request + "]", t);
} else {
logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "]", t);
}
logger.debug("{}: Failed to execute [{}]", t, shard != null ? shard.shortSummary() : shardIt.shardId(), request);
} else if (logger.isTraceEnabled()) {
logger.trace("{}: Failed to execute [{}]", t, shard, request);
}

@@ -239,7 +235,8 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
final boolean lastShard = nextShard == null;
// trace log this exception
if (logger.isTraceEnabled()) {
logger.trace(executionFailureMsg(shard, shardIt, request, lastShard), t);
logger.trace("{}: Failed to execute [{}] lastShard [{}]", t, shard != null ? shard.shortSummary() : shardIt.shardId(),
request, lastShard);
}
if (!lastShard) {
try {

@@ -251,22 +248,14 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
// no more shards active, add a failure
if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
if (t != null && !TransportActions.isShardNotAvailableException(t)) {
logger.debug(executionFailureMsg(shard, shardIt, request, lastShard), t);
logger.debug("{}: Failed to execute [{}] lastShard [{}]", t,
shard != null ? shard.shortSummary() : shardIt.shardId(), request, lastShard);
}
}
}
}
}

private String executionFailureMsg(@Nullable ShardRouting shard, final ShardIterator shardIt, SearchRequest request,
boolean lastShard) {
if (shard != null) {
return shard.shortSummary() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
} else {
return shardIt.shardId() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
}
}

protected final ShardSearchFailure[] buildShardFailures() {
AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
if (shardFailures == null) {
@@ -103,7 +103,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
executePhase(i, node, target.getScrollId());
} else {
if (logger.isDebugEnabled()) {
logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource());
}
successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {

@@ -116,7 +116,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
DiscoveryNode node = nodes.get(target.getNode());
if (node == null) {
if (logger.isDebugEnabled()) {
logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource());
}
successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {
@@ -107,7 +107,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
executeQueryPhase(i, counter, node, target.getScrollId());
} else {
if (logger.isDebugEnabled()) {
logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource());
}
successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {
@@ -47,7 +47,6 @@ public final class AutoCreateIndex {
private final IndexNameExpressionResolver resolver;
private final AutoCreate autoCreate;

@Inject
public AutoCreateIndex(Settings settings, IndexNameExpressionResolver resolver) {
this.resolver = resolver;
dynamicMappingDisabled = !MapperService.INDEX_MAPPER_DYNAMIC_SETTING.get(settings);
@@ -20,7 +20,6 @@
package org.elasticsearch.action.support;

import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

@@ -38,7 +37,6 @@ public final class DestructiveOperations extends AbstractComponent {
Setting.boolSetting("action.destructive_requires_name", false, Property.Dynamic, Property.NodeScope);
private volatile boolean destructiveRequiresName;

@Inject
public DestructiveOperations(Settings settings, ClusterSettings clusterSettings) {
super(settings);
destructiveRequiresName = REQUIRES_NAME_SETTING.get(settings);
@@ -19,6 +19,8 @@

package org.elasticsearch.cli;

import org.elasticsearch.common.SuppressForbidden;

import java.io.BufferedReader;
import java.io.Console;
import java.io.IOException;

@@ -26,8 +28,6 @@ import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.nio.charset.Charset;

import org.elasticsearch.common.SuppressForbidden;

/**
* A Terminal wraps access to reading input and writing output for a cli.
*

@@ -81,8 +81,13 @@ public abstract class Terminal {

/** Prints a line to the terminal at {@code verbosity} level. */
public final void println(Verbosity verbosity, String msg) {
print(verbosity, msg + lineSeparator);
}

/** Prints message to the terminal at {@code verbosity} level, without a newline. */
public final void print(Verbosity verbosity, String msg) {
if (this.verbosity.ordinal() >= verbosity.ordinal()) {
getWriter().print(msg + lineSeparator);
getWriter().print(msg);
getWriter().flush();
}
}
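The hunk above makes println(Verbosity, String) a thin wrapper that appends the line separator and delegates to the new print(Verbosity, String), which writes the message verbatim. A small usage sketch of the resulting contract; Terminal.DEFAULT is assumed to be the standard console- or System.out-backed instance:

```java
import org.elasticsearch.cli.Terminal;

public class TerminalPrintExample {
    public static void main(String[] args) {
        Terminal terminal = Terminal.DEFAULT; // assumed console/system-backed instance

        // print(...) emits the message as-is, with no trailing newline...
        terminal.print(Terminal.Verbosity.NORMAL, "progress: ");
        terminal.print(Terminal.Verbosity.NORMAL, "42%");

        // ...while println(...) is now just print(...) plus the platform line separator.
        terminal.println(Terminal.Verbosity.NORMAL, "");
        terminal.println(Terminal.Verbosity.NORMAL, "done");
    }
}
```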
@@ -44,6 +44,7 @@ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.search.SearchModule;

@@ -125,6 +126,15 @@ public class TransportClient extends AbstractClient {
final NetworkService networkService = new NetworkService(settings);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
try {
final List<Setting<?>> additionalSettings = new ArrayList<>();
final List<String> additionalSettingsFilter = new ArrayList<>();
additionalSettings.addAll(pluginsService.getPluginSettings());
additionalSettingsFilter.addAll(pluginsService.getPluginSettingsFilter());
for (final ExecutorBuilder<?> builder : threadPool.builders()) {
additionalSettings.addAll(builder.getRegisteredSettings());
}
SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter);

ModulesBuilder modules = new ModulesBuilder();
// plugin modules must be added here, before others or we can get crazy injection errors...
for (Module pluginModule : pluginsService.nodeModules()) {

@@ -138,17 +148,10 @@ public class TransportClient extends AbstractClient {
// noop
}
});
modules.add(new ActionModule(false, true));
modules.add(new ActionModule(false, true, settings, null, settingsModule.getClusterSettings(),
pluginsService.filterPlugins(ActionPlugin.class)));

pluginsService.processModules(modules);
final List<Setting<?>> additionalSettings = new ArrayList<>();
final List<String> additionalSettingsFilter = new ArrayList<>();
additionalSettings.addAll(pluginsService.getPluginSettings());
additionalSettingsFilter.addAll(pluginsService.getPluginSettingsFilter());
for (final ExecutorBuilder<?> builder : threadPool.builders()) {
additionalSettings.addAll(builder.getRegisteredSettings());
}
SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter);
CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(),
settingsModule.getClusterSettings());
resourcesToClose.add(circuitBreakerService);
@@ -33,7 +33,6 @@ import org.elasticsearch.cluster.metadata.MetaDataMappingService;
import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
import org.elasticsearch.cluster.node.DiscoveryNodeService;
import org.elasticsearch.cluster.routing.DelayedAllocationService;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;

@@ -101,6 +100,7 @@ public class ClusterModule extends AbstractModule {
private final ExtensionPoint.ClassSet<AllocationDecider> allocationDeciders = new ExtensionPoint.ClassSet<>("allocation_decider", AllocationDecider.class, AllocationDeciders.class);
private final ExtensionPoint.ClassSet<IndexTemplateFilter> indexTemplateFilters = new ExtensionPoint.ClassSet<>("index_template_filter", IndexTemplateFilter.class);
private final ClusterService clusterService;
private final IndexNameExpressionResolver indexNameExpressionResolver;

// pkg private so tests can mock
Class<? extends ClusterInfoService> clusterInfoServiceImpl = InternalClusterInfoService.class;

@@ -113,6 +113,7 @@ public class ClusterModule extends AbstractModule {
registerShardsAllocator(ClusterModule.BALANCED_ALLOCATOR, BalancedShardsAllocator.class);
registerShardsAllocator(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR, BalancedShardsAllocator.class);
this.clusterService = clusterService;
indexNameExpressionResolver = new IndexNameExpressionResolver(settings);
}

public void registerAllocationDecider(Class<? extends AllocationDecider> allocationDecider) {

@@ -127,6 +128,10 @@ public class ClusterModule extends AbstractModule {
indexTemplateFilters.registerExtension(indexTemplateFilter);
}

public IndexNameExpressionResolver getIndexNameExpressionResolver() {
return indexNameExpressionResolver;
}

@Override
protected void configure() {
// bind ShardsAllocator

@@ -151,7 +156,7 @@ public class ClusterModule extends AbstractModule {
bind(MetaDataIndexAliasesService.class).asEagerSingleton();
bind(MetaDataUpdateSettingsService.class).asEagerSingleton();
bind(MetaDataIndexTemplateService.class).asEagerSingleton();
bind(IndexNameExpressionResolver.class).asEagerSingleton();
bind(IndexNameExpressionResolver.class).toInstance(indexNameExpressionResolver);
bind(RoutingService.class).asEagerSingleton();
bind(DelayedAllocationService.class).asEagerSingleton();
bind(ShardStateAction.class).asEagerSingleton();
@@ -91,7 +91,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent<NodeConne
try {
transportService.disconnectFromNode(node);
} catch (Throwable e) {
logger.warn("failed to disconnect to node [" + node + "]", e);
logger.warn("failed to disconnect to node [{}]", e, node);
}
}
}
@@ -172,7 +172,7 @@ public class MetaDataIndexStateService extends AbstractComponent {
// We need to check that this index can be upgraded to the current version
indexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData);
try {
indicesService.verifyIndexMetadata(nodeServiceProvider, indexMetaData);
indicesService.verifyIndexMetadata(nodeServiceProvider, indexMetaData, indexMetaData);
} catch (Exception e) {
throw new ElasticsearchException("Failed to verify index " + indexMetaData.getIndex(), e);
}
@@ -275,13 +275,17 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();
try {
for (Index index : openIndices) {
indicesService.verifyIndexMetadata(nodeServiceProvider, updatedState.getMetaData().getIndexSafe(index));
final IndexMetaData currentMetaData = currentState.getMetaData().getIndexSafe(index);
final IndexMetaData updatedMetaData = updatedState.metaData().getIndexSafe(index);
indicesService.verifyIndexMetadata(nodeServiceProvider, currentMetaData, updatedMetaData);
}
for (Index index : closeIndices) {
indicesService.verifyIndexMetadata(nodeServiceProvider, updatedState.getMetaData().getIndexSafe(index));
final IndexMetaData currentMetaData = currentState.getMetaData().getIndexSafe(index);
final IndexMetaData updatedMetaData = updatedState.metaData().getIndexSafe(index);
indicesService.verifyIndexMetadata(nodeServiceProvider, currentMetaData, updatedMetaData);
}
} catch (IOException ex) {
ExceptionsHelper.convertToElastic(ex);
throw ExceptionsHelper.convertToElastic(ex);
}
return updatedState;
}
@@ -0,0 +1,59 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

import static java.util.Objects.requireNonNull;

/**
* A registry from String to some class implementation. Used to ensure implementations are registered only once.
*/
public class NamedRegistry<T> {
private final Map<String, T> registry = new HashMap<>();
private final String targetName;

public NamedRegistry(String targetName) {
this.targetName = targetName;
}

public Map<String, T> getRegistry() {
return registry;
}

public void register(String name, T t) {
requireNonNull(name, "name is required");
requireNonNull(t, targetName + " is required");
if (registry.putIfAbsent(name, t) != null) {
throw new IllegalArgumentException(targetName + " for name " + name + " already registered");
}
}

public <P> void extractAndRegister(List<P> plugins, Function<P, Map<String, T>> lookup) {
for (P plugin : plugins) {
for (Map.Entry<String, T> entry : lookup.apply(plugin).entrySet()) {
register(entry.getKey(), entry.getValue());
}
}
}
}
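Since NamedRegistry is a new class in this commit, a short usage sketch may help: the registry maps names to implementations, rejects duplicate names, and extractAndRegister(...) folds in per-plugin maps. The example below is self-contained and only assumes the class shown above is on the classpath; Runnable is used as an arbitrary value type.

```java
import org.elasticsearch.common.NamedRegistry;

public class NamedRegistryExample {
    public static void main(String[] args) {
        // "processor" is only the human-readable target name used in error messages.
        NamedRegistry<Runnable> processors = new NamedRegistry<>("processor");
        processors.register("noop", () -> {});

        // Registering the same name twice fails fast with
        // IllegalArgumentException("processor for name noop already registered").
        try {
            processors.register("noop", () -> {});
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
        }

        System.out.println(processors.getRegistry().keySet()); // [noop]
    }
}
```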
@@ -20,18 +20,14 @@
package org.elasticsearch.common.bytes;

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.io.Channels;
import org.elasticsearch.common.io.stream.StreamInput;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.channels.GatheringByteChannel;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class BytesArray implements BytesReference {
public final class BytesArray implements BytesReference {

public static final BytesArray EMPTY = new BytesArray(BytesRef.EMPTY_BYTES, 0, 0);

@@ -103,11 +99,6 @@ public class BytesArray implements BytesReference {
os.write(bytes, offset, length);
}

@Override
public void writeTo(GatheringByteChannel channel) throws IOException {
Channels.writeToChannel(bytes, offset, length(), channel);
}

@Override
public byte[] toBytes() {
if (offset == 0 && bytes.length == length) {

@@ -126,11 +117,6 @@ public class BytesArray implements BytesReference {
return new BytesArray(Arrays.copyOfRange(bytes, offset, offset + length));
}

@Override
public ChannelBuffer toChannelBuffer() {
return ChannelBuffers.wrappedBuffer(bytes, offset, length);
}

@Override
public boolean hasArray() {
return true;
@@ -19,19 +19,18 @@
package org.elasticsearch.common.bytes;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator;
import org.elasticsearch.common.io.stream.StreamInput;
import org.jboss.netty.buffer.ChannelBuffer;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.channels.GatheringByteChannel;

/**
* A reference to bytes.
*/
public interface BytesReference {

public static class Helper {
class Helper {

public static boolean bytesEqual(BytesReference a, BytesReference b) {
if (a == b) {

@@ -108,10 +107,6 @@ public interface BytesReference {
*/
void writeTo(OutputStream os) throws IOException;

/**
* Writes the bytes directly to the channel.
*/
void writeTo(GatheringByteChannel channel) throws IOException;

/**
* Returns the bytes as a single byte array.

@@ -128,11 +123,6 @@ public interface BytesReference {
*/
BytesArray copyBytesArray();

/**
* Returns the bytes as a channel buffer.
*/
ChannelBuffer toChannelBuffer();

/**
* Is there an underlying byte array for this bytes reference.
*/

@@ -162,4 +152,22 @@ public interface BytesReference {
* Converts to a copied Lucene BytesRef.
*/
BytesRef copyBytesRef();

/**
* Returns a BytesRefIterator for this BytesReference. This method allows
* access to the internal pages of this reference without copying them. Use with care!
* @see BytesRefIterator
*/
default BytesRefIterator iterator() {
return new BytesRefIterator() {
BytesRef ref = length() == 0 ? null : toBytesRef();
@Override
public BytesRef next() throws IOException {
BytesRef r = ref;
ref = null; // only return it once...
return r;
}
};
}

}
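The new default iterator() exposes the content as Lucene BytesRef chunks without copying; callers drain it until next() returns null. A minimal consumer sketch, assuming a BytesArray built from a UTF-8 string (the single-chunk default shown above; paged references override it to return page-sized slices):

```java
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class BytesReferenceIteratorExample {
    public static void main(String[] args) throws IOException {
        BytesReference ref = new BytesArray("hello".getBytes(StandardCharsets.UTF_8));
        BytesRefIterator it = ref.iterator();
        int total = 0;
        // next() hands out each chunk once and signals the end with null,
        // so this loop never copies the underlying bytes.
        for (BytesRef chunk = it.next(); chunk != null; chunk = it.next()) {
            total += chunk.length;
        }
        System.out.println(total); // 5
    }
}
```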
@@ -20,19 +20,15 @@
package org.elasticsearch.common.bytes;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator;
import org.apache.lucene.util.CharsRefBuilder;
import org.elasticsearch.common.io.Channels;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.netty.NettyUtils;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.ByteArray;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;

import java.io.EOFException;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.channels.GatheringByteChannel;
import java.util.Arrays;

/**

@@ -113,30 +109,6 @@ public class PagedBytesReference implements BytesReference {
}
}

@Override
public void writeTo(GatheringByteChannel channel) throws IOException {
// nothing to do
if (length == 0) {
return;
}

int currentLength = length;
int currentOffset = offset;
BytesRef ref = new BytesRef();

while (currentLength > 0) {
// try to align to the underlying pages while writing, so no new arrays will be created.
int fragmentSize = Math.min(currentLength, PAGE_SIZE - (currentOffset % PAGE_SIZE));
boolean newArray = bytearray.get(currentOffset, fragmentSize, ref);
assert !newArray : "PagedBytesReference failed to align with underlying bytearray. offset [" + currentOffset + "], size [" + fragmentSize + "]";
Channels.writeToChannel(ref.bytes, ref.offset, ref.length, channel);
currentLength -= ref.length;
currentOffset += ref.length;
}

assert currentLength == 0;
}

@Override
public byte[] toBytes() {
if (length == 0) {

@@ -178,60 +150,6 @@ public class PagedBytesReference implements BytesReference {
}
}

@Override
public ChannelBuffer toChannelBuffer() {
// nothing to do
if (length == 0) {
return ChannelBuffers.EMPTY_BUFFER;
}

ChannelBuffer[] buffers;
ChannelBuffer currentBuffer = null;
BytesRef ref = new BytesRef();
int pos = 0;

// are we a slice?
if (offset != 0) {
// remaining size of page fragment at offset
int fragmentSize = Math.min(length, PAGE_SIZE - (offset % PAGE_SIZE));
bytearray.get(offset, fragmentSize, ref);
currentBuffer = ChannelBuffers.wrappedBuffer(ref.bytes, ref.offset, fragmentSize);
pos += fragmentSize;
}

// no need to create a composite buffer for a single page
if (pos == length && currentBuffer != null) {
return currentBuffer;
}

// a slice > pagesize will likely require extra buffers for initial/trailing fragments
int numBuffers = countRequiredBuffers((currentBuffer != null ? 1 : 0), length - pos);

buffers = new ChannelBuffer[numBuffers];
int bufferSlot = 0;

if (currentBuffer != null) {
buffers[bufferSlot] = currentBuffer;
bufferSlot++;
}

// handle remainder of pages + trailing fragment
while (pos < length) {
int remaining = length - pos;
int bulkSize = (remaining > PAGE_SIZE) ? PAGE_SIZE : remaining;
bytearray.get(offset + pos, bulkSize, ref);
currentBuffer = ChannelBuffers.wrappedBuffer(ref.bytes, ref.offset, bulkSize);
buffers[bufferSlot] = currentBuffer;
bufferSlot++;
pos += bulkSize;
}

// this would indicate that our numBuffer calculation is off by one.
assert (numBuffers == bufferSlot);

return ChannelBuffers.wrappedBuffer(NettyUtils.DEFAULT_GATHERING, buffers);
}

@Override
public boolean hasArray() {
return (offset + length <= PAGE_SIZE);

@@ -338,17 +256,6 @@ public class PagedBytesReference implements BytesReference {
return true;
}

private int countRequiredBuffers(int initialCount, int numBytes) {
int numBuffers = initialCount;
// an "estimate" of how many pages remain - rounded down
int pages = numBytes / PAGE_SIZE;
// a remaining fragment < pagesize needs at least one buffer
numBuffers += (pages == 0) ? 1 : pages;
// a remainder that is not a multiple of pagesize also needs an extra buffer
numBuffers += (pages > 0 && numBytes % PAGE_SIZE > 0) ? 1 : 0;
return numBuffers;
}

private static class PagedBytesReferenceStreamInput extends StreamInput {

private final ByteArray bytearray;

@@ -390,7 +297,7 @@ public class PagedBytesReference implements BytesReference {

@Override
public int read() throws IOException {
return (pos < length) ? bytearray.get(offset + pos++) : -1;
return (pos < length) ? Byte.toUnsignedInt(bytearray.get(offset + pos++)) : -1;
}

@Override

@@ -451,4 +358,36 @@ public class PagedBytesReference implements BytesReference {
}

}

@Override
public final BytesRefIterator iterator() {
final int offset = this.offset;
final int length = this.length;
// this iteration is page aligned to ensure we do NOT materialize the pages from the ByteArray
// we calculate the initial fragment size here to ensure that if this reference is a slice we are still page aligned
// across the entire iteration. The first page is smaller if our offset != 0 then we start in the middle of the page
// otherwise we iterate full pages until we reach the last chunk which also might end within a page.
final int initialFragmentSize = offset != 0 ? PAGE_SIZE - (offset % PAGE_SIZE) : PAGE_SIZE;
return new BytesRefIterator() {
int position = 0;
int nextFragmentSize = Math.min(length, initialFragmentSize);
// this BytesRef is reused across the iteration on purpose - BytesRefIterator interface was designed for this
final BytesRef slice = new BytesRef();

@Override
public BytesRef next() throws IOException {
if (nextFragmentSize != 0) {
final boolean materialized = bytearray.get(offset + position, nextFragmentSize, slice);
assert materialized == false : "iteration should be page aligned but array got materialized";
position += nextFragmentSize;
final int remaining = length - position;
nextFragmentSize = Math.min(remaining, PAGE_SIZE);
return slice;
} else {
assert nextFragmentSize == 0 : "fragmentSize expected [0] but was: [" + nextFragmentSize + "]";
return null; // we are done with this iteration
}
}
};
}
}
@@ -28,7 +28,7 @@ import org.elasticsearch.common.util.ByteArray;
* An extension to {@link PagedBytesReference} that requires releasing its content. This
* class exists to make it explicit when a bytes reference needs to be released, and when not.
*/
public class ReleasablePagedBytesReference extends PagedBytesReference implements Releasable {
public final class ReleasablePagedBytesReference extends PagedBytesReference implements Releasable {

public ReleasablePagedBytesReference(BigArrays bigarrays, ByteArray bytearray, int length) {
super(bigarrays, bytearray, length);
@ -1,215 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.compress;
|
||||
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.LongArray;
|
||||
|
||||
import java.io.EOFException;
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* @deprecated Used only for backward comp. to read old compressed files, since we now use codec based compression
|
||||
*/
|
||||
@Deprecated
|
||||
public abstract class CompressedIndexInput extends IndexInput {
|
||||
|
||||
private IndexInput in;
|
||||
|
||||
private int version;
|
||||
private long totalUncompressedLength;
|
||||
private LongArray offsets;
|
||||
|
||||
private boolean closed;
|
||||
|
||||
protected byte[] uncompressed;
|
||||
protected int uncompressedLength;
|
||||
private int position = 0;
|
||||
private int valid = 0;
|
||||
private int currentOffsetIdx;
|
||||
private long currentUncompressedChunkPointer;
|
||||
|
||||
public CompressedIndexInput(IndexInput in) throws IOException {
|
||||
super("compressed(" + in.toString() + ")");
|
||||
this.in = in;
|
||||
readHeader(in);
|
||||
this.version = in.readInt();
|
||||
long metaDataPosition = in.readLong();
|
||||
long headerLength = in.getFilePointer();
|
||||
in.seek(metaDataPosition);
|
||||
this.totalUncompressedLength = in.readVLong();
|
||||
int size = in.readVInt();
|
||||
offsets = BigArrays.NON_RECYCLING_INSTANCE.newLongArray(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
offsets.set(i, in.readVLong());
|
||||
}
|
||||
this.currentOffsetIdx = -1;
|
||||
this.currentUncompressedChunkPointer = 0;
|
||||
in.seek(headerLength);
|
||||
}
|
||||
|
||||
/**
|
||||
* Method is overridden to report number of bytes that can now be read
|
||||
* from decoded data buffer, without reading bytes from the underlying
|
||||
* stream.
|
||||
* Never throws an exception; returns number of bytes available without
|
||||
* further reads from underlying source; -1 if stream has been closed, or
|
||||
* 0 if an actual read (and possible blocking) is needed to find out.
|
||||
*/
|
||||
public int available() throws IOException {
|
||||
// if closed, return -1;
|
||||
if (closed) {
|
||||
return -1;
|
||||
}
|
||||
int left = (valid - position);
|
||||
return (left <= 0) ? 0 : left;
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte readByte() throws IOException {
|
||||
if (!readyBuffer()) {
|
||||
throw new EOFException();
|
||||
}
|
||||
return uncompressed[position++];
|
||||
}
|
||||
|
||||
public int read(byte[] buffer, int offset, int length, boolean fullRead) throws IOException {
|
||||
if (length < 1) {
|
||||
return 0;
|
||||
}
|
||||
if (!readyBuffer()) {
|
||||
return -1;
|
||||
}
|
||||
// First let's read however much data we happen to have...
|
||||
int chunkLength = Math.min(valid - position, length);
|
||||
System.arraycopy(uncompressed, position, buffer, offset, chunkLength);
|
||||
position += chunkLength;
|
||||
|
||||
if (chunkLength == length || !fullRead) {
|
||||
return chunkLength;
|
||||
}
|
||||
// Need more data, then
|
||||
int totalRead = chunkLength;
|
||||
do {
|
||||
offset += chunkLength;
|
||||
if (!readyBuffer()) {
|
||||
break;
|
||||
}
|
||||
chunkLength = Math.min(valid - position, (length - totalRead));
|
||||
System.arraycopy(uncompressed, position, buffer, offset, chunkLength);
|
||||
position += chunkLength;
|
||||
totalRead += chunkLength;
|
||||
} while (totalRead < length);
|
||||
|
||||
return totalRead;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readBytes(byte[] b, int offset, int len) throws IOException {
|
||||
int result = read(b, offset, len, true /* we want to have full reads, that's the contract... */);
|
||||
if (result < len) {
|
||||
throw new EOFException();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getFilePointer() {
|
||||
return currentUncompressedChunkPointer + position;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void seek(long pos) throws IOException {
|
||||
int idx = (int) (pos / uncompressedLength);
|
||||
if (idx >= offsets.size()) {
|
||||
// set the next "readyBuffer" to EOF
|
||||
currentOffsetIdx = idx;
|
||||
position = 0;
|
||||
valid = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
// TODO: optimize so we won't have to readyBuffer on seek, can keep the position around, and set it on readyBuffer in this case
|
||||
if (idx != currentOffsetIdx) {
|
||||
long pointer = offsets.get(idx);
|
||||
in.seek(pointer);
|
||||
position = 0;
|
||||
valid = 0;
|
||||
currentOffsetIdx = idx - 1; // we are going to increase it in readyBuffer...
|
||||
readyBuffer();
|
||||
}
|
||||
position = (int) (pos % uncompressedLength);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long length() {
|
||||
return totalUncompressedLength;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
position = valid = 0;
|
||||
if (!closed) {
|
||||
closed = true;
|
||||
doClose();
|
||||
in.close();
|
||||
}
|
||||
}
|
||||
|
||||
protected abstract void doClose() throws IOException;
|
||||
|
||||
protected boolean readyBuffer() throws IOException {
|
||||
if (position < valid) {
|
||||
return true;
|
||||
}
|
||||
if (closed) {
|
||||
return false;
|
||||
}
|
||||
// we reached the end...
|
||||
if (currentOffsetIdx + 1 >= offsets.size()) {
|
||||
return false;
|
||||
}
|
||||
valid = uncompress(in, uncompressed);
|
||||
if (valid < 0) {
|
||||
return false;
|
||||
}
|
||||
currentOffsetIdx++;
|
||||
currentUncompressedChunkPointer = ((long) currentOffsetIdx) * uncompressedLength;
|
||||
position = 0;
|
||||
return (position < valid);
|
||||
}
|
||||
|
||||
protected abstract void readHeader(IndexInput in) throws IOException;
|
||||
|
||||
/**
|
||||
* Uncompress the data into the out array, returning the size uncompressed
|
||||
*/
|
||||
protected abstract int uncompress(IndexInput in, byte[] out) throws IOException;
|
||||
|
||||
@Override
|
||||
public IndexInput clone() {
|
||||
// we clone and we need to make sure we keep the same positions!
|
||||
CompressedIndexInput cloned = (CompressedIndexInput) super.clone();
|
||||
cloned.uncompressed = new byte[uncompressedLength];
|
||||
System.arraycopy(uncompressed, 0, cloned.uncompressed, 0, uncompressedLength);
|
||||
cloned.in = (IndexInput) cloned.in.clone();
|
||||
return cloned;
|
||||
}
|
||||
}
|
|
@ -1,174 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.compress;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
||||
import java.io.EOFException;
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
*/
|
||||
public abstract class CompressedStreamInput extends StreamInput {
|
||||
|
||||
private final StreamInput in;
|
||||
|
||||
private boolean closed;
|
||||
|
||||
protected byte[] uncompressed;
|
||||
private int position = 0;
|
||||
private int valid = 0;
|
||||
|
||||
public CompressedStreamInput(StreamInput in) throws IOException {
|
||||
this.in = in;
|
||||
super.setVersion(in.getVersion());
|
||||
readHeader(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setVersion(Version version) {
|
||||
in.setVersion(version);
|
||||
super.setVersion(version);
|
||||
}
|
||||
|
||||
/**
|
||||
* Method is overridden to report number of bytes that can now be read
|
||||
* from decoded data buffer, without reading bytes from the underlying
|
||||
* stream.
|
||||
* Never throws an exception; returns number of bytes available without
|
||||
* further reads from underlying source; -1 if stream has been closed, or
|
||||
* 0 if an actual read (and possible blocking) is needed to find out.
|
||||
*/
|
||||
@Override
|
||||
public int available() throws IOException {
|
||||
// if closed, return -1;
|
||||
if (closed) {
|
||||
return -1;
|
||||
}
|
||||
int left = (valid - position);
|
||||
return (left <= 0) ? 0 : left;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int read() throws IOException {
|
||||
if (!readyBuffer()) {
|
||||
return -1;
|
||||
}
|
||||
return uncompressed[position++] & 255;
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte readByte() throws IOException {
|
||||
if (!readyBuffer()) {
|
||||
throw new EOFException();
|
||||
}
|
||||
return uncompressed[position++];
|
||||
}
|
||||
|
||||
@Override
|
||||
public int read(byte[] buffer, int offset, int length) throws IOException {
|
||||
return read(buffer, offset, length, false);
|
||||
}
|
||||
|
||||
public int read(byte[] buffer, int offset, int length, boolean fullRead) throws IOException {
|
||||
if (length < 1) {
|
||||
return 0;
|
||||
}
|
||||
if (!readyBuffer()) {
|
||||
return -1;
|
||||
}
|
||||
// First let's read however much data we happen to have...
|
||||
int chunkLength = Math.min(valid - position, length);
|
||||
System.arraycopy(uncompressed, position, buffer, offset, chunkLength);
|
||||
position += chunkLength;
|
||||
|
||||
if (chunkLength == length || !fullRead) {
|
||||
return chunkLength;
|
||||
}
|
||||
// Need more data, then
|
||||
int totalRead = chunkLength;
|
||||
do {
|
||||
offset += chunkLength;
|
||||
if (!readyBuffer()) {
|
||||
break;
|
||||
}
|
||||
chunkLength = Math.min(valid - position, (length - totalRead));
|
||||
System.arraycopy(uncompressed, position, buffer, offset, chunkLength);
|
||||
position += chunkLength;
|
||||
totalRead += chunkLength;
|
||||
} while (totalRead < length);
|
||||
|
||||
return totalRead;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readBytes(byte[] b, int offset, int len) throws IOException {
|
||||
int result = read(b, offset, len, true /* we want to have full reads, that's the contract... */);
|
||||
if (result < len) {
|
||||
throw new EOFException();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void reset() throws IOException {
|
||||
this.position = 0;
|
||||
this.valid = 0;
|
||||
in.reset();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
position = valid = 0;
|
||||
if (!closed) {
|
||||
closed = true;
|
||||
doClose();
|
||||
in.close();
|
||||
}
|
||||
}
|
||||
|
||||
protected abstract void doClose() throws IOException;
|
||||
|
||||
/**
|
||||
* Fill the uncompressed bytes buffer by reading the underlying inputStream.
|
||||
*/
|
||||
protected boolean readyBuffer() throws IOException {
|
||||
if (position < valid) {
|
||||
return true;
|
||||
}
|
||||
if (closed) {
|
||||
return false;
|
||||
}
|
||||
valid = uncompress(in, uncompressed);
|
||||
if (valid < 0) {
|
||||
return false;
|
||||
}
|
||||
position = 0;
|
||||
return (position < valid);
|
||||
}
|
||||
|
||||
protected abstract void readHeader(StreamInput in) throws IOException;
|
||||
|
||||
/**
|
||||
* Uncompress the data into the out array, returning the size uncompressed
|
||||
*/
|
||||
protected abstract int uncompress(StreamInput in, byte[] out) throws IOException;
|
||||
|
||||
}
|
|
@ -80,7 +80,7 @@ public final class CompressedXContent {
|
|||
*/
|
||||
public CompressedXContent(ToXContent xcontent, XContentType type, ToXContent.Params params) throws IOException {
|
||||
BytesStreamOutput bStream = new BytesStreamOutput();
|
||||
OutputStream compressedStream = CompressorFactory.defaultCompressor().streamOutput(bStream);
|
||||
OutputStream compressedStream = CompressorFactory.COMPRESSOR.streamOutput(bStream);
|
||||
CRC32 crc32 = new CRC32();
|
||||
OutputStream checkedStream = new CheckedOutputStream(compressedStream, crc32);
|
||||
try (XContentBuilder builder = XContentFactory.contentBuilder(type, checkedStream)) {
|
||||
|
@ -105,7 +105,7 @@ public final class CompressedXContent {
|
|||
this.crc32 = crc32(new BytesArray(uncompressed()));
|
||||
} else {
|
||||
BytesStreamOutput out = new BytesStreamOutput();
|
||||
try (OutputStream compressedOutput = CompressorFactory.defaultCompressor().streamOutput(out)) {
|
||||
try (OutputStream compressedOutput = CompressorFactory.COMPRESSOR.streamOutput(out)) {
|
||||
data.writeTo(compressedOutput);
|
||||
}
|
||||
this.bytes = out.bytes().toBytes();
|
||||
|
|
|
@ -33,21 +33,7 @@ public interface Compressor {
|
|||
|
||||
boolean isCompressed(BytesReference bytes);
|
||||
|
||||
boolean isCompressed(ChannelBuffer buffer);
|
||||
|
||||
StreamInput streamInput(StreamInput in) throws IOException;
|
||||
|
||||
StreamOutput streamOutput(StreamOutput out) throws IOException;
|
||||
|
||||
/**
|
||||
* @deprecated Used for backward comp. since we now use Lucene compressed codec.
|
||||
*/
|
||||
@Deprecated
|
||||
boolean isCompressed(IndexInput in) throws IOException;
|
||||
|
||||
/**
|
||||
* @deprecated Used for backward comp. since we now use Lucene compressed codec.
|
||||
*/
|
||||
@Deprecated
|
||||
CompressedIndexInput indexInput(IndexInput in) throws IOException;
|
||||
}
|
||||
|
|
|
@ -19,16 +19,13 @@
|
|||
|
||||
package org.elasticsearch.common.compress;
|
||||
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.compress.deflate.DeflateCompressor;
|
||||
import org.elasticsearch.common.io.Streams;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.jboss.netty.buffer.ChannelBuffer;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
|
@ -36,47 +33,21 @@ import java.io.IOException;
|
|||
*/
|
||||
public class CompressorFactory {
|
||||
|
||||
private static final Compressor[] compressors;
|
||||
private static volatile Compressor defaultCompressor;
|
||||
|
||||
static {
|
||||
compressors = new Compressor[] {
|
||||
new DeflateCompressor()
|
||||
};
|
||||
defaultCompressor = new DeflateCompressor();
|
||||
}
|
||||
|
||||
public static void setDefaultCompressor(Compressor defaultCompressor) {
|
||||
CompressorFactory.defaultCompressor = defaultCompressor;
|
||||
}
|
||||
|
||||
public static Compressor defaultCompressor() {
|
||||
return defaultCompressor;
|
||||
}
|
||||
public static final Compressor COMPRESSOR = new DeflateCompressor();
|
||||
|
||||
public static boolean isCompressed(BytesReference bytes) {
|
||||
return compressor(bytes) != null;
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated we don't compress lucene indexes anymore and rely on lucene codecs
|
||||
*/
|
||||
@Deprecated
|
||||
public static boolean isCompressed(IndexInput in) throws IOException {
|
||||
return compressor(in) != null;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
public static Compressor compressor(BytesReference bytes) {
|
||||
for (Compressor compressor : compressors) {
|
||||
if (compressor.isCompressed(bytes)) {
|
||||
if (COMPRESSOR.isCompressed(bytes)) {
|
||||
// bytes should be detected either as compressed or as xcontent;
// if the bytes can be detected as both, we have a problem
|
||||
assert XContentFactory.xContentType(bytes) == null;
|
||||
return compressor;
|
||||
return COMPRESSOR;
|
||||
}
|
||||
}
|
||||
|
||||
XContentType contentType = XContentFactory.xContentType(bytes);
|
||||
if (contentType == null) {
|
||||
|
@ -97,29 +68,6 @@ public class CompressorFactory {
|
|||
(bytes.get(2) == 0 || bytes.get(2) == 1);
|
||||
}
|
||||
|
||||
public static Compressor compressor(ChannelBuffer buffer) {
|
||||
for (Compressor compressor : compressors) {
|
||||
if (compressor.isCompressed(buffer)) {
|
||||
return compressor;
|
||||
}
|
||||
}
|
||||
throw new NotCompressedException();
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated we don't compress lucene indexes anymore and rely on lucene codecs
|
||||
*/
|
||||
@Deprecated
|
||||
@Nullable
|
||||
public static Compressor compressor(IndexInput in) throws IOException {
|
||||
for (Compressor compressor : compressors) {
|
||||
if (compressor.isCompressed(in)) {
|
||||
return compressor;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Uncompress the provided data, data can be detected as compressed using {@link #isCompressed(BytesReference)}.
|
||||
*/
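With the configurable defaultCompressor() removed, callers go through the single static DEFLATE compressor. A hedged round-trip sketch of the simplified factory (assuming the usual BytesStreamOutput/StreamInput helpers; the payload is arbitrary):

    BytesStreamOutput out = new BytesStreamOutput();
    try (StreamOutput compressed = CompressorFactory.COMPRESSOR.streamOutput(out)) {
        compressed.writeString("hello");
    }
    BytesReference bytes = out.bytes();
    Compressor compressor = CompressorFactory.compressor(bytes); // non-null: the DEFLATE header matches
    try (StreamInput in = compressor.streamInput(bytes.streamInput())) {
        assert "hello".equals(in.readString());
    }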
@ -17,17 +17,14 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.compress.deflate;
|
||||
package org.elasticsearch.common.compress;
|
||||
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.compress.CompressedIndexInput;
|
||||
import org.elasticsearch.common.compress.Compressor;
|
||||
import org.elasticsearch.common.io.stream.InputStreamStreamInput;
|
||||
import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.jboss.netty.buffer.ChannelBuffer;
|
||||
|
||||
import java.io.BufferedInputStream;
|
||||
import java.io.BufferedOutputStream;
|
||||
|
@ -35,6 +32,7 @@ import java.io.IOException;
|
|||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.util.Arrays;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.zip.Deflater;
|
||||
import java.util.zip.DeflaterOutputStream;
|
||||
import java.util.zip.Inflater;
|
||||
|
@ -69,20 +67,6 @@ public class DeflateCompressor implements Compressor {
|
|||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCompressed(ChannelBuffer buffer) {
|
||||
if (buffer.readableBytes() < HEADER.length) {
|
||||
return false;
|
||||
}
|
||||
final int offset = buffer.readerIndex();
|
||||
for (int i = 0; i < HEADER.length; ++i) {
|
||||
if (buffer.getByte(offset + i) != HEADER[i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public StreamInput streamInput(StreamInput in) throws IOException {
|
||||
final byte[] headerBytes = new byte[HEADER.length];
|
||||
|
@ -103,16 +87,14 @@ public class DeflateCompressor implements Compressor {
|
|||
InputStream decompressedIn = new InflaterInputStream(in, inflater, BUFFER_SIZE);
|
||||
decompressedIn = new BufferedInputStream(decompressedIn, BUFFER_SIZE);
|
||||
return new InputStreamStreamInput(decompressedIn) {
|
||||
private boolean closed = false;
|
||||
|
||||
final AtomicBoolean closed = new AtomicBoolean(false);
|
||||
public void close() throws IOException {
|
||||
try {
|
||||
super.close();
|
||||
} finally {
|
||||
if (closed == false) {
|
||||
if (closed.compareAndSet(false, true)) {
|
||||
// important to release native memory
|
||||
inflater.end();
|
||||
closed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -128,29 +110,17 @@ public class DeflateCompressor implements Compressor {
|
|||
OutputStream compressedOut = new DeflaterOutputStream(out, deflater, BUFFER_SIZE, syncFlush);
|
||||
compressedOut = new BufferedOutputStream(compressedOut, BUFFER_SIZE);
|
||||
return new OutputStreamStreamOutput(compressedOut) {
|
||||
private boolean closed = false;
|
||||
|
||||
final AtomicBoolean closed = new AtomicBoolean(false);
|
||||
public void close() throws IOException {
|
||||
try {
|
||||
super.close();
|
||||
} finally {
|
||||
if (closed == false) {
|
||||
if (closed.compareAndSet(false, true)) {
|
||||
// important to release native memory
|
||||
deflater.end();
|
||||
closed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCompressed(IndexInput in) throws IOException {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public CompressedIndexInput indexInput(IndexInput in) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
}
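The switch to an AtomicBoolean above guards the release of native Inflater/Deflater memory so that it happens exactly once, even if close() is invoked repeatedly or from multiple threads. A minimal sketch of the same pattern with illustrative names (rawInput is assumed to be any InputStream):

    final Inflater inflater = new Inflater(true);
    final InputStream decompressed = new InflaterInputStream(rawInput, inflater, 4096);
    final InputStream guarded = new FilterInputStream(decompressed) {
        final AtomicBoolean closed = new AtomicBoolean(false);

        @Override
        public void close() throws IOException {
            try {
                super.close();
            } finally {
                if (closed.compareAndSet(false, true)) {
                    inflater.end(); // release native memory exactly once
                }
            }
        }
    };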
@ -24,10 +24,7 @@ import java.io.IOException;
|
|||
/**
|
||||
* Implementers can be written to a {@linkplain StreamOutput} and read from a {@linkplain StreamInput}. This allows them to be "thrown
|
||||
* across the wire" using Elasticsearch's internal protocol. If the implementer also implements equals and hashCode then a copy made by
|
||||
* serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged. For
|
||||
* example, {@link org.elasticsearch.common.unit.TimeValue} converts the time to nanoseconds for serialization.
|
||||
* {@linkplain org.elasticsearch.common.unit.TimeValue} actually implements {@linkplain Writeable} not {@linkplain Streamable} but it has
|
||||
* the same contract.
|
||||
* serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged.
|
||||
*
|
||||
* Prefer implementing {@link Writeable} over implementing this interface where possible. Lots of code depends on this interface so this
|
||||
* isn't always possible.
|
||||
|
|
|
@ -24,8 +24,7 @@ import java.io.IOException;
|
|||
/**
|
||||
* Implementers can be written to a {@linkplain StreamOutput} and read from a {@linkplain StreamInput}. This allows them to be "thrown
|
||||
* across the wire" using Elasticsearch's internal protocol. If the implementer also implements equals and hashCode then a copy made by
|
||||
* serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged. For
|
||||
* example, {@link org.elasticsearch.common.unit.TimeValue} converts the time to nanoseconds for serialization.
|
||||
* serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged.
|
||||
*
|
||||
* Prefer implementing this interface over implementing {@link Streamable} where possible. Lots of code depends on {@linkplain Streamable}
|
||||
* so this isn't always possible.
|
||||
|
|
|
@ -43,7 +43,6 @@ import org.joda.time.format.StrictISODateTimeFormat;
|
|||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.util.Locale;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -375,21 +374,30 @@ public class Joda {
|
|||
return hasMilliSecondPrecision ? 19 : 16;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* We adjust the instant by displayOffset to adjust for the offset that might have been added in
|
||||
* {@link DateTimeFormatter#printTo(Appendable, long, Chronology)} when using a time zone.
|
||||
*/
|
||||
@Override
|
||||
public void printTo(StringBuffer buf, long instant, Chronology chrono, int displayOffset, DateTimeZone displayZone, Locale locale) {
|
||||
if (hasMilliSecondPrecision) {
|
||||
buf.append(instant);
|
||||
buf.append(instant - displayOffset);
|
||||
} else {
|
||||
buf.append(instant / 1000);
|
||||
buf.append((instant - displayOffset) / 1000);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* We adjust the instant by displayOffset to adjust for the offset that might have been added in
|
||||
* {@link DateTimeFormatter#printTo(Appendable, long, Chronology)} when using a time zone.
|
||||
*/
|
||||
@Override
|
||||
public void printTo(Writer out, long instant, Chronology chrono, int displayOffset, DateTimeZone displayZone, Locale locale) throws IOException {
|
||||
if (hasMilliSecondPrecision) {
|
||||
out.write(String.valueOf(instant));
|
||||
out.write(String.valueOf(instant - displayOffset));
|
||||
} else {
|
||||
out.append(String.valueOf(instant / 1000));
|
||||
out.append(String.valueOf((instant - displayOffset) / 1000));
|
||||
}
|
||||
}
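The subtraction of displayOffset above keeps epoch_seconds/epoch_millis printing stable when a time zone is attached: Joda adds the zone offset to the instant before calling printTo, so the printer has to remove it again to emit the raw epoch value. An illustrative check; the forPattern/printer access path is an assumption, not something this hunk shows:

    long instant = 1420070400000L; // 2015-01-01T00:00:00Z
    DateTimeFormatter printer = Joda.forPattern("epoch_millis").printer()
            .withZone(DateTimeZone.forOffsetHours(2)); // assumed access path to the printer above
    assert "1420070400000".equals(printer.print(instant)); // the display offset does not leak into the output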
@ -21,12 +21,14 @@ package org.elasticsearch.common.logging;
|
|||
|
||||
import org.apache.log4j.Level;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.elasticsearch.common.SuppressLoggerChecks;
|
||||
|
||||
import static org.elasticsearch.common.logging.LoggerMessageFormat.format;
|
||||
|
||||
/**
|
||||
* Elasticsearch's logger wrapper.
|
||||
*/
|
||||
@SuppressLoggerChecks(reason = "safely delegates to itself")
|
||||
public class ESLogger {
|
||||
private static final String FQCN = ESLogger.class.getName();
|
||||
|
||||
|
|
|
@ -16,26 +16,26 @@
|
|||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.common.bytes;
|
||||
package org.elasticsearch.common.netty;
|
||||
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.common.io.Channels;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.transport.netty.ChannelBufferStreamInputFactory;
|
||||
import org.jboss.netty.buffer.ChannelBuffer;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.nio.channels.GatheringByteChannel;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class ChannelBufferBytesReference implements BytesReference {
|
||||
final class ChannelBufferBytesReference implements BytesReference {
|
||||
|
||||
private final ChannelBuffer buffer;
|
||||
|
||||
public ChannelBufferBytesReference(ChannelBuffer buffer) {
|
||||
ChannelBufferBytesReference(ChannelBuffer buffer) {
|
||||
this.buffer = buffer;
|
||||
}
|
||||
|
||||
|
@ -51,7 +51,7 @@ public class ChannelBufferBytesReference implements BytesReference {
|
|||
|
||||
@Override
|
||||
public BytesReference slice(int from, int length) {
|
||||
return new ChannelBufferBytesReference(buffer.slice(from, length));
|
||||
return new ChannelBufferBytesReference(buffer.slice(buffer.readerIndex() + from, length));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -64,11 +64,6 @@ public class ChannelBufferBytesReference implements BytesReference {
|
|||
buffer.getBytes(buffer.readerIndex(), os, length());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(GatheringByteChannel channel) throws IOException {
|
||||
Channels.writeToChannel(buffer, buffer.readerIndex(), length(), channel);
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] toBytes() {
|
||||
return copyBytesArray().toBytes();
|
||||
|
@ -89,7 +84,6 @@ public class ChannelBufferBytesReference implements BytesReference {
|
|||
return new BytesArray(copy);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ChannelBuffer toChannelBuffer() {
|
||||
return buffer.duplicate();
|
||||
}
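The slice() fix above accounts for a buffer whose readerIndex has already advanced. A hedged illustration of the behaviour being fixed (BytesReference#length and #get are assumed to be relative to the unread content of the wrapped buffer):

    ChannelBuffer buffer = ChannelBuffers.wrappedBuffer(new byte[]{0, 1, 2, 3, 4});
    buffer.readByte(); // readerIndex is now 1, readable content is {1, 2, 3, 4}
    BytesReference ref = NettyUtils.toBytesReference(buffer);
    assert ref.length() == 4;
    assert ref.slice(1, 2).get(0) == 2; // slice offsets are relative to readerIndex, not to absolute index 0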
@ -18,12 +18,20 @@
|
|||
*/
|
||||
package org.elasticsearch.common.netty;
|
||||
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.BytesRefIterator;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.transport.netty.NettyInternalESLoggerFactory;
|
||||
import org.jboss.netty.buffer.ChannelBuffer;
|
||||
import org.jboss.netty.buffer.ChannelBuffers;
|
||||
import org.jboss.netty.logging.InternalLogger;
|
||||
import org.jboss.netty.logging.InternalLoggerFactory;
|
||||
import org.jboss.netty.util.ThreadNameDeterminer;
|
||||
import org.jboss.netty.util.ThreadRenamingRunnable;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class NettyUtils {
|
||||
|
@ -98,4 +106,36 @@ public class NettyUtils {
|
|||
public static void setup() {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Turns the given BytesReference into a ChannelBuffer. Note: the returned ChannelBuffer will reference the internal
* pages of the BytesReference. Don't release the bytes reference before the ChannelBuffer goes out of scope.
|
||||
*/
|
||||
public static ChannelBuffer toChannelBuffer(BytesReference reference) {
|
||||
if (reference.length() == 0) {
|
||||
return ChannelBuffers.EMPTY_BUFFER;
|
||||
}
|
||||
if (reference instanceof ChannelBufferBytesReference) {
|
||||
return ((ChannelBufferBytesReference) reference).toChannelBuffer();
|
||||
} else {
|
||||
final BytesRefIterator iterator = reference.iterator();
|
||||
BytesRef slice;
|
||||
final ArrayList<ChannelBuffer> buffers = new ArrayList<>();
|
||||
try {
|
||||
while ((slice = iterator.next()) != null) {
|
||||
buffers.add(ChannelBuffers.wrappedBuffer(slice.bytes, slice.offset, slice.length));
|
||||
}
|
||||
return ChannelBuffers.wrappedBuffer(DEFAULT_GATHERING, buffers.toArray(new ChannelBuffer[buffers.size()]));
|
||||
} catch (IOException ex) {
|
||||
throw new AssertionError("no IO happens here", ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Wraps the given ChannelBuffer with a BytesReference
|
||||
*/
|
||||
public static BytesReference toBytesReference(ChannelBuffer channelBuffer) {
|
||||
return new ChannelBufferBytesReference(channelBuffer);
|
||||
}
|
||||
}
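A short usage sketch for the two helpers added above (not part of the commit). The wrap is zero-copy, so the reference must stay live, and for a releasable reference not yet released, while the ChannelBuffer is in use:

    BytesReference content = new BytesArray("hello".getBytes(StandardCharsets.UTF_8));
    ChannelBuffer buffer = NettyUtils.toChannelBuffer(content);        // wraps the underlying pages, no copy
    BytesReference roundTripped = NettyUtils.toBytesReference(buffer); // wraps the buffer again
    assert roundTripped.length() == content.length();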
@ -30,14 +30,50 @@ import org.joda.time.format.PeriodFormat;
|
|||
import org.joda.time.format.PeriodFormatter;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
public class TimeValue implements Writeable {
|
||||
|
||||
/** How many nano-seconds in one milli-second */
|
||||
public static final long NSEC_PER_MSEC = 1000000;
|
||||
public static final long NSEC_PER_MSEC = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS);
|
||||
|
||||
private static Map<TimeUnit, Byte> TIME_UNIT_BYTE_MAP;
|
||||
private static Map<Byte, TimeUnit> BYTE_TIME_UNIT_MAP;
|
||||
|
||||
static {
|
||||
final Map<TimeUnit, Byte> timeUnitByteMap = new HashMap<>();
|
||||
timeUnitByteMap.put(TimeUnit.NANOSECONDS, (byte)0);
|
||||
timeUnitByteMap.put(TimeUnit.MICROSECONDS, (byte)1);
|
||||
timeUnitByteMap.put(TimeUnit.MILLISECONDS, (byte)2);
|
||||
timeUnitByteMap.put(TimeUnit.SECONDS, (byte)3);
|
||||
timeUnitByteMap.put(TimeUnit.MINUTES, (byte)4);
|
||||
timeUnitByteMap.put(TimeUnit.HOURS, (byte)5);
|
||||
timeUnitByteMap.put(TimeUnit.DAYS, (byte)6);
|
||||
|
||||
final Set<Byte> bytes = new HashSet<>();
|
||||
for (TimeUnit value : TimeUnit.values()) {
|
||||
assert timeUnitByteMap.containsKey(value) : value;
|
||||
assert bytes.add(timeUnitByteMap.get(value));
|
||||
}
|
||||
|
||||
final Map<Byte, TimeUnit> byteTimeUnitMap = new HashMap<>();
|
||||
for (Map.Entry<TimeUnit, Byte> entry : timeUnitByteMap.entrySet()) {
|
||||
byteTimeUnitMap.put(entry.getValue(), entry.getKey());
|
||||
}
|
||||
|
||||
TIME_UNIT_BYTE_MAP = Collections.unmodifiableMap(timeUnitByteMap);
|
||||
BYTE_TIME_UNIT_MAP = Collections.unmodifiableMap(byteTimeUnitMap);
|
||||
}
|
||||
|
||||
public static final TimeValue MINUS_ONE = timeValueMillis(-1);
|
||||
public static final TimeValue ZERO = timeValueMillis(0);
|
||||
|
||||
public static TimeValue timeValueNanos(long nanos) {
|
||||
return new TimeValue(nanos, TimeUnit.NANOSECONDS);
|
||||
|
@ -60,8 +96,19 @@ public class TimeValue implements Writeable {
|
|||
}
|
||||
|
||||
private final long duration;
|
||||
|
||||
// visible for testing
|
||||
long duration() {
|
||||
return duration;
|
||||
}
|
||||
|
||||
private final TimeUnit timeUnit;
|
||||
|
||||
// visible for testing
|
||||
TimeUnit timeUnit() {
|
||||
return timeUnit;
|
||||
}
|
||||
|
||||
public TimeValue(long millis) {
|
||||
this(millis, TimeUnit.MILLISECONDS);
|
||||
}
|
||||
|
@ -76,12 +123,13 @@ public class TimeValue implements Writeable {
|
|||
*/
|
||||
public TimeValue(StreamInput in) throws IOException {
|
||||
duration = in.readZLong();
|
||||
timeUnit = TimeUnit.NANOSECONDS;
|
||||
timeUnit = BYTE_TIME_UNIT_MAP.get(in.readByte());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeZLong(nanos());
|
||||
out.writeZLong(duration);
|
||||
out.writeByte(TIME_UNIT_BYTE_MAP.get(timeUnit));
|
||||
}
|
||||
|
||||
public long nanos() {
|
||||
|
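With the duration/unit pair serialized above (a zlong plus a one-byte unit id from TIME_UNIT_BYTE_MAP), a TimeValue now survives the wire without being collapsed to nanoseconds. A hedged round-trip sketch; timeUnit() is the package-private accessor introduced above, so this would live in the same package:

    TimeValue original = TimeValue.timeValueMinutes(5);
    BytesStreamOutput out = new BytesStreamOutput();
    original.writeTo(out);                                  // writeZLong(duration) + writeByte(unit id)
    TimeValue copy = new TimeValue(out.bytes().streamInput());
    assert copy.equals(original);
    assert copy.timeUnit() == TimeUnit.MINUTES;             // the unit is preserved, not normalized away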
@ -240,19 +288,19 @@ public class TimeValue implements Writeable {
|
|||
}
|
||||
switch (timeUnit) {
|
||||
case NANOSECONDS:
|
||||
return Strings.format1Decimals(duration, "nanos");
|
||||
return duration + "nanos";
|
||||
case MICROSECONDS:
|
||||
return Strings.format1Decimals(duration, "micros");
|
||||
return duration + "micros";
|
||||
case MILLISECONDS:
|
||||
return Strings.format1Decimals(duration, "ms");
|
||||
return duration + "ms";
|
||||
case SECONDS:
|
||||
return Strings.format1Decimals(duration, "s");
|
||||
return duration + "s";
|
||||
case MINUTES:
|
||||
return Strings.format1Decimals(duration, "m");
|
||||
return duration + "m";
|
||||
case HOURS:
|
||||
return Strings.format1Decimals(duration, "h");
|
||||
return duration + "h";
|
||||
case DAYS:
|
||||
return Strings.format1Decimals(duration, "d");
|
||||
return duration + "d";
|
||||
default:
|
||||
throw new IllegalArgumentException("unknown time unit: " + timeUnit.name());
|
||||
}
|
||||
|
@ -269,48 +317,55 @@ public class TimeValue implements Writeable {
|
|||
if (sValue == null) {
|
||||
return defaultValue;
|
||||
}
|
||||
try {
|
||||
long millis;
|
||||
String lowerSValue = sValue.toLowerCase(Locale.ROOT).trim();
|
||||
if (lowerSValue.endsWith("ms")) {
|
||||
millis = parse(lowerSValue, 2, 1);
|
||||
} else if (lowerSValue.endsWith("s")) {
|
||||
millis = parse(lowerSValue, 1, 1000);
|
||||
} else if (lowerSValue.endsWith("m")) {
|
||||
millis = parse(lowerSValue, 1, 60 * 1000);
|
||||
} else if (lowerSValue.endsWith("h")) {
|
||||
millis = parse(lowerSValue, 1, 60 * 60 * 1000);
|
||||
} else if (lowerSValue.endsWith("d")) {
|
||||
millis = parse(lowerSValue, 1, 24 * 60 * 60 * 1000);
|
||||
} else if (lowerSValue.endsWith("w")) {
|
||||
millis = parse(lowerSValue, 1, 7 * 24 * 60 * 60 * 1000);
|
||||
} else if (lowerSValue.equals("-1")) {
|
||||
// Allow this special value to be unit-less:
|
||||
millis = -1;
|
||||
} else if (lowerSValue.equals("0")) {
|
||||
// Allow this special value to be unit-less:
|
||||
millis = 0;
|
||||
} else {
|
||||
// Missing units:
|
||||
throw new ElasticsearchParseException("Failed to parse setting [{}] with value [{}] as a time value: unit is missing or unrecognized", settingName, sValue);
|
||||
}
|
||||
return new TimeValue(millis, TimeUnit.MILLISECONDS);
|
||||
} catch (NumberFormatException e) {
|
||||
throw new ElasticsearchParseException("Failed to parse [{}]", e, sValue);
|
||||
final String normalized = sValue.toLowerCase(Locale.ROOT).trim();
|
||||
if (normalized.endsWith("nanos")) {
|
||||
return new TimeValue(parse(sValue, normalized, 5), TimeUnit.NANOSECONDS);
|
||||
} else if (normalized.endsWith("micros")) {
|
||||
return new TimeValue(parse(sValue, normalized, 6), TimeUnit.MICROSECONDS);
|
||||
} else if (normalized.endsWith("ms")) {
|
||||
return new TimeValue(parse(sValue, normalized, 2), TimeUnit.MILLISECONDS);
|
||||
} else if (normalized.endsWith("s")) {
|
||||
return new TimeValue(parse(sValue, normalized, 1), TimeUnit.SECONDS);
|
||||
} else if (normalized.endsWith("m")) {
|
||||
return new TimeValue(parse(sValue, normalized, 1), TimeUnit.MINUTES);
|
||||
} else if (normalized.endsWith("h")) {
|
||||
return new TimeValue(parse(sValue, normalized, 1), TimeUnit.HOURS);
|
||||
} else if (normalized.endsWith("d")) {
|
||||
return new TimeValue(parse(sValue, normalized, 1), TimeUnit.DAYS);
|
||||
} else if (normalized.matches("-0*1")) {
|
||||
return TimeValue.MINUS_ONE;
|
||||
} else if (normalized.matches("0+")) {
|
||||
return TimeValue.ZERO;
|
||||
} else {
|
||||
// Missing units:
|
||||
throw new ElasticsearchParseException(
|
||||
"failed to parse setting [{}] with value [{}] as a time value: unit is missing or unrecognized",
|
||||
settingName,
|
||||
sValue);
|
||||
}
|
||||
}
|
||||
|
||||
private static long parse(String s, int suffixLength, long scale) {
|
||||
return (long) (Double.parseDouble(s.substring(0, s.length() - suffixLength)) * scale);
|
||||
private static long parse(final String initialInput, final String normalized, final int suffixLength) {
|
||||
final String s = normalized.substring(0, normalized.length() - suffixLength).trim();
|
||||
try {
|
||||
return Long.parseLong(s);
|
||||
} catch (final NumberFormatException e) {
|
||||
try {
|
||||
@SuppressWarnings("unused") final double ignored = Double.parseDouble(s);
|
||||
throw new ElasticsearchParseException("failed to parse [{}], fractional time values are not supported", e, initialInput);
|
||||
} catch (final NumberFormatException ignored) {
|
||||
throw new ElasticsearchParseException("failed to parse [{}]", e, initialInput);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static final long C0 = 1L;
|
||||
static final long C1 = C0 * 1000L;
|
||||
static final long C2 = C1 * 1000L;
|
||||
static final long C3 = C2 * 1000L;
|
||||
static final long C4 = C3 * 60L;
|
||||
static final long C5 = C4 * 60L;
|
||||
static final long C6 = C5 * 24L;
|
||||
private static final long C0 = 1L;
|
||||
private static final long C1 = C0 * 1000L;
|
||||
private static final long C2 = C1 * 1000L;
|
||||
private static final long C3 = C2 * 1000L;
|
||||
private static final long C4 = C3 * 60L;
|
||||
private static final long C5 = C4 * 60L;
|
||||
private static final long C6 = C5 * 24L;
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
|
|
|
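The rewritten parser above maps each suffix straight to a TimeUnit and rejects fractional values instead of silently scaling them to milliseconds; note also that the old "w" (weeks) suffix has no branch in the new code. An illustrative use (the setting name is arbitrary):

    TimeValue tenSeconds = TimeValue.parseTimeValue("10s", null, "my.setting");
    assert tenSeconds.equals(new TimeValue(10, TimeUnit.SECONDS));

    try {
        TimeValue.parseTimeValue("1.5h", null, "my.setting");
        assert false : "fractional values are no longer accepted";
    } catch (ElasticsearchParseException expected) {
        // "failed to parse [1.5h], fractional time values are not supported"
    }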
@ -29,28 +29,28 @@ public interface ByteArray extends BigArray {
|
|||
/**
|
||||
* Get an element given its index.
|
||||
*/
|
||||
public abstract byte get(long index);
|
||||
byte get(long index);
|
||||
|
||||
/**
|
||||
* Set a value at the given index and return the previous value.
|
||||
*/
|
||||
public abstract byte set(long index, byte value);
|
||||
byte set(long index, byte value);
|
||||
|
||||
/**
|
||||
* Get a reference to a slice.
|
||||
*
|
||||
*
|
||||
* @return <code>true</code> when a byte[] was materialized, <code>false</code> otherwise.
|
||||
*/
|
||||
public abstract boolean get(long index, int len, BytesRef ref);
|
||||
boolean get(long index, int len, BytesRef ref);
|
||||
|
||||
/**
|
||||
* Bulk set.
|
||||
*/
|
||||
public abstract void set(long index, byte[] buf, int offset, int len);
|
||||
void set(long index, byte[] buf, int offset, int len);
|
||||
|
||||
/**
|
||||
* Fill slots between <code>fromIndex</code> inclusive to <code>toIndex</code> exclusive with <code>value</code>.
|
||||
*/
|
||||
public abstract void fill(long fromIndex, long toIndex, byte value);
|
||||
void fill(long fromIndex, long toIndex, byte value);
|
||||
|
||||
}
|
||||
|
|
|
@ -94,11 +94,10 @@ public class ElectMasterService extends AbstractComponent {
|
|||
public void logMinimumMasterNodesWarningIfNecessary(ClusterState oldState, ClusterState newState) {
|
||||
// check if min_master_nodes setting is too low and log warning
|
||||
if (hasTooManyMasterNodes(oldState.nodes()) == false && hasTooManyMasterNodes(newState.nodes())) {
|
||||
logger.warn("value for setting \""
|
||||
+ ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()
|
||||
+ "\" is too low. This can result in data loss! Please set it to at least a quorum of master-eligible nodes "
|
||||
+ "(current value: [{}], total number of master-eligible nodes used for publishing in this round: [{}])",
|
||||
minimumMasterNodes(), newState.getNodes().getMasterNodes().size());
|
||||
logger.warn("value for setting \"{}\" is too low. This can result in data loss! Please set it to at least a quorum of master-" +
|
||||
"eligible nodes (current value: [{}], total number of master-eligible nodes used for publishing in this round: [{}])",
|
||||
ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minimumMasterNodes(),
|
||||
newState.getNodes().getMasterNodes().size());
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -335,7 +335,7 @@ public class PublishClusterStateAction extends AbstractComponent {
|
|||
|
||||
public static BytesReference serializeFullClusterState(ClusterState clusterState, Version nodeVersion) throws IOException {
|
||||
BytesStreamOutput bStream = new BytesStreamOutput();
|
||||
try (StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream)) {
|
||||
try (StreamOutput stream = CompressorFactory.COMPRESSOR.streamOutput(bStream)) {
|
||||
stream.setVersion(nodeVersion);
|
||||
stream.writeBoolean(true);
|
||||
clusterState.writeTo(stream);
|
||||
|
@ -345,7 +345,7 @@ public class PublishClusterStateAction extends AbstractComponent {
|
|||
|
||||
public static BytesReference serializeDiffClusterState(Diff diff, Version nodeVersion) throws IOException {
|
||||
BytesStreamOutput bStream = new BytesStreamOutput();
|
||||
try (StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream)) {
|
||||
try (StreamOutput stream = CompressorFactory.COMPRESSOR.streamOutput(bStream)) {
|
||||
stream.setVersion(nodeVersion);
|
||||
stream.writeBoolean(false);
|
||||
diff.writeTo(stream);
|
||||
|
|
|
@ -21,7 +21,6 @@ package org.elasticsearch.gateway;
|
|||
|
||||
import com.carrotsearch.hppc.ObjectFloatHashMap;
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.action.FailedNodeException;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
|
@ -38,7 +37,6 @@ import org.elasticsearch.index.Index;
|
|||
import org.elasticsearch.index.NodeServicesProvider;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
|
||||
import java.nio.file.Path;
|
||||
import java.util.Arrays;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
|
@ -141,7 +139,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
|
|||
try {
|
||||
if (electedIndexMetaData.getState() == IndexMetaData.State.OPEN) {
|
||||
// verify that we can actually create this index - if not we recover it as closed with lots of warn logs
|
||||
indicesService.verifyIndexMetadata(nodeServicesProvider, electedIndexMetaData);
|
||||
indicesService.verifyIndexMetadata(nodeServicesProvider, electedIndexMetaData, electedIndexMetaData);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.warn("recovering index {} failed - recovering as closed", e, electedIndexMetaData.getIndex());
|
||||
|
@ -159,15 +157,6 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
|
|||
builder.metaData(metaDataBuilder);
|
||||
listener.onSuccess(builder.build());
|
||||
}
|
||||
public void reset() throws Exception {
|
||||
try {
|
||||
Path[] dataPaths = nodeEnv.nodeDataPaths();
|
||||
logger.trace("removing node data paths: [{}]", (Object)dataPaths);
|
||||
IOUtils.rm(dataPaths);
|
||||
} catch (Exception ex) {
|
||||
logger.debug("failed to delete shard locations", ex);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterChanged(final ClusterChangedEvent event) {
|
||||
|
|
|
@ -24,6 +24,7 @@ import org.elasticsearch.common.bytes.BytesReference;
|
|||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.netty.NettyUtils;
|
||||
import org.elasticsearch.common.netty.ReleaseChannelFutureListener;
|
||||
import org.elasticsearch.http.netty.cors.CorsHandler;
|
||||
import org.elasticsearch.http.netty.pipelining.OrderedDownstreamChannelEvent;
|
||||
|
@ -105,7 +106,7 @@ public final class NettyHttpChannel extends AbstractRestChannel {
|
|||
ChannelBuffer buffer;
|
||||
boolean addedReleaseListener = false;
|
||||
try {
|
||||
buffer = content.toChannelBuffer();
|
||||
buffer = NettyUtils.toChannelBuffer(content);
|
||||
resp.setContent(buffer);
|
||||
|
||||
// If our response doesn't specify a content-type header, set one
|
||||
|
|
|
@ -21,7 +21,7 @@ package org.elasticsearch.http.netty;
|
|||
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.bytes.ChannelBufferBytesReference;
|
||||
import org.elasticsearch.common.netty.NettyUtils;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.rest.support.RestUtils;
|
||||
import org.jboss.netty.channel.Channel;
|
||||
|
@ -47,7 +47,7 @@ public class NettyHttpRequest extends RestRequest {
|
|||
this.channel = channel;
|
||||
this.params = new HashMap<>();
|
||||
if (request.getContent().readable()) {
|
||||
this.content = new ChannelBufferBytesReference(request.getContent());
|
||||
this.content = NettyUtils.toBytesReference(request.getContent());
|
||||
} else {
|
||||
this.content = BytesArray.EMPTY;
|
||||
}
|
||||
|
|
|
@ -412,10 +412,12 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService>
|
|||
}
|
||||
|
||||
/**
|
||||
* This method verifies that the given {@link IndexMetaData} holds sane values to create an {@link IndexService}. This method will throw an
|
||||
* exception if the creation fails. The created {@link IndexService} will not be registered and will be closed immediately.
|
||||
* This method verifies that the given {@code metaData} holds sane values to create an {@link IndexService}.
|
||||
* This method tries to update the meta data of the created {@link IndexService} if the given {@code metaDataUpdate} is different from the given {@code metaData}.
|
||||
* This method will throw an exception if the creation or the update fails.
|
||||
* The created {@link IndexService} will not be registered and will be closed immediately.
|
||||
*/
|
||||
public synchronized void verifyIndexMetadata(final NodeServicesProvider nodeServicesProvider, IndexMetaData metaData) throws IOException {
|
||||
public synchronized void verifyIndexMetadata(final NodeServicesProvider nodeServicesProvider, IndexMetaData metaData, IndexMetaData metaDataUpdate) throws IOException {
|
||||
final List<Closeable> closeables = new ArrayList<>();
|
||||
try {
|
||||
IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() {});
|
||||
|
@ -431,7 +433,9 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService>
|
|||
service.mapperService().merge(typeMapping.value.type(), typeMapping.value.source(),
|
||||
MapperService.MergeReason.MAPPING_RECOVERY, true);
|
||||
}
|
||||
service.getIndexSettings().getScopedSettings().validateUpdate(metaData.getSettings());
|
||||
if (metaData.equals(metaDataUpdate) == false) {
|
||||
service.updateMetaData(metaDataUpdate);
|
||||
}
|
||||
} finally {
|
||||
IOUtils.close(closeables);
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.indices.analysis;
|
|||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.NamedRegistry;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
|
@ -143,12 +144,7 @@ import org.elasticsearch.index.analysis.compound.HyphenationCompoundWordTokenFil
|
|||
import org.elasticsearch.plugins.AnalysisPlugin;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.function.Function;
|
||||
|
||||
import static java.util.Objects.requireNonNull;
|
||||
|
||||
/**
|
||||
* Sets up {@link AnalysisRegistry}.
|
||||
|
@ -170,12 +166,12 @@ public final class AnalysisModule {
|
|||
public AnalysisModule(Environment environment, List<AnalysisPlugin> plugins) throws IOException {
|
||||
NamedRegistry<AnalysisProvider<CharFilterFactory>> charFilters = setupCharFilters(plugins);
|
||||
NamedRegistry<org.apache.lucene.analysis.hunspell.Dictionary> hunspellDictionaries = setupHunspellDictionaries(plugins);
|
||||
hunspellService = new HunspellService(environment.settings(), environment, hunspellDictionaries.registry);
|
||||
hunspellService = new HunspellService(environment.settings(), environment, hunspellDictionaries.getRegistry());
|
||||
NamedRegistry<AnalysisProvider<TokenFilterFactory>> tokenFilters = setupTokenFilters(plugins, hunspellService);
|
||||
NamedRegistry<AnalysisProvider<TokenizerFactory>> tokenizers = setupTokenizers(plugins);
|
||||
NamedRegistry<AnalysisProvider<AnalyzerProvider<?>>> analyzers = setupAnalyzers(plugins);
|
||||
analysisRegistry = new AnalysisRegistry(environment, charFilters.registry, tokenFilters.registry,
|
||||
tokenizers.registry, analyzers.registry);
|
||||
analysisRegistry = new AnalysisRegistry(environment, charFilters.getRegistry(), tokenFilters.getRegistry(),
|
||||
tokenizers.getRegistry(), analyzers.getRegistry());
|
||||
}
|
||||
|
||||
HunspellService getHunspellService() {
|
||||
|
@ -191,13 +187,13 @@ public final class AnalysisModule {
|
|||
charFilters.register("html_strip", HtmlStripCharFilterFactory::new);
|
||||
charFilters.register("pattern_replace", requriesAnalysisSettings(PatternReplaceCharFilterFactory::new));
|
||||
charFilters.register("mapping", requriesAnalysisSettings(MappingCharFilterFactory::new));
|
||||
charFilters.registerPlugins(plugins, AnalysisPlugin::getCharFilters);
|
||||
charFilters.extractAndRegister(plugins, AnalysisPlugin::getCharFilters);
|
||||
return charFilters;
|
||||
}
|
||||
|
||||
public NamedRegistry<org.apache.lucene.analysis.hunspell.Dictionary> setupHunspellDictionaries(List<AnalysisPlugin> plugins) {
|
||||
NamedRegistry<org.apache.lucene.analysis.hunspell.Dictionary> hunspellDictionaries = new NamedRegistry<>("dictionary");
|
||||
hunspellDictionaries.registerPlugins(plugins, AnalysisPlugin::getHunspellDictionaries);
|
||||
hunspellDictionaries.extractAndRegister(plugins, AnalysisPlugin::getHunspellDictionaries);
|
||||
return hunspellDictionaries;
|
||||
}
|
||||
|
||||
|
@ -262,7 +258,7 @@ public final class AnalysisModule {
|
|||
tokenFilters.register("classic", ClassicFilterFactory::new);
|
||||
tokenFilters.register("decimal_digit", DecimalDigitFilterFactory::new);
|
||||
tokenFilters.register("fingerprint", FingerprintTokenFilterFactory::new);
|
||||
tokenFilters.registerPlugins(plugins, AnalysisPlugin::getTokenFilters);
|
||||
tokenFilters.extractAndRegister(plugins, AnalysisPlugin::getTokenFilters);
|
||||
return tokenFilters;
|
||||
}
|
||||
|
||||
|
@ -283,7 +279,7 @@ public final class AnalysisModule {
|
|||
tokenizers.register("pattern", PatternTokenizerFactory::new);
|
||||
tokenizers.register("classic", ClassicTokenizerFactory::new);
|
||||
tokenizers.register("thai", ThaiTokenizerFactory::new);
|
||||
tokenizers.registerPlugins(plugins, AnalysisPlugin::getTokenizers);
|
||||
tokenizers.extractAndRegister(plugins, AnalysisPlugin::getTokenizers);
|
||||
return tokenizers;
|
||||
}
|
||||
|
||||
|
@ -333,7 +329,7 @@ public final class AnalysisModule {
|
|||
analyzers.register("turkish", TurkishAnalyzerProvider::new);
|
||||
analyzers.register("thai", ThaiAnalyzerProvider::new);
|
||||
analyzers.register("fingerprint", FingerprintAnalyzerProvider::new);
|
||||
analyzers.registerPlugins(plugins, AnalysisPlugin::getAnalyzers);
|
||||
analyzers.extractAndRegister(plugins, AnalysisPlugin::getAnalyzers);
|
||||
return analyzers;
|
||||
}
|
||||
|
||||
|
@ -392,29 +388,4 @@ public final class AnalysisModule {
|
|||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
private static class NamedRegistry<T> {
|
||||
private final Map<String, T> registry = new HashMap<>();
|
||||
private final String targetName;
|
||||
|
||||
public NamedRegistry(String targetName) {
|
||||
this.targetName = targetName;
|
||||
}
|
||||
|
||||
private void register(String name, T t) {
|
||||
requireNonNull(name, "name is required");
|
||||
requireNonNull(t, targetName + " is required");
|
||||
if (registry.putIfAbsent(name, t) != null) {
|
||||
throw new IllegalArgumentException(targetName + " for name " + name + " already registered");
|
||||
}
|
||||
}
|
||||
|
||||
private <P> void registerPlugins(List<P> plugins, Function<P, Map<String, T>> lookup) {
|
||||
for (P plugin : plugins) {
|
||||
for (Map.Entry<String, T> entry : lookup.apply(plugin).entrySet()) {
|
||||
register(entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
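The private NamedRegistry inner class removed above is replaced by the shared org.elasticsearch.common.NamedRegistry imported at the top of this file, whose getRegistry() and extractAndRegister() methods appear in the hunks. A hedged sketch of how one of these registries is assembled (plugins stands for the List<AnalysisPlugin> passed to the module):

    NamedRegistry<AnalysisProvider<CharFilterFactory>> charFilters = new NamedRegistry<>("char_filter");
    charFilters.register("html_strip", HtmlStripCharFilterFactory::new);       // built-in factory
    charFilters.extractAndRegister(plugins, AnalysisPlugin::getCharFilters);   // plugin-provided factories
    Map<String, AnalysisProvider<CharFilterFactory>> registry = charFilters.getRegistry();
    assert registry.containsKey("html_strip");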
@ -33,7 +33,7 @@ import java.util.Map;
|
|||
/**
|
||||
* Holder class for several ingest related services.
|
||||
*/
|
||||
public class IngestService implements Closeable {
|
||||
public class IngestService {
|
||||
|
||||
private final PipelineStore pipelineStore;
|
||||
private final PipelineExecutionService pipelineExecutionService;
|
||||
|
@ -65,10 +65,4 @@ public class IngestService implements Closeable {
|
|||
}
|
||||
return new IngestInfo(processorInfoList);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
pipelineStore.close();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -47,7 +47,7 @@ import java.util.HashMap;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
public class PipelineStore extends AbstractComponent implements Closeable, ClusterStateListener {
|
||||
public class PipelineStore extends AbstractComponent implements ClusterStateListener {
|
||||
|
||||
private final Pipeline.Factory factory = new Pipeline.Factory();
|
||||
private ProcessorsRegistry processorRegistry;
|
||||
|
@ -67,13 +67,6 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
|
|||
this.processorRegistry = processorsRegistryBuilder.build(scriptService, clusterService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
// TODO: When org.elasticsearch.node.Node can close Closable instances we should try to remove this code,
|
||||
// since any wired closable should be able to close itself
|
||||
processorRegistry.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterChanged(ClusterChangedEvent event) {
|
||||
innerUpdatePipelines(event.state());
|
||||
|
|
|
@@ -19,20 +19,17 @@

package org.elasticsearch.ingest;

import org.apache.lucene.util.IOUtils;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.script.ScriptService;

import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public final class ProcessorsRegistry implements Closeable {
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.script.ScriptService;

public final class ProcessorsRegistry {

private final Map<String, Processor.Factory> processorFactories;

@@ -44,17 +41,6 @@ public final class ProcessorsRegistry implements Closeable {
return processorFactories.get(name);
}

@Override
public void close() throws IOException {
List<Closeable> closeables = new ArrayList<>();
for (Processor.Factory factory : processorFactories.values()) {
if (factory instanceof Closeable) {
closeables.add((Closeable) factory);
}
}
IOUtils.close(closeables);
}

// For testing:
Map<String, Processor.Factory> getProcessorFactories() {
return processorFactories;

@ -86,8 +86,9 @@ import org.elasticsearch.monitor.MonitorService;
|
|||
import org.elasticsearch.monitor.jvm.JvmInfo;
|
||||
import org.elasticsearch.node.internal.InternalSettingsPreparer;
|
||||
import org.elasticsearch.node.service.NodeService;
|
||||
import org.elasticsearch.plugins.MapperPlugin;
|
||||
import org.elasticsearch.plugins.ActionPlugin;
|
||||
import org.elasticsearch.plugins.AnalysisPlugin;
|
||||
import org.elasticsearch.plugins.MapperPlugin;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.plugins.PluginsService;
|
||||
import org.elasticsearch.plugins.ScriptPlugin;
|
||||
|
@ -162,6 +163,7 @@ public class Node implements Closeable {
|
|||
private final Injector injector;
|
||||
private final Settings settings;
|
||||
private final Environment environment;
|
||||
private final NodeEnvironment nodeEnvironment;
|
||||
private final PluginsService pluginsService;
|
||||
private final Client client;
|
||||
|
||||
|
@ -235,7 +237,6 @@ public class Node implements Closeable {
|
|||
// this is as early as we can validate settings at this point. we already pass them to ScriptModule as well as ThreadPool
|
||||
// so we might be late here already
|
||||
final SettingsModule settingsModule = new SettingsModule(this.settings, additionalSettings, additionalSettingsFilter);
|
||||
final NodeEnvironment nodeEnvironment;
|
||||
try {
|
||||
nodeEnvironment = new NodeEnvironment(this.settings, this.environment);
|
||||
resourcesToClose.add(nodeEnvironment);
|
||||
|
@ -258,10 +259,13 @@ public class Node implements Closeable {
|
|||
modules.add(new NodeModule(this, monitorService));
|
||||
modules.add(new NetworkModule(networkService, settings, false, namedWriteableRegistry));
|
||||
modules.add(new DiscoveryModule(this.settings));
|
||||
modules.add(new ClusterModule(this.settings, clusterService));
|
||||
ClusterModule clusterModule = new ClusterModule(settings, clusterService);
|
||||
modules.add(clusterModule);
|
||||
modules.add(new IndicesModule(namedWriteableRegistry, pluginsService.filterPlugins(MapperPlugin.class)));
|
||||
modules.add(new SearchModule(settings, namedWriteableRegistry));
|
||||
modules.add(new ActionModule(DiscoveryNode.isIngestNode(settings), false));
|
||||
modules.add(new ActionModule(DiscoveryNode.isIngestNode(settings), false, settings,
|
||||
clusterModule.getIndexNameExpressionResolver(), settingsModule.getClusterSettings(),
|
||||
pluginsService.filterPlugins(ActionPlugin.class)));
|
||||
modules.add(new GatewayModule());
|
||||
modules.add(new RepositoriesModule());
|
||||
pluginsService.processModules(modules);
|
||||
|
@ -320,6 +324,14 @@ public class Node implements Closeable {
|
|||
return environment;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the {@link NodeEnvironment} instance of this node
|
||||
*/
|
||||
public NodeEnvironment getNodeEnvironment() {
|
||||
return nodeEnvironment;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Start the node. If the node is already started, this method is no-op.
|
||||
*/
|
||||
|
@ -540,6 +552,7 @@ public class Node implements Closeable {
|
|||
toClose.add(() -> stopWatch.stop().start("plugin(" + plugin.getName() + ")"));
|
||||
toClose.add(injector.getInstance(plugin));
|
||||
}
|
||||
toClose.addAll(pluginsService.filterPlugins(Closeable.class));
|
||||
|
||||
toClose.add(() -> stopWatch.stop().start("script"));
|
||||
toClose.add(injector.getInstance(ScriptService.class));
|
||||
|
|
|
@@ -199,7 +199,6 @@ public class NodeService extends AbstractComponent implements Closeable {

@Override
public void close() throws IOException {
ingestService.close();
indicesService.close();
}
}

@@ -0,0 +1,87 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugins;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.GenericAction;
import org.elasticsearch.action.support.ActionFilter;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.action.support.TransportActions;

import java.util.List;

import static java.util.Collections.emptyList;

/**
 * An additional extension point for {@link Plugin}s that extends Elasticsearch's action functionality. Implement it like this:
 * <pre>{@code
 *   {@literal @}Override
 *   public List<ActionHandler<?, ?>> getActions() {
 *       return Arrays.asList(new ActionHandler<>(ReindexAction.INSTANCE, TransportReindexAction.class),
 *               new ActionHandler<>(UpdateByQueryAction.INSTANCE, TransportUpdateByQueryAction.class),
 *               new ActionHandler<>(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class),
 *               new ActionHandler<>(RethrottleAction.INSTANCE, TransportRethrottleAction.class));
 *   }
 * }</pre>
 */
public interface ActionPlugin {
    /**
     * Actions added by this plugin.
     */
    default List<ActionHandler<? extends ActionRequest<?>, ? extends ActionResponse>> getActions() {
        return emptyList();
    }

    /**
     * Action filters added by this plugin.
     */
    default List<Class<? extends ActionFilter>> getActionFilters() {
        return emptyList();
    }

    public static final class ActionHandler<Request extends ActionRequest<Request>, Response extends ActionResponse> {
        private final GenericAction<Request, Response> action;
        private final Class<? extends TransportAction<Request, Response>> transportAction;
        private final Class<?>[] supportTransportActions;

        /**
         * Create a record of an action, the {@linkplain TransportAction} that handles it, and any supporting {@linkplain TransportActions}
         * that are needed by that {@linkplain TransportAction}.
         */
        public ActionHandler(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction,
                Class<?>... supportTransportActions) {
            this.action = action;
            this.transportAction = transportAction;
            this.supportTransportActions = supportTransportActions;
        }

        public GenericAction<Request, Response> getAction() {
            return action;
        }

        public Class<? extends TransportAction<Request, Response>> getTransportAction() {
            return transportAction;
        }

        public Class<?>[] getSupportTransportActions() {
            return supportTransportActions;
        }
    }
}

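A plugin opts into this extension point by implementing ActionPlugin next to Plugin. A minimal sketch follows; MyAction and TransportMyAction are hypothetical placeholders for a plugin's own action and transport handler (the TestTaskPlugin change further down performs the same migration on a real test action):

package org.example.myplugin; // hypothetical package, for illustration only

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;

import java.util.Collections;
import java.util.List;

// Registers one custom action through the new getActions() hook instead of onModule(ActionModule).
public class MyActionPlugin extends Plugin implements ActionPlugin {
    @Override
    public List<ActionHandler<? extends ActionRequest<?>, ? extends ActionResponse>> getActions() {
        // MyAction / TransportMyAction are stand-ins for the plugin's own classes, not real Elasticsearch types.
        return Collections.singletonList(new ActionHandler<>(MyAction.INSTANCE, TransportMyAction.class));
    }
}
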
@ -19,12 +19,31 @@
|
|||
|
||||
package org.elasticsearch.plugins;
|
||||
|
||||
import joptsimple.OptionSet;
|
||||
import joptsimple.OptionSpec;
|
||||
import org.apache.lucene.search.spell.LevensteinDistance;
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.bootstrap.JarHell;
|
||||
import org.elasticsearch.cli.ExitCodes;
|
||||
import org.elasticsearch.cli.SettingCommand;
|
||||
import org.elasticsearch.cli.Terminal;
|
||||
import org.elasticsearch.cli.UserError;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.hash.MessageDigests;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.node.internal.InternalSettingsPreparer;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.OutputStream;
|
||||
import java.net.URL;
|
||||
import java.net.URLConnection;
|
||||
import java.net.URLDecoder;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.DirectoryStream;
|
||||
|
@ -49,24 +68,6 @@ import java.util.stream.Collectors;
|
|||
import java.util.zip.ZipEntry;
|
||||
import java.util.zip.ZipInputStream;
|
||||
|
||||
import joptsimple.OptionSet;
|
||||
import joptsimple.OptionSpec;
|
||||
import org.apache.lucene.search.spell.LevensteinDistance;
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.bootstrap.JarHell;
|
||||
import org.elasticsearch.cli.ExitCodes;
|
||||
import org.elasticsearch.cli.SettingCommand;
|
||||
import org.elasticsearch.cli.Terminal;
|
||||
import org.elasticsearch.cli.UserError;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.hash.MessageDigests;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.node.internal.InternalSettingsPreparer;
|
||||
|
||||
import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE;
|
||||
|
||||
/**
|
||||
|
@ -107,7 +108,7 @@ class InstallPluginCommand extends SettingCommand {
|
|||
static final Set<String> MODULES;
|
||||
static {
|
||||
try (InputStream stream = InstallPluginCommand.class.getResourceAsStream("/modules.txt");
|
||||
BufferedReader reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))) {
|
||||
BufferedReader reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))) {
|
||||
Set<String> modules = new HashSet<>();
|
||||
String line = reader.readLine();
|
||||
while (line != null) {
|
||||
|
@ -124,7 +125,7 @@ class InstallPluginCommand extends SettingCommand {
|
|||
static final Set<String> OFFICIAL_PLUGINS;
|
||||
static {
|
||||
try (InputStream stream = InstallPluginCommand.class.getResourceAsStream("/plugins.txt");
|
||||
BufferedReader reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))) {
|
||||
BufferedReader reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))) {
|
||||
Set<String> plugins = new TreeSet<>(); // use tree set to get sorting for help command
|
||||
String line = reader.readLine();
|
||||
while (line != null) {
|
||||
|
@ -141,6 +142,7 @@ class InstallPluginCommand extends SettingCommand {
|
|||
private final OptionSpec<Void> batchOption;
|
||||
private final OptionSpec<String> arguments;
|
||||
|
||||
|
||||
public static final Set<PosixFilePermission> DIR_AND_EXECUTABLE_PERMS;
|
||||
public static final Set<PosixFilePermission> FILE_PERMS;
|
||||
|
||||
|
@@ -273,13 +275,49 @@ class InstallPluginCommand extends SettingCommand {
terminal.println(VERBOSE, "Retrieving zip from " + urlString);
URL url = new URL(urlString);
Path zip = Files.createTempFile(tmpDir, null, ".zip");
try (InputStream in = url.openStream()) {
URLConnection urlConnection = url.openConnection();
int contentLength = urlConnection.getContentLength();
try (InputStream in = new TerminalProgressInputStream(urlConnection.getInputStream(), contentLength, terminal)) {
// must overwrite since creating the temp file above actually created the file
Files.copy(in, zip, StandardCopyOption.REPLACE_EXISTING);
}
return zip;
}

/**
 * content length might be -1 for unknown and progress only makes sense if the content length is greater than 0
 */
private class TerminalProgressInputStream extends ProgressInputStream {

private final Terminal terminal;
private int width = 50;
private final boolean enabled;

public TerminalProgressInputStream(InputStream is, int expectedTotalSize, Terminal terminal) {
super(is, expectedTotalSize);
this.terminal = terminal;
this.enabled = expectedTotalSize > 0;
}

@Override
public void onProgress(int percent) {
if (enabled) {
int currentPosition = percent * width / 100;
StringBuilder sb = new StringBuilder("\r[");
sb.append(String.join("=", Collections.nCopies(currentPosition, "")));
if (currentPosition > 0 && percent < 100) {
sb.append(">");
}
sb.append(String.join(" ", Collections.nCopies(width - currentPosition, "")));
sb.append("] %s ");
if (percent == 100) {
sb.append("\n");
}
terminal.print(Terminal.Verbosity.NORMAL, String.format(Locale.ROOT, sb.toString(), percent + "%"));
}
}
}

/** Downloads a zip from the url, as well as a SHA1 checksum, and checks the checksum. */
private Path downloadZipAndChecksum(Terminal terminal, String urlString, Path tmpDir) throws Exception {
Path zip = downloadZip(terminal, urlString, tmpDir);

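The onProgress implementation above only builds a fixed-width text bar, so the rendering can be exercised on its own. A standalone sketch of the same string construction (plain Java, no Elasticsearch dependencies; the sample percentages are illustrative):

import java.util.Collections;
import java.util.Locale;

// Mirrors the bar-drawing logic above: a 50-column bar of '=' characters,
// '>' as the moving head, and "%s" later substituted with the percentage text.
public class ProgressBarSketch {
    public static void main(String[] args) {
        int width = 50;
        for (int percent : new int[] {10, 45, 100}) {
            int currentPosition = percent * width / 100;
            StringBuilder sb = new StringBuilder("\r[");
            sb.append(String.join("=", Collections.nCopies(currentPosition, "")));
            if (currentPosition > 0 && percent < 100) {
                sb.append(">");
            }
            sb.append(String.join(" ", Collections.nCopies(width - currentPosition, "")));
            sb.append("] %s ");
            if (percent == 100) {
                sb.append("\n");
            }
            System.out.print(String.format(Locale.ROOT, sb.toString(), percent + "%"));
        }
    }
}
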
@ -19,6 +19,7 @@
|
|||
|
||||
package org.elasticsearch.plugins;
|
||||
|
||||
import org.elasticsearch.action.ActionModule;
|
||||
import org.elasticsearch.common.component.LifecycleComponent;
|
||||
import org.elasticsearch.common.inject.Module;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
|
@ -112,6 +113,14 @@ public abstract class Plugin {
|
|||
@Deprecated
|
||||
public final void onModule(AnalysisModule module) {}
|
||||
|
||||
/**
|
||||
* Old-style action extension point.
|
||||
*
|
||||
* @deprecated implement {@link ActionPlugin} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public final void onModule(ActionModule module) {}
|
||||
|
||||
/**
|
||||
* Provides the list of this plugin's custom thread pools, empty if
|
||||
* none.
|
||||
|
|
|
@ -26,8 +26,6 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.node.internal.InternalSettingsPreparer;
|
||||
|
||||
import java.util.Collections;
|
||||
|
||||
/**
|
||||
* A cli tool for adding, removing and listing plugins for elasticsearch.
|
||||
*/
|
||||
|
|
|
@@ -0,0 +1,83 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugins;

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

/**
 * An input stream that allows adding a listener to monitor progress.
 * The listener is triggered whenever progress increases by a full percent.
 * The listener is never triggered twice on the same percentage.
 * The listener reports at most 99 percent if the expectedTotalSize is exceeded, until reading is finished.
 *
 * Only used by the InstallPluginCommand, thus package private here.
 */
abstract class ProgressInputStream extends FilterInputStream {

    private final int expectedTotalSize;
    private int currentPercent;
    private int count = 0;

    public ProgressInputStream(InputStream is, int expectedTotalSize) {
        super(is);
        this.expectedTotalSize = expectedTotalSize;
        this.currentPercent = 0;
    }

    @Override
    public int read() throws IOException {
        int read = in.read();
        checkProgress(read == -1 ? -1 : 1);
        return read;
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        int byteCount = super.read(b, off, len);
        checkProgress(byteCount);
        return byteCount;
    }

    @Override
    public int read(byte b[]) throws IOException {
        return read(b, 0, b.length);
    }

    void checkProgress(int byteCount) {
        // are we done?
        if (byteCount == -1) {
            currentPercent = 100;
            onProgress(currentPercent);
        } else {
            count += byteCount;
            // rounding up to 100% would mean we say we are done, before we are...
            // this also catches issues, when expectedTotalSize was guessed wrong
            int percent = Math.min(99, (int) Math.floor(100.0 * count / expectedTotalSize));
            if (percent > currentPercent) {
                currentPercent = percent;
                onProgress(percent);
            }
        }
    }

    public void onProgress(int percent) {}
}

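To see how checkProgress turns byte counts into callbacks, the small driver below wraps a known-size payload. It assumes it lives in the org.elasticsearch.plugins package purely so the package-private class is visible; that placement is an assumption made for illustration only.

package org.elasticsearch.plugins; // assumed placement, only so the package-private class above is visible

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

// Drives the stream above with 200 expected bytes and 64-byte reads.
// onProgress fires at 32, 64, 96, then 99 (capped, as the Javadoc describes),
// and the -1 returned at end-of-stream finally pushes it to 100.
public class ProgressInputStreamDemo {
    public static void main(String[] args) throws IOException {
        byte[] payload = new byte[200];
        InputStream wrapped = new ProgressInputStream(new ByteArrayInputStream(payload), payload.length) {
            @Override
            public void onProgress(int percent) {
                System.out.println("progress: " + percent + "%");
            }
        };
        byte[] buffer = new byte[64];
        while (wrapped.read(buffer) != -1) {
            // just consume the stream; progress is reported as a side effect
        }
        wrapped.close();
    }
}
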
@ -195,7 +195,7 @@ public class ChecksumBlobStoreFormat<T extends ToXContent> extends BlobStoreForm
|
|||
protected BytesReference write(T obj) throws IOException {
|
||||
try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) {
|
||||
if (compress) {
|
||||
try (StreamOutput compressedStreamOutput = CompressorFactory.defaultCompressor().streamOutput(bytesStreamOutput)) {
|
||||
try (StreamOutput compressedStreamOutput = CompressorFactory.COMPRESSOR.streamOutput(bytesStreamOutput)) {
|
||||
write(obj, compressedStreamOutput);
|
||||
}
|
||||
} else {
|
||||
|
|
|
@ -115,6 +115,7 @@ public interface DocValueFormat extends NamedWriteable {
|
|||
return Double.parseDouble(value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef parseBytesRef(String value) {
|
||||
return new BytesRef(value);
|
||||
}
|
||||
|
|
|
@ -685,7 +685,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
|
|||
if (source.profile()) {
|
||||
context.setProfilers(new Profilers(context.searcher()));
|
||||
}
|
||||
context.timeoutInMillis(source.timeoutInMillis());
|
||||
context.timeout(source.timeout());
|
||||
context.terminateAfter(source.terminateAfter());
|
||||
if (source.aggregations() != null) {
|
||||
try {
|
||||
|
|
|
@ -143,7 +143,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
|
|||
|
||||
private Float minScore;
|
||||
|
||||
private long timeoutInMillis = -1;
|
||||
private TimeValue timeout = null;
|
||||
private int terminateAfter = SearchContext.DEFAULT_TERMINATE_AFTER;
|
||||
|
||||
private List<String> fieldNames;
|
||||
|
@ -241,7 +241,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
|
|||
}
|
||||
suggestBuilder = in.readOptionalWriteable(SuggestBuilder::new);
|
||||
terminateAfter = in.readVInt();
|
||||
timeoutInMillis = in.readLong();
|
||||
timeout = in.readOptionalWriteable(TimeValue::new);
|
||||
trackScores = in.readBoolean();
|
||||
version = in.readOptionalBoolean();
|
||||
ext = in.readOptionalBytesReference();
|
||||
|
@ -320,7 +320,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
|
|||
}
|
||||
out.writeOptionalWriteable(suggestBuilder);
|
||||
out.writeVInt(terminateAfter);
|
||||
out.writeLong(timeoutInMillis);
|
||||
out.writeOptionalWriteable(timeout);
|
||||
out.writeBoolean(trackScores);
|
||||
out.writeOptionalBoolean(version);
|
||||
out.writeOptionalBytesReference(ext);
|
||||
|
@ -446,15 +446,15 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
|
|||
* An optional timeout to control how long search is allowed to take.
|
||||
*/
|
||||
public SearchSourceBuilder timeout(TimeValue timeout) {
|
||||
this.timeoutInMillis = timeout.millis();
|
||||
this.timeout = timeout;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the timeout to control how long search is allowed to take.
|
||||
*/
|
||||
public long timeoutInMillis() {
|
||||
return timeoutInMillis;
|
||||
public TimeValue timeout() {
|
||||
return timeout;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -928,7 +928,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
|
|||
rewrittenBuilder.stats = stats;
|
||||
rewrittenBuilder.suggestBuilder = suggestBuilder;
|
||||
rewrittenBuilder.terminateAfter = terminateAfter;
|
||||
rewrittenBuilder.timeoutInMillis = timeoutInMillis;
|
||||
rewrittenBuilder.timeout = timeout;
|
||||
rewrittenBuilder.trackScores = trackScores;
|
||||
rewrittenBuilder.version = version;
|
||||
return rewrittenBuilder;
|
||||
|
@ -958,7 +958,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
|
|||
} else if (context.getParseFieldMatcher().match(currentFieldName, SIZE_FIELD)) {
|
||||
size = parser.intValue();
|
||||
} else if (context.getParseFieldMatcher().match(currentFieldName, TIMEOUT_FIELD)) {
|
||||
timeoutInMillis = TimeValue.parseTimeValue(parser.text(), null, TIMEOUT_FIELD.getPreferredName()).millis();
|
||||
timeout = TimeValue.parseTimeValue(parser.text(), null, TIMEOUT_FIELD.getPreferredName());
|
||||
} else if (context.getParseFieldMatcher().match(currentFieldName, TERMINATE_AFTER_FIELD)) {
|
||||
terminateAfter = parser.intValue();
|
||||
} else if (context.getParseFieldMatcher().match(currentFieldName, MIN_SCORE_FIELD)) {
|
||||
|
@ -1095,8 +1095,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
|
|||
builder.field(SIZE_FIELD.getPreferredName(), size);
|
||||
}
|
||||
|
||||
if (timeoutInMillis != -1) {
|
||||
builder.field(TIMEOUT_FIELD.getPreferredName(), TimeValue.timeValueMillis(timeoutInMillis).toString());
|
||||
if (timeout != null && !timeout.equals(TimeValue.MINUS_ONE)) {
|
||||
builder.field(TIMEOUT_FIELD.getPreferredName(), timeout.getStringRep());
|
||||
}
|
||||
|
||||
if (terminateAfter != SearchContext.DEFAULT_TERMINATE_AFTER) {
|
||||
|
@ -1341,7 +1341,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
|
|||
public int hashCode() {
|
||||
return Objects.hash(aggregations, explain, fetchSourceContext, fieldDataFields, fieldNames, from,
|
||||
highlightBuilder, indexBoost, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields,
|
||||
size, sorts, searchAfterBuilder, sliceBuilder, stats, suggestBuilder, terminateAfter, timeoutInMillis, trackScores, version, profile);
|
||||
size, sorts, searchAfterBuilder, sliceBuilder, stats, suggestBuilder, terminateAfter, timeout, trackScores, version, profile);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -1373,7 +1373,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
|
|||
&& Objects.equals(stats, other.stats)
|
||||
&& Objects.equals(suggestBuilder, other.suggestBuilder)
|
||||
&& Objects.equals(terminateAfter, other.terminateAfter)
|
||||
&& Objects.equals(timeoutInMillis, other.timeoutInMillis)
|
||||
&& Objects.equals(timeout, other.timeout)
|
||||
&& Objects.equals(trackScores, other.trackScores)
|
||||
&& Objects.equals(version, other.version)
|
||||
&& Objects.equals(profile, other.profile);
|
||||
|
|
|
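The SearchSourceBuilder hunks above replace the long timeoutInMillis field with a TimeValue, so the builder now round-trips the unit the caller supplied instead of flattening it to milliseconds. A small sketch, assuming the usual org.elasticsearch package locations for these classes (not stated in the diff itself):

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.builder.SearchSourceBuilder;

// Shows the TimeValue-based timeout round trip introduced above.
public class TimeoutRoundTrip {
    public static void main(String[] args) {
        SearchSourceBuilder source = new SearchSourceBuilder()
                .timeout(TimeValue.timeValueSeconds(5));
        // The builder now keeps the unit the caller supplied...
        System.out.println(source.timeout().getStringRep()); // e.g. "5s" rather than "5000"
        // ...and the same representation parses back, as in the TIMEOUT_FIELD branch above.
        TimeValue parsed = TimeValue.parseTimeValue("5s", null, "timeout");
        System.out.println(parsed.millis()); // 5000
    }
}
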
@ -24,8 +24,8 @@ import org.apache.lucene.search.BooleanClause.Occur;
|
|||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.search.Collector;
|
||||
import org.apache.lucene.search.ConstantScoreQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.FieldDoc;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.Counter;
|
||||
import org.elasticsearch.action.search.SearchType;
|
||||
|
@ -99,8 +99,7 @@ public class DefaultSearchContext extends SearchContext {
|
|||
private final QuerySearchResult queryResult;
|
||||
private final FetchSearchResult fetchResult;
|
||||
private float queryBoost = 1.0f;
|
||||
// timeout in millis
|
||||
private long timeoutInMillis;
|
||||
private TimeValue timeout;
|
||||
// terminate after count
|
||||
private int terminateAfter = DEFAULT_TERMINATE_AFTER;
|
||||
private List<String> groupStats;
|
||||
|
@ -174,7 +173,7 @@ public class DefaultSearchContext extends SearchContext {
|
|||
this.indexService = indexService;
|
||||
this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy());
|
||||
this.timeEstimateCounter = timeEstimateCounter;
|
||||
this.timeoutInMillis = timeout.millis();
|
||||
this.timeout = timeout;
|
||||
queryShardContext = indexService.newQueryShardContext(searcher.getIndexReader());
|
||||
queryShardContext.setTypes(request.types());
|
||||
}
|
||||
|
@ -512,13 +511,13 @@ public class DefaultSearchContext extends SearchContext {
|
|||
}
|
||||
|
||||
@Override
|
||||
public long timeoutInMillis() {
|
||||
return timeoutInMillis;
|
||||
public TimeValue timeout() {
|
||||
return timeout;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void timeoutInMillis(long timeoutInMillis) {
|
||||
this.timeoutInMillis = timeoutInMillis;
|
||||
public void timeout(TimeValue timeout) {
|
||||
this.timeout = timeout;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -25,6 +25,7 @@ import org.apache.lucene.search.Query;
|
|||
import org.apache.lucene.util.Counter;
|
||||
import org.elasticsearch.action.search.SearchType;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.index.analysis.AnalysisService;
|
||||
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
|
||||
|
@ -264,13 +265,13 @@ public abstract class FilteredSearchContext extends SearchContext {
|
|||
}
|
||||
|
||||
@Override
|
||||
public long timeoutInMillis() {
|
||||
return in.timeoutInMillis();
|
||||
public TimeValue timeout() {
|
||||
return in.timeout();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void timeoutInMillis(long timeoutInMillis) {
|
||||
in.timeoutInMillis(timeoutInMillis);
|
||||
public void timeout(TimeValue timeout) {
|
||||
in.timeout(timeout);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.elasticsearch.common.Nullable;
|
|||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.iterable.Iterables;
|
||||
import org.elasticsearch.index.analysis.AnalysisService;
|
||||
|
@ -226,9 +227,9 @@ public abstract class SearchContext implements Releasable {
|
|||
|
||||
public abstract IndexFieldDataService fieldData();
|
||||
|
||||
public abstract long timeoutInMillis();
|
||||
public abstract TimeValue timeout();
|
||||
|
||||
public abstract void timeoutInMillis(long timeoutInMillis);
|
||||
public abstract void timeout(TimeValue timeout);
|
||||
|
||||
public abstract int terminateAfter();
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.elasticsearch.search.internal;
|
|||
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.util.Counter;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.query.ParsedQuery;
|
||||
import org.elasticsearch.search.aggregations.SearchContextAggregations;
|
||||
import org.elasticsearch.search.fetch.FetchSearchResult;
|
||||
|
@ -155,7 +156,7 @@ public class SubSearchContext extends FilteredSearchContext {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void timeoutInMillis(long timeoutInMillis) {
|
||||
public void timeout(TimeValue timeout) {
|
||||
throw new UnsupportedOperationException("Not supported");
|
||||
}
|
||||
|
||||
|
|
|
@ -349,12 +349,12 @@ public class QueryPhase implements SearchPhase {
|
|||
}
|
||||
}
|
||||
|
||||
final boolean timeoutSet = searchContext.timeoutInMillis() != SearchService.NO_TIMEOUT.millis();
|
||||
final boolean timeoutSet = searchContext.timeout() != null && !searchContext.timeout().equals(SearchService.NO_TIMEOUT);
|
||||
if (timeoutSet && collector != null) { // collector might be null if no collection is actually needed
|
||||
final Collector child = collector;
|
||||
// TODO: change to use our own counter that uses the scheduler in ThreadPool
|
||||
// throws TimeLimitingCollector.TimeExceededException when timeout has reached
|
||||
collector = Lucene.wrapTimeLimitingCollector(collector, searchContext.timeEstimateCounter(), searchContext.timeoutInMillis());
|
||||
collector = Lucene.wrapTimeLimitingCollector(collector, searchContext.timeEstimateCounter(), searchContext.timeout().millis());
|
||||
if (doProfile) {
|
||||
collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TIMEOUT,
|
||||
Collections.singletonList((InternalProfileCollector) child));
|
||||
|
|
|
@ -196,7 +196,7 @@ public class ThreadPool extends AbstractComponent implements Closeable {
|
|||
if (executors.containsKey(executorHolder.info.getName())) {
|
||||
throw new IllegalStateException("duplicate executors with name [" + executorHolder.info.getName() + "] registered");
|
||||
}
|
||||
logger.debug("created thread pool: " + entry.getValue().formatInfo(executorHolder.info));
|
||||
logger.debug("created thread pool: {}", entry.getValue().formatInfo(executorHolder.info));
|
||||
executors.put(entry.getKey(), executorHolder);
|
||||
}
|
||||
|
||||
|
|
|
@ -21,8 +21,8 @@ package org.elasticsearch.transport.netty;
|
|||
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.bytes.ChannelBufferBytesReference;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.netty.NettyUtils;
|
||||
import org.jboss.netty.buffer.ChannelBuffer;
|
||||
|
||||
import java.io.EOFException;
|
||||
|
@ -37,10 +37,6 @@ public class ChannelBufferStreamInput extends StreamInput {
|
|||
private final int startIndex;
|
||||
private final int endIndex;
|
||||
|
||||
public ChannelBufferStreamInput(ChannelBuffer buffer) {
|
||||
this(buffer, buffer.readableBytes());
|
||||
}
|
||||
|
||||
public ChannelBufferStreamInput(ChannelBuffer buffer, int length) {
|
||||
if (length > buffer.readableBytes()) {
|
||||
throw new IndexOutOfBoundsException();
|
||||
|
@ -53,7 +49,7 @@ public class ChannelBufferStreamInput extends StreamInput {
|
|||
|
||||
@Override
|
||||
public BytesReference readBytesReference(int length) throws IOException {
|
||||
ChannelBufferBytesReference ref = new ChannelBufferBytesReference(buffer.slice(buffer.readerIndex(), length));
|
||||
BytesReference ref = NettyUtils.toBytesReference(buffer.slice(buffer.readerIndex(), length));
|
||||
buffer.skipBytes(length);
|
||||
return ref;
|
||||
}
|
||||
|
@ -136,7 +132,7 @@ public class ChannelBufferStreamInput extends StreamInput {
|
|||
public void readBytes(byte[] b, int offset, int len) throws IOException {
|
||||
int read = read(b, offset, len);
|
||||
if (read < len) {
|
||||
throw new EOFException();
|
||||
throw new IndexOutOfBoundsException();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.elasticsearch.common.compress.NotCompressedException;
|
|||
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.netty.NettyUtils;
|
||||
import org.elasticsearch.common.transport.InetSocketTransportAddress;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
|
@ -110,7 +111,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
|
|||
if (TransportStatus.isCompress(status) && hasMessageBytesToRead && buffer.readable()) {
|
||||
Compressor compressor;
|
||||
try {
|
||||
compressor = CompressorFactory.compressor(buffer);
|
||||
compressor = CompressorFactory.compressor(NettyUtils.toBytesReference(buffer));
|
||||
} catch (NotCompressedException ex) {
|
||||
int maxToRead = Math.min(buffer.readableBytes(), 10);
|
||||
int offset = buffer.readerIndex();
|
||||
|
|
|
@ -886,7 +886,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
|
|||
// the header part is compressed, and the "body" can't be extracted as compressed
|
||||
if (options.compress() && (!(request instanceof BytesTransportRequest))) {
|
||||
status = TransportStatus.setCompress(status);
|
||||
stream = CompressorFactory.defaultCompressor().streamOutput(stream);
|
||||
stream = CompressorFactory.COMPRESSOR.streamOutput(stream);
|
||||
}
|
||||
|
||||
// we pick the smallest of the 2, to support both backward and forward compatibility
|
||||
|
@ -909,14 +909,14 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
|
|||
bRequest.writeThin(stream);
|
||||
stream.close();
|
||||
bytes = bStream.bytes();
|
||||
ChannelBuffer headerBuffer = bytes.toChannelBuffer();
|
||||
ChannelBuffer contentBuffer = bRequest.bytes().toChannelBuffer();
|
||||
ChannelBuffer headerBuffer = NettyUtils.toChannelBuffer(bytes);
|
||||
ChannelBuffer contentBuffer = NettyUtils.toChannelBuffer(bRequest.bytes());
|
||||
buffer = ChannelBuffers.wrappedBuffer(NettyUtils.DEFAULT_GATHERING, headerBuffer, contentBuffer);
|
||||
} else {
|
||||
request.writeTo(stream);
|
||||
stream.close();
|
||||
bytes = bStream.bytes();
|
||||
buffer = bytes.toChannelBuffer();
|
||||
buffer = NettyUtils.toChannelBuffer(bytes);
|
||||
}
|
||||
NettyHeader.writeHeader(buffer, requestId, status, version);
|
||||
ChannelFuture future = targetChannel.write(buffer);
|
||||
|
|
|
@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
|||
import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.netty.NettyUtils;
|
||||
import org.elasticsearch.common.netty.ReleaseChannelFutureListener;
|
||||
import org.elasticsearch.transport.RemoteTransportException;
|
||||
import org.elasticsearch.transport.TransportChannel;
|
||||
|
@ -99,14 +100,14 @@ public class NettyTransportChannel implements TransportChannel {
|
|||
StreamOutput stream = bStream;
|
||||
if (options.compress()) {
|
||||
status = TransportStatus.setCompress(status);
|
||||
stream = CompressorFactory.defaultCompressor().streamOutput(stream);
|
||||
stream = CompressorFactory.COMPRESSOR.streamOutput(stream);
|
||||
}
|
||||
stream.setVersion(version);
|
||||
response.writeTo(stream);
|
||||
stream.close();
|
||||
|
||||
ReleasablePagedBytesReference bytes = bStream.bytes();
|
||||
ChannelBuffer buffer = bytes.toChannelBuffer();
|
||||
ChannelBuffer buffer = NettyUtils.toChannelBuffer(bytes);
|
||||
NettyHeader.writeHeader(buffer, requestId, status, version);
|
||||
ChannelFuture future = channel.write(buffer);
|
||||
ReleaseChannelFutureListener listener = new ReleaseChannelFutureListener(bytes);
|
||||
|
@ -136,7 +137,7 @@ public class NettyTransportChannel implements TransportChannel {
|
|||
status = TransportStatus.setError(status);
|
||||
|
||||
BytesReference bytes = stream.bytes();
|
||||
ChannelBuffer buffer = bytes.toChannelBuffer();
|
||||
ChannelBuffer buffer = NettyUtils.toChannelBuffer(bytes);
|
||||
NettyHeader.writeHeader(buffer, requestId, status, version);
|
||||
ChannelFuture future = channel.write(buffer);
|
||||
ChannelFutureListener onResponseSentListener =
|
||||
|
|
|
@@ -20,8 +20,9 @@ package org.elasticsearch.action.admin.cluster.node.tasks;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionModule;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.ActionFilters;

@@ -46,6 +47,7 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;

@@ -55,6 +57,7 @@ import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;

@@ -64,11 +67,12 @@ import static org.elasticsearch.test.ESTestCase.awaitBusy;
/**
 * A plugin that adds a cancellable blocking test task for integration testing of the task manager.
 */
public class TestTaskPlugin extends Plugin {
public class TestTaskPlugin extends Plugin implements ActionPlugin {

public void onModule(ActionModule module) {
module.registerAction(TestTaskAction.INSTANCE, TransportTestTaskAction.class);
module.registerAction(UnblockTestTasksAction.INSTANCE, TransportUnblockTestTasksAction.class);
@Override
public List<ActionHandler<? extends ActionRequest<?>, ? extends ActionResponse>> getActions() {
return Arrays.asList(new ActionHandler<>(TestTaskAction.INSTANCE, TransportTestTaskAction.class),
new ActionHandler<>(UnblockTestTasksAction.INSTANCE, TransportUnblockTestTasksAction.class));
}

static class TestTask extends CancellableTask {

@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.bootstrap;
|
||||
|
||||
import org.apache.lucene.util.Constants;
|
||||
import org.elasticsearch.common.SuppressLoggerChecks;
|
||||
import org.elasticsearch.common.io.PathUtils;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
@ -46,6 +47,7 @@ public class MaxMapCountCheckTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
@SuppressLoggerChecks(reason = "mock usage")
|
||||
public void testGetMaxMapCount() throws IOException {
|
||||
final long procSysVmMaxMapCount = randomIntBetween(1, Integer.MAX_VALUE);
|
||||
final BufferedReader reader = mock(BufferedReader.class);
|
||||
|
|
|
@ -22,7 +22,6 @@ package org.elasticsearch.cluster;
|
|||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionModule;
|
||||
import org.elasticsearch.action.ActionRequest;
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
|
||||
|
@ -31,7 +30,6 @@ import org.elasticsearch.action.support.ActionFilter;
|
|||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingNodes;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
|
@ -44,6 +42,7 @@ import org.elasticsearch.index.IndexService;
|
|||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.plugins.ActionPlugin;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.InternalTestCluster;
|
||||
|
@ -56,11 +55,13 @@ import org.hamcrest.Matchers;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
import static java.util.Collections.emptySet;
|
||||
import static java.util.Collections.singletonList;
|
||||
import static java.util.Collections.unmodifiableSet;
|
||||
import static org.elasticsearch.common.util.set.Sets.newHashSet;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
|
@ -74,10 +75,10 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
|
|||
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
|
||||
public class ClusterInfoServiceIT extends ESIntegTestCase {
|
||||
|
||||
public static class TestPlugin extends Plugin {
|
||||
|
||||
public void onModule(ActionModule module) {
|
||||
module.registerFilter(BlockingActionFilter.class);
|
||||
public static class TestPlugin extends Plugin implements ActionPlugin {
|
||||
@Override
|
||||
public List<Class<? extends ActionFilter>> getActionFilters() {
|
||||
return singletonList(BlockingActionFilter.class);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -65,7 +65,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
|
|||
.build());
|
||||
this.numberOfShards = randomIntBetween(1, 5);
|
||||
this.numberOfReplicas = randomIntBetween(1, 5);
|
||||
logger.info("Setup test with " + this.numberOfShards + " shards and " + this.numberOfReplicas + " replicas.");
|
||||
logger.info("Setup test with {} shards and {} replicas.", this.numberOfShards, this.numberOfReplicas);
|
||||
this.primaryTermsPerIndex.clear();
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(createIndexMetaData(TEST_INDEX_1))
|
||||
|
@ -87,7 +87,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
|
|||
* puts primary shard routings into initializing state
|
||||
*/
|
||||
private void initPrimaries() {
|
||||
logger.info("adding " + (this.numberOfReplicas + 1) + " nodes and performing rerouting");
|
||||
logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1);
|
||||
Builder discoBuilder = DiscoveryNodes.builder();
|
||||
for (int i = 0; i < this.numberOfReplicas + 1; i++) {
|
||||
discoBuilder = discoBuilder.put(newNode("node" + i));
|
||||
|
|
|
@ -263,7 +263,7 @@ public class ClusterSettingsIT extends ESIntegTestCase {
|
|||
.get();
|
||||
fail("bogus value");
|
||||
} catch (IllegalArgumentException ex) {
|
||||
assertEquals(ex.getMessage(), "Failed to parse setting [discovery.zen.publish_timeout] with value [whatever] as a time value: unit is missing or unrecognized");
|
||||
assertEquals(ex.getMessage(), "failed to parse setting [discovery.zen.publish_timeout] with value [whatever] as a time value: unit is missing or unrecognized");
|
||||
}
|
||||
|
||||
assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1L));
|
||||
|
|
|
@ -0,0 +1,509 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.bytes;
|
||||
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.BytesRefBuilder;
|
||||
import org.apache.lucene.util.BytesRefIterator;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.ByteArray;
|
||||
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.hamcrest.Matchers;
|
||||
|
||||
import java.io.EOFException;
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
||||
public abstract class AbstractBytesReferenceTestCase extends ESTestCase {
|
||||
|
||||
protected static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE;
|
||||
protected final BigArrays bigarrays = new BigArrays(null, new NoneCircuitBreakerService(), false);
|
||||
|
||||
public void testGet() throws IOException {
|
||||
int length = randomIntBetween(1, PAGE_SIZE * 3);
|
||||
BytesReference pbr = newBytesReference(length);
|
||||
int sliceOffset = randomIntBetween(0, length / 2);
|
||||
int sliceLength = Math.max(1, length - sliceOffset - 1);
|
||||
BytesReference slice = pbr.slice(sliceOffset, sliceLength);
|
||||
assertEquals(pbr.get(sliceOffset), slice.get(0));
|
||||
assertEquals(pbr.get(sliceOffset + sliceLength - 1), slice.get(sliceLength - 1));
|
||||
}
|
||||
|
||||
public void testLength() throws IOException {
|
||||
int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomInt(PAGE_SIZE * 3)};
|
||||
|
||||
for (int i = 0; i < sizes.length; i++) {
|
||||
BytesReference pbr = newBytesReference(sizes[i]);
|
||||
assertEquals(sizes[i], pbr.length());
|
||||
}
|
||||
}
|
||||
|
||||
public void testSlice() throws IOException {
|
||||
int length = randomInt(PAGE_SIZE * 3);
|
||||
BytesReference pbr = newBytesReference(length);
|
||||
int sliceOffset = randomIntBetween(0, length / 2);
|
||||
int sliceLength = Math.max(0, length - sliceOffset - 1);
|
||||
BytesReference slice = pbr.slice(sliceOffset, sliceLength);
|
||||
assertEquals(sliceLength, slice.length());
|
||||
|
||||
if (slice.hasArray()) {
|
||||
assertEquals(sliceOffset, slice.arrayOffset());
|
||||
} else {
|
||||
expectThrows(IllegalStateException.class, () ->
|
||||
slice.arrayOffset());
|
||||
}
|
||||
}
|
||||
|
||||
public void testStreamInput() throws IOException {
|
||||
int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20));
|
||||
BytesReference pbr = newBytesReference(length);
|
||||
StreamInput si = pbr.streamInput();
|
||||
assertNotNull(si);
|
||||
|
||||
// read single bytes one by one
|
||||
assertEquals(pbr.get(0), si.readByte());
|
||||
assertEquals(pbr.get(1), si.readByte());
|
||||
assertEquals(pbr.get(2), si.readByte());
|
||||
|
||||
// reset the stream for bulk reading
|
||||
si.reset();
|
||||
|
||||
// buffer for bulk reads
|
||||
byte[] origBuf = new byte[length];
|
||||
random().nextBytes(origBuf);
|
||||
byte[] targetBuf = Arrays.copyOf(origBuf, origBuf.length);
|
||||
|
||||
// bulk-read 0 bytes: must not modify buffer
|
||||
si.readBytes(targetBuf, 0, 0);
|
||||
assertEquals(origBuf[0], targetBuf[0]);
|
||||
si.reset();
|
||||
|
||||
// read a few few bytes as ints
|
||||
int bytesToRead = randomIntBetween(1, length / 2);
|
||||
for (int i = 0; i < bytesToRead; i++) {
|
||||
int b = si.read();
|
||||
assertEquals(pbr.get(i) & 0xff, b);
|
||||
}
|
||||
si.reset();
|
||||
|
||||
// bulk-read all
|
||||
si.readFully(targetBuf);
|
||||
assertArrayEquals(pbr.toBytes(), targetBuf);
|
||||
|
||||
// continuing to read should now fail with EOFException
|
||||
try {
|
||||
si.readByte();
|
||||
fail("expected EOF");
|
||||
} catch (EOFException | IndexOutOfBoundsException eof) {
|
||||
// yay
|
||||
}
|
||||
|
||||
// try to read more than the stream contains
|
||||
si.reset();
|
||||
expectThrows(IndexOutOfBoundsException.class, () ->
|
||||
si.readBytes(targetBuf, 0, length * 2));
|
||||
}
|
||||
|
||||
public void testStreamInputBulkReadWithOffset() throws IOException {
|
||||
final int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20));
|
||||
BytesReference pbr = newBytesReference(length);
|
||||
StreamInput si = pbr.streamInput();
|
||||
assertNotNull(si);
|
||||
|
||||
// read a bunch of single bytes one by one
|
||||
int offset = randomIntBetween(1, length / 2);
|
||||
for (int i = 0; i < offset; i++) {
|
||||
assertEquals(si.available(), length - i);
|
||||
assertEquals(pbr.get(i), si.readByte());
|
||||
}
|
||||
|
||||
// now do NOT reset the stream - keep the stream's offset!
|
||||
|
||||
// buffer to compare remaining bytes against bulk read
|
||||
byte[] pbrBytesWithOffset = Arrays.copyOfRange(pbr.toBytes(), offset, length);
|
||||
// randomized target buffer to ensure no stale slots
|
||||
byte[] targetBytes = new byte[pbrBytesWithOffset.length];
|
||||
random().nextBytes(targetBytes);
|
||||
|
||||
// bulk-read all
|
||||
si.readFully(targetBytes);
|
||||
assertArrayEquals(pbrBytesWithOffset, targetBytes);
|
||||
assertEquals(si.available(), 0);
|
||||
}
|
||||
|
||||
public void testRandomReads() throws IOException {
|
||||
int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20));
|
||||
BytesReference pbr = newBytesReference(length);
|
||||
StreamInput streamInput = pbr.streamInput();
|
||||
BytesRefBuilder target = new BytesRefBuilder();
|
||||
while (target.length() < pbr.length()) {
|
||||
switch (randomIntBetween(0, 10)) {
|
||||
case 6:
|
||||
case 5:
|
||||
target.append(new BytesRef(new byte[]{streamInput.readByte()}));
|
||||
break;
|
||||
case 4:
|
||||
case 3:
|
||||
BytesRef bytesRef = streamInput.readBytesRef(scaledRandomIntBetween(1, pbr.length() - target.length()));
|
||||
target.append(bytesRef);
|
||||
break;
|
||||
default:
|
||||
byte[] buffer = new byte[scaledRandomIntBetween(1, pbr.length() - target.length())];
|
||||
int offset = scaledRandomIntBetween(0, buffer.length - 1);
|
||||
int read = streamInput.read(buffer, offset, buffer.length - offset);
|
||||
target.append(new BytesRef(buffer, offset, read));
|
||||
break;
|
||||
}
|
||||
}
|
||||
assertEquals(pbr.length(), target.length());
|
||||
BytesRef targetBytes = target.get();
|
||||
assertArrayEquals(pbr.toBytes(), Arrays.copyOfRange(targetBytes.bytes, targetBytes.offset, targetBytes.length));
|
||||
}
|
||||
|
||||
public void testSliceStreamInput() throws IOException {
|
||||
int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20));
|
||||
BytesReference pbr = newBytesReference(length);
|
||||
|
||||
// test stream input over slice (upper half of original)
|
||||
int sliceOffset = randomIntBetween(1, length / 2);
|
||||
int sliceLength = length - sliceOffset;
|
||||
BytesReference slice = pbr.slice(sliceOffset, sliceLength);
|
||||
StreamInput sliceInput = slice.streamInput();
|
||||
assertEquals(sliceInput.available(), sliceLength);
|
||||
|
||||
// single reads
|
||||
assertEquals(slice.get(0), sliceInput.readByte());
|
||||
assertEquals(slice.get(1), sliceInput.readByte());
|
||||
assertEquals(slice.get(2), sliceInput.readByte());
|
||||
assertEquals(sliceInput.available(), sliceLength - 3);

// reset the slice stream for bulk reading
sliceInput.reset();
assertEquals(sliceInput.available(), sliceLength);

// bulk read
byte[] sliceBytes = new byte[sliceLength];
sliceInput.readFully(sliceBytes);
assertEquals(sliceInput.available(), 0);

// compare slice content with upper half of original
byte[] pbrSliceBytes = Arrays.copyOfRange(pbr.toBytes(), sliceOffset, length);
assertArrayEquals(pbrSliceBytes, sliceBytes);

// compare slice bytes with bytes read from slice via streamInput :D
byte[] sliceToBytes = slice.toBytes();
assertEquals(sliceBytes.length, sliceToBytes.length);
assertArrayEquals(sliceBytes, sliceToBytes);

sliceInput.reset();
assertEquals(sliceInput.available(), sliceLength);
byte[] buffer = new byte[sliceLength + scaledRandomIntBetween(1, 100)];
int offset = scaledRandomIntBetween(0, Math.max(1, buffer.length - sliceLength - 1));
int read = sliceInput.read(buffer, offset, sliceLength / 2);
assertEquals(sliceInput.available(), sliceLength - read);
sliceInput.read(buffer, offset + read, sliceLength - read);
assertArrayEquals(sliceBytes, Arrays.copyOfRange(buffer, offset, offset + sliceLength));
assertEquals(sliceInput.available(), 0);
}

public void testWriteToOutputStream() throws IOException {
int length = randomIntBetween(10, PAGE_SIZE * 4);
BytesReference pbr = newBytesReference(length);
BytesStreamOutput out = new BytesStreamOutput();
pbr.writeTo(out);
assertEquals(pbr.length(), out.size());
assertArrayEquals(pbr.toBytes(), out.bytes().toBytes());
out.close();
}

public void testSliceWriteToOutputStream() throws IOException {
int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 5));
BytesReference pbr = newBytesReference(length);
int sliceOffset = randomIntBetween(1, length / 2);
int sliceLength = length - sliceOffset;
BytesReference slice = pbr.slice(sliceOffset, sliceLength);
BytesStreamOutput sliceOut = new BytesStreamOutput(sliceLength);
slice.writeTo(sliceOut);
assertEquals(slice.length(), sliceOut.size());
assertArrayEquals(slice.toBytes(), sliceOut.bytes().toBytes());
sliceOut.close();
}

public void testToBytes() throws IOException {
int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5))};
for (int i = 0; i < sizes.length; i++) {
BytesReference pbr = newBytesReference(sizes[i]);
byte[] bytes = pbr.toBytes();
assertEquals(sizes[i], bytes.length);
}
}

public void testToBytesArraySharedPage() throws IOException {
int length = randomIntBetween(10, PAGE_SIZE);
BytesReference pbr = newBytesReference(length);
BytesArray ba = pbr.toBytesArray();
BytesArray ba2 = pbr.toBytesArray();
assertNotNull(ba);
assertNotNull(ba2);
assertEquals(pbr.length(), ba.length());
assertEquals(ba.length(), ba2.length());
// single-page optimization
assertSame(ba.array(), ba2.array());
}

public void testToBytesArrayMaterializedPages() throws IOException {
// we need a length != (n * pagesize) to avoid page sharing at boundaries
int length = 0;
while ((length % PAGE_SIZE) == 0) {
length = randomIntBetween(PAGE_SIZE, PAGE_SIZE * randomIntBetween(2, 5));
}
BytesReference pbr = newBytesReference(length);
BytesArray ba = pbr.toBytesArray();
BytesArray ba2 = pbr.toBytesArray();
assertNotNull(ba);
assertNotNull(ba2);
assertEquals(pbr.length(), ba.length());
assertEquals(ba.length(), ba2.length());
}

public void testCopyBytesArray() throws IOException {
// small PBR which would normally share the first page
int length = randomIntBetween(10, PAGE_SIZE);
BytesReference pbr = newBytesReference(length);
BytesArray ba = pbr.copyBytesArray();
BytesArray ba2 = pbr.copyBytesArray();
assertNotNull(ba);
assertNotSame(ba, ba2);
assertNotSame(ba.array(), ba2.array());
}

public void testSliceCopyBytesArray() throws IOException {
int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8));
BytesReference pbr = newBytesReference(length);
int sliceOffset = randomIntBetween(0, pbr.length());
int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset);
BytesReference slice = pbr.slice(sliceOffset, sliceLength);

BytesArray ba1 = slice.copyBytesArray();
BytesArray ba2 = slice.copyBytesArray();
assertNotNull(ba1);
assertNotNull(ba2);
assertNotSame(ba1.array(), ba2.array());
assertArrayEquals(slice.toBytes(), ba1.array());
assertArrayEquals(slice.toBytes(), ba2.array());
assertArrayEquals(ba1.array(), ba2.array());
}

public void testEmptyToBytesRefIterator() throws IOException {
BytesReference pbr = newBytesReference(0);
assertNull(pbr.iterator().next());
}

public void testIterator() throws IOException {
int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8));
BytesReference pbr = newBytesReference(length);
BytesRefIterator iterator = pbr.iterator();
BytesRef ref;
BytesRefBuilder builder = new BytesRefBuilder();
while((ref = iterator.next()) != null) {
builder.append(ref);
}
assertArrayEquals(pbr.toBytes(), BytesRef.deepCopyOf(builder.toBytesRef()).bytes);
}

public void testSliceIterator() throws IOException {
int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8));
BytesReference pbr = newBytesReference(length);
int sliceOffset = randomIntBetween(0, pbr.length());
int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset);
BytesReference slice = pbr.slice(sliceOffset, sliceLength);
BytesRefIterator iterator = slice.iterator();
BytesRef ref = null;
BytesRefBuilder builder = new BytesRefBuilder();
while((ref = iterator.next()) != null) {
builder.append(ref);
}
assertArrayEquals(slice.toBytes(), BytesRef.deepCopyOf(builder.toBytesRef()).bytes);
}

public void testIteratorRandom() throws IOException {
int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8));
BytesReference pbr = newBytesReference(length);
if (randomBoolean()) {
int sliceOffset = randomIntBetween(0, pbr.length());
int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset);
pbr = pbr.slice(sliceOffset, sliceLength);
}

if (randomBoolean()) {
pbr = pbr.toBytesArray();
}
BytesRefIterator iterator = pbr.iterator();
BytesRef ref = null;
BytesRefBuilder builder = new BytesRefBuilder();
while((ref = iterator.next()) != null) {
builder.append(ref);
}
assertArrayEquals(pbr.toBytes(), BytesRef.deepCopyOf(builder.toBytesRef()).bytes);
}

public void testArray() throws IOException {
int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5))};

for (int i = 0; i < sizes.length; i++) {
BytesReference pbr = newBytesReference(sizes[i]);
byte[] array = pbr.array();
assertNotNull(array);
assertEquals(sizes[i], array.length);
assertSame(array, pbr.array());
}
}

public void testArrayOffset() throws IOException {
int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5));
BytesReference pbr = newBytesReference(length);
if (pbr.hasArray()) {
assertEquals(0, pbr.arrayOffset());
} else {
expectThrows(IllegalStateException.class, () ->
pbr.arrayOffset());
}
}

public void testSliceArrayOffset() throws IOException {
int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5));
BytesReference pbr = newBytesReference(length);
int sliceOffset = randomIntBetween(0, pbr.length());
int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset);
BytesReference slice = pbr.slice(sliceOffset, sliceLength);
if (slice.hasArray()) {
assertEquals(sliceOffset, slice.arrayOffset());
} else {
expectThrows(IllegalStateException.class, () ->
slice.arrayOffset());
}
}

public void testToUtf8() throws IOException {
// test empty
BytesReference pbr = newBytesReference(0);
assertEquals("", pbr.toUtf8());
// TODO: good way to test?
}

public void testToBytesRef() throws IOException {
int length = randomIntBetween(0, PAGE_SIZE);
BytesReference pbr = newBytesReference(length);
BytesRef ref = pbr.toBytesRef();
assertNotNull(ref);
assertEquals(pbr.arrayOffset(), ref.offset);
assertEquals(pbr.length(), ref.length);
}

public void testSliceToBytesRef() throws IOException {
int length = randomIntBetween(0, PAGE_SIZE);
BytesReference pbr = newBytesReference(length);
// get a BytesRef from a slice
int sliceOffset = randomIntBetween(0, pbr.length());
int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset);
BytesRef sliceRef = pbr.slice(sliceOffset, sliceLength).toBytesRef();
// note that these are only true if we have <= than a page, otherwise offset/length are shifted
assertEquals(sliceOffset, sliceRef.offset);
assertEquals(sliceLength, sliceRef.length);
}

public void testCopyBytesRef() throws IOException {
int length = randomIntBetween(0, PAGE_SIZE * randomIntBetween(2, 5));
BytesReference pbr = newBytesReference(length);
BytesRef ref = pbr.copyBytesRef();
assertNotNull(ref);
assertEquals(pbr.length(), ref.length);
}

public void testHashCode() throws IOException {
// empty content must have hash 1 (JDK compat)
BytesReference pbr = newBytesReference(0);
assertEquals(Arrays.hashCode(BytesRef.EMPTY_BYTES), pbr.hashCode());

// test with content
pbr = newBytesReference(randomIntBetween(0, PAGE_SIZE * randomIntBetween(2, 5)));
int jdkHash = Arrays.hashCode(pbr.toBytes());
int pbrHash = pbr.hashCode();
assertEquals(jdkHash, pbrHash);

// test hashes of slices
int sliceFrom = randomIntBetween(0, pbr.length());
int sliceLength = randomIntBetween(pbr.length() - sliceFrom, pbr.length() - sliceFrom);
BytesReference slice = pbr.slice(sliceFrom, sliceLength);
int sliceJdkHash = Arrays.hashCode(slice.toBytes());
int sliceHash = slice.hashCode();
assertEquals(sliceJdkHash, sliceHash);
}

public void testEquals() {
int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5));
ByteArray ba1 = bigarrays.newByteArray(length, false);
ByteArray ba2 = bigarrays.newByteArray(length, false);

// copy contents
for (long i = 0; i < length; i++) {
ba2.set(i, ba1.get(i));
}

// get refs & compare
BytesReference pbr = new PagedBytesReference(bigarrays, ba1, length);
BytesReference pbr2 = new PagedBytesReference(bigarrays, ba2, length);
assertEquals(pbr, pbr2);
}

public void testEqualsPeerClass() throws IOException {
int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5));
BytesReference pbr = newBytesReference(length);
BytesReference ba = new BytesArray(pbr.toBytes());
assertEquals(pbr, ba);
}

public void testSliceEquals() {
int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5));
ByteArray ba1 = bigarrays.newByteArray(length, false);
BytesReference pbr = new PagedBytesReference(bigarrays, ba1, length);

// test equality of slices
int sliceFrom = randomIntBetween(0, pbr.length());
int sliceLength = randomIntBetween(pbr.length() - sliceFrom, pbr.length() - sliceFrom);
BytesReference slice1 = pbr.slice(sliceFrom, sliceLength);
BytesReference slice2 = pbr.slice(sliceFrom, sliceLength);
assertArrayEquals(slice1.toBytes(), slice2.toBytes());

// test a slice with same offset but different length,
// unless randomized testing gave us a 0-length slice.
if (sliceLength > 0) {
BytesReference slice3 = pbr.slice(sliceFrom, sliceLength / 2);
assertFalse(Arrays.equals(slice1.toBytes(), slice3.toBytes()));
}
}

protected abstract BytesReference newBytesReference(int length) throws IOException;

}

@@ -16,26 +16,26 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.script.mustache;
package org.elasticsearch.common.bytes;

import com.fasterxml.jackson.core.io.JsonStringEncoder;
import com.github.mustachejava.DefaultMustacheFactory;
import com.github.mustachejava.MustacheException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.hamcrest.Matchers;

import java.io.IOException;
import java.io.Writer;

/**
* A MustacheFactory that does simple JSON escaping.
*/
final class JsonEscapingMustacheFactory extends DefaultMustacheFactory {

public class BytesArrayTests extends AbstractBytesReferenceTestCase {
@Override
public void encode(String value, Writer writer) {
try {
writer.write(JsonStringEncoder.getInstance().quoteAsString(value));
} catch (IOException e) {
throw new MustacheException("Failed to encode value: " + value);
protected BytesReference newBytesReference(int length) throws IOException {
// we know bytes stream output always creates a paged bytes reference, we use it to create randomized content
final BytesStreamOutput out = new BytesStreamOutput(length);
for (int i = 0; i < length; i++) {
out.writeByte((byte) random().nextInt(1 << 8));
}
assertEquals(length, out.size());
BytesArray ref = out.bytes().toBytesArray();
assertEquals(length, ref.length());
assertTrue(ref instanceof BytesArray);
assertThat(ref.length(), Matchers.equalTo(length));
return ref;
}
}

@@ -21,6 +21,7 @@ package org.elasticsearch.common.bytes;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.BytesRefIterator;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -29,316 +30,33 @@ import org.elasticsearch.common.util.ByteArray;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;
import org.jboss.netty.buffer.ChannelBuffer;
import org.junit.After;
import org.junit.Before;

import java.io.EOFException;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Arrays;

public class PagedBytesReferenceTests extends ESTestCase {
public class PagedBytesReferenceTests extends AbstractBytesReferenceTestCase {

private static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE;

private BigArrays bigarrays;

@Override
@Before
public void setUp() throws Exception {
super.setUp();
bigarrays = new BigArrays(null, new NoneCircuitBreakerService(), false);
}

@Override
@After
public void tearDown() throws Exception {
super.tearDown();
}

public void testGet() {
int length = randomIntBetween(1, PAGE_SIZE * 3);
BytesReference pbr = getRandomizedPagedBytesReference(length);
int sliceOffset = randomIntBetween(0, length / 2);
int sliceLength = Math.max(1, length - sliceOffset - 1);
BytesReference slice = pbr.slice(sliceOffset, sliceLength);
assertEquals(pbr.get(sliceOffset), slice.get(0));
assertEquals(pbr.get(sliceOffset + sliceLength - 1), slice.get(sliceLength - 1));
}

public void testLength() {
int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomInt(PAGE_SIZE * 3)};

for (int i = 0; i < sizes.length; i++) {
BytesReference pbr = getRandomizedPagedBytesReference(sizes[i]);
assertEquals(sizes[i], pbr.length());
protected BytesReference newBytesReference(int length) throws IOException {
// we know bytes stream output always creates a paged bytes reference, we use it to create randomized content
ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(length, bigarrays);
for (int i = 0; i < length; i++) {
out.writeByte((byte) random().nextInt(1 << 8));
}
assertThat(out.size(), Matchers.equalTo(length));
BytesReference ref = out.bytes();
assertThat(ref.length(), Matchers.equalTo(length));
assertThat(ref, Matchers.instanceOf(PagedBytesReference.class));
return ref;
}

public void testSlice() {
int length = randomInt(PAGE_SIZE * 3);
BytesReference pbr = getRandomizedPagedBytesReference(length);
int sliceOffset = randomIntBetween(0, length / 2);
int sliceLength = Math.max(0, length - sliceOffset - 1);
BytesReference slice = pbr.slice(sliceOffset, sliceLength);
assertEquals(sliceLength, slice.length());

if (slice.hasArray()) {
assertEquals(sliceOffset, slice.arrayOffset());
} else {
try {
slice.arrayOffset();
fail("expected IllegalStateException");
} catch (IllegalStateException ise) {
// expected
}
}
}

public void testStreamInput() throws IOException {
int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20));
BytesReference pbr = getRandomizedPagedBytesReference(length);
StreamInput si = pbr.streamInput();
assertNotNull(si);

// read single bytes one by one
assertEquals(pbr.get(0), si.readByte());
assertEquals(pbr.get(1), si.readByte());
assertEquals(pbr.get(2), si.readByte());

// reset the stream for bulk reading
si.reset();

// buffer for bulk reads
byte[] origBuf = new byte[length];
random().nextBytes(origBuf);
byte[] targetBuf = Arrays.copyOf(origBuf, origBuf.length);

// bulk-read 0 bytes: must not modify buffer
si.readBytes(targetBuf, 0, 0);
assertEquals(origBuf[0], targetBuf[0]);
si.reset();

// read a few few bytes as ints
int bytesToRead = randomIntBetween(1, length / 2);
for (int i = 0; i < bytesToRead; i++) {
int b = si.read();
assertEquals(pbr.get(i), b);
}
si.reset();

// bulk-read all
si.readFully(targetBuf);
assertArrayEquals(pbr.toBytes(), targetBuf);

// continuing to read should now fail with EOFException
try {
si.readByte();
fail("expected EOF");
} catch (EOFException eof) {
// yay
}

// try to read more than the stream contains
si.reset();
try {
si.readBytes(targetBuf, 0, length * 2);
fail("expected IndexOutOfBoundsException: le > stream.length");
} catch (IndexOutOfBoundsException ioob) {
// expected
}
}

public void testStreamInputBulkReadWithOffset() throws IOException {
final int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20));
BytesReference pbr = getRandomizedPagedBytesReference(length);
StreamInput si = pbr.streamInput();
assertNotNull(si);

// read a bunch of single bytes one by one
int offset = randomIntBetween(1, length / 2);
for (int i = 0; i < offset; i++) {
assertEquals(si.available(), length - i);
assertEquals(pbr.get(i), si.readByte());
}

// now do NOT reset the stream - keep the stream's offset!

// buffer to compare remaining bytes against bulk read
byte[] pbrBytesWithOffset = Arrays.copyOfRange(pbr.toBytes(), offset, length);
// randomized target buffer to ensure no stale slots
byte[] targetBytes = new byte[pbrBytesWithOffset.length];
random().nextBytes(targetBytes);

// bulk-read all
si.readFully(targetBytes);
assertArrayEquals(pbrBytesWithOffset, targetBytes);
assertEquals(si.available(), 0);
}

public void testRandomReads() throws IOException {
int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20));
BytesReference pbr = getRandomizedPagedBytesReference(length);
StreamInput streamInput = pbr.streamInput();
BytesRefBuilder target = new BytesRefBuilder();
while (target.length() < pbr.length()) {
switch (randomIntBetween(0, 10)) {
case 6:
case 5:
target.append(new BytesRef(new byte[]{streamInput.readByte()}));
break;
case 4:
case 3:
BytesRef bytesRef = streamInput.readBytesRef(scaledRandomIntBetween(1, pbr.length() - target.length()));
target.append(bytesRef);
break;
default:
byte[] buffer = new byte[scaledRandomIntBetween(1, pbr.length() - target.length())];
int offset = scaledRandomIntBetween(0, buffer.length - 1);
int read = streamInput.read(buffer, offset, buffer.length - offset);
target.append(new BytesRef(buffer, offset, read));
break;
}
}
assertEquals(pbr.length(), target.length());
BytesRef targetBytes = target.get();
assertArrayEquals(pbr.toBytes(), Arrays.copyOfRange(targetBytes.bytes, targetBytes.offset, targetBytes.length));
}

public void testSliceStreamInput() throws IOException {
int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20));
BytesReference pbr = getRandomizedPagedBytesReference(length);

// test stream input over slice (upper half of original)
int sliceOffset = randomIntBetween(1, length / 2);
int sliceLength = length - sliceOffset;
BytesReference slice = pbr.slice(sliceOffset, sliceLength);
StreamInput sliceInput = slice.streamInput();
assertEquals(sliceInput.available(), sliceLength);

// single reads
assertEquals(slice.get(0), sliceInput.readByte());
assertEquals(slice.get(1), sliceInput.readByte());
assertEquals(slice.get(2), sliceInput.readByte());
assertEquals(sliceInput.available(), sliceLength - 3);

// reset the slice stream for bulk reading
sliceInput.reset();
assertEquals(sliceInput.available(), sliceLength);

// bulk read
byte[] sliceBytes = new byte[sliceLength];
sliceInput.readFully(sliceBytes);
assertEquals(sliceInput.available(), 0);

// compare slice content with upper half of original
byte[] pbrSliceBytes = Arrays.copyOfRange(pbr.toBytes(), sliceOffset, length);
assertArrayEquals(pbrSliceBytes, sliceBytes);

// compare slice bytes with bytes read from slice via streamInput :D
byte[] sliceToBytes = slice.toBytes();
assertEquals(sliceBytes.length, sliceToBytes.length);
assertArrayEquals(sliceBytes, sliceToBytes);

sliceInput.reset();
assertEquals(sliceInput.available(), sliceLength);
byte[] buffer = new byte[sliceLength + scaledRandomIntBetween(1, 100)];
int offset = scaledRandomIntBetween(0, Math.max(1, buffer.length - sliceLength - 1));
int read = sliceInput.read(buffer, offset, sliceLength / 2);
assertEquals(sliceInput.available(), sliceLength - read);
sliceInput.read(buffer, offset + read, sliceLength);
assertArrayEquals(sliceBytes, Arrays.copyOfRange(buffer, offset, offset + sliceLength));
assertEquals(sliceInput.available(), 0);
}

public void testWriteToOutputStream() throws IOException {
int length = randomIntBetween(10, PAGE_SIZE * 4);
BytesReference pbr = getRandomizedPagedBytesReference(length);
BytesStreamOutput out = new BytesStreamOutput();
pbr.writeTo(out);
assertEquals(pbr.length(), out.size());
assertArrayEquals(pbr.toBytes(), out.bytes().toBytes());
out.close();
}

public void testWriteToChannel() throws IOException {
int length = randomIntBetween(10, PAGE_SIZE * 4);
BytesReference pbr = getRandomizedPagedBytesReference(length);
Path tFile = createTempFile();
try (FileChannel channel = FileChannel.open(tFile, StandardOpenOption.WRITE)) {
pbr.writeTo(channel);
assertEquals(pbr.length(), channel.position());
}
assertArrayEquals(pbr.toBytes(), Files.readAllBytes(tFile));
}

public void testSliceWriteToOutputStream() throws IOException {
int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 5));
BytesReference pbr = getRandomizedPagedBytesReference(length);
int sliceOffset = randomIntBetween(1, length / 2);
int sliceLength = length - sliceOffset;
BytesReference slice = pbr.slice(sliceOffset, sliceLength);
BytesStreamOutput sliceOut = new BytesStreamOutput(sliceLength);
slice.writeTo(sliceOut);
assertEquals(slice.length(), sliceOut.size());
assertArrayEquals(slice.toBytes(), sliceOut.bytes().toBytes());
sliceOut.close();
}

public void testSliceWriteToChannel() throws IOException {
int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 5));
BytesReference pbr = getRandomizedPagedBytesReference(length);
int sliceOffset = randomIntBetween(1, length / 2);
int sliceLength = length - sliceOffset;
BytesReference slice = pbr.slice(sliceOffset, sliceLength);
Path tFile = createTempFile();
try (FileChannel channel = FileChannel.open(tFile, StandardOpenOption.WRITE)) {
slice.writeTo(channel);
assertEquals(slice.length(), channel.position());
}
assertArrayEquals(slice.toBytes(), Files.readAllBytes(tFile));
}

public void testToBytes() {
int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5))};

for (int i = 0; i < sizes.length; i++) {
BytesReference pbr = getRandomizedPagedBytesReference(sizes[i]);
byte[] bytes = pbr.toBytes();
assertEquals(sizes[i], bytes.length);
// verify that toBytes() is cheap for small payloads
if (sizes[i] <= PAGE_SIZE) {
assertSame(bytes, pbr.toBytes());
} else {
assertNotSame(bytes, pbr.toBytes());
}
}
}

public void testToBytesArraySharedPage() {
int length = randomIntBetween(10, PAGE_SIZE);
BytesReference pbr = getRandomizedPagedBytesReference(length);
BytesArray ba = pbr.toBytesArray();
BytesArray ba2 = pbr.toBytesArray();
assertNotNull(ba);
assertNotNull(ba2);
assertEquals(pbr.length(), ba.length());
assertEquals(ba.length(), ba2.length());
// single-page optimization
assertSame(ba.array(), ba2.array());
}

public void testToBytesArrayMaterializedPages() {
public void testToBytesArrayMaterializedPages() throws IOException {
// we need a length != (n * pagesize) to avoid page sharing at boundaries
int length = 0;
while ((length % PAGE_SIZE) == 0) {
length = randomIntBetween(PAGE_SIZE, PAGE_SIZE * randomIntBetween(2, 5));
}
BytesReference pbr = getRandomizedPagedBytesReference(length);
BytesReference pbr = newBytesReference(length);
BytesArray ba = pbr.toBytesArray();
BytesArray ba2 = pbr.toBytesArray();
assertNotNull(ba);
@@ -349,77 +67,11 @@ public class PagedBytesReferenceTests extends ESTestCase {
assertNotSame(ba.array(), ba2.array());
}

public void testCopyBytesArray() {
// small PBR which would normally share the first page
int length = randomIntBetween(10, PAGE_SIZE);
BytesReference pbr = getRandomizedPagedBytesReference(length);
BytesArray ba = pbr.copyBytesArray();
BytesArray ba2 = pbr.copyBytesArray();
assertNotNull(ba);
assertNotSame(ba, ba2);
assertNotSame(ba.array(), ba2.array());
}

public void testSliceCopyBytesArray() {
int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8));
BytesReference pbr = getRandomizedPagedBytesReference(length);
int sliceOffset = randomIntBetween(0, pbr.length());
int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset);
BytesReference slice = pbr.slice(sliceOffset, sliceLength);

BytesArray ba1 = slice.copyBytesArray();
BytesArray ba2 = slice.copyBytesArray();
assertNotNull(ba1);
assertNotNull(ba2);
assertNotSame(ba1.array(), ba2.array());
assertArrayEquals(slice.toBytes(), ba1.array());
assertArrayEquals(slice.toBytes(), ba2.array());
assertArrayEquals(ba1.array(), ba2.array());
}

public void testToChannelBuffer() {
int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8));
BytesReference pbr = getRandomizedPagedBytesReference(length);
ChannelBuffer cb = pbr.toChannelBuffer();
assertNotNull(cb);
byte[] bufferBytes = new byte[length];
cb.getBytes(0, bufferBytes);
assertArrayEquals(pbr.toBytes(), bufferBytes);
}

public void testEmptyToChannelBuffer() {
BytesReference pbr = getRandomizedPagedBytesReference(0);
ChannelBuffer cb = pbr.toChannelBuffer();
assertNotNull(cb);
assertEquals(0, pbr.length());
assertEquals(0, cb.capacity());
}

public void testSliceToChannelBuffer() {
int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8));
BytesReference pbr = getRandomizedPagedBytesReference(length);
int sliceOffset = randomIntBetween(0, pbr.length());
int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset);
BytesReference slice = pbr.slice(sliceOffset, sliceLength);
ChannelBuffer cbSlice = slice.toChannelBuffer();
assertNotNull(cbSlice);
byte[] sliceBufferBytes = new byte[sliceLength];
cbSlice.getBytes(0, sliceBufferBytes);
assertArrayEquals(slice.toBytes(), sliceBufferBytes);
}

public void testHasArray() {
int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(1, 3));
BytesReference pbr = getRandomizedPagedBytesReference(length);
// must return true for <= pagesize
assertEquals(length <= PAGE_SIZE, pbr.hasArray());
}

public void testArray() {
public void testArray() throws IOException {
int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5))};

for (int i = 0; i < sizes.length; i++) {
BytesReference pbr = getRandomizedPagedBytesReference(sizes[i]);
BytesReference pbr = newBytesReference(sizes[i]);
// verify that array() is cheap for small payloads
if (sizes[i] <= PAGE_SIZE) {
byte[] array = pbr.array();
@@ -437,153 +89,27 @@ public class PagedBytesReferenceTests extends ESTestCase {
}
}

public void testArrayOffset() {
int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5));
BytesReference pbr = getRandomizedPagedBytesReference(length);
if (pbr.hasArray()) {
assertEquals(0, pbr.arrayOffset());
} else {
try {
pbr.arrayOffset();
fail("expected IllegalStateException");
} catch (IllegalStateException ise) {
// expected
public void testToBytes() throws IOException {
int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5))};

for (int i = 0; i < sizes.length; i++) {
BytesReference pbr = newBytesReference(sizes[i]);
byte[] bytes = pbr.toBytes();
assertEquals(sizes[i], bytes.length);
// verify that toBytes() is cheap for small payloads
if (sizes[i] <= PAGE_SIZE) {
assertSame(bytes, pbr.toBytes());
} else {
assertNotSame(bytes, pbr.toBytes());
}
}
}

public void testSliceArrayOffset() {
int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5));
BytesReference pbr = getRandomizedPagedBytesReference(length);
int sliceOffset = randomIntBetween(0, pbr.length());
int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset);
BytesReference slice = pbr.slice(sliceOffset, sliceLength);
if (slice.hasArray()) {
assertEquals(sliceOffset, slice.arrayOffset());
} else {
try {
slice.arrayOffset();
fail("expected IllegalStateException");
} catch (IllegalStateException ise) {
// expected
}
}
}

public void testToUtf8() throws IOException {
// test empty
BytesReference pbr = getRandomizedPagedBytesReference(0);
assertEquals("", pbr.toUtf8());
// TODO: good way to test?
}

public void testToBytesRef() {
int length = randomIntBetween(0, PAGE_SIZE);
BytesReference pbr = getRandomizedPagedBytesReference(length);
BytesRef ref = pbr.toBytesRef();
assertNotNull(ref);
assertEquals(pbr.arrayOffset(), ref.offset);
assertEquals(pbr.length(), ref.length);
}

public void testSliceToBytesRef() {
int length = randomIntBetween(0, PAGE_SIZE);
BytesReference pbr = getRandomizedPagedBytesReference(length);
// get a BytesRef from a slice
int sliceOffset = randomIntBetween(0, pbr.length());
int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset);
BytesRef sliceRef = pbr.slice(sliceOffset, sliceLength).toBytesRef();
// note that these are only true if we have <= than a page, otherwise offset/length are shifted
assertEquals(sliceOffset, sliceRef.offset);
assertEquals(sliceLength, sliceRef.length);
}

public void testCopyBytesRef() {
int length = randomIntBetween(0, PAGE_SIZE * randomIntBetween(2, 5));
BytesReference pbr = getRandomizedPagedBytesReference(length);
BytesRef ref = pbr.copyBytesRef();
assertNotNull(ref);
assertEquals(pbr.length(), ref.length);
}

public void testHashCode() {
// empty content must have hash 1 (JDK compat)
BytesReference pbr = getRandomizedPagedBytesReference(0);
assertEquals(Arrays.hashCode(BytesRef.EMPTY_BYTES), pbr.hashCode());

// test with content
pbr = getRandomizedPagedBytesReference(randomIntBetween(0, PAGE_SIZE * randomIntBetween(2, 5)));
int jdkHash = Arrays.hashCode(pbr.toBytes());
int pbrHash = pbr.hashCode();
assertEquals(jdkHash, pbrHash);

// test hashes of slices
int sliceFrom = randomIntBetween(0, pbr.length());
int sliceLength = randomIntBetween(pbr.length() - sliceFrom, pbr.length() - sliceFrom);
BytesReference slice = pbr.slice(sliceFrom, sliceLength);
int sliceJdkHash = Arrays.hashCode(slice.toBytes());
int sliceHash = slice.hashCode();
assertEquals(sliceJdkHash, sliceHash);
}

public void testEquals() {
int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5));
ByteArray ba1 = bigarrays.newByteArray(length, false);
ByteArray ba2 = bigarrays.newByteArray(length, false);

// copy contents
for (long i = 0; i < length; i++) {
ba2.set(i, ba1.get(i));
}

// get refs & compare
BytesReference pbr = new PagedBytesReference(bigarrays, ba1, length);
BytesReference pbr2 = new PagedBytesReference(bigarrays, ba2, length);
assertEquals(pbr, pbr2);
}

public void testEqualsPeerClass() {
int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5));
BytesReference pbr = getRandomizedPagedBytesReference(length);
BytesReference ba = new BytesArray(pbr.toBytes());
assertEquals(pbr, ba);
}

public void testSliceEquals() {
int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5));
ByteArray ba1 = bigarrays.newByteArray(length, false);
BytesReference pbr = new PagedBytesReference(bigarrays, ba1, length);

// test equality of slices
int sliceFrom = randomIntBetween(0, pbr.length());
int sliceLength = randomIntBetween(pbr.length() - sliceFrom, pbr.length() - sliceFrom);
BytesReference slice1 = pbr.slice(sliceFrom, sliceLength);
BytesReference slice2 = pbr.slice(sliceFrom, sliceLength);
assertArrayEquals(slice1.toBytes(), slice2.toBytes());

// test a slice with same offset but different length,
// unless randomized testing gave us a 0-length slice.
if (sliceLength > 0) {
BytesReference slice3 = pbr.slice(sliceFrom, sliceLength / 2);
assertFalse(Arrays.equals(slice1.toBytes(), slice3.toBytes()));
}
}

private BytesReference getRandomizedPagedBytesReference(int length) {
// we know bytes stream output always creates a paged bytes reference, we use it to create randomized content
ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(length, bigarrays);
try {
for (int i = 0; i < length; i++) {
out.writeByte((byte) random().nextInt(1 << 8));
}
} catch (IOException e) {
fail("should not happen " + e.getMessage());
}
assertThat(out.size(), Matchers.equalTo(length));
BytesReference ref = out.bytes();
assertThat(ref.length(), Matchers.equalTo(length));
assertThat(ref, Matchers.instanceOf(PagedBytesReference.class));
return ref;
public void testHasArray() throws IOException {
int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(1, 3));
BytesReference pbr = newBytesReference(length);
// must return true for <= pagesize
assertEquals(length <= PAGE_SIZE, pbr.hasArray());
}

}

@@ -37,13 +37,9 @@ import java.util.concurrent.CountDownLatch;
/**
* Test streaming compression (e.g. used for recovery)
*/
public abstract class AbstractCompressedStreamTestCase extends ESTestCase {
public class DeflateCompressTests extends ESTestCase {

private final Compressor compressor;

protected AbstractCompressedStreamTestCase(Compressor compressor) {
this.compressor = compressor;
}
private final Compressor compressor = new DeflateCompressor();

public void testRandom() throws IOException {
Random r = random();

@@ -35,13 +35,9 @@ import static org.hamcrest.Matchers.not;
/**
*
*/
public abstract class AbstractCompressedXContentTestCase extends ESTestCase {
public class DeflateCompressedXContentTests extends ESTestCase {

private final Compressor compressor;

protected AbstractCompressedXContentTestCase(Compressor compressor) {
this.compressor = compressor;
}
private final Compressor compressor = new DeflateCompressor();

private void assertEquals(CompressedXContent s1, CompressedXContent s2) {
Assert.assertEquals(s1, s2);
@@ -50,38 +46,26 @@ public abstract class AbstractCompressedXContentTestCase extends ESTestCase {
}

public void simpleTests() throws IOException {
Compressor defaultCompressor = CompressorFactory.defaultCompressor();
try {
CompressorFactory.setDefaultCompressor(compressor);
String str = "---\nf:this is a simple string";
CompressedXContent cstr = new CompressedXContent(str);
assertThat(cstr.string(), equalTo(str));
assertThat(new CompressedXContent(str), equalTo(cstr));
String str = "---\nf:this is a simple string";
CompressedXContent cstr = new CompressedXContent(str);
assertThat(cstr.string(), equalTo(str));
assertThat(new CompressedXContent(str), equalTo(cstr));

String str2 = "---\nf:this is a simple string 2";
CompressedXContent cstr2 = new CompressedXContent(str2);
assertThat(cstr2.string(), not(equalTo(str)));
assertThat(new CompressedXContent(str2), not(equalTo(cstr)));
assertEquals(new CompressedXContent(str2), cstr2);
} finally {
CompressorFactory.setDefaultCompressor(defaultCompressor);
}
String str2 = "---\nf:this is a simple string 2";
CompressedXContent cstr2 = new CompressedXContent(str2);
assertThat(cstr2.string(), not(equalTo(str)));
assertThat(new CompressedXContent(str2), not(equalTo(cstr)));
assertEquals(new CompressedXContent(str2), cstr2);
}

public void testRandom() throws IOException {
Compressor defaultCompressor = CompressorFactory.defaultCompressor();
try {
CompressorFactory.setDefaultCompressor(compressor);
Random r = random();
for (int i = 0; i < 1000; i++) {
String string = TestUtil.randomUnicodeString(r, 10000);
// hack to make it detected as YAML
string = "---\n" + string;
CompressedXContent compressedXContent = new CompressedXContent(string);
assertThat(compressedXContent.string(), equalTo(string));
}
} finally {
CompressorFactory.setDefaultCompressor(defaultCompressor);
Random r = random();
for (int i = 0; i < 1000; i++) {
String string = TestUtil.randomUnicodeString(r, 10000);
// hack to make it detected as YAML
string = "---\n" + string;
CompressedXContent compressedXContent = new CompressedXContent(string);
assertThat(compressedXContent.string(), equalTo(string));
}
}

@@ -1,30 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.compress.deflate;

import org.elasticsearch.common.compress.AbstractCompressedStreamTestCase;

public class DeflateCompressedStreamTests extends AbstractCompressedStreamTestCase {

public DeflateCompressedStreamTests() {
super(new DeflateCompressor());
}

}

@@ -1,30 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.compress.deflate;

import org.elasticsearch.common.compress.AbstractCompressedXContentTestCase;

public class DeflateXContentTests extends AbstractCompressedXContentTestCase {

public DeflateXContentTests() {
super(new DeflateCompressor());
}

}

@@ -357,6 +357,7 @@ public class LuceneTests extends ESTestCase {
dir.close();
}

@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/19151")
public void testAsSequentialAccessBits() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new KeywordAnalyzer()));

@@ -0,0 +1,61 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.netty;

import org.elasticsearch.common.bytes.AbstractBytesReferenceTestCase;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;

import java.io.IOException;

public class ChannelBufferBytesReferenceTests extends AbstractBytesReferenceTestCase {
@Override
protected BytesReference newBytesReference(int length) throws IOException {
ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(length, bigarrays);
for (int i = 0; i < length; i++) {
out.writeByte((byte) random().nextInt(1 << 8));
}
assertEquals(out.size(), length);
BytesReference ref = out.bytes();
assertEquals(ref.length(), length);
BytesArray bytesArray = ref.toBytesArray();
return NettyUtils.toBytesReference(ChannelBuffers.wrappedBuffer(bytesArray.array(), bytesArray.arrayOffset(),
bytesArray.length()));
}

public void testSliceOnAdvancedBuffer() throws IOException {
BytesReference bytesReference = newBytesReference(randomIntBetween(10, 3 * PAGE_SIZE));
BytesArray bytesArray = bytesReference.toBytesArray();

ChannelBuffer channelBuffer = ChannelBuffers.wrappedBuffer(bytesArray.array(), bytesArray.arrayOffset(),
bytesArray.length());
int numBytesToRead = randomIntBetween(1, 5);
for (int i = 0; i < numBytesToRead; i++) {
channelBuffer.readByte();
}
BytesReference other = NettyUtils.toBytesReference(channelBuffer);
BytesReference slice = bytesReference.slice(numBytesToRead, bytesReference.length() - numBytesToRead);
assertEquals(other, slice);

assertEquals(other.slice(3, 1), slice.slice(3, 1));
}
}

@@ -0,0 +1,94 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.netty;

import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.test.ESTestCase;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.buffer.CompositeChannelBuffer;
import org.junit.Before;

import java.io.IOException;

public class NettyUtilsTests extends ESTestCase {

private static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE;
private final BigArrays bigarrays = new BigArrays(null, new NoneCircuitBreakerService(), false);

public void testToChannelBufferWithEmptyRef() throws IOException {
ChannelBuffer channelBuffer = NettyUtils.toChannelBuffer(getRandomizedBytesReference(0));
assertSame(ChannelBuffers.EMPTY_BUFFER, channelBuffer);
}

public void testToChannelBufferWithSlice() throws IOException {
BytesReference ref = getRandomizedBytesReference(randomIntBetween(1, 3 * PAGE_SIZE));
int sliceOffset = randomIntBetween(0, ref.length());
int sliceLength = randomIntBetween(ref.length() - sliceOffset, ref.length() - sliceOffset);
BytesReference slice = ref.slice(sliceOffset, sliceLength);
ChannelBuffer channelBuffer = NettyUtils.toChannelBuffer(slice);
BytesReference bytesReference = NettyUtils.toBytesReference(channelBuffer);
assertArrayEquals(slice.toBytes(), bytesReference.toBytes());
}

public void testToChannelBufferWithSliceAfter() throws IOException {
BytesReference ref = getRandomizedBytesReference(randomIntBetween(1, 3 * PAGE_SIZE));
int sliceOffset = randomIntBetween(0, ref.length());
int sliceLength = randomIntBetween(ref.length() - sliceOffset, ref.length() - sliceOffset);
ChannelBuffer channelBuffer = NettyUtils.toChannelBuffer(ref);
BytesReference bytesReference = NettyUtils.toBytesReference(channelBuffer);
assertArrayEquals(ref.slice(sliceOffset, sliceLength).toBytes(), bytesReference.slice(sliceOffset, sliceLength).toBytes());
}

public void testToChannelBuffer() throws IOException {
BytesReference ref = getRandomizedBytesReference(randomIntBetween(1, 3 * PAGE_SIZE));
ChannelBuffer channelBuffer = NettyUtils.toChannelBuffer(ref);
BytesReference bytesReference = NettyUtils.toBytesReference(channelBuffer);
if (ref instanceof ChannelBufferBytesReference) {
assertEquals(channelBuffer, ((ChannelBufferBytesReference) ref).toChannelBuffer());
} else if (ref.hasArray() == false) { // we gather the buffers into a channel buffer
assertTrue(channelBuffer instanceof CompositeChannelBuffer);
}
assertArrayEquals(ref.toBytes(), bytesReference.toBytes());
}

private BytesReference getRandomizedBytesReference(int length) throws IOException {
// we know bytes stream output always creates a paged bytes reference, we use it to create randomized content
ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(length, bigarrays);
for (int i = 0; i < length; i++) {
out.writeByte((byte) random().nextInt(1 << 8));
}
assertEquals(out.size(), length);
BytesReference ref = out.bytes();
assertEquals(ref.length(), length);
if (randomBoolean()) {
return ref.toBytesArray();
} else if (randomBoolean()) {
BytesArray bytesArray = ref.toBytesArray();
return NettyUtils.toBytesReference(ChannelBuffers.wrappedBuffer(bytesArray.array(), bytesArray.arrayOffset(),
bytesArray.length()));
} else {
return ref;
}
}
}

@@ -30,9 +30,12 @@ import java.util.concurrent.TimeUnit;

import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.object.HasToString.hasToString;

public class TimeValueTests extends ESTestCase {

|
|||
assertEquals(new TimeValue(10, TimeUnit.SECONDS),
|
||||
TimeValue.parseTimeValue("10S", null, "test"));
|
||||
|
||||
assertEquals(new TimeValue(100, TimeUnit.MILLISECONDS),
|
||||
TimeValue.parseTimeValue("0.1s", null, "test"));
|
||||
|
||||
assertEquals(new TimeValue(10, TimeUnit.MINUTES),
|
||||
TimeValue.parseTimeValue("10 m", null, "test"));
|
||||
assertEquals(new TimeValue(10, TimeUnit.MINUTES),
|
||||
|
@ -115,14 +115,45 @@ public class TimeValueTests extends ESTestCase {
|
|||
assertEquals(new TimeValue(10, TimeUnit.DAYS),
|
||||
TimeValue.parseTimeValue("10D", null, "test"));
|
||||
|
||||
assertEquals(new TimeValue(70, TimeUnit.DAYS),
|
||||
TimeValue.parseTimeValue("10 w", null, "test"));
|
||||
assertEquals(new TimeValue(70, TimeUnit.DAYS),
|
||||
TimeValue.parseTimeValue("10w", null, "test"));
|
||||
assertEquals(new TimeValue(70, TimeUnit.DAYS),
|
||||
TimeValue.parseTimeValue("10 W", null, "test"));
|
||||
assertEquals(new TimeValue(70, TimeUnit.DAYS),
|
||||
TimeValue.parseTimeValue("10W", null, "test"));
|
||||
final int length = randomIntBetween(0, 8);
|
||||
final String zeros = new String(new char[length]).replace('\0', '0');
|
||||
assertTrue(TimeValue.parseTimeValue("-" + zeros + "1", null, "test") == TimeValue.MINUS_ONE);
|
||||
assertTrue(TimeValue.parseTimeValue(zeros + "0", null, "test") == TimeValue.ZERO);
|
||||
}
|
||||
|
||||
public void testRoundTrip() {
|
||||
final String s = randomTimeValue();
|
||||
assertThat(TimeValue.parseTimeValue(s, null, "test").getStringRep(), equalTo(s));
|
||||
final TimeValue t = new TimeValue(randomIntBetween(1, 128), randomFrom(TimeUnit.values()));
|
||||
assertThat(TimeValue.parseTimeValue(t.getStringRep(), null, "test"), equalTo(t));
|
||||
}
|
||||
|
||||
private static final String FRACTIONAL_TIME_VALUES_ARE_NOT_SUPPORTED = "fractional time values are not supported";
|
||||
|
||||
public void testNonFractionalTimeValues() {
|
||||
final String s = randomAsciiOfLength(10) + randomTimeUnit();
|
||||
final ElasticsearchParseException e =
|
||||
expectThrows(ElasticsearchParseException.class, () -> TimeValue.parseTimeValue(s, null, "test"));
|
||||
assertThat(e, hasToString(containsString("failed to parse [" + s + "]")));
|
||||
assertThat(e, not(hasToString(containsString(FRACTIONAL_TIME_VALUES_ARE_NOT_SUPPORTED))));
|
||||
assertThat(e.getCause(), instanceOf(NumberFormatException.class));
|
||||
}
|
||||
|
||||
public void testFractionalTimeValues() {
|
||||
double value;
|
||||
do {
|
||||
value = randomDouble();
|
||||
} while (value == 0);
|
||||
final String s = Double.toString(randomIntBetween(0, 128) + value) + randomTimeUnit();
|
||||
final ElasticsearchParseException e =
|
||||
expectThrows(ElasticsearchParseException.class, () -> TimeValue.parseTimeValue(s, null, "test"));
|
||||
assertThat(e, hasToString(containsString("failed to parse [" + s + "]")));
|
||||
assertThat(e, hasToString(containsString(FRACTIONAL_TIME_VALUES_ARE_NOT_SUPPORTED)));
|
||||
assertThat(e.getCause(), instanceOf(NumberFormatException.class));
|
||||
}
|
||||
|
||||
private String randomTimeUnit() {
|
||||
return randomFrom("nanos", "micros", "ms", "s", "m", "h", "d");
|
||||
}
|
||||
|
||||
private void assertEqualityAfterSerialize(TimeValue value, int expectedSize) throws IOException {
|
||||
|
@@ -134,13 +165,20 @@ public class TimeValueTests extends ESTestCase {
TimeValue inValue = new TimeValue(in);

assertThat(inValue, equalTo(value));
assertThat(inValue.duration(), equalTo(value.duration()));
assertThat(inValue.timeUnit(), equalTo(value.timeUnit()));
}

public void testSerialize() throws Exception {
assertEqualityAfterSerialize(new TimeValue(100, TimeUnit.DAYS), 8);
assertEqualityAfterSerialize(timeValueNanos(-1), 1);
assertEqualityAfterSerialize(timeValueNanos(1), 1);
assertEqualityAfterSerialize(timeValueSeconds(30), 6);
assertEqualityAfterSerialize(new TimeValue(100, TimeUnit.DAYS), 3);
assertEqualityAfterSerialize(timeValueNanos(-1), 2);
assertEqualityAfterSerialize(timeValueNanos(1), 2);
assertEqualityAfterSerialize(timeValueSeconds(30), 2);

final TimeValue timeValue = new TimeValue(randomIntBetween(0, 1024), randomFrom(TimeUnit.values()));
BytesStreamOutput out = new BytesStreamOutput();
out.writeZLong(timeValue.duration());
assertEqualityAfterSerialize(timeValue, 1 + out.bytes().length());
}

public void testFailOnUnknownUnits() {
@@ -148,7 +186,7 @@
TimeValue.parseTimeValue("23tw", null, "test");
fail("Expected ElasticsearchParseException");
} catch (ElasticsearchParseException e) {
assertThat(e.getMessage(), containsString("Failed to parse"));
assertThat(e.getMessage(), containsString("failed to parse"));
}
}

@@ -157,7 +195,7 @@
TimeValue.parseTimeValue("42", null, "test");
fail("Expected ElasticsearchParseException");
} catch (ElasticsearchParseException e) {
assertThat(e.getMessage(), containsString("Failed to parse"));
assertThat(e.getMessage(), containsString("failed to parse"));
}
}

@ -166,7 +204,7 @@ public class TimeValueTests extends ESTestCase {
|
|||
TimeValue.parseTimeValue("42ms.", null, "test");
|
||||
fail("Expected ElasticsearchParseException");
|
||||
} catch (ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), containsString("Failed to parse"));
|
||||
assertThat(e.getMessage(), containsString("failed to parse"));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.common.util.concurrent;

import org.elasticsearch.common.SuppressLoggerChecks;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.test.ESTestCase;
@@ -67,6 +68,7 @@ public class AbstractLifecycleRunnableTests extends ESTestCase {
inOrder.verifyNoMoreInteractions();
}

@SuppressLoggerChecks(reason = "mock usage")
public void testDoRunDoesNotRunWhenStoppedOrClosed() throws Exception {
Callable<?> runCallable = mock(Callable.class);
@@ -297,7 +297,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
Settings nodeSettings = Settings.builder()
.put("discovery.type", "zen") // <-- To override the local setting if set externally
.build();
String nodeName = internalCluster().startNode(nodeSettings, Version.CURRENT);
String nodeName = internalCluster().startNode(nodeSettings);
ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, nodeName);
ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nodeName);
DiscoveryNode node = new DiscoveryNode("_node_id", new InetSocketTransportAddress(InetAddress.getByName("0.0.0.0"), 0),
@@ -298,15 +298,10 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));

logger.info("--> restarting the nodes");
final Gateway gateway1 = internalCluster().getInstance(GatewayService.class, node_1).getGateway();
internalCluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
if (node_1.equals(nodeName)) {
logger.info("--> deleting the data for the first node");
gateway1.reset();
}
return null;
public boolean clearData(String nodeName) {
return node_1.equals(nodeName);
}
});
@@ -348,7 +343,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
} else {
// test with a shadow replica index
final Path dataPath = createTempDir();
logger.info("--> created temp data path for shadow replicas [" + dataPath + "]");
logger.info("--> created temp data path for shadow replicas [{}]", dataPath);
logger.info("--> starting a cluster with " + numNodes + " nodes");
final Settings nodeSettings = Settings.builder()
.put("node.add_id_to_custom_path", false)
@@ -446,13 +441,6 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
assertNotNull(ex.getCause());
assertEquals(IllegalArgumentException.class, ex.getCause().getClass());
assertEquals(ex.getCause().getMessage(), "Unknown tokenfilter type [icu_collation] for [myCollator]");

client().admin().indices().prepareUpdateSettings()
.setSettings(Settings.builder().putNull("index.analysis.filter.myCollator.type")).get();
client().admin().indices().prepareOpen("test").get();
ensureYellow();
logger.info("--> verify 1 doc in the index");
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L);
}

/**
@@ -510,13 +498,6 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
assertNotNull(ex.getCause());
assertEquals(MapperParsingException.class, ex.getCause().getClass());
assertEquals(ex.getCause().getMessage(), "analyzer [test] not found for field [field1]");

client().admin().indices().prepareUpdateSettings()
.setSettings(Settings.builder().put("index.analysis.analyzer.test.tokenizer", "keyword")).get();
client().admin().indices().prepareOpen("test").get();
ensureYellow();
logger.info("--> verify 1 doc in the index");
assertHitCount(client().prepareSearch().setQuery(matchQuery("field1", "value one")).get(), 1L);
}

public void testArchiveBrokenClusterSettings() throws Exception {
@@ -176,28 +176,28 @@ public class IndexingSlowLogTests extends ESTestCase {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Failed to parse setting [index.indexing.slowlog.threshold.index.trace] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
assertEquals(ex.getMessage(), "failed to parse setting [index.indexing.slowlog.threshold.index.trace] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
}

try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Failed to parse setting [index.indexing.slowlog.threshold.index.debug] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
assertEquals(ex.getMessage(), "failed to parse setting [index.indexing.slowlog.threshold.index.debug] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
}

try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Failed to parse setting [index.indexing.slowlog.threshold.index.info] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
assertEquals(ex.getMessage(), "failed to parse setting [index.indexing.slowlog.threshold.index.info] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
}

try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Failed to parse setting [index.indexing.slowlog.threshold.index.warn] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
assertEquals(ex.getMessage(), "failed to parse setting [index.indexing.slowlog.threshold.index.warn] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
}
}
@@ -249,28 +249,28 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.query.trace] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.query.trace] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
}

try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.query.debug] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.query.debug] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
}

try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.query.info] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.query.info] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
}

try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.query.warn] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.query.warn] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
}
}
@@ -320,28 +320,28 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.fetch.trace] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.fetch.trace] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
}

try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.fetch.debug] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.fetch.debug] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
}

try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.fetch.info] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.fetch.info] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
}

try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.fetch.warn] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.fetch.warn] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized");
}
}
@@ -76,7 +76,7 @@ public class BinaryMappingTests extends ESSingleNodeTestCase {

// case 2: a value that looks compressed: this used to fail in 1.x
BytesStreamOutput out = new BytesStreamOutput();
try (StreamOutput compressed = CompressorFactory.defaultCompressor().streamOutput(out)) {
try (StreamOutput compressed = CompressorFactory.COMPRESSOR.streamOutput(out)) {
new BytesArray(binaryValue1).writeTo(compressed);
}
final byte[] binaryValue2 = out.bytes().toBytes();
@@ -139,7 +139,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase {

String updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_ttl")
.field("default", "1w")
.field("default", "7d")
.endObject()
.startObject("properties").field("field").startObject().field("type", "text").endObject().endObject()
.endObject().endObject().string();
@@ -1086,7 +1086,7 @@ public class StoreTests extends ESTestCase {
String uuid = Store.CORRUPTED + UUIDs.randomBase64UUID();
try (IndexOutput output = dir.createOutput(uuid, IOContext.DEFAULT)) {
CodecUtil.writeHeader(output, Store.CODEC, Store.VERSION_STACK_TRACE);
output.writeString(ExceptionsHelper.detailedMessage(exception, true, 0));
output.writeString(ExceptionsHelper.detailedMessage(exception));
output.writeString(ExceptionsHelper.stackTrace(exception));
CodecUtil.writeFooter(output);
}
@@ -1102,7 +1102,7 @@ public class StoreTests extends ESTestCase {

try (IndexOutput output = dir.createOutput(uuid, IOContext.DEFAULT)) {
CodecUtil.writeHeader(output, Store.CODEC, Store.VERSION_START);
output.writeString(ExceptionsHelper.detailedMessage(exception, true, 0));
output.writeString(ExceptionsHelper.detailedMessage(exception));
CodecUtil.writeFooter(output);
}
try {
@@ -263,7 +263,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
assertThat(waitForShardDeletion(node_3, index, 0), equalTo(false));

Path server2Shard = shardDirectory(node_2, index, 0);
logger.info("--> stopping node " + node_2);
logger.info("--> stopping node {}", node_2);
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_2));

logger.info("--> running cluster_health");
@@ -140,7 +140,7 @@ public class PipelineExecutionServiceTests extends ESTestCase {
IngestDocument ingestDocument = (IngestDocument) invocationOnMock.getArguments()[0];
for (IngestDocument.MetaData metaData : IngestDocument.MetaData.values()) {
if (metaData == IngestDocument.MetaData.TTL) {
ingestDocument.setFieldValue(IngestDocument.MetaData.TTL.getFieldName(), "5w");
ingestDocument.setFieldValue(IngestDocument.MetaData.TTL.getFieldName(), "35d");
} else {
ingestDocument.setFieldValue(metaData.getFieldName(), "update" + metaData.getFieldName());
}
@@ -0,0 +1,116 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugins;

import org.elasticsearch.test.ESTestCase;

import java.util.ArrayList;
import java.util.List;

import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasItems;
import static org.hamcrest.Matchers.hasSize;

public class ProgressInputStreamTests extends ESTestCase {

private List<Integer> progresses = new ArrayList<>();

public void testThatProgressListenerIsCalled() throws Exception {
ProgressInputStream is = newProgressInputStream(0);
is.checkProgress(-1);

assertThat(progresses, hasSize(1));
assertThat(progresses, hasItems(100));
}

public void testThatProgressListenerIsCalledOnUnexpectedCompletion() throws Exception {
ProgressInputStream is = newProgressInputStream(2);
is.checkProgress(-1);
assertThat(progresses, hasItems(100));
}

public void testThatProgressListenerReturnsMaxValueOnWrongExpectedSize() throws Exception {
ProgressInputStream is = newProgressInputStream(2);

is.checkProgress(1);
assertThat(progresses, hasItems(50));

is.checkProgress(3);
assertThat(progresses, hasItems(50, 99));

is.checkProgress(-1);
assertThat(progresses, hasItems(50, 99, 100));
}

public void testOneByte() throws Exception {
ProgressInputStream is = newProgressInputStream(1);
is.checkProgress(1);
is.checkProgress(-1);

assertThat(progresses, hasItems(99, 100));

}

public void testOddBytes() throws Exception {
int odd = (randomIntBetween(100, 200) / 2) + 1;
ProgressInputStream is = newProgressInputStream(odd);
for (int i = 0; i < odd; i++) {
is.checkProgress(1);
}
is.checkProgress(-1);

assertThat(progresses, hasSize(odd+1));
assertThat(progresses, hasItem(100));
}

public void testEvenBytes() throws Exception {
int even = (randomIntBetween(100, 200) / 2);
ProgressInputStream is = newProgressInputStream(even);

for (int i = 0; i < even; i++) {
is.checkProgress(1);
}
is.checkProgress(-1);

assertThat(progresses, hasSize(even+1));
assertThat(progresses, hasItem(100));
}

public void testOnProgressCannotBeCalledMoreThanOncePerPercent() throws Exception {
int count = randomIntBetween(150, 300);
ProgressInputStream is = newProgressInputStream(count);

for (int i = 0; i < count; i++) {
is.checkProgress(1);
}
is.checkProgress(-1);

assertThat(progresses, hasSize(100));
}

private ProgressInputStream newProgressInputStream(int expectedSize) {
return new ProgressInputStream(null, expectedSize) {
@Override
public void onProgress(int percent) {
progresses.add(percent);
}
};
}
}
@@ -479,7 +479,7 @@ public class RelocationIT extends ESIntegTestCase {
indexRandom(true, docs);
numDocs *= 2;

logger.info(" --> waiting for relocation to complete", numDocs);
logger.info(" --> waiting for relocation to complete");
ensureGreen("test");// move all shards to the new node (it waits on relocation)
final int numIters = randomIntBetween(10, 20);
for (int i = 0; i < numIters; i++) {
@@ -47,6 +47,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
@@ -237,6 +238,46 @@ public class DateHistogramIT extends ESIntegTestCase {
assertThat(bucket.getDocCount(), equalTo(1L));
}

public void testSingleValued_timeZone_epoch() throws Exception {
String format = randomBoolean() ? "epoch_millis" : "epoch_second";
int millisDivider = format.equals("epoch_millis") ? 1 : 1000;
if (randomBoolean()) {
format = format + "||date_optional_time";
}
DateTimeZone tz = DateTimeZone.forID("+01:00");
SearchResponse response = client().prepareSearch("idx")
.addAggregation(dateHistogram("histo").field("date")
.dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(1)
.timeZone(tz).format(format))
.execute()
.actionGet();
assertSearchResponse(response);

Histogram histo = response.getAggregations().get("histo");
assertThat(histo, notNullValue());
assertThat(histo.getName(), equalTo("histo"));
List<? extends Bucket> buckets = histo.getBuckets();
assertThat(buckets.size(), equalTo(6));

List<DateTime> expectedKeys = new ArrayList<>();
expectedKeys.add(new DateTime(2012, 1, 1, 23, 0, DateTimeZone.UTC));
expectedKeys.add(new DateTime(2012, 2, 1, 23, 0, DateTimeZone.UTC));
expectedKeys.add(new DateTime(2012, 2, 14, 23, 0, DateTimeZone.UTC));
expectedKeys.add(new DateTime(2012, 3, 1, 23, 0, DateTimeZone.UTC));
expectedKeys.add(new DateTime(2012, 3, 14, 23, 0, DateTimeZone.UTC));
expectedKeys.add(new DateTime(2012, 3, 22, 23, 0, DateTimeZone.UTC));

Iterator<DateTime> keyIterator = expectedKeys.iterator();
for (Histogram.Bucket bucket : buckets) {
assertThat(bucket, notNullValue());
DateTime expectedKey = keyIterator.next();
assertThat(bucket.getKeyAsString(), equalTo(Long.toString(expectedKey.getMillis() / millisDivider)));
assertThat(((DateTime) bucket.getKey()), equalTo(expectedKey));
assertThat(bucket.getDocCount(), equalTo(1L));
}
}

public void testSingleValuedFieldOrderedByKeyAsc() throws Exception {
SearchResponse response = client().prepareSearch("idx")
.addAggregation(dateHistogram("histo")
@@ -86,7 +86,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
import static org.elasticsearch.test.ClusterServiceUtils.setState;
@@ -208,7 +207,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
builder.minScore(randomFloat() * 1000);
}
if (randomBoolean()) {
builder.timeout(new TimeValue(randomIntBetween(1, 100), randomFrom(TimeUnit.values())));
builder.timeout(TimeValue.parseTimeValue(randomTimeValue(), null, "timeout"));
}
if (randomBoolean()) {
builder.terminateAfter(randomIntBetween(1, 100000));
@@ -456,7 +455,7 @@ public class SearchSourceBuilderTests extends ESTestCase {

public void testEqualsAndHashcode() throws IOException {
SearchSourceBuilder firstBuilder = createSearchSourceBuilder();
assertFalse("source builder is equal to null", firstBuilder.equals(null));
assertNotNull("source builder is equal to null", firstBuilder);
assertFalse("source builder is equal to incompatible type", firstBuilder.equals(""));
assertTrue("source builder is not equal to self", firstBuilder.equals(firstBuilder));
assertThat("same source builder's hashcode returns different values if called multiple times", firstBuilder.hashCode(),
@@ -601,7 +600,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
final String query = "{ \"query\": { \"match_all\": {}}, \"timeout\": \"" + timeout + "\"}";
try (XContentParser parser = XContentFactory.xContent(query).createParser(query)) {
final SearchSourceBuilder builder = SearchSourceBuilder.fromXContent(createParseContext(parser), aggParsers, suggesters);
assertThat(builder.timeoutInMillis(), equalTo(TimeValue.parseTimeValue(timeout, null, "timeout").millis()));
assertThat(builder.timeout(), equalTo(TimeValue.parseTimeValue(timeout, null, "timeout")));
}
}
@@ -594,9 +594,9 @@ public class DecayFunctionScoreIT extends ESIntegTestCase {
searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
searchSource().query(
functionScoreQuery(QueryBuilders.matchAllQuery(), new FilterFunctionBuilder[]{
new FilterFunctionBuilder(linearDecayFunction("num1", null, "1000w")),
new FilterFunctionBuilder(linearDecayFunction("num1", null, "7000d")),
new FilterFunctionBuilder(gaussDecayFunction("num1", null, "1d")),
new FilterFunctionBuilder(exponentialDecayFunction("num1", null, "1000w"))
new FilterFunctionBuilder(exponentialDecayFunction("num1", null, "7000d"))
}).scoreMode(FiltersFunctionScoreQuery.ScoreMode.MULTIPLY))));

SearchResponse sr = response.actionGet();
@@ -138,7 +138,7 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase {
private BytesReference write(T obj) throws IOException {
try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) {
if (compress) {
try (StreamOutput compressedStreamOutput = CompressorFactory.defaultCompressor().streamOutput(bytesStreamOutput)) {
try (StreamOutput compressedStreamOutput = CompressorFactory.COMPRESSOR.streamOutput(bytesStreamOutput)) {
write(obj, compressedStreamOutput);
}
} else {
@@ -21,7 +21,6 @@ package org.elasticsearch.transport;

import org.apache.http.message.BasicHeader;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionModule;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
@@ -45,6 +44,7 @@ import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.indices.TermsLookup;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.test.ESIntegTestCase;
@@ -61,6 +61,7 @@ import java.util.Locale;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;

import static java.util.Collections.singletonList;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE;
@@ -284,15 +285,16 @@ public class ContextAndHeaderTransportIT extends ESIntegTestCase {
return internalCluster().transportClient().filterWithHeader(Collections.singletonMap(randomHeaderKey, randomHeaderValue));
}

public static class ActionLoggingPlugin extends Plugin {
public static class ActionLoggingPlugin extends Plugin implements ActionPlugin {

@Override
public Collection<Module> nodeModules() {
return Collections.<Module>singletonList(new ActionLoggingModule());
}

public void onModule(ActionModule module) {
module.registerFilter(LoggingFilter.class);
@Override
public List<Class<? extends ActionFilter>> getActionFilters() {
return singletonList(LoggingFilter.class);
}
}
@@ -220,7 +220,7 @@ public class SimpleVersioningIT extends ESIntegTestCase {
fail("did not hit expected exception");
} catch (IllegalArgumentException iae) {
// expected
assertTrue(iae.getMessage().contains("Failed to parse setting [index.gc_deletes] with value [42] as a time value: unit is missing or unrecognized"));
assertTrue(iae.getMessage().contains("failed to parse setting [index.gc_deletes] with value [42] as a time value: unit is missing or unrecognized"));
}
}
@@ -14,7 +14,7 @@ String clusterName = healths.getClusterName(); <2>
int numberOfDataNodes = healths.getNumberOfDataNodes(); <3>
int numberOfNodes = healths.getNumberOfNodes(); <4>

for (ClusterIndexHealth health : healths) { <5>
for (ClusterIndexHealth health : healths.getIndices().values()) { <5>
String index = health.getIndex(); <6>
int numberOfShards = health.getNumberOfShards(); <7>
int numberOfReplicas = health.getNumberOfReplicas(); <8>
Some files were not shown because too many files have changed in this diff.