Merge branch 'master' into feature/query-refactoring
commit 99398ad311
@@ -78,7 +78,11 @@ public final class ShardUtils {
         if (reader instanceof ElasticsearchDirectoryReader) {
             return (ElasticsearchDirectoryReader) reader;
         } else {
-            return null; // lucene needs a getDelegate method on FilteredDirectoryReader - not a big deal here
+            // We need to use FilterDirectoryReader#getDelegate and not FilterDirectoryReader#unwrap, because
+            // if there are multiple levels of filtered leaf readers the unwrap() method immediately
+            // returns the innermost leaf reader, skipping over any other filtered leaf reader that
+            // may be an instance of ElasticsearchLeafReader. This can cause us to miss the shardId.
+            return getElasticsearchDirectoryReader(((FilterDirectoryReader) reader).getDelegate());
         }
     }
     return null;
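Note on the ShardUtils hunk above: the distinction between getDelegate() and unwrap() matters for reader chains such as SomeFilterReader -> ElasticsearchDirectoryReader -> StandardDirectoryReader, where a one-shot unwrap() jumps straight to the innermost reader and skips the middle wrapper. A minimal sketch of the level-by-level walk (Lucene's DirectoryReader/FilterDirectoryReader API plus this codebase's ElasticsearchDirectoryReader; the helper name is illustrative, not the committed method verbatim):

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.FilterDirectoryReader;

    static ElasticsearchDirectoryReader findEsDirectoryReader(DirectoryReader reader) {
        if (reader instanceof ElasticsearchDirectoryReader) {
            return (ElasticsearchDirectoryReader) reader; // the wrapper that carries the ShardId
        }
        if (reader instanceof FilterDirectoryReader) {
            // descend exactly one level so wrappers in the middle of the chain are visited
            return findEsDirectoryReader(((FilterDirectoryReader) reader).getDelegate());
        }
        return null; // bottom of the chain reached without finding the ES wrapper
    }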
@@ -221,10 +221,10 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
     @Override
     public void verify(String seed) {
-        BlobContainer testBlobContainer = blobStore.blobContainer(basePath);
+        BlobContainer testBlobContainer = blobStore.blobContainer(basePath.add(testBlobPrefix(seed)));
         DiscoveryNode localNode = clusterService.localNode();
-        if (testBlobContainer.blobExists(testBlobPrefix(seed) + "-master")) {
-            try (OutputStream outputStream = testBlobContainer.createOutput(testBlobPrefix(seed) + "-" + localNode.getId())) {
+        if (testBlobContainer.blobExists("master.dat")) {
+            try (OutputStream outputStream = testBlobContainer.createOutput("data-" + localNode.getId() + ".dat")) {
                 outputStream.write(Strings.toUTF8Bytes(seed));
             } catch (IOException exp) {
                 throw new RepositoryVerificationException(repositoryName, "store location [" + blobStore + "] is not accessible on the node [" + localNode + "]", exp);
@@ -47,6 +47,7 @@ import java.nio.file.DirectoryStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -107,7 +108,7 @@ public class PluginsService extends AbstractComponent {
             List<Bundle> bundles = getPluginBundles(environment);
             tupleBuilder.addAll(loadBundles(bundles));
         } catch (IOException ex) {
-            throw new IllegalStateException("Can't load plugins into classloader", ex);
+            throw new IllegalStateException(ex);
         }

         plugins = tupleBuilder.build();
@@ -309,34 +310,30 @@ public class PluginsService extends AbstractComponent {

         try (DirectoryStream<Path> stream = Files.newDirectoryStream(pluginsDirectory)) {
             for (Path plugin : stream) {
-                try {
-                    if (FileSystemUtils.isHidden(plugin)) {
-                        logger.trace("--- skip hidden plugin file[{}]", plugin.toAbsolutePath());
-                        continue;
-                    }
-                    logger.trace("--- adding plugin [{}]", plugin.toAbsolutePath());
-                    PluginInfo info = PluginInfo.readFromProperties(plugin);
-                    List<URL> urls = new ArrayList<>();
-                    if (info.isJvm()) {
-                        // a jvm plugin: gather urls for jar files
-                        try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(plugin, "*.jar")) {
-                            for (Path jar : jarStream) {
-                                urls.add(jar.toUri().toURL());
-                            }
-                        }
-                    }
-                    final Bundle bundle;
-                    if (info.isJvm() && info.isIsolated() == false) {
-                        bundle = bundles.get(0); // purgatory
-                    } else {
-                        bundle = new Bundle();
-                        bundles.add(bundle);
-                    }
-                    bundle.plugins.add(info);
-                    bundle.urls.addAll(urls);
-                } catch (Throwable e) {
-                    logger.warn("failed to add plugin [" + plugin + "]", e);
-                }
+                if (FileSystemUtils.isHidden(plugin)) {
+                    logger.trace("--- skip hidden plugin file[{}]", plugin.toAbsolutePath());
+                    continue;
+                }
+                logger.trace("--- adding plugin [{}]", plugin.toAbsolutePath());
+                PluginInfo info = PluginInfo.readFromProperties(plugin);
+                List<URL> urls = new ArrayList<>();
+                if (info.isJvm()) {
+                    // a jvm plugin: gather urls for jar files
+                    try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(plugin, "*.jar")) {
+                        for (Path jar : jarStream) {
+                            urls.add(jar.toUri().toURL());
+                        }
+                    }
+                }
+                final Bundle bundle;
+                if (info.isJvm() && info.isIsolated() == false) {
+                    bundle = bundles.get(0); // purgatory
+                } else {
+                    bundle = new Bundle();
+                    bundles.add(bundle);
+                }
+                bundle.plugins.add(info);
+                bundle.urls.addAll(urls);
             }
         }
@@ -360,7 +357,7 @@ public class PluginsService extends AbstractComponent {
                 jars.addAll(bundle.urls);
                 JarHell.checkJarHell(jars.toArray(new URL[0]));
             } catch (Exception e) {
-                logger.warn("failed to load bundle {} due to jar hell", bundle.urls, e);
+                throw new IllegalStateException("failed to load bundle " + bundle.urls + " due to jar hell", e);
             }

             // create a child to load the plugins in this bundle
@@ -371,17 +368,13 @@
                 .build();

             for (PluginInfo pluginInfo : bundle.plugins) {
-                try {
-                    final Plugin plugin;
-                    if (pluginInfo.isJvm()) {
-                        plugin = loadPlugin(pluginInfo.getClassname(), settings);
-                    } else {
-                        plugin = null;
-                    }
-                    plugins.add(new Tuple<>(pluginInfo, plugin));
-                } catch (Throwable e) {
-                    logger.warn("failed to load plugin from [" + bundle.urls + "]", e);
-                }
+                final Plugin plugin;
+                if (pluginInfo.isJvm()) {
+                    plugin = loadPlugin(pluginInfo.getClassname(), settings);
+                } else {
+                    plugin = new SitePlugin(pluginInfo.getName(), pluginInfo.getDescription());
+                }
+                plugins.add(new Tuple<>(pluginInfo, plugin));
             }
         }

@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins;
+
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.settings.Settings;
+
+import java.io.Closeable;
+import java.util.Collection;
+import java.util.Collections;
+
+/** A site-only plugin, just serves resources */
+final class SitePlugin implements Plugin {
+    final String name;
+    final String description;
+
+    SitePlugin(String name, String description) {
+        this.name = name;
+        this.description = description;
+    }
+
+    @Override
+    public String name() {
+        return name;
+    }
+
+    @Override
+    public String description() {
+        return description;
+    }
+
+    @Override
+    public Collection<Class<? extends Module>> modules() {
+        return Collections.emptyList();
+    }
+
+    @Override
+    public Collection<? extends Module> modules(Settings settings) {
+        return Collections.emptyList();
+    }
+
+    @Override
+    public Collection<Class<? extends LifecycleComponent>> services() {
+        return Collections.emptyList();
+    }
+
+    @Override
+    public Collection<Class<? extends Module>> indexModules() {
+        return Collections.emptyList();
+    }
+
+    @Override
+    public Collection<? extends Module> indexModules(Settings settings) {
+        return Collections.emptyList();
+    }
+
+    @Override
+    public Collection<Class<? extends Closeable>> indexServices() {
+        return Collections.emptyList();
+    }
+
+    @Override
+    public Collection<Class<? extends Module>> shardModules() {
+        return Collections.emptyList();
+    }
+
+    @Override
+    public Collection<? extends Module> shardModules(Settings settings) {
+        return Collections.emptyList();
+    }
+
+    @Override
+    public Collection<Class<? extends Closeable>> shardServices() {
+        return Collections.emptyList();
+    }
+
+    @Override
+    public void processModule(Module module) {
+    }
+
+    @Override
+    public Settings additionalSettings() {
+        return Settings.EMPTY;
+    }
+}
@@ -624,12 +624,13 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
         try {
             String seed = Strings.randomBase64UUID();
             byte[] testBytes = Strings.toUTF8Bytes(seed);
-            String blobName = testBlobPrefix(seed) + "-master";
-            try (OutputStream outputStream = snapshotsBlobContainer.createOutput(blobName + "-temp")) {
+            BlobContainer testContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed)));
+            String blobName = "master.dat";
+            try (OutputStream outputStream = testContainer.createOutput(blobName + "-temp")) {
                 outputStream.write(testBytes);
             }
             // Make sure that move is supported
-            snapshotsBlobContainer.move(blobName + "-temp", blobName);
+            testContainer.move(blobName + "-temp", blobName);
             return seed;
         } catch (IOException exp) {
             throw new RepositoryVerificationException(repositoryName, "path " + basePath() + " is not accessible on master node", exp);
@@ -639,7 +640,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
     @Override
     public void endVerification(String seed) {
         try {
-            snapshotsBlobContainer.deleteBlobsByPrefix(testBlobPrefix(seed));
+            blobStore().delete(basePath().add(testBlobPrefix(seed)));
         } catch (IOException exp) {
            throw new RepositoryVerificationException(repositoryName, "cannot delete test data at " + basePath(), exp);
         }
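Note: together with the BlobStoreIndexShardRepository.verify() change earlier in this commit, all verification blobs now live in one per-seed container instead of sharing the snapshots container under a name prefix, so endVerification() can clean up with a single recursive delete rather than a prefix scan. The resulting layout, as a sketch (the exact directory name produced by testBlobPrefix(seed) is an assumption here):

    <basePath>/
        tests-<seed>/              # basePath().add(testBlobPrefix(seed))
            master.dat             # written to "master.dat-temp", then move()d by the master in startVerification()
            data-<nodeId>.dat      # written by each data node in verify(seed)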
@@ -45,7 +45,7 @@ public class SitePluginRelativePathConfigTests extends ElasticsearchIntegrationT
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         String cwdToRoot = getRelativePath(PathUtils.get(".").toAbsolutePath());
-        Path pluginDir = PathUtils.get(cwdToRoot, relativizeToRootIfNecessary(getDataPath("/org/elasticsearch/plugins")).toString());
+        Path pluginDir = PathUtils.get(cwdToRoot, relativizeToRootIfNecessary(getDataPath("/org/elasticsearch/test_plugins")).toString());

         Path tempDir = createTempDir();
         boolean useRelativeInMiddleOfPath = randomBoolean();
@@ -49,7 +49,7 @@ public class SitePluginTests extends ElasticsearchIntegrationTest {

     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
-        Path pluginDir = getDataPath("/org/elasticsearch/plugins");
+        Path pluginDir = getDataPath("/org/elasticsearch/test_plugins");
         return settingsBuilder()
                 .put(super.nodeSettings(nodeOrdinal))
                 .put("path.plugins", pluginDir.toAbsolutePath())
@@ -0,0 +1,3 @@
+site=true
+description=anotherplugin
+version=1.0
@@ -0,0 +1,3 @@
+site=true
+description=dummy
+version=1.0
@@ -0,0 +1,3 @@
+site=true
+description=subdir
+version=1.0
@@ -95,18 +95,18 @@ if __name__ == '__main__':

  run('%s; %s clean package -DskipTests' % (JAVA_ENV, MVN))

-  for f in os.listdir('core/target/releases/'):
+  for f in os.listdir('distribution/tar/target/releases/'):
    if f.endswith('.tar.gz'):
      artifact = f
      break
  else:
-    raise RuntimeError('could not find elasticsearch release under core/target/releases/')
+    raise RuntimeError('could not find elasticsearch release under distribution/tar/target/releases/')

  tmp_dir = tempfile.mkdtemp()
  p = None
  try:
    # Extract artifact:
-    run('tar -xzf core/target/releases/%s -C %s' % (artifact, tmp_dir))
+    run('tar -xzf distribution/tar/target/releases/%s -C %s' % (artifact, tmp_dir))
    es_install_dir = os.path.join(tmp_dir, artifact[:-7])
    es_plugin_path = os.path.join(es_install_dir, 'bin/plugin')
    installed_plugin_names = set()
@@ -131,6 +131,7 @@ if __name__ == '__main__':
          '-Des.node.name=smoke_tester',
          '-Des.cluster.name=smoke_tester_cluster',
          '-Des.discovery.zen.ping.multicast.enabled=false',
+          '-Des.logger.level=debug',
          '-Des.script.inline=on',
          '-Des.script.indexed=on'),
          stdout = subprocess.PIPE,
@@ -21,7 +21,6 @@
     <!-- runs an OS script -->
     <macrodef name="run-script">
         <attribute name="script"/>
-        <attribute name="dir"/>
         <attribute name="args"/>
         <attribute name="spawn" default="false"/>
         <element name="nested" optional="true"/>
@@ -31,15 +30,18 @@
             <isfalse value="@{spawn}"/>
         </condition>

-        <exec executable="cmd" osfamily="winnt" dir="@{dir}" failonerror="${failonerror}" spawn="@{spawn}">
+        <!-- create a temp CWD, to enforce that commands don't rely on CWD -->
+        <mkdir dir="${integ.temp}"/>
+
+        <exec executable="cmd" osfamily="winnt" dir="${integ.temp}" failonerror="${failonerror}" spawn="@{spawn}">
             <arg value="/c"/>
-            <arg value="@{dir}/@{script}.bat"/>
+            <arg value="@{script}.bat"/>
             <arg line="@{args}"/>
             <nested/>
         </exec>

-        <exec executable="sh" osfamily="unix" dir="@{dir}" failonerror="${failonerror}" spawn="@{spawn}">
-            <arg value="@{dir}/@{script}"/>
+        <exec executable="sh" osfamily="unix" dir="${integ.temp}" failonerror="${failonerror}" spawn="@{spawn}">
+            <arg value="@{script}"/>
             <arg line="@{args}"/>
             <nested/>
         </exec>
@@ -86,7 +88,7 @@

         <!-- install plugin -->
         <echo>Installing plugin @{name}...</echo>
-        <run-script dir="@{home}" script="bin/plugin" args="install @{name} -u ${url}"/>
+        <run-script script="@{home}/bin/plugin" args="install @{name} -u ${url}"/>

         <!-- check that plugin was installed into correct place -->
         <local name="longname"/>
@@ -116,7 +118,7 @@
         <attribute name="args" default="${integ.args}"/>
         <sequential>
             <echo>Starting up external cluster...</echo>
-            <run-script dir="@{home}" script="bin/elasticsearch" spawn="true"
+            <run-script script="@{home}/bin/elasticsearch" spawn="true"
                         args="@{args} -Des.path.repo=@{home}/repo" />

             <waitfor maxwait="3" maxwaitunit="minute" checkevery="500">
@@ -144,7 +146,7 @@
         <unzip src="${project.build.directory}/releases/elasticsearch-${project.version}.zip" dest="${integ.scratch}"/>
         <local name="home"/>
         <property name="home" location="${integ.scratch}/elasticsearch-${elasticsearch.version}"/>
-        <run-script dir="${home}" script="bin/elasticsearch" spawn="false"
+        <run-script script="${home}/bin/elasticsearch" spawn="false"
                     args="${integ.args} -Des.path.repo=${home}/repo">
             <nested>
                 <env key="JAVA_OPTS" value="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=8000"/>
@@ -13,9 +13,6 @@
     <name>Elasticsearch RPM Distribution</name>
     <packaging>rpm</packaging>

-    <properties>
-    </properties>
-
     <build>

         <filters>
@@ -89,5 +89,10 @@ set JAVA_OPTS=%JAVA_OPTS% -Dfile.encoding=UTF-8
 REM Use our provided JNA always versus the system one
 set JAVA_OPTS=%JAVA_OPTS% -Djna.nosys=true

-set ES_CLASSPATH=%ES_CLASSPATH%;%ES_HOME%/lib/${project.build.finalName}.jar;%ES_HOME%/lib/*
+set CORE_CLASSPATH=%ES_HOME%/lib/${project.build.finalName}.jar;%ES_HOME%/lib/*
+if "%ES_CLASSPATH%" == "" (
+    set ES_CLASSPATH=%CORE_CLASSPATH%
+) else (
+    set ES_CLASSPATH=%ES_CLASSPATH%;%CORE_CLASSPATH%
+)
 set ES_PARAMS=-Delasticsearch -Des-foreground=yes -Des.path.home="%ES_HOME%"
@@ -1,6 +1,12 @@
 #!/bin/sh

-ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/lib/${project.build.finalName}.jar:$ES_HOME/lib/*"
+CORE_CLASSPATH="$ES_HOME/lib/${project.build.finalName}.jar:$ES_HOME/lib/*"
+
+if [ "x$ES_CLASSPATH" = "x" ]; then
+    ES_CLASSPATH="$CORE_CLASSPATH"
+else
+    ES_CLASSPATH="$ES_CLASSPATH:$CORE_CLASSPATH"
+fi

 if [ "x$ES_MIN_MEM" = "x" ]; then
     ES_MIN_MEM=${packaging.elasticsearch.heap.min}
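Note on why the guard was added (it applies to the .bat variant above as well): the old unconditional prepend produced a classpath with an empty leading entry whenever ES_CLASSPATH was unset, and the JVM treats an empty classpath entry as the current working directory. An illustration with a hypothetical jar name:

    old line, ES_CLASSPATH unset:  :$ES_HOME/lib/elasticsearch-2.0.0.jar:$ES_HOME/lib/*
    new logic, ES_CLASSPATH unset:  $ES_HOME/lib/elasticsearch-2.0.0.jar:$ES_HOME/lib/*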
@@ -12,9 +12,6 @@
     <artifactId>elasticsearch-tar</artifactId>
     <name>Elasticsearch TAR Distribution</name>

-    <properties>
-    </properties>
-
     <build>
         <filters>
             <filter>${project.basedir}/../src/main/packaging/packaging.properties</filter>
@@ -12,10 +12,6 @@
     <artifactId>elasticsearch-zip</artifactId>
     <name>Elasticsearch ZIP Distribution</name>

-    <properties>
-        <skip.integ.tests>false</skip.integ.tests>
-    </properties>
-
     <build>
         <filters>
             <filter>${project.basedir}/../src/main/packaging/packaging.properties</filter>
@@ -12,7 +12,6 @@ This section describes the following CRUD APIs:
 .Multi-document APIs
 * <<java-docs-multi-get>>
 * <<java-docs-bulk>>
-* <<java-docs-delete-by-query>>

 NOTE: All CRUD APIs are single-index APIs. The `index` parameter accepts a single
 index name, or an `alias` which points to a single index.
@@ -28,5 +27,3 @@ include::docs/update.asciidoc[]
 include::docs/multi-get.asciidoc[]

 include::docs/bulk.asciidoc[]
-
-include::docs/delete-by-query.asciidoc[]
@@ -1,34 +0,0 @@
-[[java-docs-delete-by-query]]
-=== Delete By Query API
-
-The delete by query API allows one to delete documents from one or more
-indices and one or more types based on a <<java-query-dsl,query>>.
-
-It's available as a plugin so you need to explicitly declare it in your project:
-
-[source,xml]
---------------------------------------------------
-<dependency>
-    <groupId>org.elasticsearch.plugin</groupId>
-    <artifactId>elasticsearch-delete-by-query</artifactId>
-    <version>${es.version}</version>
-</dependency>
---------------------------------------------------
-
-To use it from Java, you can do the following:
-
-[source,java]
---------------------------------------------------
-import static org.elasticsearch.index.query.QueryBuilders.*;
-
-DeleteByQueryResponse response = client
-    .prepareDeleteByQuery("test") <1>
-    .setQuery(termQuery("_type", "type1")) <2>
-    .get();
---------------------------------------------------
-<1> index name
-<2> query
-
-For more information on the delete by query operation, check out the
-{ref}/docs-delete-by-query.html[delete_by_query API]
-docs.
@@ -18,7 +18,7 @@ if (rem < 0) {
 bucket_key = value - rem
 --------------------------------------------------

-From the rounding function above it can be seen that the intervals themsevles **must** be integers.
+From the rounding function above it can be seen that the intervals themselves **must** be integers.

 The following snippet "buckets" the products based on their `price` by interval of `50`:
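A worked instance of the rounding function referenced in the hunk above may help; this is a sketch in Java of the arithmetic from the surrounding docs snippet, not code from this commit:

    long interval = 50;
    long value = -7;               // e.g. a value below zero
    long rem = value % interval;   // -7 (Java's % keeps the sign of the dividend)
    if (rem < 0) {
        rem += interval;           // 43
    }
    long bucketKey = value - rem;  // -50: every value in [-50, 0) lands in bucket -50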
@@ -15,7 +15,6 @@ This section describes the following CRUD APIs:
 .Multi-document APIs
 * <<docs-multi-get>>
 * <<docs-bulk>>
-* <<docs-delete-by-query>>

 NOTE: All CRUD APIs are single-index APIs. The `index` parameter accepts a single
 index name, or an `alias` which points to a single index.
@@ -34,8 +33,6 @@ include::docs/multi-get.asciidoc[]

 include::docs/bulk.asciidoc[]

-include::docs/delete-by-query.asciidoc[]
-
 include::docs/termvectors.asciidoc[]

 include::docs/multi-termvectors.asciidoc[]
@@ -1,147 +0,0 @@
-[[docs-delete-by-query]]
-== Delete By Query API
-
-The delete by query API allows to delete documents from one or more
-indices and one or more types based on a query. The query can either be
-provided using a simple query string as a parameter, or using the
-<<query-dsl,Query DSL>> defined within the request
-body. Here is an example:
-
-[source,js]
---------------------------------------------------
-$ curl -XDELETE 'http://localhost:9200/twitter/tweet/_query?q=user:kimchy'
-
-$ curl -XDELETE 'http://localhost:9200/twitter/tweet/_query' -d '{
-    "query" : {
-        "term" : { "user" : "kimchy" }
-    }
-}
-'
---------------------------------------------------
-
-NOTE: The query being sent in the body must be nested in a `query` key, same as
-the <<search-search,search api>> works
-
-Both above examples end up doing the same thing, which is delete all
-tweets from the twitter index for a certain user. The result of the
-commands is:
-
-[source,js]
---------------------------------------------------
-{
-    "_indices" : {
-        "twitter" : {
-            "_shards" : {
-                "total" : 10,
-                "failed" : 0,
-                "successful" : 10,
-            }
-        }
-    }
-}
---------------------------------------------------
-
-Note, delete by query bypasses versioning support. Also, it is not
-recommended to delete "large chunks of the data in an index", many
-times, it's better to simply reindex into a new index.
-
-[float]
-[[multiple-indices]]
-=== Multiple Indices and Types
-
-The delete by query API can be applied to multiple types within an
-index, and across multiple indices. For example, we can delete all
-documents across all types within the twitter index:
-
-[source,js]
---------------------------------------------------
-$ curl -XDELETE 'http://localhost:9200/twitter/_query?q=user:kimchy'
---------------------------------------------------
-
-We can also delete within specific types:
-
-[source,js]
---------------------------------------------------
-$ curl -XDELETE 'http://localhost:9200/twitter/tweet,user/_query?q=user:kimchy'
---------------------------------------------------
-
-We can also delete all tweets with a certain tag across several indices
-(for example, when each user has his own index):
-
-[source,js]
---------------------------------------------------
-$ curl -XDELETE 'http://localhost:9200/kimchy,elasticsearch/_query?q=tag:wow'
---------------------------------------------------
-
-Or even delete across all indices:
-
-[source,js]
---------------------------------------------------
-$ curl -XDELETE 'http://localhost:9200/_all/_query?q=tag:wow'
---------------------------------------------------
-
-[float]
-[[delete-by-query-parameters]]
-=== Request Parameters
-
-When executing a delete by query using the query parameter `q`, the
-query passed is a query string using Lucene query parser. There are
-additional parameters that can be passed:
-
-[cols="<,<",options="header",]
-|=======================================================================
-|Name |Description
-|df |The default field to use when no field prefix is defined within the
-query.
-
-|analyzer |The analyzer name to be used when analyzing the query string.
-
-|default_operator |The default operator to be used, can be `AND` or
-`OR`. Defaults to `OR`.
-|=======================================================================
-
-[float]
-[[request-body]]
-=== Request Body
-
-The delete by query can use the <<query-dsl,Query
-DSL>> within its body in order to express the query that should be
-executed and delete all documents. The body content can also be passed
-as a REST parameter named `source`.
-
-[float]
-[[delete-by-query-distributed]]
-=== Distributed
-
-The delete by query API is broadcast across all primary shards, and from
-there, replicated across all shards replicas.
-
-[float]
-[[delete-by-query-routing]]
-=== Routing
-
-The routing value (a comma separated list of the routing values) can be
-specified to control which shards the delete by query request will be
-executed on.
-
-[float]
-[[delete-by-query-consistency]]
-=== Write Consistency
-
-Control if the operation will be allowed to execute based on the number
-of active shards within that partition (replication group). The values
-allowed are `one`, `quorum`, and `all`. The parameter to set it is
-`consistency`, and it defaults to the node level setting of
-`action.write_consistency` which in turn defaults to `quorum`.
-
-For example, in a N shards with 2 replicas index, there will have to be
-at least 2 active shards within the relevant partition (`quorum`) for
-the operation to succeed. In a N shards with 1 replica scenario, there
-will need to be a single shard active (in this case, `one` and `quorum`
-is the same).
-
-[float]
-[[limitations]]
-=== Limitations
-
-The delete by query does not support the following queries and filters: `has_child` and `has_parent`.
@@ -70,7 +70,7 @@ setting the routing parameter.

 Note that deleting a parent document does not automatically delete its
 children. One way of deleting all child documents given a parent's id is
-to perform a <<docs-delete-by-query,delete by query>> on the child
+to use the `delete-by-query` plugin to perform a delete on the child
 index with the automatically generated (and indexed)
 field _parent, which is in the format parent_type#parent_id.
@@ -478,17 +478,7 @@ Deleting a document is fairly straightforward. This example shows how to delete
 curl -XDELETE 'localhost:9200/customer/external/2?pretty'
 --------------------------------------------------

-We also have the ability to delete multiple documents that match a query condition. This example shows how to delete all customers whose names contain "John":
-
-[source,sh]
---------------------------------------------------
-curl -XDELETE 'localhost:9200/customer/external/_query?pretty' -d '
-{
-  "query": { "match": { "name": "John" } }
-}'
---------------------------------------------------
-
-Note above that the URI has changed to `/_query` to signify a delete-by-query API with the delete query criteria in the body, but we are still using the DELETE verb. Don't worry yet about the query syntax as we will cover that later in this tutorial.
+The `delete-by-query` plugin can delete all documents matching a specific query.

 === Batch Processing
@@ -169,7 +169,7 @@ behaviour can be reenabled on an index-by-index basis with the setting:
 === Search requests

 While the `search` API takes a top-level `query` parameter, the
-<<search-count,`count`>>, <<docs-delete-by-query,`delete-by-query`>> and
+<<search-count,`count`>>, `delete-by-query` and
 <<search-validate,`validate-query`>> requests expected the whole body to be a
 query. These now _require_ a top-level `query` parameter:
@@ -72,7 +72,7 @@ In addition, the following node settings related to routing have been deprecated
 === Async replication

 The `replication` parameter has been removed from all CRUD operations (index,
-update, delete, bulk, delete-by-query). These operations are now synchronous
+update, delete, bulk). These operations are now synchronous
 only, and a request will only return once the changes have been replicated to
 all active shards in the shard group.
@@ -499,7 +499,7 @@ The `wildcard` and `regexp` query natively use a lot of memory and because the p
 this can easily take up the available memory in the heap space. If possible try to use a `prefix` query or ngramming to
 achieve the same result (with way less memory being used).

-The delete-by-query API doesn't work to unregister a query, it only deletes the percolate documents from disk. In order
+The `delete-by-query` plugin doesn't work to unregister a query, it only deletes the percolate documents from disk. In order
 to update the registered queries in memory the index needs to be closed and opened.

 [float]
@@ -71,6 +71,12 @@ public class S3BlobStore extends AbstractComponent implements BlobStore {
         }

         this.numberOfRetries = maxRetries;
+
+        // Note: the method client.doesBucketExist() may return 'true' if the bucket exists
+        // but we don't have access to it (ie, 403 Forbidden response code)
+        // Also, if invalid security credentials are used to execute this method, the
+        // client is not able to distinguish between bucket permission errors and
+        // invalid credential errors, and this method could return an incorrect result.
         if (!client.doesBucketExist(bucket)) {
             if (region != null) {
                 client.createBucket(bucket, region);
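Given the caveat in the new comment, a caller that must tell "bucket missing" apart from "bucket forbidden" can force an authorized request instead; a sketch against the AWS SDK v1 AmazonS3 interface used here, not part of this commit:

    import com.amazonaws.AmazonServiceException;

    // getBucketAcl() performs a real, authorized call, so a missing bucket and a
    // forbidden bucket surface as distinct AmazonServiceException status codes.
    try {
        client.getBucketAcl(bucket);
        // bucket exists and these credentials can access it
    } catch (AmazonServiceException e) {
        if (e.getStatusCode() == 404) {
            // bucket does not exist
        } else if (e.getStatusCode() == 403) {
            // bucket exists but access is denied
        }
    }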
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.elasticsearch.plugin</groupId>
+        <artifactId>elasticsearch-plugin</artifactId>
+        <version>2.0.0-beta1-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>elasticsearch-example-site</artifactId>
+    <name>Elasticsearch Example site plugin</name>
+    <description>Demonstrates how to serve resources via elasticsearch.</description>
+
+    <properties>
+        <elasticsearch.assembly.descriptor>${project.basedir}/src/main/assemblies/plugin-assembly.xml</elasticsearch.assembly.descriptor>
+        <elasticsearch.plugin.site>true</elasticsearch.plugin.site>
+        <elasticsearch.plugin.classname>NA</elasticsearch.plugin.classname>
+        <elasticsearch.plugin.jvm>false</elasticsearch.plugin.jvm>
+
+        <tests.rest.suite>example</tests.rest.suite>
+        <tests.rest.load_packaged>false</tests.rest.load_packaged>
+        <skip.unit.tests>true</skip.unit.tests>
+    </properties>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-assembly-plugin</artifactId>
+            </plugin>
+            <!-- disable jar plugin, we have no jar -->
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>default-jar</id>
+                        <phase>none</phase>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>
@@ -0,0 +1,15 @@
+# Integration tests for Example site plugin
+#
+"Example site loaded":
+    - do:
+        cluster.state: {}
+
+    # Get master node id
+    - set: { master_node: master }
+
+    - do:
+        nodes.info: {}
+
+    - match: { nodes.$master.plugins.0.name: example-site }
+    - match: { nodes.$master.plugins.0.jvm: false }
+    - match: { nodes.$master.plugins.0.site: true }
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<assembly>
+    <id>plugin</id>
+    <formats>
+        <format>zip</format>
+    </formats>
+    <includeBaseDirectory>false</includeBaseDirectory>
+    <!-- _site/ directory containing contents -->
+    <fileSets>
+        <fileSet>
+            <directory>${project.basedir}/src/site</directory>
+            <outputDirectory></outputDirectory>
+        </fileSet>
+    </fileSets>
+    <!-- plugin descriptor -->
+    <files>
+        <file>
+            <source>${elasticsearch.tools.directory}/plugin-metadata/plugin-descriptor.properties</source>
+            <outputDirectory></outputDirectory>
+            <filtered>true</filtered>
+        </file>
+    </files>
+</assembly>
@@ -0,0 +1,6 @@
+<html>
+<head>
+    <title>Page title</title>
+</head>
+<body>Page body</body>
+</html>
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.example;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import org.elasticsearch.test.rest.ElasticsearchRestTestCase;
+import org.elasticsearch.test.rest.RestTestCandidate;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+
+import java.io.IOException;
+
+public class SiteRestIT extends ElasticsearchRestTestCase {
+
+    public SiteRestIT(@Name("yaml") RestTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
+        return ElasticsearchRestTestCase.createParameters(0, 1);
+    }
+}
+
@@ -384,6 +384,7 @@
     <module>cloud-gce</module>
     <module>cloud-azure</module>
     <module>cloud-aws</module>
+    <module>example-site</module>
     <module>lang-python</module>
     <module>lang-javascript</module>
     <module>delete-by-query</module>