Merge branch 'master' into ccr
* master:
  silence InstallPluginCommandTests, see https://github.com/elastic/elasticsearch/issues/30900
  Remove left-over comment
  Fix double semicolon in import statement
  [TEST] Fix minor random bug from #30794
  Include size of snapshot in snapshot metadata #18543, bwc clean up (#30890)
  Enabling testing against an external cluster (#30885)
  Add public key header/footer (#30877)
  SQL: Remove the last remaining server dependencies from jdbc (#30771)
  Include size of snapshot in snapshot metadata (#29602)
  Do not serialize basic license exp in x-pack info (#30848)
  Change BWC version for VerifyRepositoryResponse (#30796)
  [DOCS] Document index name limitations (#30826)
  Harmonize include_defaults tests (#30700)
commit 03e3bd28c9
@@ -70,31 +70,44 @@ public class RestIntegTestTask extends DefaultTask {
runner.parallelism = '1'
runner.include('**/*IT.class')
runner.systemProperty('tests.rest.load_packaged', 'false')
// we pass all nodes to the rest cluster to allow the clients to round-robin between them
// this is more realistic than just talking to a single node
runner.systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}")
runner.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}")
// TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
// that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
// both as separate sysprops
runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}")

// dump errors and warnings from cluster log on failure
TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() {
@Override
void afterExecute(Task task, TaskState state) {
if (state.failure != null) {
for (NodeInfo nodeInfo : nodes) {
printLogExcerpt(nodeInfo)
if (System.getProperty("tests.rest.cluster") == null) {
if (System.getProperty("tests.cluster") != null) {
throw new IllegalArgumentException("tests.rest.cluster and tests.cluster must both be null or non-null")
}
// we pass all nodes to the rest cluster to allow the clients to round-robin between them
// this is more realistic than just talking to a single node
runner.systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}")
runner.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}")
// TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
// that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
// both as separate sysprops
runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}")

// dump errors and warnings from cluster log on failure
TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() {
@Override
void afterExecute(Task task, TaskState state) {
if (state.failure != null) {
for (NodeInfo nodeInfo : nodes) {
printLogExcerpt(nodeInfo)
}
}
}
}
}
runner.doFirst {
project.gradle.addListener(logDumpListener)
}
runner.doLast {
project.gradle.removeListener(logDumpListener)
runner.doFirst {
project.gradle.addListener(logDumpListener)
}
runner.doLast {
project.gradle.removeListener(logDumpListener)
}
} else {
if (System.getProperty("tests.cluster") == null) {
throw new IllegalArgumentException("tests.rest.cluster and tests.cluster must both be null or non-null")
}
// an external cluster was specified and all responsibility for cluster configuration is taken by the user
runner.systemProperty('tests.rest.cluster', System.getProperty("tests.rest.cluster"))
runner.systemProperty('test.cluster', System.getProperty("tests.cluster"))
}

// copy the rest spec/tests into the test resources
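The comma-separated host list handed over in `tests.rest.cluster` is what lets test clients round-robin between nodes. A minimal Java sketch of that idea (a hypothetical helper of my own, not part of the build plugin):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical illustration of the round-robin idea behind passing all node
// HTTP URIs in the comma-separated 'tests.rest.cluster' system property.
public final class RoundRobinHosts {
    private final List<String> hosts;
    private final AtomicInteger next = new AtomicInteger();

    RoundRobinHosts(String commaSeparated) {
        this.hosts = Arrays.asList(commaSeparated.split(","));
    }

    String nextHost() {
        // cycle through the nodes instead of always talking to the first one
        return hosts.get(Math.floorMod(next.getAndIncrement(), hosts.size()));
    }

    public static void main(String[] args) {
        RoundRobinHosts rr =
            new RoundRobinHosts(System.getProperty("tests.rest.cluster", "127.0.0.1:9200,127.0.0.1:9201"));
        System.out.println(rr.nextHost());
        System.out.println(rr.nextHost());
    }
}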
@@ -109,7 +122,10 @@ public class RestIntegTestTask extends DefaultTask {
clusterInit.enabled = false
return // no need to add cluster formation tasks if the task won't run!
}
nodes = ClusterFormationTasks.setup(project, "${name}Cluster", runner, clusterConfig)
// only create the cluster if needed as otherwise an external cluster to use was specified
if (System.getProperty("tests.rest.cluster") == null) {
nodes = ClusterFormationTasks.setup(project, "${name}Cluster", runner, clusterConfig)
}
super.dependsOn(runner.finalizedBy)
}
}
@@ -47,6 +47,7 @@ import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.env.Environment;

import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
@@ -71,7 +72,6 @@ import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.security.Security;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
@@ -543,8 +543,8 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
InputStream fin = pluginZipInputStream(zip);
// sin is a URL stream to the signature corresponding to the downloaded plugin zip
InputStream sin = urlOpenStream(ascUrl);
// pin is a decoded base64 stream over the embedded public key in RFC2045 format
InputStream pin = Base64.getMimeDecoder().wrap(getPublicKey())) {
// pin is an input stream to the public key in ASCII-Armor format (RFC4880); the Armor data is in RFC2045 format
InputStream pin = getPublicKey()) {
final JcaPGPObjectFactory factory = new JcaPGPObjectFactory(PGPUtil.getDecoderStream(sin));
final PGPSignature signature = ((PGPSignatureList) factory.nextObject()).get(0);

@@ -555,7 +555,19 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
}

// compute the signature of the downloaded plugin zip
final PGPPublicKeyRingCollection collection = new PGPPublicKeyRingCollection(pin, new JcaKeyFingerprintCalculator());
final List<String> lines =
new BufferedReader(new InputStreamReader(pin, StandardCharsets.UTF_8)).lines().collect(Collectors.toList());
// skip armor headers and possible blank line
int index = 1;
for (; index < lines.size(); index++) {
if (lines.get(index).matches(".*: .*") == false && lines.get(index).matches("\\s*") == false) {
break;
}
}
final byte[] armoredData =
lines.subList(index, lines.size() - 1).stream().collect(Collectors.joining("\n")).getBytes(StandardCharsets.UTF_8);
final InputStream ain = Base64.getMimeDecoder().wrap(new ByteArrayInputStream(armoredData));
final PGPPublicKeyRingCollection collection = new PGPPublicKeyRingCollection(ain, new JcaKeyFingerprintCalculator());
final PGPPublicKey key = collection.getPublicKey(signature.getKeyID());
signature.init(new JcaPGPContentVerifierBuilderProvider().setProvider(new BouncyCastleProvider()), key);
final byte[] buffer = new byte[1024];
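For reference, a self-contained sketch of the same de-armoring idea (my own simplification, not the committed method: the patch above keeps the `=XXXX` CRC24 line inside the decoded span, while this version drops it explicitly before decoding):

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Base64;
import java.util.List;

// Simplified stand-in for the armor-stripping logic above.
public final class Dearmor {
    static byte[] dearmor(String armored) {
        List<String> lines = Arrays.asList(armored.split("\\r?\\n"));
        int start = 1; // skip the "-----BEGIN PGP PUBLIC KEY BLOCK-----" line
        while (start < lines.size()
                && (lines.get(start).matches(".*: .*") || lines.get(start).trim().isEmpty())) {
            start++; // skip "Key: Value" armor headers and the blank separator line
        }
        int end = lines.size() - 1; // index of the "-----END ...-----" line
        if (end > start && lines.get(end - 1).startsWith("=")) {
            end--; // drop the "=XXXX" CRC24 checksum line as well
        }
        String body = String.join("\n", lines.subList(start, end));
        return Base64.getMimeDecoder().decode(body.getBytes(StandardCharsets.UTF_8));
    }

    public static void main(String[] args) {
        String key = "-----BEGIN PGP PUBLIC KEY BLOCK-----\n"
            + "Version: SKS 1.1.6\n"
            + "\n"
            + Base64.getEncoder().encodeToString("not a real key".getBytes(StandardCharsets.UTF_8)) + "\n"
            + "=AbCd\n"
            + "-----END PGP PUBLIC KEY BLOCK-----\n";
        System.out.println(dearmor(key).length); // 14 bytes round-tripped
    }
}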
@@ -597,7 +609,7 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
* @return an input stream to the public key
*/
InputStream getPublicKey() {
return InstallPluginCommand.class.getResourceAsStream("/public_key");
return InstallPluginCommand.class.getResourceAsStream("/public_key.asc");
}

/**
@@ -1,3 +1,7 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: SKS 1.1.6
Comment: Hostname: pgp.mit.edu

mQENBFI3HsoBCADXDtbNJnxbPqB1vDNtCsqhe49vFYsZN9IOZsZXgp7aHjh6CJBDA+bGFOwy
hbd7at35jQjWAw1O3cfYsKAmFy+Ar3LHCMkV3oZspJACTIgCrwnkic/9CUliQe324qvObU2Q
RtP4Fl0zWcfb/S8UYzWXWIFuJqMvE9MaRY1bwUBvzoqavLGZj3SF1SPO+TB5QrHkrQHBsmX+
@@ -22,3 +26,4 @@ EyUJ8SKsaHh4jV9wp9KmC8C+9CwMukL7vM5w8cgvJoAwsp3Fn59AxWthN3XJYcnMfStkIuWg
R7U2r+a210W6vnUxU4oN0PmMcursYPyeV0NX/KQeUeNMwGTFB6QHS/anRaGQewijkrYYoTNt
fllxIu9XYmiBERQ/qPDlGRlOgVTd9xUfHFkzB52c70E=
=92oX
-----END PGP PUBLIC KEY BLOCK-----
@@ -23,6 +23,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import com.google.common.jimfs.Configuration;
import com.google.common.jimfs.Jimfs;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
import org.bouncycastle.bcpg.ArmoredOutputStream;
import org.bouncycastle.bcpg.BCPGOutputStream;
import org.bouncycastle.bcpg.HashAlgorithmTags;
@@ -115,6 +116,7 @@ import static org.hamcrest.Matchers.hasToString;
import static org.hamcrest.Matchers.not;

@LuceneTestCase.SuppressFileSystems("*")
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30900")
public class InstallPluginCommandTests extends ESTestCase {

private InstallPluginCommand skipJarHellCommand;
@@ -893,12 +895,7 @@ public class InstallPluginCommandTests extends ESTestCase {
final ArmoredOutputStream armored = new ArmoredOutputStream(output);
secretKey.getPublicKey().encode(armored);
armored.close();
final String publicKey = new String(output.toByteArray(), "UTF-8");
int start = publicKey.indexOf("\n", 1 + publicKey.indexOf("\n"));
int end = publicKey.lastIndexOf("\n", publicKey.lastIndexOf("\n") - 1);
// strip the header (first two lines) and footer (last line)
final String substring = publicKey.substring(1 + start, end);
return new ByteArrayInputStream(substring.getBytes("UTF-8"));
return new ByteArrayInputStream(output.toByteArray());
} catch (final IOException e) {
throw new AssertionError(e);
}
@@ -1,16 +1,39 @@
[[indices-create-index]]
== Create Index

The create index API allows to instantiate an index. Elasticsearch
provides support for multiple indices, including executing operations
across several indices.
The Create Index API is used to manually create an index in Elasticsearch. All documents in Elasticsearch
are stored inside of one index or another.

The most basic command is the following:

[source,js]
--------------------------------------------------
PUT twitter
--------------------------------------------------
// CONSOLE

This creates an index named `twitter` with all default settings.

[NOTE]
.Index name limitations
======================================================
There are several limitations to what you can name your index. The complete list of limitations is:

- Lowercase only
- Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, `#`
- Indices prior to 7.0 could contain a colon (`:`), but that's been deprecated and won't be supported in 7.0+
- Cannot start with `-`, `_`, `+`
- Cannot be `.` or `..`
- Cannot be longer than 255 bytes (note it is bytes, so multi-byte characters will count towards the 255 limit faster)

======================================================

[float]
[[create-index-settings]]
=== Index Settings

Each index created can have specific settings
associated with it.
associated with it, defined in the body:

[source,js]
--------------------------------------------------
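As a quick cross-check of the rules in the note above, an illustrative validator (my own sketch, not Elasticsearch's own name validation):

import java.nio.charset.StandardCharsets;
import java.util.Locale;
import java.util.regex.Pattern;

// Illustrative helper (not Elasticsearch's own validator) encoding the
// index name limitations listed above.
public final class IndexNameRules {
    private static final Pattern FORBIDDEN = Pattern.compile("[\\\\/*?\"<>| ,#:]");

    static boolean isValid(String name) {
        if (name.isEmpty() || name.equals(".") || name.equals("..")) return false;
        if (!name.equals(name.toLowerCase(Locale.ROOT))) return false;            // lowercase only
        if (name.startsWith("-") || name.startsWith("_") || name.startsWith("+")) return false;
        if (FORBIDDEN.matcher(name).find()) return false;                         // ':' included (deprecated pre-7.0)
        // the limit is bytes, not characters, so multi-byte characters count for more
        return name.getBytes(StandardCharsets.UTF_8).length <= 255;
    }

    public static void main(String[] args) {
        System.out.println(isValid("twitter"));  // true
        System.out.println(isValid("Twitter"));  // false: not lowercase
        System.out.println(isValid("-logs"));    // false: leading '-'
        System.out.println(isValid("a:b"));      // false here; colons are deprecated before 7.0
    }
}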
@@ -28,25 +51,6 @@ PUT twitter
<1> Default for `number_of_shards` is 1
<2> Default for `number_of_replicas` is 1 (i.e. one replica for each primary shard)

The above second curl example shows how an index called `twitter` can be
created with specific settings for it using http://www.yaml.org[YAML].
In this case, creating an index with 3 shards, each with 2 replicas. The
index settings can also be defined with http://www.json.org[JSON]:

[source,js]
--------------------------------------------------
PUT twitter
{
    "settings" : {
        "index" : {
            "number_of_shards" : 3,
            "number_of_replicas" : 2
        }
    }
}
--------------------------------------------------
// CONSOLE

or more simplified

[source,js]
@@ -35,7 +35,7 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x.
* <<breaking_70_java_changes>>
* <<breaking_70_settings_changes>>
* <<breaking_70_scripting_changes>>
* <<breaking_70_snapshotstats_changes>>

include::migrate_7_0/aggregations.asciidoc[]
include::migrate_7_0/analysis.asciidoc[]
@@ -49,3 +49,4 @@ include::migrate_7_0/api.asciidoc[]
include::migrate_7_0/java.asciidoc[]
include::migrate_7_0/settings.asciidoc[]
include::migrate_7_0/scripting.asciidoc[]
include::migrate_7_0/snapshotstats.asciidoc[]
docs/reference/migration/migrate_7_0/snapshotstats.asciidoc (new file, +13 lines)
@@ -0,0 +1,13 @@
[[breaking_70_snapshotstats_changes]]
=== Snapshot stats changes

Snapshot stats details are provided in a new structured way:

* `total` section for all the files that are referenced by the snapshot.
* `incremental` section for those files that actually needed to be copied over as part of the incremental snapshotting.
* In case of a snapshot that's still in progress, there's also a `processed` section for files that are in the process of being copied.

==== Deprecated `number_of_files`, `processed_files`, `total_size_in_bytes` and `processed_size_in_bytes` snapshot stats properties have been removed

* Properties `number_of_files` and `total_size_in_bytes` are removed and should be replaced by values of nested object `total`.
* Properties `processed_files` and `processed_size_in_bytes` are removed and should be replaced by values of nested object `processed`.
@@ -563,6 +563,54 @@ GET /_snapshot/my_backup/snapshot_1/_status
// CONSOLE
// TEST[continued]

The output looks similar to the following:

[source,js]
--------------------------------------------------
{
  "snapshots": [
    {
      "snapshot": "snapshot_1",
      "repository": "my_backup",
      "uuid": "XuBo4l4ISYiVg0nYUen9zg",
      "state": "SUCCESS",
      "include_global_state": true,
      "shards_stats": {
        "initializing": 0,
        "started": 0,
        "finalizing": 0,
        "done": 5,
        "failed": 0,
        "total": 5
      },
      "stats": {
        "incremental": {
          "file_count": 8,
          "size_in_bytes": 4704
        },
        "processed": {
          "file_count": 7,
          "size_in_bytes": 4254
        },
        "total": {
          "file_count": 8,
          "size_in_bytes": 4704
        },
        "start_time_in_millis": 1526280280355,
        "time_in_millis": 358
      }
    }
  ]
}
--------------------------------------------------
// TESTRESPONSE

The output is composed of different sections. The `stats` sub-object provides details on the number and size of files that were
snapshotted. As snapshots are incremental, copying only the Lucene segments that are not already in the repository,
the `stats` object contains a `total` section for all the files that are referenced by the snapshot, as well as an `incremental` section
for those files that actually needed to be copied over as part of the incremental snapshotting. In case of a snapshot that's still
in progress, there's also a `processed` section that contains information about the files that are in the process of being copied.

Multiple ids are also supported:

[source,sh]
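The nested sections are straightforward to consume programmatically; a small sketch of pulling them out of the response above (assumes the Jackson library, not an Elasticsearch API; payload trimmed to the relevant fields):

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

// Sketch of reading the new nested stats sections from the status response.
public final class ReadSnapshotStats {
    public static void main(String[] args) throws Exception {
        String json = "{\"snapshots\":[{\"stats\":{"
            + "\"incremental\":{\"file_count\":8,\"size_in_bytes\":4704},"
            + "\"total\":{\"file_count\":8,\"size_in_bytes\":4704},"
            + "\"start_time_in_millis\":1526280280355,\"time_in_millis\":358}}]}";
        JsonNode stats = new ObjectMapper().readTree(json)
            .path("snapshots").get(0).path("stats");
        long incremental = stats.path("incremental").path("size_in_bytes").asLong();
        long total = stats.path("total").path("size_in_bytes").asLong();
        System.out.println(incremental + " of " + total + " bytes actually copied");
    }
}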
@@ -25,9 +25,3 @@ dependencies {
testCompile project(path: ':modules:rank-eval', configuration: 'runtime')
testCompile project(path: ':modules:lang-mustache', configuration: 'runtime')
}

/*
* One of the integration tests doesn't work with the zip distribution
* and will be fixed later.
* Tracked by https://github.com/elastic/elasticsearch/issues/30628
*/
@@ -0,0 +1,28 @@
---
setup:
  - do:
      indices.create:
        body:
          settings:
            index:
              number_of_shards: 1
              number_of_replicas: 1
        index: test-index
---
Test retrieval of default settings:
  - skip:
      version: " - 6.3.99"
      reason: include_defaults will not work in mixed-mode clusters containing nodes pre-6.4
  - do:
      indices.get_settings:
        flat_settings: true
        index: test-index
  - is_false:
      test-index.settings.index\.refresh_interval
  - do:
      indices.get_settings:
        include_defaults: true
        flat_settings: true
        index: test-index
  - match:
      test-index.defaults.index\.refresh_interval: "1s"
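Outside the YAML harness the same check is a single GET; a rough sketch (assumes a node on localhost:9200 and the `test-index` created as in the setup section):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

// Rough equivalent of the YAML assertions above, done over plain HTTP.
public final class GetDefaultSettings {
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:9200/test-index/_settings?include_defaults=true&flat_settings=true");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        StringBuilder body = new StringBuilder();
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            for (String line; (line = reader.readLine()) != null; ) {
                body.append(line);
            }
        }
        // with include_defaults the defaults section should carry index.refresh_interval ("1s")
        System.out.println(body.indexOf("index.refresh_interval") >= 0);
    }
}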
@@ -23,15 +23,3 @@ Test reset index settings:
      indices.get_settings:
        flat_settings: false
  - is_false: test-index.settings.index\.refresh_interval

# Disabled until https://github.com/elastic/elasticsearch/pull/29229 is back-ported
# That PR changed the execution path of index settings default to be on the master
# until the PR is back-ported the old master will not return default settings.
#
# - do:
#     indices.get_settings:
#       include_defaults: true
#       flat_settings: true
#       index: test-index
# - match:
#     test-index.defaults.index\.refresh_interval: "1s"
@@ -11,7 +11,6 @@ setup:

---
"Get snapshot status":

  - do:
      indices.create:
        index: test_index
@@ -32,6 +31,14 @@ setup:
        snapshot: test_snapshot

  - is_true: snapshots
  - match: { snapshots.0.snapshot: test_snapshot }
  - match: { snapshots.0.state: SUCCESS }
  - gt: { snapshots.0.stats.incremental.file_count: 0 }
  - gt: { snapshots.0.stats.incremental.size_in_bytes: 0 }
  - gt: { snapshots.0.stats.total.file_count: 0 }
  - gt: { snapshots.0.stats.total.size_in_bytes: 0 }
  - is_true: snapshots.0.stats.start_time_in_millis
  - is_true: snapshots.0.stats.time_in_millis

---
"Get missing snapshot status throws an exception":
@@ -140,7 +140,7 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
this.nodes = in.readList(NodeView::new).stream().map(n -> n.convertToDiscoveryNode()).collect(Collectors.toList());
} else {
clusterName = new ClusterName(in);
@@ -151,7 +151,7 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
out.writeList(getNodes());
} else {
clusterName.writeTo(out);
@@ -74,8 +74,8 @@ public class SnapshotIndexShardStatus extends BroadcastShardResponse implements
throw new IllegalArgumentException("Unknown stage type " + indexShardStatus.getStage());
}
this.stats = new SnapshotStats(indexShardStatus.getStartTime(), indexShardStatus.getTotalTime(),
indexShardStatus.getNumberOfFiles(), indexShardStatus.getProcessedFiles(),
indexShardStatus.getTotalSize(), indexShardStatus.getProcessedSize());
indexShardStatus.getIncrementalFileCount(), indexShardStatus.getTotalFileCount(), indexShardStatus.getProcessedFileCount(),
indexShardStatus.getIncrementalSize(), indexShardStatus.getTotalSize(), indexShardStatus.getProcessedSize());
this.failure = indexShardStatus.getFailure();
this.nodeId = nodeId;
}
@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.cluster.snapshots.status;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
@@ -34,19 +35,25 @@ public class SnapshotStats implements Streamable, ToXContentFragment {

private long startTime;
private long time;
private int numberOfFiles;
private int processedFiles;
private int incrementalFileCount;
private int totalFileCount;
private int processedFileCount;
private long incrementalSize;
private long totalSize;
private long processedSize;

SnapshotStats() {
}

SnapshotStats(long startTime, long time, int numberOfFiles, int processedFiles, long totalSize, long processedSize) {
SnapshotStats(long startTime, long time,
int incrementalFileCount, int totalFileCount, int processedFileCount,
long incrementalSize, long totalSize, long processedSize) {
this.startTime = startTime;
this.time = time;
this.numberOfFiles = numberOfFiles;
this.processedFiles = processedFiles;
this.incrementalFileCount = incrementalFileCount;
this.totalFileCount = totalFileCount;
this.processedFileCount = processedFileCount;
this.incrementalSize = incrementalSize;
this.totalSize = totalSize;
this.processedSize = processedSize;
}
@@ -66,17 +73,31 @@ public class SnapshotStats implements Streamable, ToXContentFragment {
}

/**
* Returns number of files in the snapshot
* Returns incremental file count of the snapshot
*/
public int getNumberOfFiles() {
return numberOfFiles;
public int getIncrementalFileCount() {
return incrementalFileCount;
}

/**
* Returns total number of files in the snapshot
*/
public int getTotalFileCount() {
return totalFileCount;
}

/**
* Returns number of files in the snapshot that were processed so far
*/
public int getProcessedFiles() {
return processedFiles;
public int getProcessedFileCount() {
return processedFileCount;
}

/**
* Return incremental files size of the snapshot
*/
public long getIncrementalSize() {
return incrementalSize;
}

/**
@@ -105,11 +126,16 @@ public class SnapshotStats implements Streamable, ToXContentFragment {
out.writeVLong(startTime);
out.writeVLong(time);

out.writeVInt(numberOfFiles);
out.writeVInt(processedFiles);
out.writeVInt(incrementalFileCount);
out.writeVInt(processedFileCount);

out.writeVLong(totalSize);
out.writeVLong(incrementalSize);
out.writeVLong(processedSize);

if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
out.writeVInt(totalFileCount);
out.writeVLong(totalSize);
}
}

@Override
@@ -117,21 +143,32 @@ public class SnapshotStats implements Streamable, ToXContentFragment {
startTime = in.readVLong();
time = in.readVLong();

numberOfFiles = in.readVInt();
processedFiles = in.readVInt();
incrementalFileCount = in.readVInt();
processedFileCount = in.readVInt();

totalSize = in.readVLong();
incrementalSize = in.readVLong();
processedSize = in.readVLong();

if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
totalFileCount = in.readVInt();
totalSize = in.readVLong();
} else {
totalFileCount = incrementalFileCount;
totalSize = incrementalSize;
}
}

static final class Fields {
static final String STATS = "stats";
static final String NUMBER_OF_FILES = "number_of_files";
static final String PROCESSED_FILES = "processed_files";
static final String TOTAL_SIZE_IN_BYTES = "total_size_in_bytes";
static final String TOTAL_SIZE = "total_size";
static final String PROCESSED_SIZE_IN_BYTES = "processed_size_in_bytes";
static final String PROCESSED_SIZE = "processed_size";

static final String INCREMENTAL = "incremental";
static final String PROCESSED = "processed";
static final String TOTAL = "total";

static final String FILE_COUNT = "file_count";
static final String SIZE = "size";
static final String SIZE_IN_BYTES = "size_in_bytes";

static final String START_TIME_IN_MILLIS = "start_time_in_millis";
static final String TIME_IN_MILLIS = "time_in_millis";
static final String TIME = "time";
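The shape of this change is the usual mixed-version-cluster dance: only peers on 6.4+ are sent the new total fields, and readers synthesize them from the incremental values otherwise. A simplified stand-in of the pattern (plain data streams and a boolean version flag, not the real StreamInput/StreamOutput API):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Simplified stand-in for the version-gated wire format above: pre-6.4 peers
// never see the total fields, so the reader falls back to the incremental
// values, exactly like the readFrom() else-branch.
public final class StatsWireFormat {
    static void write(DataOutputStream out, boolean peerOnOrAfter64,
                      int incrementalFileCount, long incrementalSize,
                      int totalFileCount, long totalSize) throws IOException {
        out.writeInt(incrementalFileCount);
        out.writeLong(incrementalSize);
        if (peerOnOrAfter64) {
            out.writeInt(totalFileCount);
            out.writeLong(totalSize);
        }
    }

    static long[] read(DataInputStream in, boolean peerOnOrAfter64) throws IOException {
        int incrementalFileCount = in.readInt();
        long incrementalSize = in.readLong();
        int totalFileCount = peerOnOrAfter64 ? in.readInt() : incrementalFileCount;
        long totalSize = peerOnOrAfter64 ? in.readLong() : incrementalSize;
        return new long[] {incrementalFileCount, incrementalSize, totalFileCount, totalSize};
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        write(new DataOutputStream(buf), false, 8, 4704L, 9, 9999L);
        long[] v = read(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())), false);
        System.out.println(v[2] + " files / " + v[3] + " bytes"); // 8 files / 4704 bytes
    }
}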
@@ -139,25 +176,44 @@ public class SnapshotStats implements Streamable, ToXContentFragment {

@Override
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(Fields.STATS);
builder.field(Fields.NUMBER_OF_FILES, getNumberOfFiles());
builder.field(Fields.PROCESSED_FILES, getProcessedFiles());
builder.humanReadableField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, new ByteSizeValue(getTotalSize()));
builder.humanReadableField(Fields.PROCESSED_SIZE_IN_BYTES, Fields.PROCESSED_SIZE, new ByteSizeValue(getProcessedSize()));
builder.field(Fields.START_TIME_IN_MILLIS, getStartTime());
builder.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, new TimeValue(getTime()));
builder.endObject();
return builder;
builder.startObject(Fields.STATS)
// incremental starts
.startObject(Fields.INCREMENTAL)
.field(Fields.FILE_COUNT, getIncrementalFileCount())
.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getIncrementalSize()))
// incremental ends
.endObject();

if (getProcessedFileCount() != getIncrementalFileCount()) {
// processed starts
builder.startObject(Fields.PROCESSED)
.field(Fields.FILE_COUNT, getProcessedFileCount())
.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getProcessedSize()))
// processed ends
.endObject();
}
// total starts
builder.startObject(Fields.TOTAL)
.field(Fields.FILE_COUNT, getTotalFileCount())
.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getTotalSize()))
// total ends
.endObject();
// timings stats
builder.field(Fields.START_TIME_IN_MILLIS, getStartTime())
.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, new TimeValue(getTime()));

return builder.endObject();
}

void add(SnapshotStats stats) {
numberOfFiles += stats.numberOfFiles;
processedFiles += stats.processedFiles;
incrementalFileCount += stats.incrementalFileCount;
totalFileCount += stats.totalFileCount;
processedFileCount += stats.processedFileCount;

incrementalSize += stats.incrementalSize;
totalSize += stats.totalSize;
processedSize += stats.processedSize;


if (startTime == 0) {
// First time here
startTime = stats.startTime;
@@ -60,31 +60,39 @@ public class IndexShardSnapshotStatus {
private final AtomicReference<Stage> stage;
private long startTime;
private long totalTime;
private int numberOfFiles;
private int processedFiles;
private int incrementalFileCount;
private int totalFileCount;
private int processedFileCount;
private long totalSize;
private long incrementalSize;
private long processedSize;
private long indexVersion;
private String failure;

private IndexShardSnapshotStatus(final Stage stage, final long startTime, final long totalTime,
final int numberOfFiles, final int processedFiles, final long totalSize, final long processedSize,
final int incrementalFileCount, final int totalFileCount, final int processedFileCount,
final long incrementalSize, final long totalSize, final long processedSize,
final long indexVersion, final String failure) {
this.stage = new AtomicReference<>(Objects.requireNonNull(stage));
this.startTime = startTime;
this.totalTime = totalTime;
this.numberOfFiles = numberOfFiles;
this.processedFiles = processedFiles;
this.incrementalFileCount = incrementalFileCount;
this.totalFileCount = totalFileCount;
this.processedFileCount = processedFileCount;
this.totalSize = totalSize;
this.processedSize = processedSize;
this.incrementalSize = incrementalSize;
this.indexVersion = indexVersion;
this.failure = failure;
}

public synchronized Copy moveToStarted(final long startTime, final int numberOfFiles, final long totalSize) {
public synchronized Copy moveToStarted(final long startTime, final int incrementalFileCount, final int totalFileCount,
final long incrementalSize, final long totalSize) {
if (stage.compareAndSet(Stage.INIT, Stage.STARTED)) {
this.startTime = startTime;
this.numberOfFiles = numberOfFiles;
this.incrementalFileCount = incrementalFileCount;
this.totalFileCount = totalFileCount;
this.incrementalSize = incrementalSize;
this.totalSize = totalSize;
} else {
throw new IllegalStateException("Unable to move the shard snapshot status to [STARTED]: " +
@@ -135,7 +143,7 @@ public class IndexShardSnapshotStatus {
* Increments number of processed files
*/
public synchronized void addProcessedFile(long size) {
processedFiles++;
processedFileCount++;
processedSize += size;
}

@@ -146,12 +154,14 @@ public class IndexShardSnapshotStatus {
* @return a {@link IndexShardSnapshotStatus.Copy}
*/
public synchronized IndexShardSnapshotStatus.Copy asCopy() {
return new IndexShardSnapshotStatus.Copy(stage.get(), startTime, totalTime, numberOfFiles, processedFiles, totalSize, processedSize,
indexVersion, failure);
return new IndexShardSnapshotStatus.Copy(stage.get(), startTime, totalTime,
incrementalFileCount, totalFileCount, processedFileCount,
incrementalSize, totalSize, processedSize,
indexVersion, failure);
}

public static IndexShardSnapshotStatus newInitializing() {
return new IndexShardSnapshotStatus(Stage.INIT, 0L, 0L, 0, 0, 0, 0, 0, null);
return new IndexShardSnapshotStatus(Stage.INIT, 0L, 0L, 0, 0, 0, 0, 0, 0, 0, null);
}

public static IndexShardSnapshotStatus newFailed(final String failure) {
@@ -159,12 +169,15 @@ public class IndexShardSnapshotStatus {
if (failure == null) {
throw new IllegalArgumentException("A failure description is required for a failed IndexShardSnapshotStatus");
}
return new IndexShardSnapshotStatus(Stage.FAILURE, 0L, 0L, 0, 0, 0, 0, 0, failure);
return new IndexShardSnapshotStatus(Stage.FAILURE, 0L, 0L, 0, 0, 0, 0, 0, 0, 0, failure);
}

public static IndexShardSnapshotStatus newDone(final long startTime, final long totalTime, final int files, final long size) {
public static IndexShardSnapshotStatus newDone(final long startTime, final long totalTime,
final int incrementalFileCount, final int fileCount,
final long incrementalSize, final long size) {
// The snapshot is done, which means the number of processed files is the same as the number of incremental files
return new IndexShardSnapshotStatus(Stage.DONE, startTime, totalTime, incrementalFileCount, fileCount, incrementalFileCount,
incrementalSize, size, incrementalSize, 0, null);
}

/**
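One subtlety worth spelling out (my reading of the two changes together, not text from the patch): for a completed snapshot the factory reuses the incremental values for the processed slots, which preserves the invariant the toXContent() change relies on.

// For a DONE snapshot: processedFileCount == incrementalFileCount and
// processedSize == incrementalSize, so toXContent() skips the redundant
// "processed" section (it is only emitted when processed != incremental).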
@@ -175,23 +188,28 @@ public class IndexShardSnapshotStatus {
private final Stage stage;
private final long startTime;
private final long totalTime;
private final int numberOfFiles;
private final int processedFiles;
private final int incrementalFileCount;
private final int totalFileCount;
private final int processedFileCount;
private final long totalSize;
private final long processedSize;
private final long incrementalSize;
private final long indexVersion;
private final String failure;

public Copy(final Stage stage, final long startTime, final long totalTime,
final int numberOfFiles, final int processedFiles, final long totalSize, final long processedSize,
final int incrementalFileCount, final int totalFileCount, final int processedFileCount,
final long incrementalSize, final long totalSize, final long processedSize,
final long indexVersion, final String failure) {
this.stage = stage;
this.startTime = startTime;
this.totalTime = totalTime;
this.numberOfFiles = numberOfFiles;
this.processedFiles = processedFiles;
this.incrementalFileCount = incrementalFileCount;
this.totalFileCount = totalFileCount;
this.processedFileCount = processedFileCount;
this.totalSize = totalSize;
this.processedSize = processedSize;
this.incrementalSize = incrementalSize;
this.indexVersion = indexVersion;
this.failure = failure;
}
@@ -208,12 +226,20 @@ public class IndexShardSnapshotStatus {
return totalTime;
}

public int getNumberOfFiles() {
return numberOfFiles;
public int getIncrementalFileCount() {
return incrementalFileCount;
}

public int getProcessedFiles() {
return processedFiles;
public int getTotalFileCount() {
return totalFileCount;
}

public int getProcessedFileCount() {
return processedFileCount;
}

public long getIncrementalSize() {
return incrementalSize;
}

public long getTotalSize() {
@@ -238,8 +264,10 @@ public class IndexShardSnapshotStatus {
"stage=" + stage +
", startTime=" + startTime +
", totalTime=" + totalTime +
", numberOfFiles=" + numberOfFiles +
", processedFiles=" + processedFiles +
", incrementalFileCount=" + incrementalFileCount +
", totalFileCount=" + totalFileCount +
", processedFileCount=" + processedFileCount +
", incrementalSize=" + incrementalSize +
", totalSize=" + totalSize +
", processedSize=" + processedSize +
", indexVersion=" + indexVersion +
@@ -356,25 +356,28 @@ public class BlobStoreIndexShardSnapshot implements ToXContentFragment {

private final long time;

private final int numberOfFiles;
private final int incrementalFileCount;

private final long totalSize;
private final long incrementalSize;

private final List<FileInfo> indexFiles;

/**
* Constructs new shard snapshot metadata from snapshot metadata
*
* @param snapshot       snapshot id
* @param indexVersion   index version
* @param indexFiles     list of files in the shard
* @param startTime      snapshot start time
* @param time           snapshot running time
* @param numberOfFiles  number of files that were snapshotted
* @param totalSize      total size of all files snapshotted
* @param snapshot             snapshot id
* @param indexVersion         index version
* @param indexFiles           list of files in the shard
* @param startTime            snapshot start time
* @param time                 snapshot running time
* @param incrementalFileCount incremental count of files that were snapshotted
* @param incrementalSize      incremental size of snapshot
*/
public BlobStoreIndexShardSnapshot(String snapshot, long indexVersion, List<FileInfo> indexFiles, long startTime, long time,
int numberOfFiles, long totalSize) {
public BlobStoreIndexShardSnapshot(String snapshot, long indexVersion, List<FileInfo> indexFiles,
long startTime, long time,
int incrementalFileCount,
long incrementalSize
) {
assert snapshot != null;
assert indexVersion >= 0;
this.snapshot = snapshot;
@@ -382,8 +385,8 @@ public class BlobStoreIndexShardSnapshot implements ToXContentFragment {
this.indexFiles = Collections.unmodifiableList(new ArrayList<>(indexFiles));
this.startTime = startTime;
this.time = time;
this.numberOfFiles = numberOfFiles;
this.totalSize = totalSize;
this.incrementalFileCount = incrementalFileCount;
this.incrementalSize = incrementalSize;
}

/**
@@ -395,8 +398,8 @@ public class BlobStoreIndexShardSnapshot implements ToXContentFragment {
this.indexFiles = Collections.emptyList();
this.startTime = 0;
this.time = 0;
this.numberOfFiles = 0;
this.totalSize = 0;
this.incrementalFileCount = 0;
this.incrementalSize = 0;
}

/**
@@ -441,34 +444,51 @@ public class BlobStoreIndexShardSnapshot implements ToXContentFragment {
}

/**
* Returns number of files that were snapshotted
* Returns incremental count of files that were snapshotted
*/
public int numberOfFiles() {
return numberOfFiles;
public int incrementalFileCount() {
return incrementalFileCount;
}

/**
* Returns total number of files that are referenced by this snapshot
*/
public int totalFileCount() {
return indexFiles.size();
}

/**
* Returns incremental size of files that were snapshotted
*/
public long incrementalSize() {
return incrementalSize;
}

/**
* Returns total size of all files that were snapshotted
*/
public long totalSize() {
return totalSize;
return indexFiles.stream().mapToLong(fi -> fi.metadata().length()).sum();
}

private static final String NAME = "name";
private static final String INDEX_VERSION = "index_version";
private static final String START_TIME = "start_time";
private static final String TIME = "time";
private static final String NUMBER_OF_FILES = "number_of_files";
private static final String TOTAL_SIZE = "total_size";
private static final String FILES = "files";
// for the sake of BWC keep the actual property names as in 6.x
// + there is a constraint in #fromXContent() that leads to ElasticsearchParseException("unknown parameter [incremental_file_count]");
private static final String INCREMENTAL_FILE_COUNT = "number_of_files";
private static final String INCREMENTAL_SIZE = "total_size";

private static final ParseField PARSE_NAME = new ParseField("name");
private static final ParseField PARSE_INDEX_VERSION = new ParseField("index_version", "index-version");
private static final ParseField PARSE_START_TIME = new ParseField("start_time");
private static final ParseField PARSE_TIME = new ParseField("time");
private static final ParseField PARSE_NUMBER_OF_FILES = new ParseField("number_of_files");
private static final ParseField PARSE_TOTAL_SIZE = new ParseField("total_size");
private static final ParseField PARSE_FILES = new ParseField("files");

private static final ParseField PARSE_NAME = new ParseField(NAME);
private static final ParseField PARSE_INDEX_VERSION = new ParseField(INDEX_VERSION, "index-version");
private static final ParseField PARSE_START_TIME = new ParseField(START_TIME);
private static final ParseField PARSE_TIME = new ParseField(TIME);
private static final ParseField PARSE_INCREMENTAL_FILE_COUNT = new ParseField(INCREMENTAL_FILE_COUNT);
private static final ParseField PARSE_INCREMENTAL_SIZE = new ParseField(INCREMENTAL_SIZE);
private static final ParseField PARSE_FILES = new ParseField(FILES);

/**
* Serializes shard snapshot metadata info into JSON
@@ -482,8 +502,8 @@ public class BlobStoreIndexShardSnapshot implements ToXContentFragment {
builder.field(INDEX_VERSION, indexVersion);
builder.field(START_TIME, startTime);
builder.field(TIME, time);
builder.field(NUMBER_OF_FILES, numberOfFiles);
builder.field(TOTAL_SIZE, totalSize);
builder.field(INCREMENTAL_FILE_COUNT, incrementalFileCount);
builder.field(INCREMENTAL_SIZE, incrementalSize);
builder.startArray(FILES);
for (FileInfo fileInfo : indexFiles) {
FileInfo.toXContent(fileInfo, builder, params);
@@ -503,8 +523,8 @@ public class BlobStoreIndexShardSnapshot implements ToXContentFragment {
long indexVersion = -1;
long startTime = 0;
long time = 0;
int numberOfFiles = 0;
long totalSize = 0;
int incrementalFileCount = 0;
long incrementalSize = 0;

List<FileInfo> indexFiles = new ArrayList<>();
if (parser.currentToken() == null) { // fresh parser? move to the first token
@@ -526,10 +546,10 @@ public class BlobStoreIndexShardSnapshot implements ToXContentFragment {
startTime = parser.longValue();
} else if (PARSE_TIME.match(currentFieldName, parser.getDeprecationHandler())) {
time = parser.longValue();
} else if (PARSE_NUMBER_OF_FILES.match(currentFieldName, parser.getDeprecationHandler())) {
numberOfFiles = parser.intValue();
} else if (PARSE_TOTAL_SIZE.match(currentFieldName, parser.getDeprecationHandler())) {
totalSize = parser.longValue();
} else if (PARSE_INCREMENTAL_FILE_COUNT.match(currentFieldName, parser.getDeprecationHandler())) {
incrementalFileCount = parser.intValue();
} else if (PARSE_INCREMENTAL_SIZE.match(currentFieldName, parser.getDeprecationHandler())) {
incrementalSize = parser.longValue();
} else {
throw new ElasticsearchParseException("unknown parameter [{}]", currentFieldName);
}
@@ -549,7 +569,8 @@ public class BlobStoreIndexShardSnapshot implements ToXContentFragment {
}
}
}

return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, Collections.unmodifiableList(indexFiles),
startTime, time, numberOfFiles, totalSize);
startTime, time, incrementalFileCount, incrementalSize);
}
}
@@ -818,7 +818,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) {
Context context = new Context(snapshotId, version, indexId, shardId);
BlobStoreIndexShardSnapshot snapshot = context.loadSnapshot();
return IndexShardSnapshotStatus.newDone(snapshot.startTime(), snapshot.time(), snapshot.numberOfFiles(), snapshot.totalSize());
return IndexShardSnapshotStatus.newDone(snapshot.startTime(), snapshot.time(),
snapshot.incrementalFileCount(), snapshot.totalFileCount(),
snapshot.incrementalSize(), snapshot.totalSize());
}

@Override
@@ -1139,9 +1141,11 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
final List<BlobStoreIndexShardSnapshot.FileInfo> indexCommitPointFiles = new ArrayList<>();

store.incRef();
int indexIncrementalFileCount = 0;
int indexTotalNumberOfFiles = 0;
long indexIncrementalSize = 0;
long indexTotalFileCount = 0;
try {
int indexNumberOfFiles = 0;
long indexTotalFilesSize = 0;
ArrayList<BlobStoreIndexShardSnapshot.FileInfo> filesToSnapshot = new ArrayList<>();
final Store.MetadataSnapshot metadata;
// TODO apparently we don't use the MetadataSnapshot#.recoveryDiff(...) here but we should
@@ -1182,9 +1186,13 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
}
}
}

indexTotalFileCount += md.length();
indexTotalNumberOfFiles++;

if (existingFileInfo == null) {
indexNumberOfFiles++;
indexTotalFilesSize += md.length();
indexIncrementalFileCount++;
indexIncrementalSize += md.length();
// create a new FileInfo
BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = new BlobStoreIndexShardSnapshot.FileInfo(fileNameFromGeneration(++generation), md, chunkSize());
indexCommitPointFiles.add(snapshotFileInfo);
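The bookkeeping in this loop boils down to: every file referenced by the commit counts toward the totals, while only files missing from the repository count as incremental. A toy rendering of that rule (hypothetical file names and sizes, Java 9+ collection factories):

import java.util.Map;
import java.util.Set;

// Toy illustration of the counting above: totals cover every referenced file,
// incremental covers only files absent from the previous snapshot.
public final class IncrementalCount {
    public static void main(String[] args) {
        Map<String, Long> currentCommit = Map.of("_0.cfs", 4000L, "_1.cfs", 500L, "segments_2", 204L);
        Set<String> alreadyInRepository = Set.of("_0.cfs");
        int totalFiles = 0, incrementalFiles = 0;
        long totalSize = 0, incrementalSize = 0;
        for (Map.Entry<String, Long> file : currentCommit.entrySet()) {
            totalFiles++;
            totalSize += file.getValue();
            if (!alreadyInRepository.contains(file.getKey())) {
                incrementalFiles++;
                incrementalSize += file.getValue();
            }
        }
        System.out.println(incrementalFiles + "/" + totalFiles + " files, "
            + incrementalSize + "/" + totalSize + " bytes to copy");
        // prints: 2/3 files, 704/4704 bytes to copy
    }
}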
@@ -1194,7 +1202,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
}
}

snapshotStatus.moveToStarted(startTime, indexNumberOfFiles, indexTotalFilesSize);
snapshotStatus.moveToStarted(startTime, indexIncrementalFileCount,
indexTotalNumberOfFiles, indexIncrementalSize, indexTotalFileCount);

for (BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo : filesToSnapshot) {
try {
@@ -1217,8 +1226,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
// snapshotStatus.startTime() is assigned on the same machine,
// so it's safe to use with VLong
System.currentTimeMillis() - lastSnapshotStatus.getStartTime(),
lastSnapshotStatus.getNumberOfFiles(),
lastSnapshotStatus.getTotalSize());
lastSnapshotStatus.getIncrementalFileCount(),
lastSnapshotStatus.getIncrementalSize()
);

//TODO: The time stored in snapshot doesn't include cleanup time.
logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId);
@@ -65,11 +65,11 @@ public class SnapshotBlocksIT extends ESIntegTestCase {
client().prepareIndex(OTHER_INDEX_NAME, "type").setSource("test", "init").execute().actionGet();
}

logger.info("--> register a repository");

assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME)
.setType("fs")
.setSettings(Settings.builder().put("location", randomRepoPath())));
.setSettings(Settings.builder().put("location", randomRepoPath())));

logger.info("--> verify the repository");
VerifyRepositoryResponse verifyResponse = client().admin().cluster().prepareVerifyRepository(REPOSITORY_NAME).get();
@@ -91,10 +91,14 @@ public class SnapshotStatusTests extends ESTestCase {
" \"total\" : " + totalShards + "\n" +
" },\n" +
" \"stats\" : {\n" +
" \"number_of_files\" : 0,\n" +
" \"processed_files\" : 0,\n" +
" \"total_size_in_bytes\" : 0,\n" +
" \"processed_size_in_bytes\" : 0,\n" +
" \"incremental\" : {\n" +
" \"file_count\" : 0,\n" +
" \"size_in_bytes\" : 0\n" +
" },\n" +
" \"total\" : {\n" +
" \"file_count\" : 0,\n" +
" \"size_in_bytes\" : 0\n" +
" },\n" +
" \"start_time_in_millis\" : 0,\n" +
" \"time_in_millis\" : 0\n" +
" },\n" +
@@ -109,10 +113,14 @@ public class SnapshotStatusTests extends ESTestCase {
" \"total\" : " + totalShards + "\n" +
" },\n" +
" \"stats\" : {\n" +
" \"number_of_files\" : 0,\n" +
" \"processed_files\" : 0,\n" +
" \"total_size_in_bytes\" : 0,\n" +
" \"processed_size_in_bytes\" : 0,\n" +
" \"incremental\" : {\n" +
" \"file_count\" : 0,\n" +
" \"size_in_bytes\" : 0\n" +
" },\n" +
" \"total\" : {\n" +
" \"file_count\" : 0,\n" +
" \"size_in_bytes\" : 0\n" +
" },\n" +
" \"start_time_in_millis\" : 0,\n" +
" \"time_in_millis\" : 0\n" +
" },\n" +
@@ -120,10 +128,14 @@ public class SnapshotStatusTests extends ESTestCase {
" \"" + shardId + "\" : {\n" +
" \"stage\" : \"" + shardStage.toString() + "\",\n" +
" \"stats\" : {\n" +
" \"number_of_files\" : 0,\n" +
" \"processed_files\" : 0,\n" +
" \"total_size_in_bytes\" : 0,\n" +
" \"processed_size_in_bytes\" : 0,\n" +
" \"incremental\" : {\n" +
" \"file_count\" : 0,\n" +
" \"size_in_bytes\" : 0\n" +
" },\n" +
" \"total\" : {\n" +
" \"file_count\" : 0,\n" +
" \"size_in_bytes\" : 0\n" +
" },\n" +
" \"start_time_in_millis\" : 0,\n" +
" \"time_in_millis\" : 0\n" +
" }\n" +
@@ -471,7 +471,7 @@ public class CacheTests extends ESTestCase {
keys.add(key);
} else {
// invalidate with incorrect value
cache.invalidate(key, Integer.toString(key * randomIntBetween(2, 10)));
cache.invalidate(key, Integer.toString(key + randomIntBetween(2, 10)));
}
}
}
@@ -506,7 +506,7 @@ public class CacheTests extends ESTestCase {
invalidated.add(i);
} else {
// invalidate with incorrect value
cache.invalidate(i, Integer.toString(i * randomIntBetween(2, 10)));
cache.invalidate(i, Integer.toString(i + randomIntBetween(2, 10)));
}
}
}
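This is the "[TEST] Fix minor random bug" item from the merge: for a key of 0, multiplying by anything still yields 0, so the supposedly incorrect value collided with the cached one and the invalidation succeeded spuriously. In miniature:

// Why the multiply version was not always "incorrect":
int key = 0;
String cached = Integer.toString(key);              // "0"
String wrongByMultiply = Integer.toString(key * 7); // also "0" -> accidentally matches
String wrongByAdd = Integer.toString(key + 7);      // "7" -> guaranteed to differ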
@@ -23,10 +23,12 @@ import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.IntSet;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStats;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
@@ -83,7 +85,12 @@ import org.elasticsearch.test.TestCustomMetaData;
import org.elasticsearch.test.rest.FakeRestRequest;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -102,6 +109,7 @@ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
@ -1019,6 +1027,129 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
|
||||
assertThat(snapshots.get(0).getState().completed(), equalTo(true));
|
||||
}
|
||||
|
||||
public void testSnapshotTotalAndIncrementalSizes() throws IOException {
|
||||
Client client = client();
|
||||
final String indexName = "test-blocks-1";
|
||||
final String repositoryName = "repo-" + indexName;
|
||||
final String snapshot0 = "snapshot-0";
|
||||
final String snapshot1 = "snapshot-1";
|
        createIndex(indexName);

        int docs = between(10, 100);
        for (int i = 0; i < docs; i++) {
            client.prepareIndex(indexName, "type").setSource("test", "init").execute().actionGet();
        }

        logger.info("--> register a repository");

        final Path repoPath = randomRepoPath();
        assertAcked(client.admin().cluster().preparePutRepository(repositoryName)
            .setType("fs")
            .setSettings(Settings.builder().put("location", repoPath)));

        logger.info("--> create a snapshot");
        client.admin().cluster().prepareCreateSnapshot(repositoryName, snapshot0)
            .setIncludeGlobalState(true)
            .setWaitForCompletion(true)
            .get();

        SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus(repositoryName)
            .setSnapshots(snapshot0)
            .get();

        List<SnapshotStatus> snapshots = response.getSnapshots();

        List<Path> snapshot0Files = scanSnapshotFolder(repoPath);
        assertThat(snapshots, hasSize(1));

        final int snapshot0FileCount = snapshot0Files.size();
        final long snapshot0FileSize = calculateTotalFilesSize(snapshot0Files);

        SnapshotStats stats = snapshots.get(0).getStats();

        assertThat(stats.getTotalFileCount(), is(snapshot0FileCount));
        assertThat(stats.getTotalSize(), is(snapshot0FileSize));

        assertThat(stats.getIncrementalFileCount(), equalTo(snapshot0FileCount));
        assertThat(stats.getIncrementalSize(), equalTo(snapshot0FileSize));

        assertThat(stats.getIncrementalFileCount(), equalTo(stats.getProcessedFileCount()));
        assertThat(stats.getIncrementalSize(), equalTo(stats.getProcessedSize()));

        // add a few docs - fewer than initially
        docs = between(1, 5);
        for (int i = 0; i < docs; i++) {
            client.prepareIndex(indexName, "type").setSource("test", "test" + i).execute().actionGet();
        }

        // create another snapshot
        // total size has to grow and has to be equal to files on fs
        assertThat(client.admin().cluster()
                .prepareCreateSnapshot(repositoryName, snapshot1)
                .setWaitForCompletion(true).get().status(),
            equalTo(RestStatus.OK));

        // drop the 1st snapshot to avoid miscalculation, as a snapshot reuses some files of the previous snapshot
        assertTrue(client.admin().cluster()
            .prepareDeleteSnapshot(repositoryName, snapshot0)
            .get().isAcknowledged());

        response = client.admin().cluster().prepareSnapshotStatus(repositoryName)
            .setSnapshots(snapshot1)
            .get();

        final List<Path> snapshot1Files = scanSnapshotFolder(repoPath);

        final int snapshot1FileCount = snapshot1Files.size();
        final long snapshot1FileSize = calculateTotalFilesSize(snapshot1Files);

        snapshots = response.getSnapshots();

        SnapshotStats anotherStats = snapshots.get(0).getStats();

        ArrayList<Path> snapshotFilesDiff = new ArrayList<>(snapshot1Files);
        snapshotFilesDiff.removeAll(snapshot0Files);

        assertThat(anotherStats.getIncrementalFileCount(), equalTo(snapshotFilesDiff.size()));
        assertThat(anotherStats.getIncrementalSize(), equalTo(calculateTotalFilesSize(snapshotFilesDiff)));

        assertThat(anotherStats.getIncrementalFileCount(), equalTo(anotherStats.getProcessedFileCount()));
        assertThat(anotherStats.getIncrementalSize(), equalTo(anotherStats.getProcessedSize()));

        assertThat(stats.getTotalSize(), lessThan(anotherStats.getTotalSize()));
        assertThat(stats.getTotalFileCount(), lessThan(anotherStats.getTotalFileCount()));

        assertThat(anotherStats.getTotalFileCount(), is(snapshot1FileCount));
        assertThat(anotherStats.getTotalSize(), is(snapshot1FileSize));
    }

    private long calculateTotalFilesSize(List<Path> files) {
        return files.stream().mapToLong(f -> {
            try {
                return Files.size(f);
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        }).sum();
    }

    private List<Path> scanSnapshotFolder(Path repoPath) throws IOException {
        List<Path> files = new ArrayList<>();
        Files.walkFileTree(repoPath, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                if (file.getFileName().toString().startsWith("__")) {
                    files.add(file);
                }
                return super.visitFile(file, attrs);
            }
        });
        return files;
    }

    public static class SnapshottableMetadata extends TestCustomMetaData {
        public static final String TYPE = "test_snapshottable";
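
Aside on the test above: the size/count totals it exercises are public API on SnapshotStats, so they can be read straight off a snapshot status request. A minimal sketch, assuming a client wired to a cluster where a repository "repo" and a snapshot "snap" already exist (both names hypothetical):

    SnapshotsStatusResponse resp = client.admin().cluster()
            .prepareSnapshotStatus("repo")
            .setSnapshots("snap")
            .get();
    SnapshotStats stats = resp.getSnapshots().get(0).getStats();
    long totalBytes  = stats.getTotalSize();            // every file the snapshot references
    int  totalFiles  = stats.getTotalFileCount();
    long copiedBytes = stats.getIncrementalSize();      // only the files this snapshot had to copy
    int  copiedFiles = stats.getIncrementalFileCount();

For the first snapshot of an index the incremental and total figures coincide; a later snapshot reuses unchanged segment files, which is exactly what the assertions above verify.
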
@@ -122,6 +122,7 @@ import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAlloc
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING;
+import static org.elasticsearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.shard.IndexShardTests.getEngineFromShard;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;

@@ -2070,7 +2071,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
        SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test").get().getSnapshots().get(0);
        List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
        for (SnapshotIndexShardStatus status : shards) {
-           assertThat(status.getStats().getProcessedFiles(), greaterThan(1));
+           assertThat(status.getStats().getProcessedFileCount(), greaterThan(1));
        }
    }

@@ -2082,7 +2083,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
        SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-1").get().getSnapshots().get(0);
        List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
        for (SnapshotIndexShardStatus status : shards) {
-           assertThat(status.getStats().getProcessedFiles(), equalTo(0));
+           assertThat(status.getStats().getProcessedFileCount(), equalTo(0));
        }
    }

@@ -2098,9 +2099,9 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
            // we flush before the snapshot such that we have to process the segments_N files plus the .del file
            if (INDEX_SOFT_DELETES_SETTING.get(indexSettings)) {
                // soft-delete generates DV files.
-               assertThat(status.getStats().getProcessedFiles(), greaterThan(2));
+               assertThat(status.getStats().getProcessedFileCount(), greaterThan(2));
            } else {
-               assertThat(status.getStats().getProcessedFiles(), equalTo(2));
+               assertThat(status.getStats().getProcessedFileCount(), equalTo(2));
            }
        }
    }

@@ -625,7 +625,7 @@ public abstract class IndexShardTestCase extends ESTestCase {

        final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy();
        assertEquals(IndexShardSnapshotStatus.Stage.DONE, lastSnapshotStatus.getStage());
-       assertEquals(shard.snapshotStoreMetadata().size(), lastSnapshotStatus.getNumberOfFiles());
+       assertEquals(shard.snapshotStoreMetadata().size(), lastSnapshotStatus.getTotalFileCount());
        assertNull(lastSnapshotStatus.getFailure());
    }

@@ -123,13 +123,15 @@ public class XPackInfoResponse extends ActionResponse {

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-           return builder.startObject()
-               .field("uid", uid)
-               .field("type", type)
-               .field("mode", mode)
-               .field("status", status.label())
-               .timeField("expiry_date_in_millis", "expiry_date", expiryDate)
-               .endObject();
+           builder.startObject()
+               .field("uid", uid)
+               .field("type", type)
+               .field("mode", mode)
+               .field("status", status.label());
+           if (expiryDate != LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS) {
+               builder.timeField("expiry_date_in_millis", "expiry_date", expiryDate);
+           }
+           return builder.endObject();
        }

        public void writeTo(StreamOutput out) throws IOException {
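
In effect, a basic self-generated license, whose expiry is the BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS sentinel, now serializes without its placeholder expiry, while every other license keeps the expiry fields. Illustrative output for the two cases (uid and timestamp values invented; the human-readable expiry_date field appears when human-readable output is requested):

    {"uid":"...","type":"basic","mode":"basic","status":"active"}
    {"uid":"...","type":"gold","mode":"gold","status":"active","expiry_date_in_millis":1546300800000,"expiry_date":"2019-01-01T00:00:00.000Z"}
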
@@ -52,24 +52,20 @@ dependencies {
        compile (xpackProject('plugin:sql:sql-shared-client')) {
            transitive = false
        }
-       compile (xpackProject('plugin:sql:sql-proto')) {
+       compile (xpackProject('plugin:sql:sql-shared-proto')) {
            transitive = false
        }
    } else {
        bundled (xpackProject('plugin:sql:sql-shared-client')) {
            transitive = false
        }
-       bundled (xpackProject('plugin:sql:sql-proto')) {
+       bundled (xpackProject('plugin:sql:sql-shared-proto')) {
            transitive = false
        }
    }
-   compile (project(':server')) {
-       transitive = false
-   }
    compile (project(':libs:x-content')) {
        transitive = false
    }
-   compile "org.apache.lucene:lucene-core:${versions.lucene}"
    compile 'joda-time:joda-time:2.9.9'
    compile project(':libs:elasticsearch-core')
    runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
@@ -80,15 +76,13 @@ dependencies {
}

dependencyLicenses {
-   mapping from: /sql-proto.*/, to: 'elasticsearch'
+   mapping from: /sql-shared-proto.*/, to: 'elasticsearch'
    mapping from: /sql-shared-client.*/, to: 'elasticsearch'
    mapping from: /jackson-.*/, to: 'jackson'
-   mapping from: /lucene-.*/, to: 'lucene'
    mapping from: /elasticsearch-core.*/, to: 'elasticsearch'
-   ignoreSha 'sql-proto'
+   ignoreSha 'sql-shared-proto'
    ignoreSha 'sql-shared-client'
-   ignoreSha 'elasticsearch'
    ignoreSha 'elasticsearch-core'
}

/*
@@ -1,475 +0,0 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.



Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was
derived from unicode conversion examples available at
http://www.unicode.org/Public/PROGRAMS/CVTUTF. Here is the copyright
from those sources:

/*
 * Copyright 2001-2004 Unicode, Inc.
 *
 * Disclaimer
 *
 * This source code is provided as is by Unicode, Inc. No claims are
 * made as to fitness for any particular purpose. No warranties of any
 * kind are expressed or implied. The recipient agrees to determine
 * applicability of information provided. If this file has been
 * purchased on magnetic or optical media from Unicode, Inc., the
 * sole remedy for any claim will be exchange of defective media
 * within 90 days of receipt.
 *
 * Limitations on Rights to Redistribute This Code
 *
 * Unicode, Inc. hereby grants the right to freely use the information
 * supplied in this file in the creation of products supporting the
 * Unicode Standard, and to make copies of this file in any form
 * for internal or external distribution as long as this notice
 * remains attached.
 */


Some code in core/src/java/org/apache/lucene/util/ArrayUtil.java was
derived from Python 2.4.2 sources available at
http://www.python.org. Full license is here:

  http://www.python.org/download/releases/2.4.2/license/

Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was
derived from Python 3.1.2 sources available at
http://www.python.org. Full license is here:

  http://www.python.org/download/releases/3.1.2/license/

Some code in core/src/java/org/apache/lucene/util/automaton was
derived from Brics automaton sources available at
www.brics.dk/automaton/. Here is the copyright from those sources:

/*
 * Copyright (c) 2001-2009 Anders Moeller
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

The levenshtein automata tables in core/src/java/org/apache/lucene/util/automaton
were automatically generated with the moman/finenight FSA package.
Here is the copyright for those sources:

# Copyright (c) 2010, Jean-Philippe Barrette-LaPierre, <jpb@rrette.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.

Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was
derived from ICU (http://www.icu-project.org)
The full license is available here:
  http://source.icu-project.org/repos/icu/icu/trunk/license.html

/*
 * Copyright (C) 1999-2010, International Business Machines
 * Corporation and others. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, and/or sell copies of the
 * Software, and to permit persons to whom the Software is furnished to do so,
 * provided that the above copyright notice(s) and this permission notice appear
 * in all copies of the Software and that both the above copyright notice(s) and
 * this permission notice appear in supporting documentation.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE
 * LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR
 * ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
 * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * Except as contained in this notice, the name of a copyright holder shall not
 * be used in advertising or otherwise to promote the sale, use or other
 * dealings in this Software without prior written authorization of the
 * copyright holder.
 */

The following license applies to the Snowball stemmers:

Copyright (c) 2001, Dr Martin Porter
Copyright (c) 2002, Richard Boulton
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

  * Redistributions of source code must retain the above copyright notice,
  * this list of conditions and the following disclaimer.
  * Redistributions in binary form must reproduce the above copyright
  * notice, this list of conditions and the following disclaimer in the
  * documentation and/or other materials provided with the distribution.
  * Neither the name of the copyright holders nor the names of its contributors
  * may be used to endorse or promote products derived from this software
  * without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

The following license applies to the KStemmer:

Copyright © 2003,
Center for Intelligent Information Retrieval,
University of Massachusetts, Amherst.
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

3. The names "Center for Intelligent Information Retrieval" and
"University of Massachusetts" must not be used to endorse or promote products
derived from this software without prior written permission. To obtain
permission, contact info@ciir.cs.umass.edu.

THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF MASSACHUSETTS AND OTHER CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.

The following license applies to the Morfologik project:

Copyright (c) 2006 Dawid Weiss
Copyright (c) 2007-2011 Dawid Weiss, Marcin Miłkowski
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.

    * Neither the name of Morfologik nor the names of its contributors
    may be used to endorse or promote products derived from this software
    without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

---

The dictionary comes from Morfologik project. Morfologik uses data from
Polish ispell/myspell dictionary hosted at http://www.sjp.pl/slownik/en/ and
is licenced on the terms of (inter alia) LGPL and Creative Commons
ShareAlike. The part-of-speech tags were added in Morfologik project and
are not found in the data from sjp.pl. The tagset is similar to IPI PAN
tagset.

---

The following license applies to the Morfeusz project,
used by org.apache.lucene.analysis.morfologik.

BSD-licensed dictionary of Polish (SGJP)
http://sgjp.pl/morfeusz/

Copyright © 2011 Zygmunt Saloni, Włodzimierz Gruszczyński,
Marcin Woliński, Robert Wołosz

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the
   distribution.

THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS “AS IS” AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -1,192 +0,0 @@
Apache Lucene
Copyright 2014 The Apache Software Foundation

This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

Includes software from other Apache Software Foundation projects,
including, but not limited to:
 - Apache Ant
 - Apache Jakarta Regexp
 - Apache Commons
 - Apache Xerces

ICU4J, (under analysis/icu) is licensed under an MIT styles license
and Copyright (c) 1995-2008 International Business Machines Corporation and others

Some data files (under analysis/icu/src/data) are derived from Unicode data such
as the Unicode Character Database. See http://unicode.org/copyright.html for more
details.

Brics Automaton (under core/src/java/org/apache/lucene/util/automaton) is
BSD-licensed, created by Anders Møller. See http://www.brics.dk/automaton/

The levenshtein automata tables (under core/src/java/org/apache/lucene/util/automaton) were
automatically generated with the moman/finenight FSA library, created by
Jean-Philippe Barrette-LaPierre. This library is available under an MIT license,
see http://sites.google.com/site/rrettesite/moman and
http://bitbucket.org/jpbarrette/moman/overview/

The class org.apache.lucene.util.WeakIdentityMap was derived from
the Apache CXF project and is Apache License 2.0.

The Google Code Prettify is Apache License 2.0.
See http://code.google.com/p/google-code-prettify/

JUnit (junit-4.10) is licensed under the Common Public License v. 1.0
See http://junit.sourceforge.net/cpl-v10.html

This product includes code (JaspellTernarySearchTrie) from Java Spelling
Checking Package (jaspell): http://jaspell.sourceforge.net/
License: The BSD License (http://www.opensource.org/licenses/bsd-license.php)

The snowball stemmers in
  analysis/common/src/java/net/sf/snowball
were developed by Martin Porter and Richard Boulton.
The snowball stopword lists in
  analysis/common/src/resources/org/apache/lucene/analysis/snowball
were developed by Martin Porter and Richard Boulton.
The full snowball package is available from
  http://snowball.tartarus.org/

The KStem stemmer in
  analysis/common/src/org/apache/lucene/analysis/en
was developed by Bob Krovetz and Sergio Guzman-Lara (CIIR-UMass Amherst)
under the BSD-license.

The Arabic, Persian, Romanian, Bulgarian, Hindi and Bengali analyzers (common) come with a default
stopword list that is BSD-licensed created by Jacques Savoy. These files reside in:
analysis/common/src/resources/org/apache/lucene/analysis/ar/stopwords.txt,
analysis/common/src/resources/org/apache/lucene/analysis/fa/stopwords.txt,
analysis/common/src/resources/org/apache/lucene/analysis/ro/stopwords.txt,
analysis/common/src/resources/org/apache/lucene/analysis/bg/stopwords.txt,
analysis/common/src/resources/org/apache/lucene/analysis/hi/stopwords.txt,
analysis/common/src/resources/org/apache/lucene/analysis/bn/stopwords.txt
See http://members.unine.ch/jacques.savoy/clef/index.html.

The German, Spanish, Finnish, French, Hungarian, Italian, Portuguese, Russian and Swedish light stemmers
(common) are based on BSD-licensed reference implementations created by Jacques Savoy and
Ljiljana Dolamic. These files reside in:
analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemmer.java

The Stempel analyzer (stempel) includes BSD-licensed software developed
by the Egothor project http://egothor.sf.net/, created by Leo Galambos, Martin Kvapil,
and Edmond Nolan.

The Polish analyzer (stempel) comes with a default
stopword list that is BSD-licensed created by the Carrot2 project. The file resides
in stempel/src/resources/org/apache/lucene/analysis/pl/stopwords.txt.
See http://project.carrot2.org/license.html.

The SmartChineseAnalyzer source code (smartcn) was
provided by Xiaoping Gao and copyright 2009 by www.imdict.net.

WordBreakTestUnicode_*.java (under modules/analysis/common/src/test/)
is derived from Unicode data such as the Unicode Character Database.
See http://unicode.org/copyright.html for more details.

The Morfologik analyzer (morfologik) includes BSD-licensed software
developed by Dawid Weiss and Marcin Miłkowski (http://morfologik.blogspot.com/).

Morfologik uses data from Polish ispell/myspell dictionary
(http://www.sjp.pl/slownik/en/) licenced on the terms of (inter alia)
LGPL and Creative Commons ShareAlike.

Morfologic includes data from BSD-licensed dictionary of Polish (SGJP)
(http://sgjp.pl/morfeusz/)

Servlet-api.jar and javax.servlet-*.jar are under the CDDL license, the original
source code for this can be found at http://www.eclipse.org/jetty/downloads.php

===========================================================================
Kuromoji Japanese Morphological Analyzer - Apache Lucene Integration
===========================================================================

This software includes a binary and/or source version of data from

  mecab-ipadic-2.7.0-20070801

which can be obtained from

  http://atilika.com/releases/mecab-ipadic/mecab-ipadic-2.7.0-20070801.tar.gz

or

  http://jaist.dl.sourceforge.net/project/mecab/mecab-ipadic/2.7.0-20070801/mecab-ipadic-2.7.0-20070801.tar.gz

===========================================================================
mecab-ipadic-2.7.0-20070801 Notice
===========================================================================

Nara Institute of Science and Technology (NAIST),
the copyright holders, disclaims all warranties with regard to this
software, including all implied warranties of merchantability and
fitness, in no event shall NAIST be liable for
any special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether in an
action of contract, negligence or other tortuous action, arising out
of or in connection with the use or performance of this software.

A large portion of the dictionary entries
originate from ICOT Free Software. The following conditions for ICOT
Free Software applies to the current dictionary as well.

Each User may also freely distribute the Program, whether in its
original form or modified, to any third party or parties, PROVIDED
that the provisions of Section 3 ("NO WARRANTY") will ALWAYS appear
on, or be attached to, the Program, which is distributed substantially
in the same form as set out herein and that such intended
distribution, if actually made, will neither violate or otherwise
contravene any of the laws and regulations of the countries having
jurisdiction over the User or the intended distribution itself.

NO WARRANTY

The program was produced on an experimental basis in the course of the
research and development conducted during the project and is provided
to users as so produced on an experimental basis. Accordingly, the
program is provided without any warranty whatsoever, whether express,
implied, statutory or otherwise. The term "warranty" used herein
includes, but is not limited to, any warranty of the quality,
performance, merchantability and fitness for a particular purpose of
the program and the nonexistence of any infringement or violation of
any right of any third party.

Each user of the program will agree and understand, and be deemed to
have agreed and understood, that there is no warranty whatsoever for
the program and, accordingly, the entire risk arising from or
otherwise connected with the program is assumed by the user.

Therefore, neither ICOT, the copyright holder, or any other
organization that participated in or was otherwise related to the
development of the program and their respective officials, directors,
officers and other employees shall be held liable for any and all
damages, including, without limitation, general, special, incidental
and consequential damages, arising out of or otherwise in connection
with the use or inability to use the program or any product, material
or result produced or otherwise obtained by using the program,
regardless of whether they have been advised of, or otherwise had
knowledge of, the possibility of such damages at any time during the
project or thereafter. Each user will be deemed to have agreed to the
foregoing by his or her commencement of use of the program. The term
"use" as used herein includes, but is not limited to, the use,
modification, copying and distribution of the program and the
production of secondary products from the program.

In the case where the program, whether in its original form or
modified, was distributed or delivered to or received by a user from
any person, organization or entity other than ICOT, unless it makes or
grants independently of ICOT any specific warranty to the user in
writing, such person, organization or entity, will also be exempted
from and not be held liable to the user for any such damages as noted
above as far as the program is concerned.
@@ -1 +0,0 @@
-e118e4d05070378516b9055184b74498ba528dee
@@ -10,9 +10,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse;
import org.elasticsearch.xpack.sql.proto.Mode;
import org.joda.time.DateTime;
import org.joda.time.ReadableDateTime;

import java.sql.JDBCType;

@@ -51,7 +50,11 @@ public class TypeConverterTests extends ESTestCase {
        XContentBuilder builder = JsonXContent.contentBuilder();
        builder.startObject();
        builder.field("value");
-       SqlQueryResponse.value(builder, Mode.JDBC, value);
+       if (value instanceof ReadableDateTime) {
+           builder.value(((ReadableDateTime) value).getMillis());
+       } else {
+           builder.value(value);
+       }
        builder.endObject();
        builder.close();
        Object copy = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2().get("value");

@@ -8,7 +8,6 @@ package org.elasticsearch.xpack.sql.cli.command;
import org.elasticsearch.xpack.sql.client.HttpClient;
import org.elasticsearch.xpack.sql.client.shared.ClientException;
import org.elasticsearch.xpack.sql.client.shared.Version;
-import org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest;
import org.elasticsearch.xpack.sql.proto.MainResponse;
import org.elasticsearch.xpack.sql.proto.Protocol;

@@ -28,7 +28,7 @@ public class CliSessionTests extends ESTestCase {
    public void testProperConnection() throws Exception {
        HttpClient httpClient = mock(HttpClient.class);
        when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), org.elasticsearch.Version.CURRENT.toString(),
-           ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT));
+           ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID()));
        CliSession cliSession = new CliSession(httpClient);
        cliSession.checkConnection();
        verify(httpClient, times(1)).serverInfo();
@@ -58,7 +58,7 @@ public class CliSessionTests extends ESTestCase {
        }
        when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5),
            org.elasticsearch.Version.fromString(major + "." + minor + ".23").toString(),
-           ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT));
+           ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID()));
        CliSession cliSession = new CliSession(httpClient);
        expectThrows(ClientException.class, cliSession::checkConnection);
        verify(httpClient, times(1)).serverInfo();

@@ -36,7 +36,7 @@ public class ServerInfoCliCommandTests extends ESTestCase {
        HttpClient client = mock(HttpClient.class);
        CliSession cliSession = new CliSession(client);
        when(client.serverInfo()).thenReturn(new MainResponse("my_node", "1.2.3",
-           new ClusterName("my_cluster").value(), UUIDs.randomBase64UUID(), Build.CURRENT));
+           new ClusterName("my_cluster").value(), UUIDs.randomBase64UUID()));
        ServerInfoCliCommand cliCommand = new ServerInfoCliCommand();
        assertTrue(cliCommand.handle(testTerminal, cliSession, "info"));
        assertEquals(testTerminal.toString(), "Node:<em>my_node</em> Cluster:<em>my_cluster</em> Version:<em>1.2.3</em>\n");

@@ -24,6 +24,7 @@ dependencies {
    compile (project(':libs:x-content')) {
        transitive = false
    }
+   compile xpackProject('plugin:sql:sql-shared-proto')
    compile "org.apache.lucene:lucene-core:${versions.lucene}"
    compile 'joda-time:joda-time:2.9.9'
    runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"

@@ -10,14 +10,14 @@ apply plugin: 'elasticsearch.build'
description = 'Code shared between jdbc and cli'

dependencies {
-   compile xpackProject('plugin:sql:sql-proto')
+   compile xpackProject('plugin:sql:sql-shared-proto')
    compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
    testCompile "org.elasticsearch.test:framework:${version}"
}

dependencyLicenses {
    mapping from: /jackson-.*/, to: 'jackson'
-   mapping from: /sql-proto.*/, to: 'elasticsearch'
+   mapping from: /sql-shared-proto.*/, to: 'elasticsearch'
    mapping from: /elasticsearch-cli.*/, to: 'elasticsearch'
    mapping from: /elasticsearch-core.*/, to: 'elasticsearch'
    mapping from: /lucene-.*/, to: 'lucene'

@@ -5,14 +5,12 @@
 */
package org.elasticsearch.xpack.sql.client;

-import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
-import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.core.internal.io.Streams;
@@ -30,6 +28,8 @@ import org.elasticsearch.xpack.sql.proto.SqlClearCursorResponse;
import org.elasticsearch.xpack.sql.proto.SqlQueryRequest;
import org.elasticsearch.xpack.sql.proto.SqlQueryResponse;

+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.security.AccessController;

@@ -67,8 +67,8 @@ public class HttpClient {
    public SqlQueryResponse queryInit(String query, int fetchSize) throws SQLException {
        // TODO allow customizing the time zone - this is what session set/reset/get should be about
        SqlQueryRequest sqlRequest = new SqlQueryRequest(Mode.PLAIN, query, Collections.emptyList(), null,
-               TimeZone.getTimeZone("UTC"), fetchSize, TimeValue.timeValueMillis(cfg.queryTimeout()),
-               TimeValue.timeValueMillis(cfg.pageTimeout()));
+           TimeZone.getTimeZone("UTC"), fetchSize, TimeValue.timeValueMillis(cfg.queryTimeout()),
+           TimeValue.timeValueMillis(cfg.pageTimeout()));
        return query(sqlRequest);
    }

@@ -84,83 +84,92 @@ public class HttpClient {

    public boolean queryClose(String cursor) throws SQLException {
        SqlClearCursorResponse response = post(Protocol.CLEAR_CURSOR_REST_ENDPOINT,
-               new SqlClearCursorRequest(Mode.PLAIN, cursor),
-               SqlClearCursorResponse::fromXContent);
+           new SqlClearCursorRequest(Mode.PLAIN, cursor),
+           SqlClearCursorResponse::fromXContent);
        return response.isSucceeded();
    }

    private <Request extends AbstractSqlRequest, Response> Response post(String path, Request request,
                                                                         CheckedFunction<XContentParser, Response, IOException> responseParser)
            throws SQLException {
-       BytesReference requestBytes = toXContent(request);
+       byte[] requestBytes = toXContent(request);
        String query = "error_trace&mode=" + request.mode();
-       Tuple<XContentType, BytesReference> response =
-           AccessController.doPrivileged((PrivilegedAction<ResponseOrException<Tuple<XContentType, BytesReference>>>) () ->
+       Tuple<XContentType, byte[]> response =
+           AccessController.doPrivileged((PrivilegedAction<ResponseOrException<Tuple<XContentType, byte[]>>>) () ->
                JreHttpUrlConnection.http(path, query, cfg, con ->
-                   con.request(
-                       requestBytes::writeTo,
-                       this::readFrom,
-                       "POST"
-                   )
+                   con.request(
+                       (out) -> out.write(requestBytes),
+                       this::readFrom,
+                       "POST"
+                   )
                )).getResponseOrThrowException();
        return fromXContent(response.v1(), response.v2(), responseParser);
    }

    private boolean head(String path, long timeoutInMs) throws SQLException {
        ConnectionConfiguration pingCfg = new ConnectionConfiguration(cfg.baseUri(), cfg.connectionString(),
-               cfg.connectTimeout(), timeoutInMs, cfg.queryTimeout(), cfg.pageTimeout(), cfg.pageSize(),
-               cfg.authUser(), cfg.authPass(), cfg.sslConfig(), cfg.proxyConfig());
+           cfg.connectTimeout(), timeoutInMs, cfg.queryTimeout(), cfg.pageTimeout(), cfg.pageSize(),
+           cfg.authUser(), cfg.authPass(), cfg.sslConfig(), cfg.proxyConfig());
        try {
            return AccessController.doPrivileged((PrivilegedAction<Boolean>) () ->
-                   JreHttpUrlConnection.http(path, "error_trace", pingCfg, JreHttpUrlConnection::head));
+               JreHttpUrlConnection.http(path, "error_trace", pingCfg, JreHttpUrlConnection::head));
        } catch (ClientException ex) {
            throw new SQLException("Cannot ping server", ex);
        }
    }

    private <Response> Response get(String path, CheckedFunction<XContentParser, Response, IOException> responseParser)
-           throws SQLException {
-       Tuple<XContentType, BytesReference> response =
-           AccessController.doPrivileged((PrivilegedAction<ResponseOrException<Tuple<XContentType, BytesReference>>>) () ->
+       throws SQLException {
+       Tuple<XContentType, byte[]> response =
+           AccessController.doPrivileged((PrivilegedAction<ResponseOrException<Tuple<XContentType, byte[]>>>) () ->
                JreHttpUrlConnection.http(path, "error_trace", cfg, con ->
-                   con.request(
-                       null,
-                       this::readFrom,
-                       "GET"
-                   )
+                   con.request(
+                       null,
+                       this::readFrom,
+                       "GET"
+                   )
                )).getResponseOrThrowException();
        return fromXContent(response.v1(), response.v2(), responseParser);
    }

-   private static <Request extends ToXContent> BytesReference toXContent(Request xContent) {
-       try {
-           return XContentHelper.toXContent(xContent, REQUEST_BODY_CONTENT_TYPE, false);
+   private static <Request extends ToXContent> byte[] toXContent(Request xContent) {
+       try (ByteArrayOutputStream buffer = new ByteArrayOutputStream()) {
+           try (XContentBuilder xContentBuilder = new XContentBuilder(REQUEST_BODY_CONTENT_TYPE.xContent(), buffer)) {
+               if (xContent.isFragment()) {
+                   xContentBuilder.startObject();
+               }
+               xContent.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
+               if (xContent.isFragment()) {
+                   xContentBuilder.endObject();
+               }
+           }
+           return buffer.toByteArray();
        } catch (IOException ex) {
            throw new ClientException("Cannot serialize request", ex);
        }
    }

-   private Tuple<XContentType, BytesReference> readFrom(InputStream inputStream, Function<String, String> headers) {
+   private Tuple<XContentType, byte[]> readFrom(InputStream inputStream, Function<String, String> headers) {
        String contentType = headers.apply("Content-Type");
        XContentType xContentType = XContentType.fromMediaTypeOrFormat(contentType);
        if (xContentType == null) {
            throw new IllegalStateException("Unsupported Content-Type: " + contentType);
        }
-       BytesStreamOutput out = new BytesStreamOutput();
+       ByteArrayOutputStream out = new ByteArrayOutputStream();
        try {
            Streams.copy(inputStream, out);
        } catch (IOException ex) {
            throw new ClientException("Cannot deserialize response", ex);
        }
-       return new Tuple<>(xContentType, out.bytes());
+       return new Tuple<>(xContentType, out.toByteArray());
    }

-   private <Response> Response fromXContent(XContentType xContentType, BytesReference bytesReference,
+   private <Response> Response fromXContent(XContentType xContentType, byte[] bytesReference,
                                             CheckedFunction<XContentParser, Response, IOException> responseParser) {
-       try (InputStream stream = bytesReference.streamInput();
+       try (InputStream stream = new ByteArrayInputStream(bytesReference);
            XContentParser parser = xContentType.xContent().createParser(registry,
-                LoggingDeprecationHandler.INSTANCE, stream)) {
+                DeprecationHandler.THROW_UNSUPPORTED_OPERATION, stream)) {
            return responseParser.apply(parser);
        } catch (IOException ex) {
            throw new ClientException("Cannot parse response", ex);
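
The net effect of this hunk: request and response bodies now travel as plain byte[] produced and consumed by the standalone x-content classes, so the client no longer needs the server-only BytesReference/BytesStreamOutput types, which is what lets the jdbc/cli builds above drop :server. A minimal self-contained sketch of the same round trip, using only calls visible in this diff; the class name and the example field are invented:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;

    import org.elasticsearch.common.xcontent.DeprecationHandler;
    import org.elasticsearch.common.xcontent.NamedXContentRegistry;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.XContentType;

    public class ByteArrayRoundTrip {
        public static void main(String[] args) throws IOException {
            // Serialize: the builder writes JSON straight into a plain byte buffer,
            // mirroring the new toXContent() above.
            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            try (XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), buffer)) {
                builder.startObject().field("query", "SELECT 1").endObject();
            }
            byte[] bytes = buffer.toByteArray();

            // Deserialize: wrap the byte[] in an InputStream for the parser,
            // mirroring the new readFrom()/fromXContent() above.
            try (InputStream in = new ByteArrayInputStream(bytes);
                 XContentParser parser = XContentType.JSON.xContent().createParser(
                         NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, in)) {
                System.out.println(parser.map()); // prints {query=SELECT 1}
            }
        }
    }
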
x-pack/plugin/sql/sql-shared-proto/build.gradle (new file, 35 lines)
@ -0,0 +1,35 @@
|
||||
|
||||
/*
|
||||
* This project contains XContent protocol classes shared between server and http client
|
||||
*/
|
||||
|
||||
import org.elasticsearch.gradle.precommit.PrecommitTasks
|
||||
|
||||
apply plugin: 'elasticsearch.build'
|
||||
|
||||
description = 'Request and response objects shared by the cli, jdbc ' +
|
||||
'and the Elasticsearch plugin'
|
||||
|
||||
dependencies {
|
||||
compile (project(':libs:elasticsearch-core')) {
|
||||
transitive = false
|
||||
}
|
||||
compile (project(':libs:x-content')) {
|
||||
transitive = false
|
||||
}
|
||||
compile 'joda-time:joda-time:2.9.9'
|
||||
runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
|
||||
|
||||
testCompile "org.elasticsearch.test:framework:${version}"
|
||||
}
|
||||
|
||||
forbiddenApisMain {
|
||||
//sql does not depend on server, so only jdk signatures should be checked
|
||||
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
|
||||
}
|
||||
|
||||
dependencyLicenses {
|
||||
mapping from: /elasticsearch-core.*/, to: 'elasticsearch'
|
||||
mapping from: /jackson-.*/, to: 'jackson'
|
||||
ignoreSha 'elasticsearch-core'
|
||||
}
|
@ -0,0 +1,8 @@
This copy of Jackson JSON processor streaming parser/generator is licensed under the
Apache (Software) License, version 2.0 ("the License").
See the License for details about distribution rights, and the
specific rights regarding derivate works.

You may obtain a copy of the License at:

http://www.apache.org/licenses/LICENSE-2.0
20 x-pack/plugin/sql/sql-shared-proto/licenses/jackson-NOTICE Normal file
@ -0,0 +1,20 @@
# Jackson JSON processor

Jackson is a high-performance, Free/Open Source JSON processing library.
It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has
been in development since 2007.
It is currently developed by a community of developers, as well as supported
commercially by FasterXML.com.

## Licensing

Jackson core and extension components may licensed under different licenses.
To find the details that apply to this artifact see the accompanying LICENSE file.
For more information, including possible other licensing options, contact
FasterXML.com (http://fasterxml.com).

## Credits

A list of contributors may be found from CREDITS file, which is included
in some artifacts (usually source distributions); but is always available
from the source code management (SCM) system project uses.
@ -0,0 +1 @@
eb21a035c66ad307e66ec8fce37f5d50fd62d039
@ -0,0 +1 @@
f7b520c458572890807d143670c9b24f4de90897
@ -0,0 +1,202 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@ -0,0 +1,5 @@
=============================================================================
= NOTICE file corresponding to section 4d of the Apache License Version 2.0 =
=============================================================================
This product includes software developed by
Joda.org (http://www.joda.org/).
@ -7,7 +7,6 @@ package org.elasticsearch.xpack.sql.proto;

 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@ -74,7 +73,7 @@ public class ColumnInfo implements ToXContentObject {
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
-        if (Strings.hasText(table)) {
+        if (table != null && table.isEmpty() == false) {
             builder.field("table", table);
         }
         builder.field("name", name);
@ -146,6 +145,6 @@ public class ColumnInfo implements ToXContentObject {

     @Override
     public String toString() {
-        return Strings.toString(this);
+        return ProtoUtils.toString(this);
     }
 }
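Note that Strings.hasText(table) and the new table != null && table.isEmpty() == false check are not strictly equivalent: hasText also rejects whitespace-only strings. A small sketch of the difference (TableCheckDemo is an illustrative name, not from this commit):

public class TableCheckDemo {
    public static void main(String[] args) {
        String table = "   "; // whitespace-only table name
        // Strings.hasText roughly requires length > 0 AND at least one non-whitespace char:
        boolean oldCheck = table != null && table.trim().isEmpty() == false;
        boolean newCheck = table != null && table.isEmpty() == false;
        System.out.println(oldCheck); // false -> "table" field was skipped before
        System.out.println(newCheck); // true  -> "table" field is now serialized
    }
}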
@ -6,7 +6,6 @@

 package org.elasticsearch.xpack.sql.proto;

-import org.elasticsearch.Build;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.XContentParser;
@ -21,18 +20,16 @@ public class MainResponse {
     private String version;
     private String clusterName;
     private String clusterUuid;
-    // TODO: Add parser for Build
-    private Build build;

     private MainResponse() {
     }

-    public MainResponse(String nodeName, String version, String clusterName, String clusterUuid, Build build) {
+    public MainResponse(String nodeName, String version, String clusterName, String clusterUuid) {
         this.nodeName = nodeName;
         this.version = version;
         this.clusterName = clusterName;
         this.clusterUuid = clusterUuid;
-        this.build = build;
-
     }

     public String getNodeName() {
@ -51,10 +48,6 @@ public class MainResponse {
         return clusterUuid;
     }

-    public Build getBuild() {
-        return build;
-    }
-
     private static final ObjectParser<MainResponse, Void> PARSER = new ObjectParser<>(MainResponse.class.getName(), true,
             MainResponse::new);

@ -65,15 +58,6 @@ public class MainResponse {
         PARSER.declareString((response, value) -> {
         }, new ParseField("tagline"));
         PARSER.declareObject((response, value) -> {
-            final String buildFlavor = (String) value.get("build_flavor");
-            final String buildType = (String) value.get("build_type");
-            response.build =
-                    new Build(
-                            buildFlavor == null ? Build.Flavor.UNKNOWN : Build.Flavor.fromDisplayName(buildFlavor),
-                            buildType == null ? Build.Type.UNKNOWN : Build.Type.fromDisplayName(buildType),
-                            (String) value.get("build_hash"),
-                            (String) value.get("build_date"),
-                            (boolean) value.get("build_snapshot"));
             response.version = (String) value.get("number");
         }, (parser, context) -> parser.map(), new ParseField("version"));
     }
@ -94,12 +78,11 @@
         return Objects.equals(nodeName, other.nodeName) &&
                 Objects.equals(version, other.version) &&
                 Objects.equals(clusterUuid, other.clusterUuid) &&
-                Objects.equals(build, other.build) &&
                 Objects.equals(clusterName, other.clusterName);
     }

     @Override
     public int hashCode() {
-        return Objects.hash(nodeName, version, clusterUuid, build, clusterName);
+        return Objects.hash(nodeName, version, clusterUuid, clusterName);
     }
 }
@ -0,0 +1,85 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.sql.proto;

import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Locale;

public final class ProtoUtils {

    private ProtoUtils() {
    }

    /**
     * Parses a generic value from the XContent stream
     */
    public static Object parseFieldsValue(XContentParser parser) throws IOException {
        XContentParser.Token token = parser.currentToken();
        if (token == XContentParser.Token.VALUE_STRING) {
            // binary values will be parsed back and returned as base64 strings when reading from json and yaml
            return parser.text();
        } else if (token == XContentParser.Token.VALUE_NUMBER) {
            return parser.numberValue();
        } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
            return parser.booleanValue();
        } else if (token == XContentParser.Token.VALUE_NULL) {
            return null;
        } else if (token == XContentParser.Token.START_OBJECT) {
            return parser.mapOrdered();
        } else if (token == XContentParser.Token.START_ARRAY) {
            return parser.listOrderedMap();
        } else {
            String message = "Failed to parse object: unexpected token [%s] found";
            throw new IllegalStateException(String.format(Locale.ROOT, message, token));
        }
    }

    /**
     * Returns a string representation of the builder (only applicable for text based xcontent).
     *
     * @param xContentBuilder builder containing an object to be converted to a string
     */
    public static String toString(XContentBuilder xContentBuilder) {
        byte[] byteArray = ((ByteArrayOutputStream) xContentBuilder.getOutputStream()).toByteArray();
        return new String(byteArray, StandardCharsets.UTF_8);
    }

    public static String toString(ToXContent toXContent) {
        try {
            XContentBuilder builder = JsonXContent.contentBuilder();
            if (toXContent.isFragment()) {
                builder.startObject();
            }
            toXContent.toXContent(builder, ToXContent.EMPTY_PARAMS);
            if (toXContent.isFragment()) {
                builder.endObject();
            }
            builder.close();
            return toString(builder);
        } catch (IOException e) {
            try {
                XContentBuilder builder = JsonXContent.contentBuilder();
                builder.startObject();
                builder.field("error", "error building toString out of XContent: " + e.getMessage());
                builder.endObject();
                builder.close();
                return toString(builder);
            } catch (IOException e2) {
                throw new IllegalArgumentException("cannot generate error message for deserialization", e);
            }
        }
    }

}
@ -18,7 +18,7 @@ import java.util.Objects;

 import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
 import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
-import static org.elasticsearch.common.xcontent.XContentParserUtils.parseFieldsValue;
+import static org.elasticsearch.xpack.sql.proto.ProtoUtils.parseFieldsValue;

 /**
  * Response to perform an sql query for JDBC/CLI client
@ -11,13 +11,13 @@ import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.common.xcontent.XContentParserUtils;
 import org.elasticsearch.xpack.sql.type.DataType;

 import java.io.IOException;
 import java.util.Objects;

 import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+import static org.elasticsearch.xpack.sql.proto.ProtoUtils.parseFieldsValue;

 /**
  * Represent a strongly typed parameter value
@ -33,7 +33,7 @@ public class SqlTypedParamValue implements ToXContentObject {
     private static final ParseField TYPE = new ParseField("type");

     static {
-        PARSER.declareField(constructorArg(), (p, c) -> XContentParserUtils.parseFieldsValue(p), VALUE, ObjectParser.ValueType.VALUE);
+        PARSER.declareField(constructorArg(), (p, c) -> parseFieldsValue(p), VALUE, ObjectParser.ValueType.VALUE);
         PARSER.declareString(constructorArg(), TYPE);
     }
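With the static import above, SqlTypedParamValue now routes its "value" field through ProtoUtils.parseFieldsValue instead of the server-side XContentParserUtils. A hedged sketch of what that helper yields for a typed parameter payload; TypedParamDemo and the "integer" type name are illustrative assumptions, not from this commit:

import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.xpack.sql.proto.ProtoUtils;

public class TypedParamDemo {
    public static void main(String[] args) throws Exception {
        String json = "{\"type\":\"integer\",\"value\":42}";
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            parser.nextToken(); // START_OBJECT
            while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
                String field = parser.currentName(); // at FIELD_NAME
                parser.nextToken();                  // advance to the value token
                System.out.println(field + " -> " + ProtoUtils.parseFieldsValue(parser));
            }
        }
        // prints: type -> integer, then value -> 42
    }
}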
@ -0,0 +1,59 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.sql.proto;

import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

public class ProtoUtilsTests extends ESTestCase {

    public void testGenericValueParsing() throws IOException {

        String json = ProtoUtils.toString((builder, params) -> {
            builder.field("int", 42);
            builder.field("double", 42.5);
            builder.field("string", "foobar");
            builder.nullField("null");
            return builder;
        });

        XContentParser parser =
                JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);

        assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
        while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
            assertEquals(XContentParser.Token.FIELD_NAME, parser.currentToken());
            String fieldName = parser.currentName();
            parser.nextToken();
            Object val = ProtoUtils.parseFieldsValue(parser);
            switch (fieldName) {
                case "int":
                    assertEquals(42, val);
                    break;
                case "double":
                    assertEquals(42.5, val);
                    break;
                case "string":
                    assertEquals("foobar", val);
                    break;
                case "null":
                    assertNull(val);
                    break;
                default:
                    fail("Unexpected value " + fieldName);
            }
        }
        assertNull(parser.nextToken());

    }

}
@ -96,6 +96,9 @@ subprojects {

         // CLI testing dependencies
         testRuntime project(path: xpackModule('sql:sql-cli'), configuration: 'nodeps')
+        testRuntime (xpackProject('plugin:sql:sql-proto')) {
+            transitive = false
+        }
         testRuntime "org.jline:jline:3.6.0"
     }