Begin moving XContent to a separate lib/artifact (#29300)

* Begin moving XContent to a separate lib/artifact

This commit moves a large portion of the XContent code from the `server` project
to the `libs/xcontent` project. For the pieces that have been moved, some
helpers have been duplicated to allow them to be decoupled from ES helper
classes. In addition, `Booleans` and `CheckedFunction` have been moved to the
`elasticsearch-core`  project.

This decoupling is a move so that we can eventually make things like the
high-level REST client not rely on the entire ES jar, only the parts it needs.

There are some pieces that are still not decoupled; in particular, some of the
XContent tests still remain in the server project because they test a
large portion of the pluggable xcontent pieces through
`XContentElasticsearchException`. They may be decoupled in future work.
Additionally, there may be more pieces that we want to move to the xcontent lib
in the future that are not part of this PR; this is a starting point.

Relates to #28504
This commit is contained in:
Lee Hinman 2018-04-02 15:58:31 -06:00 committed by GitHub
parent 1172b3b31b
commit 6b2167f462
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
56 changed files with 395 additions and 176 deletions

View File

@ -196,6 +196,7 @@ subprojects {
"org.elasticsearch:elasticsearch-cli:${version}": ':server:cli',
"org.elasticsearch:elasticsearch-core:${version}": ':libs:elasticsearch-core',
"org.elasticsearch:elasticsearch-nio:${version}": ':libs:elasticsearch-nio',
"org.elasticsearch:elasticsearch-x-content:${version}": ':libs:x-content',
"org.elasticsearch:elasticsearch-secure-sm:${version}": ':libs:secure-sm',
"org.elasticsearch.client:elasticsearch-rest-client:${version}": ':client:rest',
"org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}": ':client:sniffer',

View File

@ -248,9 +248,7 @@
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]EsExecutors.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]ThreadBarrier.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]ThreadContext.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]XContentFactory.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]XContentHelper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]smile[/\\]SmileXContent.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]Discovery.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]DiscoverySettings.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ZenDiscovery.java" checks="LineLength" />

View File

@ -73,6 +73,19 @@ public final class Booleans {
throw new IllegalArgumentException("Failed to parse value [" + value + "] as only [true] or [false] are allowed.");
}
private static boolean hasText(CharSequence str) {
if (str == null || str.length() == 0) {
return false;
}
int strLen = str.length();
for (int i = 0; i < strLen; i++) {
if (!Character.isWhitespace(str.charAt(i))) {
return true;
}
}
return false;
}
/**
*
* @param value text to parse.
@ -80,14 +93,14 @@ public final class Booleans {
* @return see {@link #parseBoolean(String)}
*/
public static boolean parseBoolean(String value, boolean defaultValue) {
if (Strings.hasText(value)) {
if (hasText(value)) {
return parseBoolean(value);
}
return defaultValue;
}
public static Boolean parseBoolean(String value, Boolean defaultValue) {
if (Strings.hasText(value)) {
if (hasText(value)) {
return parseBoolean(value);
}
return defaultValue;

View File

@ -0,0 +1,70 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common;
/**
* Utility class for glob-like matching
*/
public class Glob {
/**
* Match a String against the given pattern, supporting the following simple
* pattern styles: "xxx*", "*xxx", "*xxx*" and "xxx*yyy" matches (with an
* arbitrary number of pattern parts), as well as direct equality.
*
* @param pattern the pattern to match against
* @param str the String to match
* @return whether the String matches the given pattern
*/
public static boolean globMatch(String pattern, String str) {
if (pattern == null || str == null) {
return false;
}
int firstIndex = pattern.indexOf('*');
if (firstIndex == -1) {
return pattern.equals(str);
}
if (firstIndex == 0) {
if (pattern.length() == 1) {
return true;
}
int nextIndex = pattern.indexOf('*', firstIndex + 1);
if (nextIndex == -1) {
return str.endsWith(pattern.substring(1));
} else if (nextIndex == 1) {
// Double wildcard "**" - skipping the first "*"
return globMatch(pattern.substring(1), str);
}
String part = pattern.substring(1, nextIndex);
int partIndex = str.indexOf(part);
while (partIndex != -1) {
if (globMatch(pattern.substring(nextIndex), str.substring(partIndex + part.length()))) {
return true;
}
partIndex = str.indexOf(part, partIndex + 1);
}
return false;
}
return (str.length() >= firstIndex &&
pattern.substring(0, firstIndex).equals(str.substring(0, firstIndex)) &&
globMatch(pattern.substring(firstIndex), str.substring(firstIndex)));
}
}

View File

@ -0,0 +1,85 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
apply plugin: 'elasticsearch.build'
apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm'
archivesBaseName = 'elasticsearch-x-content'
publishing {
publications {
nebula {
artifactId = archivesBaseName
}
}
}
dependencies {
compile "org.elasticsearch:elasticsearch-core:${version}"
compile "org.yaml:snakeyaml:${versions.snakeyaml}"
compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
testCompile "junit:junit:${versions.junit}"
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
if (isEclipse == false || project.path == ":libs:x-content-tests") {
testCompile("org.elasticsearch.test:framework:${version}") {
exclude group: 'org.elasticsearch', module: 'elasticsearch-x-content'
}
}
}
forbiddenApisMain {
// x-content does not depend on server
// TODO: Need to decide how we want to handle for forbidden signatures with the changes to core
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
}
if (isEclipse) {
// in eclipse the project is under a fake root, we need to change around the source sets
sourceSets {
if (project.path == ":libs:x-content") {
main.java.srcDirs = ['java']
main.resources.srcDirs = ['resources']
} else {
test.java.srcDirs = ['java']
test.resources.srcDirs = ['resources']
}
}
}
thirdPartyAudit.excludes = [
// from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
'com.fasterxml.jackson.databind.ObjectMapper',
]
dependencyLicenses {
mapping from: /jackson-.*/, to: 'jackson'
}
jarHell.enabled = false

View File

@ -35,6 +35,8 @@ public class ParseField {
private String allReplacedWith = null;
private final String[] allNames;
private static final String[] EMPTY = new String[0];
/**
* @param name
* the primary name for this field. This will be returned by
@ -46,7 +48,7 @@ public class ParseField {
public ParseField(String name, String... deprecatedNames) {
this.name = name;
if (deprecatedNames == null || deprecatedNames.length == 0) {
this.deprecatedNames = Strings.EMPTY_ARRAY;
this.deprecatedNames = EMPTY;
} else {
final HashSet<String> set = new HashSet<>();
Collections.addAll(set, deprecatedNames);

View File

@ -19,6 +19,8 @@
package org.elasticsearch.common.xcontent;
import org.elasticsearch.common.Booleans;
import java.io.IOException;
import java.util.Map;

View File

@ -19,6 +19,8 @@
package org.elasticsearch.common.xcontent;
import org.elasticsearch.common.Booleans;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

View File

@ -19,8 +19,6 @@
package org.elasticsearch.common.xcontent;
import org.elasticsearch.common.util.CollectionUtils;
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.Flushable;
@ -35,6 +33,7 @@ import java.util.Collections;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
@ -740,7 +739,9 @@ public final class XContentBuilder implements Closeable, Flushable {
//Path implements Iterable<Path> and causes endless recursion and a StackOverFlow if treated as an Iterable here
value((Path) value);
} else if (value instanceof Map) {
map((Map<String,?>) value, ensureNoSelfReferences);
@SuppressWarnings("unchecked")
final Map<String, ?> valueMap = (Map<String, ?>) value;
map(valueMap, ensureNoSelfReferences);
} else if (value instanceof Iterable) {
value((Iterable<?>) value, ensureNoSelfReferences);
} else if (value instanceof Object[]) {
@ -799,7 +800,7 @@ public final class XContentBuilder implements Closeable, Flushable {
// checks that the map does not contain references to itself because
// iterating over map entries will cause a stackoverflow error
if (ensureNoSelfReferences) {
CollectionUtils.ensureNoSelfReferences(values);
ensureNoSelfReferences(values);
}
startObject();
@ -828,7 +829,7 @@ public final class XContentBuilder implements Closeable, Flushable {
// checks that the iterable does not contain references to itself because
// iterating over entries will cause a stackoverflow error
if (ensureNoSelfReferences) {
CollectionUtils.ensureNoSelfReferences(values);
ensureNoSelfReferences(values);
}
startArray();
for (Object value : values) {
@ -937,4 +938,39 @@ public final class XContentBuilder implements Closeable, Flushable {
throw new IllegalArgumentException(message);
}
}
private static void ensureNoSelfReferences(Object value) {
Iterable<?> it = convert(value);
if (it != null) {
ensureNoSelfReferences(it, value, Collections.newSetFromMap(new IdentityHashMap<>()));
}
}
private static Iterable<?> convert(Object value) {
if (value == null) {
return null;
}
if (value instanceof Map) {
return ((Map<?,?>) value).values();
} else if ((value instanceof Iterable) && (value instanceof Path == false)) {
return (Iterable<?>) value;
} else if (value instanceof Object[]) {
return Arrays.asList((Object[]) value);
} else {
return null;
}
}
private static void ensureNoSelfReferences(final Iterable<?> value, Object originalReference, final Set<Object> ancestors) {
if (value != null) {
if (ancestors.add(originalReference) == false) {
throw new IllegalArgumentException("Iterable object is self-referencing itself");
}
for (Object o : value) {
ensureNoSelfReferences(convert(o), o, ancestors);
}
ancestors.remove(originalReference);
}
}
}

View File

@ -21,7 +21,6 @@ package org.elasticsearch.common.xcontent;
import com.fasterxml.jackson.dataformat.cbor.CBORConstants;
import com.fasterxml.jackson.dataformat.smile.SmileConstants;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.xcontent.cbor.CborXContent;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.common.xcontent.smile.SmileXContent;
@ -154,7 +153,8 @@ public class XContentFactory {
return XContentType.JSON;
}
// Should we throw a failure here? Smile idea is to use it in bytes....
if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && content.charAt(1) == SmileConstants.HEADER_BYTE_2 && content.charAt(2) == SmileConstants.HEADER_BYTE_3) {
if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && content.charAt(1) == SmileConstants.HEADER_BYTE_2 &&
content.charAt(2) == SmileConstants.HEADER_BYTE_3) {
return XContentType.SMILE;
}
if (length > 2 && first == '-' && content.charAt(1) == '-' && content.charAt(2) == '-') {
@ -186,7 +186,7 @@ public class XContentFactory {
public static XContent xContent(CharSequence content) {
XContentType type = xContentType(content);
if (type == null) {
throw new ElasticsearchParseException("Failed to derive xcontent");
throw new XContentParseException("Failed to derive xcontent");
}
return xContent(type);
}
@ -213,7 +213,7 @@ public class XContentFactory {
public static XContent xContent(byte[] data, int offset, int length) {
XContentType type = xContentType(data, offset, length);
if (type == null) {
throw new ElasticsearchParseException("Failed to derive xcontent");
throw new XContentParseException("Failed to derive xcontent");
}
return xContent(type);
}
@ -278,7 +278,8 @@ public class XContentFactory {
if (first == '{') {
return XContentType.JSON;
}
if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 && bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) {
if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 &&
bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) {
return XContentType.SMILE;
}
if (length > 2 && first == '-' && bytes[offset + 1] == '-' && bytes[offset + 2] == '-') {

View File

@ -103,6 +103,57 @@ public interface XContentGenerator extends Closeable, Flushable {
void copyCurrentStructure(XContentParser parser) throws IOException;
default void copyCurrentEvent(XContentParser parser) throws IOException {
switch (parser.currentToken()) {
case START_OBJECT:
writeStartObject();
break;
case END_OBJECT:
writeEndObject();
break;
case START_ARRAY:
writeStartArray();
break;
case END_ARRAY:
writeEndArray();
break;
case FIELD_NAME:
writeFieldName(parser.currentName());
break;
case VALUE_STRING:
if (parser.hasTextCharacters()) {
writeString(parser.textCharacters(), parser.textOffset(), parser.textLength());
} else {
writeString(parser.text());
}
break;
case VALUE_NUMBER:
switch (parser.numberType()) {
case INT:
writeNumber(parser.intValue());
break;
case LONG:
writeNumber(parser.longValue());
break;
case FLOAT:
writeNumber(parser.floatValue());
break;
case DOUBLE:
writeNumber(parser.doubleValue());
break;
}
break;
case VALUE_BOOLEAN:
writeBoolean(parser.booleanValue());
break;
case VALUE_NULL:
writeNull();
break;
case VALUE_EMBEDDED_OBJECT:
writeBinary(parser.binaryValue());
}
}
/**
* Returns {@code true} if this XContentGenerator has been closed. A closed generator can not do any more output.
*/

View File

@ -23,12 +23,12 @@ import com.fasterxml.jackson.core.JsonEncoding;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.dataformat.cbor.CBORFactory;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentGenerator;
import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
@ -70,7 +70,7 @@ public class CborXContent implements XContent {
@Override
public byte streamSeparator() {
throw new ElasticsearchParseException("cbor does not support stream parsing...");
throw new XContentParseException("cbor does not support stream parsing...");
}
@Override

View File

@ -28,16 +28,15 @@ import com.fasterxml.jackson.core.json.JsonWriteContext;
import com.fasterxml.jackson.core.util.DefaultIndenter;
import com.fasterxml.jackson.core.util.DefaultPrettyPrinter;
import com.fasterxml.jackson.core.util.JsonGeneratorDelegate;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentGenerator;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.support.filtering.FilterPathBasedFilter;
import org.elasticsearch.core.internal.io.IOUtils;
import java.io.BufferedInputStream;
import java.io.IOException;
@ -325,7 +324,7 @@ public class JsonXContentGenerator implements XContentGenerator {
} else {
writeStartRaw(name);
flush();
Streams.copy(content, os);
copyStream(content, os);
writeEndRaw();
}
}
@ -393,7 +392,40 @@ public class JsonXContentGenerator implements XContentGenerator {
if (parser instanceof JsonXContentParser) {
generator.copyCurrentStructure(((JsonXContentParser) parser).parser);
} else {
XContentHelper.copyCurrentStructure(this, parser);
copyCurrentStructure(this, parser);
}
}
/**
* Low level implementation detail of {@link XContentGenerator#copyCurrentStructure(XContentParser)}.
*/
private static void copyCurrentStructure(XContentGenerator destination, XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();
// Let's handle field-name separately first
if (token == XContentParser.Token.FIELD_NAME) {
destination.writeFieldName(parser.currentName());
token = parser.nextToken();
// fall-through to copy the associated value
}
switch (token) {
case START_ARRAY:
destination.writeStartArray();
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
copyCurrentStructure(destination, parser);
}
destination.writeEndArray();
break;
case START_OBJECT:
destination.writeStartObject();
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
copyCurrentStructure(destination, parser);
}
destination.writeEndObject();
break;
default: // others are simple:
destination.copyCurrentEvent(parser);
}
}
@ -423,4 +455,37 @@ public class JsonXContentGenerator implements XContentGenerator {
public boolean isClosed() {
return generator.isClosed();
}
/**
* Copy the contents of the given InputStream to the given OutputStream.
* Closes both streams when done.
*
* @param in the stream to copy from
* @param out the stream to copy to
* @return the number of bytes copied
* @throws IOException in case of I/O errors
*/
private static long copyStream(InputStream in, OutputStream out) throws IOException {
Objects.requireNonNull(in, "No InputStream specified");
Objects.requireNonNull(out, "No OutputStream specified");
final byte[] buffer = new byte[8192];
boolean success = false;
try {
long byteCount = 0;
int bytesRead;
while ((bytesRead = in.read(buffer)) != -1) {
out.write(buffer, 0, bytesRead);
byteCount += bytesRead;
}
out.flush();
success = true;
return byteCount;
} finally {
if (success) {
IOUtils.close(in, out);
} else {
IOUtils.closeWhileHandlingException(in, out);
}
}
}
}

View File

@ -53,7 +53,8 @@ public class SmileXContent implements XContent {
static {
smileFactory = new SmileFactory();
smileFactory.configure(SmileGenerator.Feature.ENCODE_BINARY_AS_7BIT, false); // for now, this is an overhead, might make sense for web sockets
// for now, this is an overhead, might make sense for web sockets
smileFactory.configure(SmileGenerator.Feature.ENCODE_BINARY_AS_7BIT, false);
smileFactory.configure(SmileFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now...
// Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.smile.SmileGenerator#close() method
smileFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false);

View File

@ -19,14 +19,15 @@
package org.elasticsearch.common.xcontent.support;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Numbers;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.CharBuffer;
import java.util.ArrayList;
import java.util.HashMap;
@ -178,6 +179,34 @@ public abstract class AbstractXContentParser implements XContentParser {
protected abstract int doIntValue() throws IOException;
/** Return the long that {@code stringValue} stores or throws an exception if the
* stored value cannot be converted to a long that stores the exact same
* value and {@code coerce} is false. */
private static long toLong(String stringValue, boolean coerce) {
try {
return Long.parseLong(stringValue);
} catch (NumberFormatException e) {
// we will try again with BigDecimal
}
final BigInteger bigIntegerValue;
try {
BigDecimal bigDecimalValue = new BigDecimal(stringValue);
bigIntegerValue = coerce ? bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact();
} catch (ArithmeticException e) {
throw new IllegalArgumentException("Value [" + stringValue + "] has a decimal part");
} catch (NumberFormatException e) {
throw new IllegalArgumentException("For input string: \"" + stringValue + "\"");
}
if (bigIntegerValue.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0 ||
bigIntegerValue.compareTo(BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long");
}
return bigIntegerValue.longValue();
}
@Override
public long longValue() throws IOException {
return longValue(DEFAULT_NUMBER_COERCE_POLICY);
@ -188,7 +217,7 @@ public abstract class AbstractXContentParser implements XContentParser {
Token token = currentToken();
if (token == Token.VALUE_STRING) {
checkCoerceString(coerce, Long.class);
return Numbers.toLong(text(), coerce);
return toLong(text(), coerce);
}
long result = doLongValue();
ensureNumberConversion(coerce, result, Long.class);
@ -369,7 +398,7 @@ public abstract class AbstractXContentParser implements XContentParser {
if (token == XContentParser.Token.START_ARRAY) {
token = parser.nextToken();
} else {
throw new ElasticsearchParseException("Failed to parse list: expecting "
throw new XContentParseException(parser.getTokenLocation(), "Failed to parse list: expecting "
+ XContentParser.Token.START_ARRAY + " but got " + token);
}

View File

@ -20,7 +20,7 @@
package org.elasticsearch.common.xcontent.support.filtering;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.Glob;
import java.util.ArrayList;
import java.util.List;
@ -49,7 +49,7 @@ public class FilterPath {
}
public FilterPath matchProperty(String name) {
if ((next != null) && (simpleWildcard || doubleWildcard || Regex.simpleMatch(segment, name))) {
if ((next != null) && (simpleWildcard || doubleWildcard || Glob.globMatch(segment, name))) {
return next;
}
return null;

View File

@ -20,7 +20,6 @@
package org.elasticsearch.common.xcontent.support.filtering;
import com.fasterxml.jackson.core.filter.TokenFilter;
import org.elasticsearch.common.util.CollectionUtils;
import java.util.ArrayList;
import java.util.List;
@ -47,7 +46,7 @@ public class FilterPathBasedFilter extends TokenFilter {
private final boolean inclusive;
public FilterPathBasedFilter(FilterPath[] filters, boolean inclusive) {
if (CollectionUtils.isEmpty(filters)) {
if (filters == null || filters.length == 0) {
throw new IllegalArgumentException("filters cannot be null or empty");
}
this.inclusive = inclusive;

View File

@ -123,7 +123,7 @@ public class XContentParserTests extends ESTestCase {
readList(source);
fail("should have thrown a parse exception");
} catch (Exception e) {
assertThat(e, instanceOf(ElasticsearchParseException.class));
assertThat(e, instanceOf(XContentParseException.class));
assertThat(e.getMessage(), containsString("Failed to parse list"));
}
}

View File

@ -349,7 +349,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY,
LoggingDeprecationHandler.INSTANCE, document)) {
parser.nextToken();
XContentHelper.copyCurrentStructure(builder.generator(), parser);
builder.generator().copyCurrentStructure(parser);
}
}
builder.endArray();

View File

@ -63,6 +63,7 @@ dependencies {
compile "org.elasticsearch:elasticsearch-core:${version}"
compile "org.elasticsearch:elasticsearch-secure-sm:${version}"
compile "org.elasticsearch:elasticsearch-x-content:${version}"
compileOnly project(':libs:plugin-classloader')
testRuntime project(':libs:plugin-classloader')
@ -91,13 +92,6 @@ dependencies {
// time handling, remove with java 8 time
compile 'joda-time:joda-time:2.9.9'
// json and yaml
compile "org.yaml:snakeyaml:${versions.snakeyaml}"
compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
// percentiles aggregation
compile 'com.tdunning:t-digest:3.2'
// percentile ranks aggregation
@ -295,7 +289,6 @@ if (JavaVersion.current() > JavaVersion.VERSION_1_8) {
dependencyLicenses {
mapping from: /lucene-.*/, to: 'lucene'
mapping from: /jackson-.*/, to: 'jackson'
dependencies = project.configurations.runtime.fileCollection {
it.group.startsWith('org.elasticsearch') == false ||
// keep the following org.elasticsearch jars in

View File

@ -1,46 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.xcontent;
/**
* Helpers for dealing with boolean values. Package-visible only so that only XContent classes use them.
*/
final class Booleans {
/**
* Parse {@code value} with values "true", "false", or null, returning the
* default value if null or the empty string is used. Any other input
* results in an {@link IllegalArgumentException} being thrown.
*/
static boolean parseBoolean(String value, Boolean defaultValue) {
if (value != null && value.length() > 0) {
switch (value) {
case "true":
return true;
case "false":
return false;
default:
throw new IllegalArgumentException("Failed to parse param [" + value + "] as only [true] or [false] are allowed.");
}
} else {
return defaultValue;
}
}
}

View File

@ -287,90 +287,6 @@ public class XContentHelper {
return true;
}
/**
* Low level implementation detail of {@link XContentGenerator#copyCurrentStructure(XContentParser)}.
*/
public static void copyCurrentStructure(XContentGenerator destination, XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();
// Let's handle field-name separately first
if (token == XContentParser.Token.FIELD_NAME) {
destination.writeFieldName(parser.currentName());
token = parser.nextToken();
// fall-through to copy the associated value
}
switch (token) {
case START_ARRAY:
destination.writeStartArray();
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
copyCurrentStructure(destination, parser);
}
destination.writeEndArray();
break;
case START_OBJECT:
destination.writeStartObject();
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
copyCurrentStructure(destination, parser);
}
destination.writeEndObject();
break;
default: // others are simple:
copyCurrentEvent(destination, parser);
}
}
public static void copyCurrentEvent(XContentGenerator generator, XContentParser parser) throws IOException {
switch (parser.currentToken()) {
case START_OBJECT:
generator.writeStartObject();
break;
case END_OBJECT:
generator.writeEndObject();
break;
case START_ARRAY:
generator.writeStartArray();
break;
case END_ARRAY:
generator.writeEndArray();
break;
case FIELD_NAME:
generator.writeFieldName(parser.currentName());
break;
case VALUE_STRING:
if (parser.hasTextCharacters()) {
generator.writeString(parser.textCharacters(), parser.textOffset(), parser.textLength());
} else {
generator.writeString(parser.text());
}
break;
case VALUE_NUMBER:
switch (parser.numberType()) {
case INT:
generator.writeNumber(parser.intValue());
break;
case LONG:
generator.writeNumber(parser.longValue());
break;
case FLOAT:
generator.writeNumber(parser.floatValue());
break;
case DOUBLE:
generator.writeNumber(parser.doubleValue());
break;
}
break;
case VALUE_BOOLEAN:
generator.writeBoolean(parser.booleanValue());
break;
case VALUE_NULL:
generator.writeNull();
break;
case VALUE_EMBEDDED_OBJECT:
generator.writeBinary(parser.binaryValue());
}
}
/**
* Writes a "raw" (bytes) field, handling cases where the bytes are compressed, and tries to optimize writing using
* {@link XContentBuilder#rawField(String, InputStream)}.

View File

@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ContextParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
@ -47,7 +48,7 @@ public final class PipelineConfiguration extends AbstractDiffable<PipelineConfig
PARSER.declareString(Builder::setId, new ParseField("id"));
PARSER.declareField((parser, builder, aVoid) -> {
XContentBuilder contentBuilder = XContentBuilder.builder(parser.contentType().xContent());
XContentHelper.copyCurrentStructure(contentBuilder.generator(), parser);
contentBuilder.generator().copyCurrentStructure(parser);
builder.setConfig(BytesReference.bytes(contentBuilder), contentBuilder.contentType());
}, new ParseField("config"), ObjectParser.ValueType.OBJECT);

View File

@ -25,7 +25,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanBoostQuery;
import org.apache.lucene.util.Accountable;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
@ -56,6 +55,7 @@ import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentGenerator;
@ -63,6 +63,7 @@ import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.index.Index;
@ -425,7 +426,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
// We reached the place in the object tree where we want to insert a new object level
generator.writeStartObject();
generator.writeFieldName("newField");
XContentHelper.copyCurrentStructure(generator, parser);
generator.copyCurrentStructure(parser);
generator.writeEndObject();
if (hasArbitraryContent) {
@ -447,7 +448,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
}
// We are walking through the object tree, so we can safely copy the current node
XContentHelper.copyCurrentEvent(generator, parser);
generator.copyCurrentEvent(parser);
}
if (objectIndex < mutation) {

View File

@ -21,7 +21,6 @@ package org.elasticsearch.test.hamcrest;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.test.ESTestCase;
@ -59,7 +58,7 @@ public class ElasticsearchAssertionsTests extends ESTestCase {
try (XContentBuilder copy = JsonXContent.contentBuilder();
XContentParser parser = createParser(original.contentType().xContent(), BytesReference.bytes(original))) {
parser.nextToken();
XContentHelper.copyCurrentStructure(copy.generator(), parser);
copy.generator().copyCurrentStructure(parser);
try (XContentBuilder copyShuffled = shuffleXContent(copy) ) {
assertToXContentEquivalent(BytesReference.bytes(original), BytesReference.bytes(copyShuffled), original.contentType());
}