Merge pull request #11279 from jpountz/fix/simplify_compression
Internal: tighten up our compression framework.
commit 0f3206e60c
@@ -43,7 +43,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.*;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.settings.Settings;

@@ -1080,7 +1080,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                 // to find a _meta document
                 // So we have no choice but to index first and send mappings afterwards
                 MapperService mapperService = indexShard.indexService().mapperService();
-                mapperService.merge(request.type(), new CompressedString(update.toBytes()), true);
+                mapperService.merge(request.type(), new CompressedXContent(update.toBytes()), true);
                 created = operation.execute(indexShard);
                 mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, request.type(), update);
             } else {

@@ -20,8 +20,14 @@
 package org.elasticsearch.client.transport;
 
 import com.google.common.collect.ImmutableList;
+
 import org.elasticsearch.Version;
-import org.elasticsearch.action.*;
+import org.elasticsearch.action.Action;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionModule;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.cache.recycler.PageCacheRecycler;
 import org.elasticsearch.client.support.AbstractClient;
 import org.elasticsearch.client.support.Headers;

@@ -30,7 +36,6 @@ import org.elasticsearch.cluster.ClusterNameModule;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.LifecycleComponent;
-import org.elasticsearch.common.compress.CompressorFactory;
 import org.elasticsearch.common.inject.Injector;
 import org.elasticsearch.common.inject.ModulesBuilder;
 import org.elasticsearch.common.network.NetworkModule;

@@ -122,8 +127,6 @@ public class TransportClient extends AbstractClient {
 
         Version version = Version.CURRENT;
 
-        CompressorFactory.configure(this.settings);
-
         final ThreadPool threadPool = new ThreadPool(settings);
 
         boolean success = false;

@@ -37,7 +37,7 @@ import org.elasticsearch.cluster.service.InternalClusterService;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
-import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;

@@ -422,7 +422,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
                 builder.endObject();
 
                 builder.startObject("mappings");
-                for (ObjectObjectCursor<String, CompressedString> cursor1 : templateMetaData.mappings()) {
+                for (ObjectObjectCursor<String, CompressedXContent> cursor1 : templateMetaData.mappings()) {
                     byte[] mappingSource = cursor1.value.uncompressed();
                     XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource);
                     Map<String, Object> mapping = parser.map();

@@ -23,7 +23,7 @@ import com.google.common.collect.ImmutableSet;
 import org.elasticsearch.ElasticsearchGenerationException;
 import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.ToXContent;

@@ -45,7 +45,7 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
 
     private final String alias;
 
-    private final CompressedString filter;
+    private final CompressedXContent filter;
 
     private final String indexRouting;
 

@@ -53,7 +53,7 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
 
     private final Set<String> searchRoutingValues;
 
-    private AliasMetaData(String alias, CompressedString filter, String indexRouting, String searchRouting) {
+    private AliasMetaData(String alias, CompressedXContent filter, String indexRouting, String searchRouting) {
         this.alias = alias;
         this.filter = filter;
         this.indexRouting = indexRouting;

@@ -77,11 +77,11 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
         return alias();
     }
 
-    public CompressedString filter() {
+    public CompressedXContent filter() {
         return filter;
     }
 
-    public CompressedString getFilter() {
+    public CompressedXContent getFilter() {
         return filter();
     }
 

@@ -176,9 +176,9 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
     @Override
     public AliasMetaData readFrom(StreamInput in) throws IOException {
         String alias = in.readString();
-        CompressedString filter = null;
+        CompressedXContent filter = null;
         if (in.readBoolean()) {
-            filter = CompressedString.readCompressedString(in);
+            filter = CompressedXContent.readCompressedString(in);
         }
         String indexRouting = null;
         if (in.readBoolean()) {

@@ -195,7 +195,7 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
 
         private final String alias;
 
-        private CompressedString filter;
+        private CompressedXContent filter;
 
         private String indexRouting;
 

@@ -217,7 +217,7 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
             return alias;
         }
 
-        public Builder filter(CompressedString filter) {
+        public Builder filter(CompressedXContent filter) {
             this.filter = filter;
             return this;
         }

@@ -244,7 +244,7 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
             }
             try {
                 XContentBuilder builder = XContentFactory.jsonBuilder().map(filter);
-                this.filter = new CompressedString(builder.bytes());
+                this.filter = new CompressedXContent(builder.bytes());
                 return this;
             } catch (IOException e) {
                 throw new ElasticsearchGenerationException("Failed to build json for alias request", e);

@@ -324,7 +324,7 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
                     }
                 } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
                     if ("filter".equals(currentFieldName)) {
-                        builder.filter(new CompressedString(parser.binaryValue()));
+                        builder.filter(new CompressedXContent(parser.binaryValue()));
                     }
                 } else if (token == XContentParser.Token.VALUE_STRING) {
                     if ("routing".equals(currentFieldName)) {

@@ -35,7 +35,7 @@ import org.elasticsearch.cluster.routing.Murmur3HashFunction;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.collect.MapBuilder;
-import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;

@@ -874,7 +874,7 @@ public class IndexMetaData implements Diffable<IndexMetaData> {
                 if ("mappings".equals(currentFieldName)) {
                     while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                         if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
-                            builder.putMapping(new MappingMetaData(new CompressedString(parser.binaryValue())));
+                            builder.putMapping(new MappingMetaData(new CompressedXContent(parser.binaryValue())));
                         } else {
                             Map<String, Object> mapping = parser.mapOrdered();
                             if (mapping.size() == 1) {

@@ -24,7 +24,7 @@ import com.google.common.collect.Sets;
 import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.collect.MapBuilder;
-import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;

@@ -54,13 +54,13 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
     private final Settings settings;
 
     // the mapping source should always include the type as top level
-    private final ImmutableOpenMap<String, CompressedString> mappings;
+    private final ImmutableOpenMap<String, CompressedXContent> mappings;
 
     private final ImmutableOpenMap<String, AliasMetaData> aliases;
 
     private final ImmutableOpenMap<String, IndexMetaData.Custom> customs;
 
-    public IndexTemplateMetaData(String name, int order, String template, Settings settings, ImmutableOpenMap<String, CompressedString> mappings,
+    public IndexTemplateMetaData(String name, int order, String template, Settings settings, ImmutableOpenMap<String, CompressedXContent> mappings,
                                  ImmutableOpenMap<String, AliasMetaData> aliases, ImmutableOpenMap<String, IndexMetaData.Custom> customs) {
         this.name = name;
         this.order = order;

@@ -103,11 +103,11 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
         return settings();
     }
 
-    public ImmutableOpenMap<String, CompressedString> mappings() {
+    public ImmutableOpenMap<String, CompressedXContent> mappings() {
         return this.mappings;
     }
 
-    public ImmutableOpenMap<String, CompressedString> getMappings() {
+    public ImmutableOpenMap<String, CompressedXContent> getMappings() {
         return this.mappings;
     }
 

@@ -170,7 +170,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
             builder.settings(Settings.readSettingsFromStream(in));
             int mappingsSize = in.readVInt();
             for (int i = 0; i < mappingsSize; i++) {
-                builder.putMapping(in.readString(), CompressedString.readCompressedString(in));
+                builder.putMapping(in.readString(), CompressedXContent.readCompressedString(in));
             }
             int aliasesSize = in.readVInt();
             for (int i = 0; i < aliasesSize; i++) {

@@ -193,7 +193,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
         out.writeString(template);
         Settings.writeSettingsToStream(settings, out);
         out.writeVInt(mappings.size());
-        for (ObjectObjectCursor<String, CompressedString> cursor : mappings) {
+        for (ObjectObjectCursor<String, CompressedXContent> cursor : mappings) {
             out.writeString(cursor.key);
             cursor.value.writeTo(out);
         }

@@ -223,7 +223,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
 
         private Settings settings = Settings.Builder.EMPTY_SETTINGS;
 
-        private final ImmutableOpenMap.Builder<String, CompressedString> mappings;
+        private final ImmutableOpenMap.Builder<String, CompressedXContent> mappings;
 
         private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
 

@@ -276,13 +276,13 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
             return this;
         }
 
-        public Builder putMapping(String mappingType, CompressedString mappingSource) throws IOException {
+        public Builder putMapping(String mappingType, CompressedXContent mappingSource) throws IOException {
             mappings.put(mappingType, mappingSource);
             return this;
         }
 
         public Builder putMapping(String mappingType, String mappingSource) throws IOException {
-            mappings.put(mappingType, new CompressedString(mappingSource));
+            mappings.put(mappingType, new CompressedXContent(mappingSource));
             return this;
         }
 

@@ -327,7 +327,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
 
             if (params.paramAsBoolean("reduce_mappings", false)) {
                 builder.startObject("mappings");
-                for (ObjectObjectCursor<String, CompressedString> cursor : indexTemplateMetaData.mappings()) {
+                for (ObjectObjectCursor<String, CompressedXContent> cursor : indexTemplateMetaData.mappings()) {
                     byte[] mappingSource = cursor.value.uncompressed();
                     XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource);
                     Map<String, Object> mapping = parser.map();

@@ -341,7 +341,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
                 builder.endObject();
             } else {
                 builder.startArray("mappings");
-                for (ObjectObjectCursor<String, CompressedString> cursor : indexTemplateMetaData.mappings()) {
+                for (ObjectObjectCursor<String, CompressedXContent> cursor : indexTemplateMetaData.mappings()) {
                     byte[] data = cursor.value.uncompressed();
                     XContentParser parser = XContentFactory.xContent(data).createParser(data);
                     Map<String, Object> mapping = parser.mapOrderedAndClose();

@@ -23,7 +23,7 @@ import org.elasticsearch.action.TimestampParsingException;
 import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.joda.FormatDateTimeFormatter;

@@ -276,7 +276,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
 
     private final String type;
 
-    private final CompressedString source;
+    private final CompressedXContent source;
 
     private Id id;
     private Routing routing;

@@ -294,9 +294,9 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
         this.hasParentField = docMapper.parentFieldMapper().active();
     }
 
-    public MappingMetaData(CompressedString mapping) throws IOException {
+    public MappingMetaData(CompressedXContent mapping) throws IOException {
         this.source = mapping;
-        Map<String, Object> mappingMap = XContentHelper.createParser(mapping.compressed(), 0, mapping.compressed().length).mapOrderedAndClose();
+        Map<String, Object> mappingMap = XContentHelper.createParser(mapping.compressedReference()).mapOrderedAndClose();
         if (mappingMap.size() != 1) {
             throw new IllegalStateException("Can't derive type from mapping, no root type: " + mapping.string());
         }

@@ -311,7 +311,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
     public MappingMetaData(String type, Map<String, Object> mapping) throws IOException {
         this.type = type;
         XContentBuilder mappingBuilder = XContentFactory.jsonBuilder().map(mapping);
-        this.source = new CompressedString(mappingBuilder.bytes());
+        this.source = new CompressedXContent(mappingBuilder.bytes());
         Map<String, Object> withoutType = mapping;
         if (mapping.size() == 1 && mapping.containsKey(type)) {
             withoutType = (Map<String, Object>) mapping.get(type);

@@ -322,7 +322,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
     private MappingMetaData() {
         this.type = "";
         try {
-            this.source = new CompressedString("");
+            this.source = new CompressedXContent("{}");
         } catch (IOException ex) {
             throw new IllegalStateException("Cannot create MappingMetaData prototype", ex);
         }

@@ -393,7 +393,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
         }
     }
 
-    public MappingMetaData(String type, CompressedString source, Id id, Routing routing, Timestamp timestamp, boolean hasParentField) {
+    public MappingMetaData(String type, CompressedXContent source, Id id, Routing routing, Timestamp timestamp, boolean hasParentField) {
         this.type = type;
         this.source = source;
         this.id = id;

@@ -418,7 +418,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
         return this.type;
     }
 
-    public CompressedString source() {
+    public CompressedXContent source() {
         return this.source;
     }
 

@@ -430,7 +430,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
      * Converts the serialized compressed form of the mappings into a parsed map.
      */
     public Map<String, Object> sourceAsMap() throws IOException {
-        Map<String, Object> mapping = XContentHelper.convertToMap(source.compressed(), 0, source.compressed().length, true).v2();
+        Map<String, Object> mapping = XContentHelper.convertToMap(source.compressedReference(), true).v2();
         if (mapping.size() == 1 && mapping.containsKey(type())) {
             // the type name is the root value, reduce it
             mapping = (Map<String, Object>) mapping.get(type());

@@ -599,7 +599,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
 
     public MappingMetaData readFrom(StreamInput in) throws IOException {
         String type = in.readString();
-        CompressedString source = CompressedString.readCompressedString(in);
+        CompressedXContent source = CompressedXContent.readCompressedString(in);
         // id
         Id id = new Id(in.readBoolean() ? in.readString() : null);
         // routing

@@ -46,7 +46,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.regex.Regex;

@@ -252,7 +252,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                 // apply templates, merging the mappings into the request mapping if exists
                 for (IndexTemplateMetaData template : templates) {
                     templateNames.add(template.getName());
-                    for (ObjectObjectCursor<String, CompressedString> cursor : template.mappings()) {
+                    for (ObjectObjectCursor<String, CompressedXContent> cursor : template.mappings()) {
                         if (mappings.containsKey(cursor.key)) {
                             XContentHelper.mergeDefaults(mappings.get(cursor.key), parseMapping(cursor.value.string()));
                         } else {

@@ -355,7 +355,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                 // first, add the default mapping
                 if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {
                     try {
-                        mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedString(XContentFactory.jsonBuilder().map(mappings.get(MapperService.DEFAULT_MAPPING)).string()), false);
+                        mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(XContentFactory.jsonBuilder().map(mappings.get(MapperService.DEFAULT_MAPPING)).string()), false);
                     } catch (Exception e) {
                         removalReason = "failed on parsing default mapping on index creation";
                         throw new MapperParsingException("mapping [" + MapperService.DEFAULT_MAPPING + "]", e);

@@ -367,7 +367,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                     }
                     try {
                         // apply the default here, its the first time we parse it
-                        mapperService.merge(entry.getKey(), new CompressedString(XContentFactory.jsonBuilder().map(entry.getValue()).string()), true);
+                        mapperService.merge(entry.getKey(), new CompressedXContent(XContentFactory.jsonBuilder().map(entry.getValue()).string()), true);
                     } catch (Exception e) {
                         removalReason = "failed on parsing mappings on index creation";
                         throw new MapperParsingException("mapping [" + entry.getKey() + "]", e);

@@ -32,7 +32,7 @@ import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;

@@ -91,11 +91,11 @@ public class MetaDataMappingService extends AbstractComponent {
 
     static class UpdateTask extends MappingTask {
         final String type;
-        final CompressedString mappingSource;
+        final CompressedXContent mappingSource;
         final String nodeId; // null fr unknown
         final ActionListener<ClusterStateUpdateResponse> listener;
 
-        UpdateTask(String index, String indexUUID, String type, CompressedString mappingSource, String nodeId, ActionListener<ClusterStateUpdateResponse> listener) {
+        UpdateTask(String index, String indexUUID, String type, CompressedXContent mappingSource, String nodeId, ActionListener<ClusterStateUpdateResponse> listener) {
             super(index, indexUUID);
             this.type = type;
             this.mappingSource = mappingSource;

@@ -254,7 +254,7 @@ public class MetaDataMappingService extends AbstractComponent {
                 UpdateTask updateTask = (UpdateTask) task;
                 try {
                     String type = updateTask.type;
-                    CompressedString mappingSource = updateTask.mappingSource;
+                    CompressedXContent mappingSource = updateTask.mappingSource;
 
                     MappingMetaData mappingMetaData = builder.mapping(type);
                     if (mappingMetaData != null && mappingMetaData.source().equals(mappingSource)) {

@@ -376,9 +376,9 @@ public class MetaDataMappingService extends AbstractComponent {
                 DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
                 if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
                     // _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
-                    newMapper = indexService.mapperService().parse(request.type(), new CompressedString(request.source()), false);
+                    newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
                 } else {
-                    newMapper = indexService.mapperService().parse(request.type(), new CompressedString(request.source()), existingMapper == null);
+                    newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
                     if (existingMapper != null) {
                         // first, simulate
                         MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true);

@@ -415,12 +415,12 @@ public class MetaDataMappingService extends AbstractComponent {
                         continue;
                     }
 
-                    CompressedString existingSource = null;
+                    CompressedXContent existingSource = null;
                     if (existingMappers.containsKey(entry.getKey())) {
                         existingSource = existingMappers.get(entry.getKey()).mappingSource();
                     }
                     DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false);
-                    CompressedString updatedSource = mergedMapper.mappingSource();
+                    CompressedXContent updatedSource = mergedMapper.mappingSource();
 
                     if (existingSource != null) {
                         if (existingSource.equals(updatedSource)) {

@@ -352,6 +352,7 @@ public class PagedBytesReference implements BytesReference {
         private final int offset;
         private final int length;
         private int pos;
+        private int mark;
 
         public PagedBytesReferenceStreamInput(ByteArray bytearray, int offset, int length) {
             this.bytearray = bytearray;

@@ -420,9 +421,19 @@ public class PagedBytesReference implements BytesReference {
             return copiedBytes;
         }
 
+        @Override
+        public boolean markSupported() {
+            return true;
+        }
+
+        @Override
+        public void mark(int readlimit) {
+            this.mark = pos;
+        }
+
         @Override
         public void reset() throws IOException {
-            pos = 0;
+            pos = mark;
        }
 
         @Override

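The `reset()` change in this hunk is a behavioral fix: per the `java.io.InputStream` contract, `reset()` must rewind to the most recent `mark()`, not to the beginning of the stream. A minimal standalone illustration of that contract (plain `java.io`, not the Elasticsearch classes above):

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class MarkResetDemo {
    public static void main(String[] args) throws IOException {
        InputStream in = new ByteArrayInputStream(new byte[] { 1, 2, 3, 4 });
        in.read();   // consume byte 1
        in.mark(16); // remember this position (just before byte 2)
        in.read();   // consume byte 2
        in.reset();  // must rewind to the mark, not to position 0
        System.out.println(in.read()); // prints 2, not 1
    }
}
```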
@@ -30,10 +30,9 @@ import java.io.IOException;
  * @deprecated Used only for backward comp. to read old compressed files, since we now use codec based compression
  */
 @Deprecated
-public abstract class CompressedIndexInput<T extends CompressorContext> extends IndexInput {
+public abstract class CompressedIndexInput extends IndexInput {
 
     private IndexInput in;
-    protected final T context;
 
     private int version;
     private long totalUncompressedLength;

@@ -48,10 +47,9 @@ public abstract class CompressedIndexInput<T extends CompressorContext> extends
     private int currentOffsetIdx;
     private long currentUncompressedChunkPointer;
 
-    public CompressedIndexInput(IndexInput in, T context) throws IOException {
+    public CompressedIndexInput(IndexInput in) throws IOException {
         super("compressed(" + in.toString() + ")");
         this.in = in;
-        this.context = context;
         readHeader(in);
         this.version = in.readInt();
         long metaDataPosition = in.readLong();

@@ -27,10 +27,9 @@ import java.io.IOException;
 
 /**
  */
-public abstract class CompressedStreamInput<T extends CompressorContext> extends StreamInput {
+public abstract class CompressedStreamInput extends StreamInput {
 
     private final StreamInput in;
-    protected final CompressorContext context;
 
     private boolean closed;
 

@@ -38,9 +37,8 @@ public abstract class CompressedStreamInput<T extends CompressorContext> extends
     private int position = 0;
     private int valid = 0;
 
-    public CompressedStreamInput(StreamInput in, T context) throws IOException {
+    public CompressedStreamInput(StreamInput in) throws IOException {
         this.in = in;
-        this.context = context;
         super.setVersion(in.getVersion());
         readHeader(in);
     }

@@ -51,13 +49,6 @@ public abstract class CompressedStreamInput<T extends CompressorContext> extends
         return super.setVersion(version);
     }
 
-    /**
-     * Expert!, resets to buffer start, without the need to decompress it again.
-     */
-    public void resetToBufferStart() {
-        this.position = 0;
-    }
-
     /**
      * Method is overridden to report number of bytes that can now be read
      * from decoded data buffer, without reading bytes from the underlying

@@ -22,6 +22,7 @@ package org.elasticsearch.common.compress;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 

@@ -34,33 +35,32 @@ import java.util.Arrays;
  * memory. Note that the compressed string might still sometimes need to be
  * decompressed in order to perform equality checks or to compute hash codes.
  */
-public final class CompressedString {
+public final class CompressedXContent {
 
     private final byte[] bytes;
     private int hashCode;
 
-    public CompressedString(BytesReference data) throws IOException {
+    public CompressedXContent(BytesReference data) throws IOException {
         Compressor compressor = CompressorFactory.compressor(data);
         if (compressor != null) {
             // already compressed...
             this.bytes = data.toBytes();
         } else {
-            BytesArray bytesArray = data.toBytesArray();
-            this.bytes = CompressorFactory.defaultCompressor().compress(bytesArray.array(), bytesArray.arrayOffset(), bytesArray.length());
-            assert CompressorFactory.compressor(bytes) != null;
+            BytesStreamOutput out = new BytesStreamOutput();
+            try (StreamOutput compressedOutput = CompressorFactory.defaultCompressor().streamOutput(out)) {
+                data.writeTo(compressedOutput);
+            }
+            this.bytes = out.bytes().toBytes();
+            assert CompressorFactory.compressor(new BytesArray(bytes)) != null;
         }
 
     }
 
-    public CompressedString(byte[] data, int offset, int length) throws IOException {
-        this(new BytesArray(data, offset, length));
+    public CompressedXContent(byte[] data) throws IOException {
+        this(new BytesArray(data));
     }
 
-    public CompressedString(byte[] data) throws IOException {
-        this(data, 0, data.length);
-    }
-
-    public CompressedString(String str) throws IOException {
+    public CompressedXContent(String str) throws IOException {
         this(new BytesArray(new BytesRef(str)));
     }
 

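This constructor is the heart of the rename: instead of a one-shot `byte[]`-to-`byte[]` `compress()` call, the bytes are now streamed through the default compressor's `StreamOutput`. The same store-compressed/decompress-on-demand idea in a self-contained sketch, using plain `java.util.zip` instead of the Elasticsearch `Compressor` abstraction (`CompressedBlob` is a hypothetical name):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

/** Keeps only the compressed form in memory; decompresses on demand. */
final class CompressedBlob {
    private final byte[] compressed;

    CompressedBlob(byte[] data) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (DeflaterOutputStream deflate = new DeflaterOutputStream(out)) {
            deflate.write(data); // stream the bytes through the compressor
        }
        this.compressed = out.toByteArray();
    }

    byte[] uncompressed() throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (InflaterInputStream inflate = new InflaterInputStream(new ByteArrayInputStream(compressed))) {
            byte[] buffer = new byte[4096];
            for (int n; (n = inflate.read(buffer)) != -1; ) {
                out.write(buffer, 0, n);
            }
        }
        return out.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        CompressedBlob blob = new CompressedBlob("{\"type\":{}}".getBytes(StandardCharsets.UTF_8));
        System.out.println(new String(blob.uncompressed(), StandardCharsets.UTF_8)); // {"type":{}}
    }
}
```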
@@ -69,12 +69,15 @@ public final class CompressedString {
         return this.bytes;
     }
 
+    /** Return the compressed bytes as a {@link BytesReference}. */
+    public BytesReference compressedReference() {
+        return new BytesArray(bytes);
+    }
+
+    /** Return the uncompressed bytes. */
     public byte[] uncompressed() {
-        Compressor compressor = CompressorFactory.compressor(bytes);
-        assert compressor != null;
         try {
-            return compressor.uncompress(bytes, 0, bytes.length);
+            return CompressorFactory.uncompress(new BytesArray(bytes)).toBytes();
         } catch (IOException e) {
             throw new IllegalStateException("Cannot decompress compressed string", e);
         }

@@ -84,10 +87,10 @@ public final class CompressedString {
         return new BytesRef(uncompressed()).utf8ToString();
     }
 
-    public static CompressedString readCompressedString(StreamInput in) throws IOException {
+    public static CompressedXContent readCompressedString(StreamInput in) throws IOException {
         byte[] bytes = new byte[in.readVInt()];
         in.readBytes(bytes, 0, bytes.length);
-        return new CompressedString(bytes);
+        return new CompressedXContent(bytes);
     }
 
     public void writeTo(StreamOutput out) throws IOException {

@@ -100,7 +103,7 @@ public final class CompressedString {
         if (this == o) return true;
         if (o == null || getClass() != o.getClass()) return false;
 
-        CompressedString that = (CompressedString) o;
+        CompressedXContent that = (CompressedXContent) o;
 
         if (Arrays.equals(compressed(), that.compressed())) {
             return true;

@@ -23,7 +23,6 @@ import org.apache.lucene.store.IndexInput;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.settings.Settings;
 import org.jboss.netty.buffer.ChannelBuffer;
 
 import java.io.IOException;

@@ -32,32 +31,20 @@
  */
 public interface Compressor {
 
-    String type();
-
-    void configure(Settings settings);
-
     boolean isCompressed(BytesReference bytes);
 
-    boolean isCompressed(byte[] data, int offset, int length);
-
     boolean isCompressed(ChannelBuffer buffer);
 
+    StreamInput streamInput(StreamInput in) throws IOException;
+
+    StreamOutput streamOutput(StreamOutput out) throws IOException;
+
+    /**
+     * @deprecated Used for backward comp. since we now use Lucene compressed codec.
+     */
+    @Deprecated
     boolean isCompressed(IndexInput in) throws IOException;
 
-    /**
-     * Uncompress the provided data, data can be detected as compressed using {@link #isCompressed(byte[], int, int)}.
-     */
-    byte[] uncompress(byte[] data, int offset, int length) throws IOException;
-
-    /**
-     * Compresses the provided data, data can be detected as compressed using {@link #isCompressed(byte[], int, int)}.
-     */
-    byte[] compress(byte[] data, int offset, int length) throws IOException;
-
-    CompressedStreamInput streamInput(StreamInput in) throws IOException;
-
-    CompressedStreamOutput streamOutput(StreamOutput out) throws IOException;
-
     /**
      * @deprecated Used for backward comp. since we now use Lucene compressed codec.
     */

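The interface shrinks to detection plus streaming: the one-shot `byte[]` compress/uncompress methods, `type()`, and `configure(Settings)` are all gone. A hedged sketch of the shape that remains, with hypothetical names and plain `java.io` stand-ins for the Elasticsearch stream types:

```java
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

/** Detection plus streaming only, mirroring the simplified Compressor above. */
interface StreamingCompressor {
    boolean isCompressed(byte[] firstBytes);
    InputStream streamInput(InputStream in) throws IOException;
    OutputStream streamOutput(OutputStream out) throws IOException;
}

/** A deflate-backed toy implementation (no magic header, unlike the real one). */
class ToyDeflate implements StreamingCompressor {
    @Override
    public boolean isCompressed(byte[] firstBytes) {
        // real implementations check a magic header here; 0x78 is the usual zlib first byte
        return firstBytes.length > 0 && (firstBytes[0] & 0xff) == 0x78;
    }

    @Override
    public InputStream streamInput(InputStream in) {
        return new InflaterInputStream(in); // decompress while reading
    }

    @Override
    public OutputStream streamOutput(OutputStream out) {
        return new DeflaterOutputStream(out); // compress while writing
    }
}
```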
@@ -19,68 +19,36 @@
 
 package org.elasticsearch.common.compress;
 
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
 import org.apache.lucene.store.IndexInput;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.compress.deflate.DeflateCompressor;
 import org.elasticsearch.common.compress.lzf.LZFCompressor;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
 import org.jboss.netty.buffer.ChannelBuffer;
 
 import java.io.IOException;
-import java.util.List;
-import java.util.Locale;
 
 /**
 */
 public class CompressorFactory {
 
-    private static final LZFCompressor LZF = new LZFCompressor();
-
     private static final Compressor[] compressors;
-    private static final ImmutableMap<String, Compressor> compressorsByType;
-    private static Compressor defaultCompressor;
+    private static volatile Compressor defaultCompressor;
 
     static {
-        List<Compressor> compressorsX = Lists.newArrayList();
-        compressorsX.add(LZF);
-
-        compressors = compressorsX.toArray(new Compressor[compressorsX.size()]);
-        MapBuilder<String, Compressor> compressorsByTypeX = MapBuilder.newMapBuilder();
-        for (Compressor compressor : compressors) {
-            compressorsByTypeX.put(compressor.type(), compressor);
-        }
-        compressorsByType = compressorsByTypeX.immutableMap();
-
-        defaultCompressor = LZF;
+        compressors = new Compressor[] {
+                new LZFCompressor(),
+                new DeflateCompressor()
+        };
+        defaultCompressor = new DeflateCompressor();
     }
 
-    public static synchronized void configure(Settings settings) {
-        for (Compressor compressor : compressors) {
-            compressor.configure(settings);
-        }
-        String defaultType = settings.get("compress.default.type", "lzf").toLowerCase(Locale.ENGLISH);
-        boolean found = false;
-        for (Compressor compressor : compressors) {
-            if (defaultType.equalsIgnoreCase(compressor.type())) {
-                defaultCompressor = compressor;
-                found = true;
-                break;
-            }
-        }
-        if (!found) {
-            Loggers.getLogger(CompressorFactory.class).warn("failed to find default type [{}]", defaultType);
-        }
-    }
-
-    public static synchronized void setDefaultCompressor(Compressor defaultCompressor) {
+    public static void setDefaultCompressor(Compressor defaultCompressor) {
         CompressorFactory.defaultCompressor = defaultCompressor;
     }
 

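Two state-handling changes land here: the compressor list becomes a fixed array built once in the static initializer, and with the settings-driven `configure()` hook gone, `setDefaultCompressor` can drop `synchronized` in favor of a `volatile` field — a single volatile write is atomic and immediately visible to readers. The pattern in miniature (hypothetical names):

```java
/** Fixed registry with a volatile default, mirroring the factory above. */
final class CodecRegistry {
    private static final String[] CODECS = { "lzf", "deflate" };
    private static volatile String defaultCodec = "deflate";

    static void setDefaultCodec(String codec) {
        defaultCodec = codec; // volatile write: no lock needed for one field
    }

    static String defaultCodec() {
        return defaultCodec; // volatile read sees the latest write
    }

    static String[] codecs() {
        return CODECS.clone(); // defensive copy of the fixed set
    }
}
```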
@@ -92,14 +60,10 @@ public class CompressorFactory {
         return compressor(bytes) != null;
     }
 
-    public static boolean isCompressed(byte[] data) {
-        return compressor(data, 0, data.length) != null;
-    }
-
-    public static boolean isCompressed(byte[] data, int offset, int length) {
-        return compressor(data, offset, length) != null;
-    }
-
+    /**
+     * @deprecated we don't compress lucene indexes anymore and rely on lucene codecs
+     */
+    @Deprecated
     public static boolean isCompressed(IndexInput in) throws IOException {
         return compressor(in) != null;
     }

@@ -108,37 +72,35 @@ public class CompressorFactory {
     public static Compressor compressor(BytesReference bytes) {
         for (Compressor compressor : compressors) {
             if (compressor.isCompressed(bytes)) {
+                // bytes should be either detected as compressed or as xcontent,
+                // if we have bytes that can be either detected as compressed or
+                // as a xcontent, we have a problem
+                assert XContentFactory.xContentType(bytes) == null;
                 return compressor;
             }
         }
-        return null;
-    }
-
-    @Nullable
-    public static Compressor compressor(byte[] data) {
-        return compressor(data, 0, data.length);
-    }
-
-    @Nullable
-    public static Compressor compressor(byte[] data, int offset, int length) {
-        for (Compressor compressor : compressors) {
-            if (compressor.isCompressed(data, offset, length)) {
-                return compressor;
-            }
+        XContentType contentType = XContentFactory.xContentType(bytes);
+        if (contentType == null) {
+            throw new NotXContentException("Compressor detection can only be called on some xcontent bytes or compressed xcontent bytes");
         }
+
         return null;
     }
 
     @Nullable
     public static Compressor compressor(ChannelBuffer buffer) {
         for (Compressor compressor : compressors) {
             if (compressor.isCompressed(buffer)) {
                 return compressor;
             }
         }
-        return null;
+        throw new NotCompressedException();
     }
 
     /**
      * @deprecated we don't compress lucene indexes anymore and rely on lucene codecs
      */
     @Deprecated
     @Nullable
     public static Compressor compressor(IndexInput in) throws IOException {
         for (Compressor compressor : compressors) {

@@ -149,25 +111,35 @@ public class CompressorFactory {
         return null;
     }
 
-    public static Compressor compressor(String type) {
-        return compressorsByType.get(type);
-    }
-
     /**
      * Uncompress the provided data, data can be detected as compressed using {@link #isCompressed(byte[], int, int)}.
      */
     public static BytesReference uncompressIfNeeded(BytesReference bytes) throws IOException {
         Compressor compressor = compressor(bytes);
+        BytesReference uncompressed;
         if (compressor != null) {
-            if (bytes.hasArray()) {
-                return new BytesArray(compressor.uncompress(bytes.array(), bytes.arrayOffset(), bytes.length()));
-            }
-            StreamInput compressed = compressor.streamInput(bytes.streamInput());
-            BytesStreamOutput bStream = new BytesStreamOutput();
-            Streams.copy(compressed, bStream);
-            compressed.close();
-            return bStream.bytes();
+            uncompressed = uncompress(bytes, compressor);
+        } else {
+            uncompressed = bytes;
         }
-        return bytes;
+
+        return uncompressed;
+    }
+
+    /** Decompress the provided {@link BytesReference}. */
+    public static BytesReference uncompress(BytesReference bytes) throws IOException {
+        Compressor compressor = compressor(bytes);
+        if (compressor == null) {
+            throw new NotCompressedException();
+        }
+        return uncompress(bytes, compressor);
+    }
+
+    private static BytesReference uncompress(BytesReference bytes, Compressor compressor) throws IOException {
+        StreamInput compressed = compressor.streamInput(bytes.streamInput());
+        BytesStreamOutput bStream = new BytesStreamOutput();
+        Streams.copy(compressed, bStream);
+        compressed.close();
+        return bStream.bytes();
     }
 }

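The decompression API now has three layers: a lenient `uncompressIfNeeded` that passes plain bytes through, a strict `uncompress` that throws `NotCompressedException`, and a private streaming helper shared by both — the old `hasArray()` byte-array fast path is gone. A standalone sketch of the same shape with plain `java.util.zip`; `looksCompressed` is a stand-in for real header detection (zlib streams typically begin with `0x78`):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.zip.InflaterInputStream;

final class Decompress {
    static boolean looksCompressed(byte[] bytes) {
        return bytes.length > 0 && (bytes[0] & 0xff) == 0x78; // stand-in detection
    }

    /** Lenient: plain bytes pass through untouched. */
    static byte[] uncompressIfNeeded(byte[] bytes) throws IOException {
        return looksCompressed(bytes) ? inflate(bytes) : bytes;
    }

    /** Strict: refuses input that is not compressed. */
    static byte[] uncompress(byte[] bytes) throws IOException {
        if (!looksCompressed(bytes)) {
            throw new IllegalStateException("not compressed"); // the real code throws NotCompressedException
        }
        return inflate(bytes);
    }

    /** Shared streaming helper, like the private uncompress(bytes, compressor) above. */
    private static byte[] inflate(byte[] bytes) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (InflaterInputStream in = new InflaterInputStream(new ByteArrayInputStream(bytes))) {
            byte[] buffer = new byte[4096];
            for (int n; (n = in.read(buffer)) != -1; ) {
                out.write(buffer, 0, n);
            }
        }
        return out.toByteArray();
    }
}
```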
@@ -19,7 +19,13 @@
 
 package org.elasticsearch.common.compress;
 
-/**
- */
-public interface CompressorContext {
+/** Exception indicating that we were expecting something compressed, which
+ * was not compressed or corrupted so that the compression format could not
+ * be detected. */
+public class NotCompressedException extends RuntimeException {
+
+    public NotCompressedException() {
+        super();
+    }
+
 }

@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress;
+
+import org.elasticsearch.common.xcontent.XContent;
+
+/** Exception indicating that we were expecting some {@link XContent} but could
+ * not detect its type. */
+public class NotXContentException extends RuntimeException {
+
+    public NotXContentException(String message) {
+        super(message);
+    }
+
+}

@@ -0,0 +1,156 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress.deflate;
+
+import org.apache.lucene.store.IndexInput;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.compress.CompressedIndexInput;
+import org.elasticsearch.common.compress.Compressor;
+import org.elasticsearch.common.io.stream.InputStreamStreamInput;
+import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.zip.Deflater;
+import java.util.zip.DeflaterOutputStream;
+import java.util.zip.Inflater;
+import java.util.zip.InflaterInputStream;
+
+/**
+ * {@link Compressor} implementation based on the DEFLATE compression algorithm.
+ */
+public class DeflateCompressor implements Compressor {
+
+    // An arbitrary header that we use to identify compressed streams
+    // It needs to be different from other compressors and to not be specific
+    // enough so that no stream starting with these bytes could be detected as
+    // a XContent
+    private static final byte[] HEADER = new byte[] { 'D', 'F', 'L', '\0' };
+    // 3 is a good trade-off between speed and compression ratio
+    private static final int LEVEL = 3;
+    // We use buffering on the input and ouput of in/def-laters in order to
+    // limit the number of JNI calls
+    private static final int BUFFER_SIZE = 4096;
+
+    @Override
+    public boolean isCompressed(BytesReference bytes) {
+        if (bytes.length() < HEADER.length) {
+            return false;
+        }
+        for (int i = 0; i < HEADER.length; ++i) {
+            if (bytes.get(i) != HEADER[i]) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    @Override
+    public boolean isCompressed(ChannelBuffer buffer) {
+        if (buffer.readableBytes() < HEADER.length) {
+            return false;
+        }
+        final int offset = buffer.readerIndex();
+        for (int i = 0; i < HEADER.length; ++i) {
+            if (buffer.getByte(offset + i) != HEADER[i]) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    @Override
+    public StreamInput streamInput(StreamInput in) throws IOException {
+        final byte[] headerBytes = new byte[HEADER.length];
+        int len = 0;
+        while (len < headerBytes.length) {
+            final int read = in.read(headerBytes, len, headerBytes.length - len);
+            if (read == -1) {
+                break;
+            }
+            len += read;
+        }
+        if (len != HEADER.length || Arrays.equals(headerBytes, HEADER) == false) {
+            throw new IllegalArgumentException("Input stream is not compressed with DEFLATE!");
+        }
+
+        final boolean nowrap = true;
+        final Inflater inflater = new Inflater(nowrap);
+        InputStream decompressedIn = new InflaterInputStream(in, inflater, BUFFER_SIZE);
+        decompressedIn = new BufferedInputStream(decompressedIn, BUFFER_SIZE);
+        return new InputStreamStreamInput(decompressedIn) {
+            private boolean closed = false;
+
+            public void close() throws IOException {
+                try {
+                    super.close();
+                } finally {
+                    if (closed == false) {
+                        // important to release native memory
+                        inflater.end();
+                        closed = true;
+                    }
+                }
+            }
+        };
+    }
+
+    @Override
+    public StreamOutput streamOutput(StreamOutput out) throws IOException {
+        out.writeBytes(HEADER);
+        final boolean nowrap = true;
+        final Deflater deflater = new Deflater(LEVEL, nowrap);
+        final boolean syncFlush = true;
+        OutputStream compressedOut = new DeflaterOutputStream(out, deflater, BUFFER_SIZE, syncFlush);
+        compressedOut = new BufferedOutputStream(compressedOut, BUFFER_SIZE);
+        return new OutputStreamStreamOutput(compressedOut) {
+            private boolean closed = false;
+
+            public void close() throws IOException {
+                try {
+                    super.close();
+                } finally {
+                    if (closed == false) {
+                        // important to release native memory
+                        deflater.end();
+                        closed = true;
+                    }
+                }
+            }
+        };
+    }
+
+    @Override
+    public boolean isCompressed(IndexInput in) throws IOException {
+        return false;
+    }
+
+    @Override
+    public CompressedIndexInput indexInput(IndexInput in) throws IOException {
+        throw new UnsupportedOperationException();
+    }
+}

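Four design points in this new compressor are worth calling out: a 4-byte magic header distinguishes compressed bytes from raw XContent, raw DEFLATE (`nowrap`) at level 3 trades ratio for speed, 4 KB buffers limit JNI crossings, and the `close()` overrides call `end()` to release the native Inflater/Deflater memory. A self-contained round trip in the same style (plain `java.util.zip`; the class name is hypothetical):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.Inflater;
import java.util.zip.InflaterInputStream;

public class DeflateRoundTrip {
    private static final byte[] HEADER = { 'D', 'F', 'L', 0 }; // same magic as above

    static byte[] compress(byte[] data) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        out.write(HEADER); // header is written outside the compressed stream
        Deflater deflater = new Deflater(3, true); // level 3, nowrap (raw deflate)
        try (DeflaterOutputStream dout = new DeflaterOutputStream(out, deflater, 4096, true)) {
            dout.write(data);
        } finally {
            deflater.end(); // release native memory, as the close() overrides above do
        }
        return out.toByteArray();
    }

    static byte[] decompress(byte[] data) throws IOException {
        // skip the header, then inflate with a matching nowrap Inflater
        ByteArrayInputStream in = new ByteArrayInputStream(data, HEADER.length, data.length - HEADER.length);
        Inflater inflater = new Inflater(true);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (InflaterInputStream iin = new InflaterInputStream(in, inflater, 4096)) {
            byte[] buffer = new byte[4096];
            for (int n; (n = iin.read(buffer)) != -1; ) {
                out.write(buffer, 0, n);
            }
        } finally {
            inflater.end();
        }
        return out.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        byte[] original = "hello deflate".getBytes(StandardCharsets.UTF_8);
        byte[] back = decompress(compress(original));
        System.out.println(new String(back, StandardCharsets.UTF_8)); // hello deflate
    }
}
```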
@@ -32,14 +32,14 @@ import java.util.Arrays;
 /**
 */
 @Deprecated
-public class LZFCompressedIndexInput extends CompressedIndexInput<LZFCompressorContext> {
+public class LZFCompressedIndexInput extends CompressedIndexInput {
 
     private final ChunkDecoder decoder;
     // scratch area buffer
     private byte[] inputBuffer;
 
     public LZFCompressedIndexInput(IndexInput in, ChunkDecoder decoder) throws IOException {
-        super(in, LZFCompressorContext.INSTANCE);
+        super(in);
 
         this.decoder = decoder;
         this.uncompressed = new byte[LZFChunk.MAX_CHUNK_LEN];

@@ -29,7 +29,7 @@ import java.io.IOException;
 
 /**
 */
-public class LZFCompressedStreamInput extends CompressedStreamInput<LZFCompressorContext> {
+public class LZFCompressedStreamInput extends CompressedStreamInput {
 
     private final BufferRecycler recycler;
 

@@ -39,7 +39,7 @@ public class LZFCompressedStreamInput extends CompressedStreamInput<LZFCompresso
     private byte[] inputBuffer;
 
     public LZFCompressedStreamInput(StreamInput in, ChunkDecoder decoder) throws IOException {
-        super(in, LZFCompressorContext.INSTANCE);
+        super(in);
         this.recycler = BufferRecycler.instance();
         this.decoder = decoder;
 

@@ -21,30 +21,27 @@ package org.elasticsearch.common.compress.lzf;
 
 import com.ning.compress.lzf.ChunkDecoder;
 import com.ning.compress.lzf.LZFChunk;
-import com.ning.compress.lzf.LZFEncoder;
 import com.ning.compress.lzf.util.ChunkDecoderFactory;
 import org.apache.lucene.store.IndexInput;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressedIndexInput;
-import org.elasticsearch.common.compress.CompressedStreamInput;
-import org.elasticsearch.common.compress.CompressedStreamOutput;
 import org.elasticsearch.common.compress.Compressor;
+import org.elasticsearch.common.compress.deflate.DeflateCompressor;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.common.settings.Settings;
 import org.jboss.netty.buffer.ChannelBuffer;
 
 import java.io.IOException;
 
 /**
+ * @deprecated Use {@link DeflateCompressor} instead
 */
+@Deprecated
 public class LZFCompressor implements Compressor {
 
     static final byte[] LUCENE_HEADER = {'L', 'Z', 'F', 0};
 
-    public static final String TYPE = "lzf";
-
     private ChunkDecoder decoder;
 
     public LZFCompressor() {

@@ -53,14 +50,6 @@ public class LZFCompressor implements Compressor {
                 this.decoder.getClass().getSimpleName());
     }
 
-    @Override
-    public String type() {
-        return TYPE;
-    }
-
-    @Override
-    public void configure(Settings settings) {}
-
     @Override
     public boolean isCompressed(BytesReference bytes) {
         return bytes.length() >= 3 &&

@@ -69,14 +58,6 @@ public class LZFCompressor implements Compressor {
                 (bytes.get(2) == LZFChunk.BLOCK_TYPE_COMPRESSED || bytes.get(2) == LZFChunk.BLOCK_TYPE_NON_COMPRESSED);
     }
 
-    @Override
-    public boolean isCompressed(byte[] data, int offset, int length) {
-        return length >= 3 &&
-                data[offset] == LZFChunk.BYTE_Z &&
-                data[offset + 1] == LZFChunk.BYTE_V &&
-                (data[offset + 2] == LZFChunk.BLOCK_TYPE_COMPRESSED || data[offset + 2] == LZFChunk.BLOCK_TYPE_NON_COMPRESSED);
-    }
-
     @Override
     public boolean isCompressed(ChannelBuffer buffer) {
         int offset = buffer.readerIndex();

@@ -104,23 +85,13 @@ public class LZFCompressor implements Compressor {
     }
 
     @Override
-    public byte[] uncompress(byte[] data, int offset, int length) throws IOException {
-        return decoder.decode(data, offset, length);
-    }
-
-    @Override
-    public byte[] compress(byte[] data, int offset, int length) throws IOException {
-        return LZFEncoder.safeEncode(data, offset, length);
-    }
-
-    @Override
-    public CompressedStreamInput streamInput(StreamInput in) throws IOException {
+    public StreamInput streamInput(StreamInput in) throws IOException {
         return new LZFCompressedStreamInput(in, decoder);
     }
 
     @Override
-    public CompressedStreamOutput streamOutput(StreamOutput out) throws IOException {
-        return new LZFCompressedStreamOutput(out);
+    public StreamOutput streamOutput(StreamOutput out) throws IOException {
+        throw new UnsupportedOperationException("LZF is only here for back compat, no write support");
     }
 
     @Override

|
@@ -59,6 +59,16 @@ public class InputStreamStreamInput extends StreamInput {
         is.reset();
     }
 
+    @Override
+    public boolean markSupported() {
+        return is.markSupported();
+    }
+
+    @Override
+    public void mark(int readlimit) {
+        is.mark(readlimit);
+    }
+
     @Override
     public void close() throws IOException {
         is.close();

@@ -21,6 +21,7 @@ package org.elasticsearch.common.xcontent;
 
 import com.fasterxml.jackson.dataformat.cbor.CBORConstants;
 import com.fasterxml.jackson.dataformat.smile.SmileConstants;
+
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;

@@ -163,6 +164,9 @@ public class XContentFactory {
             if (c == '{') {
                 return XContentType.JSON;
             }
+            if (Character.isWhitespace(c) == false) {
+                break;
+            }
         }
         return null;
     }

@@ -204,65 +208,76 @@ public class XContentFactory {
     }
 
     /**
-     * Guesses the content type based on the provided input stream.
+     * Guesses the content type based on the provided input stream without consuming it.
      */
     public static XContentType xContentType(InputStream si) throws IOException {
-        final int firstInt = si.read(); // this must be an int since we need to respect the method contract
-        if (firstInt == -1) {
-            return null;
+        if (si.markSupported() == false) {
+            throw new IllegalArgumentException("Cannot guess the xcontent type without mark/reset support on " + si.getClass());
         }
-
-        final int secondInt = si.read(); // this must be an int since we need to respect the method contract
-        if (secondInt == -1) {
-            return null;
-        }
-        final byte first = (byte) (0xff & firstInt);
-        final byte second = (byte) (0xff & secondInt);
-        if (first == SmileConstants.HEADER_BYTE_1 && second == SmileConstants.HEADER_BYTE_2) {
-            int third = si.read();
-            if (third == SmileConstants.HEADER_BYTE_3) {
-                return XContentType.SMILE;
-            }
-        }
-        if (first == '{' || second == '{') {
-            return XContentType.JSON;
-        }
-        if (first == '-' && second == '-') {
-            int third = si.read();
-            if (third == '-') {
-                return XContentType.YAML;
-            }
-        }
-        // CBOR logic similar to CBORFactory#hasCBORFormat
-        if (first == CBORConstants.BYTE_OBJECT_INDEFINITE){
-            return XContentType.CBOR;
-        }
-        if (CBORConstants.hasMajorType(CBORConstants.MAJOR_TYPE_TAG, first)) {
-            // Actually, specific "self-describe tag" is a very good indicator
-            int third = si.read();
-            if (third == -1) {
+        si.mark(GUESS_HEADER_LENGTH);
+        try {
+            final int firstInt = si.read(); // this must be an int since we need to respect the method contract
+            if (firstInt == -1) {
                 return null;
             }
-            if (first == (byte) 0xD9 && second == (byte) 0xD9 && third == (byte) 0xF7) {
-                return XContentType.CBOR;
-            }
-        }
-        // for small objects, some encoders just encode as major type object, we can safely
-        // say its CBOR since it doesn't contradict SMILE or JSON, and its a last resort
-        if (CBORConstants.hasMajorType(CBORConstants.MAJOR_TYPE_OBJECT, first)) {
-            return XContentType.CBOR;
-        }
-
-        for (int i = 2; i < GUESS_HEADER_LENGTH; i++) {
-            int val = si.read();
-            if (val == -1) {
-                return null;
+            final int secondInt = si.read(); // this must be an int since we need to respect the method contract
+            if (secondInt == -1) {
+                return null;
             }
-            if (val == '{') {
-                return XContentType.JSON;
+            final byte first = (byte) (0xff & firstInt);
+            final byte second = (byte) (0xff & secondInt);
+            if (first == SmileConstants.HEADER_BYTE_1 && second == SmileConstants.HEADER_BYTE_2) {
+                int third = si.read();
+                if (third == SmileConstants.HEADER_BYTE_3) {
+                    return XContentType.SMILE;
+                }
             }
+            if (first == '{' || second == '{') {
+                return XContentType.JSON;
+            }
+            if (first == '-' && second == '-') {
+                int third = si.read();
+                if (third == '-') {
+                    return XContentType.YAML;
+                }
+            }
+            // CBOR logic similar to CBORFactory#hasCBORFormat
+            if (first == CBORConstants.BYTE_OBJECT_INDEFINITE){
+                return XContentType.CBOR;
+            }
+            if (CBORConstants.hasMajorType(CBORConstants.MAJOR_TYPE_TAG, first)) {
+                // Actually, specific "self-describe tag" is a very good indicator
+                int third = si.read();
+                if (third == -1) {
+                    return null;
+                }
+                if (first == (byte) 0xD9 && second == (byte) 0xD9 && third == (byte) 0xF7) {
+                    return XContentType.CBOR;
+                }
+            }
+            // for small objects, some encoders just encode as major type object, we can safely
+            // say its CBOR since it doesn't contradict SMILE or JSON, and its a last resort
+            if (CBORConstants.hasMajorType(CBORConstants.MAJOR_TYPE_OBJECT, first)) {
+                return XContentType.CBOR;
+            }
+
+            for (int i = 2; i < GUESS_HEADER_LENGTH; i++) {
+                int val = si.read();
+                if (val == -1) {
+                    return null;
+                }
+                if (val == '{') {
+                    return XContentType.JSON;
+                }
+                if (Character.isWhitespace(val) == false) {
+                    break;
+                }
+            }
+            return null;
+        } finally {
+            si.reset();
         }
-        return null;
     }
 
     /**

@ -284,7 +299,7 @@ public class XContentFactory {
|
|||
* Guesses the content type based on the provided bytes.
|
||||
*/
|
||||
public static XContentType xContentType(BytesReference bytes) {
|
||||
int length = bytes.length() < GUESS_HEADER_LENGTH ? bytes.length() : GUESS_HEADER_LENGTH;
|
||||
int length = bytes.length();
|
||||
if (length == 0) {
|
||||
return null;
|
||||
}
|
||||
|
@ -316,9 +331,13 @@ public class XContentFactory {
|
|||
|
||||
// a last chance for JSON
|
||||
for (int i = 0; i < length; i++) {
|
||||
if (bytes.get(i) == '{') {
|
||||
byte b = bytes.get(i);
|
||||
if (b == '{') {
|
||||
return XContentType.JSON;
|
||||
}
|
||||
if (Character.isWhitespace(b) == false) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
|
|
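Editor's note: the rewritten guesser peeks through mark/reset and leaves the stream positioned where it found it, at the price of rejecting streams without mark support. A hypothetical caller:

    InputStream in = new ByteArrayInputStream("{\"field\":1}".getBytes(StandardCharsets.UTF_8));
    // ByteArrayInputStream supports mark/reset; otherwise wrap in a BufferedInputStream first
    XContentType type = XContentFactory.xContentType(in); // JSON, and 'in' is back at offset 0
    XContentParser parser = XContentFactory.xContent(type).createParser(in);
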
@@ -28,14 +28,14 @@ import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedStreamInput;
import org.elasticsearch.common.compress.Compressor;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.xcontent.ToXContent.Params;

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

@@ -49,45 +49,30 @@ import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;
public class XContentHelper {

public static XContentParser createParser(BytesReference bytes) throws IOException {
if (bytes.hasArray()) {
return createParser(bytes.array(), bytes.arrayOffset(), bytes.length());
}
Compressor compressor = CompressorFactory.compressor(bytes);
if (compressor != null) {
CompressedStreamInput compressedInput = compressor.streamInput(bytes.streamInput());
InputStream compressedInput = compressor.streamInput(bytes.streamInput());
if (compressedInput.markSupported() == false) {
compressedInput = new BufferedInputStream(compressedInput);
}
XContentType contentType = XContentFactory.xContentType(compressedInput);
compressedInput.resetToBufferStart();
return XContentFactory.xContent(contentType).createParser(compressedInput);
} else {
return XContentFactory.xContent(bytes).createParser(bytes.streamInput());
}
}

public static XContentParser createParser(byte[] data, int offset, int length) throws IOException {
Compressor compressor = CompressorFactory.compressor(data, offset, length);
if (compressor != null) {
CompressedStreamInput compressedInput = compressor.streamInput(StreamInput.wrap(data, offset, length));
XContentType contentType = XContentFactory.xContentType(compressedInput);
compressedInput.resetToBufferStart();
return XContentFactory.xContent(contentType).createParser(compressedInput);
} else {
return XContentFactory.xContent(data, offset, length).createParser(data, offset, length);
}
}

public static Tuple<XContentType, Map<String, Object>> convertToMap(BytesReference bytes, boolean ordered) throws ElasticsearchParseException {
if (bytes.hasArray()) {
return convertToMap(bytes.array(), bytes.arrayOffset(), bytes.length(), ordered);
}
try {
XContentParser parser;
XContentType contentType;
Compressor compressor = CompressorFactory.compressor(bytes);
if (compressor != null) {
CompressedStreamInput compressedStreamInput = compressor.streamInput(bytes.streamInput());
InputStream compressedStreamInput = compressor.streamInput(bytes.streamInput());
if (compressedStreamInput.markSupported() == false) {
compressedStreamInput = new BufferedInputStream(compressedStreamInput);
}
contentType = XContentFactory.xContentType(compressedStreamInput);
compressedStreamInput.resetToBufferStart();
parser = XContentFactory.xContent(contentType).createParser(compressedStreamInput);
} else {
contentType = XContentFactory.xContentType(bytes);

@@ -103,34 +88,6 @@ public class XContentHelper {
}
}

public static Tuple<XContentType, Map<String, Object>> convertToMap(byte[] data, boolean ordered) throws ElasticsearchParseException {
return convertToMap(data, 0, data.length, ordered);
}

public static Tuple<XContentType, Map<String, Object>> convertToMap(byte[] data, int offset, int length, boolean ordered) throws ElasticsearchParseException {
try {
XContentParser parser;
XContentType contentType;
Compressor compressor = CompressorFactory.compressor(data, offset, length);
if (compressor != null) {
CompressedStreamInput compressedStreamInput = compressor.streamInput(StreamInput.wrap(data, offset, length));
contentType = XContentFactory.xContentType(compressedStreamInput);
compressedStreamInput.resetToBufferStart();
parser = XContentFactory.xContent(contentType).createParser(compressedStreamInput);
} else {
contentType = XContentFactory.xContentType(data, offset, length);
parser = XContentFactory.xContent(contentType).createParser(data, offset, length);
}
if (ordered) {
return Tuple.tuple(contentType, parser.mapOrderedAndClose());
} else {
return Tuple.tuple(contentType, parser.mapAndClose());
}
} catch (IOException e) {
throw new ElasticsearchParseException("Failed to parse content to map", e);
}
}

public static String convertToJson(BytesReference bytes, boolean reformatJson) throws IOException {
return convertToJson(bytes, reformatJson, false);
}

@@ -426,9 +383,11 @@ public class XContentHelper {
public static void writeDirect(BytesReference source, XContentBuilder rawBuilder, ToXContent.Params params) throws IOException {
Compressor compressor = CompressorFactory.compressor(source);
if (compressor != null) {
CompressedStreamInput compressedStreamInput = compressor.streamInput(source.streamInput());
InputStream compressedStreamInput = compressor.streamInput(source.streamInput());
if (compressedStreamInput.markSupported() == false) {
compressedStreamInput = new BufferedInputStream(compressedStreamInput);
}
XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
compressedStreamInput.resetToBufferStart();
if (contentType == rawBuilder.contentType()) {
Streams.copy(compressedStreamInput, rawBuilder.stream());
} else {

@@ -457,9 +416,11 @@ public class XContentHelper {
public static void writeRawField(String field, BytesReference source, XContentBuilder builder, ToXContent.Params params) throws IOException {
Compressor compressor = CompressorFactory.compressor(source);
if (compressor != null) {
CompressedStreamInput compressedStreamInput = compressor.streamInput(source.streamInput());
InputStream compressedStreamInput = compressor.streamInput(source.streamInput());
if (compressedStreamInput.markSupported() == false) {
compressedStreamInput = new BufferedInputStream(compressedStreamInput);
}
XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
compressedStreamInput.resetToBufferStart();
if (contentType == builder.contentType()) {
builder.rawField(field, compressedStreamInput);
} else {

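Editor's note: callers no longer see CompressedStreamInput or resetToBufferStart(); XContentHelper buffers when needed and sniffs the content type through the same stream. A sketch (uncompressed input shown; compressed bytes take the same path transparently):

    BytesReference source = new BytesArray("{\"user\":\"kimchy\"}");
    Tuple<XContentType, Map<String, Object>> t = XContentHelper.convertToMap(source, false);
    assert t.v1() == XContentType.JSON;
    Map<String, Object> map = t.v2(); // {"user" -> "kimchy"}
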
@@ -227,21 +227,21 @@ public class PublishClusterStateAction extends AbstractComponent {

public static BytesReference serializeFullClusterState(ClusterState clusterState, Version nodeVersion) throws IOException {
BytesStreamOutput bStream = new BytesStreamOutput();
StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream);
stream.setVersion(nodeVersion);
stream.writeBoolean(true);
clusterState.writeTo(stream);
stream.close();
try (StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream)) {
stream.setVersion(nodeVersion);
stream.writeBoolean(true);
clusterState.writeTo(stream);
}
return bStream.bytes();
}

public static BytesReference serializeDiffClusterState(Diff diff, Version nodeVersion) throws IOException {
BytesStreamOutput bStream = new BytesStreamOutput();
StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream);
stream.setVersion(nodeVersion);
stream.writeBoolean(false);
diff.writeTo(stream);
stream.close();
try (StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream)) {
stream.setVersion(nodeVersion);
stream.writeBoolean(false);
diff.writeTo(stream);
}
return bStream.bytes();
}

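Editor's note: try-with-resources guarantees the compressed stream is closed, and therefore fully flushed, before bStream.bytes() is captured. A round-trip sketch (`clusterState` is assumed, and the reader side is inferred to mirror the writer):

    BytesReference bytes = PublishClusterStateAction.serializeFullClusterState(clusterState, Version.CURRENT);
    Compressor compressor = CompressorFactory.compressor(bytes);   // the default compressor was used
    StreamInput in = compressor.streamInput(bytes.streamInput());
    in.setVersion(Version.CURRENT);
    boolean isFullState = in.readBoolean();                        // true: full state, false: diff
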
@@ -21,16 +21,26 @@ package org.elasticsearch.gateway;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.Collections2;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.store.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.OutputStreamIndexOutput;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;
import java.io.OutputStream;

@@ -280,7 +290,7 @@ public abstract class MetaDataStateFormat<T> {
logger.debug("{}: no data for [{}], ignoring...", prefix, stateFile.toAbsolutePath());
continue;
}
parser = XContentHelper.createParser(data, 0, data.length);
parser = XContentHelper.createParser(new BytesArray(data));
state = fromXContent(parser);
if (state == null) {
logger.debug("{}: no data for [{}], ignoring...", prefix, stateFile.toAbsolutePath());

@@ -21,7 +21,7 @@ package org.elasticsearch.index.aliases;

import org.apache.lucene.search.Query;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;

/**
*

@@ -30,11 +30,11 @@ public class IndexAlias {

private final String alias;

private final CompressedString filter;
private final CompressedXContent filter;

private final Query parsedFilter;

public IndexAlias(String alias, @Nullable CompressedString filter, @Nullable Query parsedFilter) {
public IndexAlias(String alias, @Nullable CompressedXContent filter, @Nullable Query parsedFilter) {
this.alias = alias;
this.filter = filter;
this.parsedFilter = parsedFilter;

@@ -45,7 +45,7 @@ public class IndexAlias {
}

@Nullable
public CompressedString filter() {
public CompressedXContent filter() {
return filter;
}

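Editor's note: the CompressedString-to-CompressedXContent rename makes explicit that alias filters are compressed structured content, not arbitrary text. A hypothetical construction, using the String constructor exercised by the tests below:

    CompressedXContent filter = new CompressedXContent("{\"term\":{\"user\":\"kimchy\"}}");
    IndexAlias alias = new IndexAlias("my-alias", filter, parsedFilter); // parsedFilter assumed pre-built
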
@@ -23,7 +23,7 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;

@@ -63,11 +63,11 @@ public class IndexAliasesService extends AbstractIndexComponent implements Itera
return aliases.get(alias);
}

public IndexAlias create(String alias, @Nullable CompressedString filter) {
public IndexAlias create(String alias, @Nullable CompressedXContent filter) {
return new IndexAlias(alias, filter, parse(alias, filter));
}

public void add(String alias, @Nullable CompressedString filter) {
public void add(String alias, @Nullable CompressedXContent filter) {
add(new IndexAlias(alias, filter, parse(alias, filter)));
}

@@ -120,7 +120,7 @@ public class IndexAliasesService extends AbstractIndexComponent implements Itera
aliases.remove(alias);
}

private Query parse(String alias, CompressedString filter) {
private Query parse(String alias, CompressedXContent filter) {
if (filter == null) {
return null;
}

@@ -35,7 +35,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.settings.Settings;

@@ -150,7 +150,7 @@ public class DocumentMapper implements ToXContent {
private final String type;
private final StringAndBytesText typeText;

private volatile CompressedString mappingSource;
private volatile CompressedXContent mappingSource;

private final Mapping mapping;

@@ -235,7 +235,7 @@ public class DocumentMapper implements ToXContent {
return mapping.meta;
}

public CompressedString mappingSource() {
public CompressedXContent mappingSource() {
return this.mappingSource;
}

@@ -468,12 +468,12 @@ public class DocumentMapper implements ToXContent {
private void refreshSource() throws ElasticsearchGenerationException {
try {
BytesStreamOutput bStream = new BytesStreamOutput();
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, CompressorFactory.defaultCompressor().streamOutput(bStream));
builder.startObject();
toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
builder.close();
mappingSource = new CompressedString(bStream.bytes());
try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, CompressorFactory.defaultCompressor().streamOutput(bStream))) {
builder.startObject();
toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
}
mappingSource = new CompressedXContent(bStream.bytes());
} catch (Exception e) {
throw new ElasticsearchGenerationException("failed to serialize source for type [" + type + "]", e);
}

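Editor's note: the refreshSource pattern, condensed — the builder (and the compressed stream beneath it) must be closed before the bytes are captured, which try-with-resources now enforces:

    BytesStreamOutput bStream = new BytesStreamOutput();
    try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON,
            CompressorFactory.defaultCompressor().streamOutput(bStream))) {
        builder.startObject();
        builder.field("example", "value"); // stand-in for the mapper's toXContent(...)
        builder.endObject();
    }
    // only now are all compressed bytes flushed into bStream
    CompressedXContent mappingSource = new CompressedXContent(bStream.bytes());
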
@@ -27,7 +27,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

@@ -194,15 +194,15 @@ public class DocumentMapperParser extends AbstractIndexComponent {
return parse(type, mapping, defaultSource);
}

public DocumentMapper parseCompressed(@Nullable String type, CompressedString source) throws MapperParsingException {
public DocumentMapper parseCompressed(@Nullable String type, CompressedXContent source) throws MapperParsingException {
return parseCompressed(type, source, null);
}

@SuppressWarnings({"unchecked"})
public DocumentMapper parseCompressed(@Nullable String type, CompressedString source, String defaultSource) throws MapperParsingException {
public DocumentMapper parseCompressed(@Nullable String type, CompressedXContent source, String defaultSource) throws MapperParsingException {
Map<String, Object> mapping = null;
if (source != null) {
Map<String, Object> root = XContentHelper.convertToMap(source.compressed(), true).v2();
Map<String, Object> root = XContentHelper.convertToMap(source.compressedReference(), true).v2();
Tuple<String, Map<String, Object>> t = extractMapping(type, root);
type = t.v1();
mapping = t.v2();

@@ -43,7 +43,7 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.regex.Regex;

@@ -214,7 +214,7 @@ public class MapperService extends AbstractIndexComponent {
typeListeners.remove(listener);
}

public DocumentMapper merge(String type, CompressedString mappingSource, boolean applyDefault) {
public DocumentMapper merge(String type, CompressedXContent mappingSource, boolean applyDefault) {
if (DEFAULT_MAPPING.equals(type)) {
// verify we can parse it
DocumentMapper mapper = documentParser.parseCompressed(type, mappingSource);

@@ -293,7 +293,7 @@ public class MapperService extends AbstractIndexComponent {

private boolean assertSerialization(DocumentMapper mapper) {
// capture the source now, it may change due to concurrent parsing
final CompressedString mappingSource = mapper.mappingSource();
final CompressedXContent mappingSource = mapper.mappingSource();
DocumentMapper newMapper = parse(mapper.type(), mappingSource, false);

if (newMapper.mappingSource().equals(mappingSource) == false) {

@@ -328,7 +328,7 @@ public class MapperService extends AbstractIndexComponent {
this.fieldMappers = this.fieldMappers.copyAndAddAll(fieldMappers);
}

public DocumentMapper parse(String mappingType, CompressedString mappingSource, boolean applyDefault) throws MapperParsingException {
public DocumentMapper parse(String mappingType, CompressedXContent mappingSource, boolean applyDefault) throws MapperParsingException {
String defaultMappingSource;
if (PercolatorService.TYPE_NAME.equals(mappingType)) {
defaultMappingSource = this.defaultPercolatorMappingSource;

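Editor's note: merge() and parse() now accept the mapping as CompressedXContent. A hypothetical call site (the surrounding `mapperService` and the exact mapping JSON are assumed):

    CompressedXContent mappingSource = new CompressedXContent(
            "{\"type1\":{\"properties\":{\"title\":{\"type\":\"string\"}}}}");
    DocumentMapper mapper = mapperService.merge("type1", mappingSource, true); // applyDefault = true
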
@@ -36,6 +36,7 @@ import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.compress.NotXContentException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.XContentParser;

@@ -144,10 +145,18 @@ public class BinaryFieldMapper extends AbstractFieldMapper {
}
try {
if (indexCreatedBefore2x) {
return CompressorFactory.uncompressIfNeeded(bytes);
} else {
return bytes;
try {
return CompressorFactory.uncompressIfNeeded(bytes);
} catch (NotXContentException e) {
// NOTE: previous versions of Elasticsearch used to try to detect if
// data was compressed. However this could cause decompression failures
// as a user may have submitted arbitrary data which looks like it is
// compressed to elasticsearch but is not. So we removed the ability to
// compress binary fields and keep this empty catch block for backward
// compatibility with 1.x
}
}
return bytes;
} catch (IOException e) {
throw new ElasticsearchParseException("failed to decompress source", e);
}

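Editor's note: for pre-2.x indices the decompress-or-keep decision is now expressed as normal control flow rather than a best-effort guess. The isolated shape of the pattern (`stored` is an assumed stand-in for the raw field value):

    BytesReference stored = new BytesArray(new byte[] {0x1, 0x2, 0x3}); // raw value from a 1.x index
    BytesReference value;
    try {
        value = CompressorFactory.uncompressIfNeeded(stored); // 1.x may have compressed it
    } catch (NotXContentException e) {
        value = stored; // user data that merely looked compressed; keep it verbatim
    }
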
@@ -20,6 +20,7 @@
package org.elasticsearch.index.mapper.internal;

import com.google.common.base.Objects;

import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.StoredField;

@@ -31,7 +32,6 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedStreamInput;
import org.elasticsearch.common.compress.Compressor;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.io.stream.BytesStreamOutput;

@@ -53,7 +53,9 @@ import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.RootMapper;
import org.elasticsearch.index.mapper.core.AbstractFieldMapper;

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

@@ -324,9 +326,11 @@ public class SourceFieldMapper extends AbstractFieldMapper implements RootMapper
// see if we need to convert the content type
Compressor compressor = CompressorFactory.compressor(source);
if (compressor != null) {
CompressedStreamInput compressedStreamInput = compressor.streamInput(source.streamInput());
InputStream compressedStreamInput = compressor.streamInput(source.streamInput());
if (compressedStreamInput.markSupported() == false) {
compressedStreamInput = new BufferedInputStream(compressedStreamInput);
}
XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
compressedStreamInput.resetToBufferStart();
if (contentType != formatContentType) {
// we need to reread and store back, compressed....
BytesStreamOutput bStream = new BytesStreamOutput();

@@ -41,7 +41,7 @@ import org.elasticsearch.cluster.routing.*;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

@@ -369,7 +369,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.mappings().values()) {
MappingMetaData mappingMd = cursor.value;
String mappingType = mappingMd.type();
CompressedString mappingSource = mappingMd.source();
CompressedXContent mappingSource = mappingMd.source();
if (mappingType.equals(MapperService.DEFAULT_MAPPING)) { // we processed _default_ first
continue;
}

@@ -396,7 +396,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
}
}

private boolean processMapping(String index, MapperService mapperService, String mappingType, CompressedString mappingSource) throws Throwable {
private boolean processMapping(String index, MapperService mapperService, String mappingType, CompressedXContent mappingSource) throws Throwable {
if (!seenMappings.containsKey(new Tuple<>(index, mappingType))) {
seenMappings.put(new Tuple<>(index, mappingType), true);
}

@@ -484,7 +484,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
for (ObjectCursor<AliasMetaData> cursor : aliases) {
AliasMetaData aliasMd = cursor.value;
String alias = aliasMd.alias();
CompressedString filter = aliasMd.filter();
CompressedXContent filter = aliasMd.filter();
try {
if (!indexAliasesService.hasAlias(alias)) {
if (logger.isDebugEnabled()) {

@@ -36,7 +36,6 @@ import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.lease.Releasable;

@@ -151,7 +150,6 @@ public class Node implements Releasable {
// create the environment based on the finalized (processed) view of the settings
this.environment = new Environment(this.settings());

CompressorFactory.configure(settings);
final NodeEnvironment nodeEnvironment;
try {
nodeEnvironment = new NodeEnvironment(this.settings, this.environment);

@@ -24,27 +24,35 @@ import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.io.ByteStreams;

import org.apache.lucene.store.RateLimiter;
import org.elasticsearch.ElasticsearchException;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.common.blobstore.BlobContainer;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.blobstore.BlobContainer;
import org.elasticsearch.common.blobstore.BlobMetaData;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.compress.NotXContentException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.shard.IndexShardException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.snapshots.IndexShardRepository;

@@ -54,14 +62,21 @@ import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.repositories.RepositorySettings;
import org.elasticsearch.repositories.RepositoryVerificationException;
import org.elasticsearch.snapshots.*;
import org.elasticsearch.snapshots.InvalidSnapshotNameException;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotCreationException;
import org.elasticsearch.snapshots.SnapshotException;
import org.elasticsearch.snapshots.SnapshotMissingException;
import org.elasticsearch.snapshots.SnapshotShardFailure;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.NoSuchFileException;
import java.util.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import static com.google.common.collect.Lists.newArrayList;

@@ -229,19 +244,15 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
}
// Write Global MetaData
// TODO: Check if metadata needs to be written
try (OutputStream output = snapshotsBlobContainer.createOutput(metaDataBlobName(snapshotId))) {
try (StreamOutput output = compressIfNeeded(snapshotsBlobContainer.createOutput(metaDataBlobName(snapshotId)))) {
writeGlobalMetaData(metaData, output);
}
for (String index : indices) {
final IndexMetaData indexMetaData = metaData.index(index);
final BlobPath indexPath = basePath().add("indices").add(index);
final BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
try (OutputStream output = indexMetaDataBlobContainer.createOutput(snapshotBlobName(snapshotId))) {
StreamOutput stream = new OutputStreamStreamOutput(output);
if (isCompress()) {
stream = CompressorFactory.defaultCompressor().streamOutput(stream);
}
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
try (StreamOutput output = compressIfNeeded(indexMetaDataBlobContainer.createOutput(snapshotBlobName(snapshotId)))) {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, output);
builder.startObject();
IndexMetaData.Builder.toXContent(indexMetaData, builder, ToXContent.EMPTY_PARAMS);
builder.endObject();

@@ -317,6 +328,22 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
}
}

private StreamOutput compressIfNeeded(OutputStream output) throws IOException {
StreamOutput out = null;
boolean success = false;
try {
out = new OutputStreamStreamOutput(output);
if (isCompress()) {
out = CompressorFactory.defaultCompressor().streamOutput(out);
}
success = true;
return out;
} finally {
if (!success) {
IOUtils.closeWhileHandlingException(out, output);
}
}
}

/**
* {@inheritDoc}

@@ -327,7 +354,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
String tempBlobName = tempSnapshotBlobName(snapshotId);
String blobName = snapshotBlobName(snapshotId);
Snapshot blobStoreSnapshot = new Snapshot(snapshotId.getSnapshot(), indices, startTime, failure, System.currentTimeMillis(), totalShards, shardFailures);
try (OutputStream output = snapshotsBlobContainer.createOutput(tempBlobName)) {
try (StreamOutput output = compressIfNeeded(snapshotsBlobContainer.createOutput(tempBlobName))) {
writeSnapshot(blobStoreSnapshot, output);
}
snapshotsBlobContainer.move(tempBlobName, blobName);

@@ -386,7 +413,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
}
} catch (FileNotFoundException | NoSuchFileException ex) {
throw new SnapshotMissingException(snapshotId, ex);
} catch (IOException ex) {
} catch (IOException | NotXContentException ex) {
throw new SnapshotException(snapshotId, "failed to get snapshots", ex);
}
}

@@ -407,7 +434,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
try (InputStream blob = indexMetaDataBlobContainer.openInput(snapshotBlobName(snapshotId))) {
byte[] data = ByteStreams.toByteArray(blob);
try (XContentParser parser = XContentHelper.createParser(data, 0, data.length)) {
try (XContentParser parser = XContentHelper.createParser(new BytesArray(data))) {
XContentParser.Token token;
if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
IndexMetaData indexMetaData = IndexMetaData.Builder.fromXContent(parser);

@@ -459,7 +486,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
* @throws IOException parse exceptions
*/
public Snapshot readSnapshot(byte[] data) throws IOException {
try (XContentParser parser = XContentHelper.createParser(data, 0, data.length)) {
try (XContentParser parser = XContentHelper.createParser(new BytesArray(data))) {
XContentParser.Token token;
if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
if ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME) {

@@ -484,7 +511,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
* @throws IOException parse exceptions
*/
private MetaData readMetaData(byte[] data) throws IOException {
try (XContentParser parser = XContentHelper.createParser(data, 0, data.length)) {
try (XContentParser parser = XContentHelper.createParser(new BytesArray(data))) {
XContentParser.Token token;
if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
if ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME) {

@@ -536,12 +563,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
* @return BytesStreamOutput representing JSON serialized Snapshot
* @throws IOException
*/
private void writeSnapshot(Snapshot snapshot, OutputStream outputStream) throws IOException {
StreamOutput stream = new OutputStreamStreamOutput(outputStream);
if (isCompress()) {
stream = CompressorFactory.defaultCompressor().streamOutput(stream);
}
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
private void writeSnapshot(Snapshot snapshot, StreamOutput outputStream) throws IOException {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, outputStream);
builder.startObject();
snapshot.toXContent(builder, snapshotOnlyFormatParams);
builder.endObject();

@@ -555,12 +578,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
* @return BytesStreamOutput representing JSON serialized global MetaData
* @throws IOException
*/
private void writeGlobalMetaData(MetaData metaData, OutputStream outputStream) throws IOException {
StreamOutput stream = new OutputStreamStreamOutput(outputStream);
if (isCompress()) {
stream = CompressorFactory.defaultCompressor().streamOutput(stream);
}
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
private void writeGlobalMetaData(MetaData metaData, StreamOutput outputStream) throws IOException {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, outputStream);
builder.startObject();
MetaData.Builder.toXContent(metaData, builder, snapshotOnlyFormatParams);
builder.endObject();

@@ -608,7 +627,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
try (InputStream blob = snapshotsBlobContainer.openInput(SNAPSHOTS_FILE)) {
final byte[] data = ByteStreams.toByteArray(blob);
ArrayList<SnapshotId> snapshots = new ArrayList<>();
try (XContentParser parser = XContentHelper.createParser(data, 0, data.length)) {
try (XContentParser parser = XContentHelper.createParser(new BytesArray(data))) {
if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
if (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
String currentFieldName = parser.currentName();

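Editor's note: compressIfNeeded centralizes the wrap-or-not decision and, if wrapping fails part-way, closes both streams. Writer call sites shrink to a pattern like this (blob name assumed):

    try (StreamOutput out = compressIfNeeded(snapshotsBlobContainer.createOutput("snapshot-blob"))) {
        XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, out);
        builder.startObject();
        builder.field("example", "value"); // stand-in for the real toXContent body
        builder.endObject();
        builder.close(); // flush the builder into the (possibly compressed) stream
    }
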
@@ -91,14 +91,6 @@ public class SourceLookup implements Map {
return sourceAsMapAndType(source).v2();
}

public static Tuple<XContentType, Map<String, Object>> sourceAsMapAndType(byte[] bytes, int offset, int length) throws ElasticsearchParseException {
return XContentHelper.convertToMap(bytes, offset, length, false);
}

public static Map<String, Object> sourceAsMap(byte[] bytes, int offset, int length) throws ElasticsearchParseException {
return sourceAsMapAndType(bytes, offset, length).v2();
}

public void setSegmentAndDocument(LeafReaderContext context, int docId) {
if (this.reader == context.reader() && this.docId == docId) {
// if we are called with the same document, don't invalidate source

@@ -23,6 +23,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.compress.Compressor;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.compress.NotCompressedException;
import org.elasticsearch.common.io.ThrowableObjectInputStream;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.logging.ESLogger;

@@ -91,8 +92,10 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {

StreamInput wrappedStream;
if (TransportStatus.isCompress(status) && hasMessageBytesToRead && buffer.readable()) {
Compressor compressor = CompressorFactory.compressor(buffer);
if (compressor == null) {
Compressor compressor;
try {
compressor = CompressorFactory.compressor(buffer);
} catch (NotCompressedException ex) {
int maxToRead = Math.min(buffer.readableBytes(), 10);
int offset = buffer.readerIndex();
StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead).append("] content bytes out of [").append(buffer.readableBytes()).append("] readable bytes with message size [").append(size).append("] ").append("] are [");

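Editor's note: CompressorFactory.compressor(...) now throws NotCompressedException for an unrecognized header instead of returning null, so the "marked compressed but is not" case is handled explicitly rather than by a null check. The shape of the new call site, in isolation:

    Compressor compressor;
    try {
        compressor = CompressorFactory.compressor(buffer);
    } catch (NotCompressedException ex) {
        // build a diagnostic that includes the first few raw bytes, then fail the channel
        throw new IllegalStateException("stream marked as compressed, but no compressor found", ex);
    }
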
@@ -19,7 +19,7 @@

package org.elasticsearch.cluster.metadata;

import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;

@@ -34,7 +34,7 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {

@Test
public void testParseIdAlone() throws Exception {
MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("id"),
new MappingMetaData.Routing(true, "routing"),
new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -52,7 +52,7 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {

@Test
public void testFailIfIdIsNoValue() throws Exception {
MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("id"),
new MappingMetaData.Routing(true, "routing"),
new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -79,7 +79,7 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {

@Test
public void testParseRoutingAlone() throws Exception {
MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("id"),
new MappingMetaData.Routing(true, "routing"),
new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -97,7 +97,7 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {

@Test
public void testParseTimestampAlone() throws Exception {
MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("id"),
new MappingMetaData.Routing(true, "routing"),
new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -115,11 +115,11 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {

@Test
public void testParseTimestampEquals() throws Exception {
MappingMetaData md1 = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md1 = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("id"),
new MappingMetaData.Routing(true, "routing"),
new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
MappingMetaData md2 = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md2 = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("id"),
new MappingMetaData.Routing(true, "routing"),
new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -128,7 +128,7 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {

@Test
public void testParseIdAndRoutingAndTimestamp() throws Exception {
MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("id"),
new MappingMetaData.Routing(true, "routing"),
new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -143,7 +143,7 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {

@Test
public void testParseIdAndRoutingAndTimestampWithPath() throws Exception {
MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("obj1.id"),
new MappingMetaData.Routing(true, "obj1.routing"),
new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -161,7 +161,7 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {

@Test
public void testParseIdWithPath() throws Exception {
MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("obj1.id"),
new MappingMetaData.Routing(true, "obj1.routing"),
new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -182,7 +182,7 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {

@Test
public void testParseRoutingWithPath() throws Exception {
MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("obj1.id"),
new MappingMetaData.Routing(true, "obj1.routing"),
new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -203,7 +203,7 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {

@Test
public void testParseTimestampWithPath() throws Exception {
MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("obj1.id"),
new MappingMetaData.Routing(true, "obj1.routing"),
new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -224,7 +224,7 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {

@Test
public void testParseIdAndRoutingAndTimestampWithinSamePath() throws Exception {
MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("obj1.id"),
new MappingMetaData.Routing(true, "obj1.routing"),
new MappingMetaData.Timestamp(true, "obj1.timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -242,7 +242,7 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {

@Test
public void testParseIdAndRoutingAndTimestampWithinSamePathAndMoreLevels() throws Exception {
MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("obj1.obj0.id"),
new MappingMetaData.Routing(true, "obj1.obj2.routing"),
new MappingMetaData.Timestamp(true, "obj1.obj3.timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -271,7 +271,7 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {

@Test
public void testParseIdAndRoutingAndTimestampWithSameRepeatedObject() throws Exception {
MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("obj1.id"),
new MappingMetaData.Routing(true, "obj1.routing"),
new MappingMetaData.Timestamp(true, "obj1.timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -291,7 +291,7 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {
//
@Test
public void testParseIdRoutingTimestampWithRepeatedField() throws Exception {
MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("field1"),
new MappingMetaData.Routing(true, "field1.field1"),
new MappingMetaData.Timestamp(true, "field1", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -314,7 +314,7 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {

@Test
public void testParseNoIdRoutingWithRepeatedFieldAndObject() throws Exception {
MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id("id"),
new MappingMetaData.Routing(true, "field1.field1.field2"),
new MappingMetaData.Timestamp(true, "field1", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -337,7 +337,7 @@ public class MappingMetaDataParserTests extends ElasticsearchTestCase {

@Test
public void testParseRoutingWithRepeatedFieldAndValidRouting() throws Exception {
MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
new MappingMetaData.Id(null),
new MappingMetaData.Routing(true, "field1.field2"),
new MappingMetaData.Timestamp(true, "field1", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);

@@ -38,12 +38,12 @@ import java.util.concurrent.CountDownLatch;
/**
* Test streaming compression (e.g. used for recovery)
*/
public class CompressedStreamTests extends ElasticsearchTestCase {
@Override
public void setUp() throws Exception {
super.setUp();
CompressorFactory.configure(Settings.settingsBuilder().put("compress.default.type", "lzf").build());
public abstract class AbstractCompressedStreamTests extends ElasticsearchTestCase {

private final Compressor compressor;

protected AbstractCompressedStreamTests(Compressor compressor) {
this.compressor = compressor;
}

public void testRandom() throws IOException {

@@ -54,7 +54,7 @@ public class CompressedStreamTests extends ElasticsearchTestCase {
doTest(bytes);
}
}

public void testRandomThreads() throws Exception {
final Random r = getRandom();
int threadCount = TestUtil.nextInt(r, 2, 10);

@@ -85,7 +85,7 @@ public class CompressedStreamTests extends ElasticsearchTestCase {
t.join();
}
}

public void testLineDocs() throws IOException {
Random r = getRandom();
LineFileDocs lineFileDocs = new LineFileDocs(r);

@@ -100,7 +100,7 @@ public class CompressedStreamTests extends ElasticsearchTestCase {
}
lineFileDocs.close();
}

public void testLineDocsThreads() throws Exception {
final Random r = getRandom();
int threadCount = TestUtil.nextInt(r, 2, 10);

@@ -137,7 +137,7 @@ public class CompressedStreamTests extends ElasticsearchTestCase {
t.join();
}
}

public void testRepetitionsL() throws IOException {
Random r = getRandom();
for (int i = 0; i < 10; i++) {

@@ -160,7 +160,7 @@ public class CompressedStreamTests extends ElasticsearchTestCase {
doTest(bos.toByteArray());
}
}

public void testRepetitionsLThreads() throws Exception {
final Random r = getRandom();
int threadCount = TestUtil.nextInt(r, 2, 10);

@@ -205,7 +205,7 @@ public class CompressedStreamTests extends ElasticsearchTestCase {
t.join();
}
}

public void testRepetitionsI() throws IOException {
Random r = getRandom();
for (int i = 0; i < 10; i++) {

@@ -224,7 +224,7 @@ public class CompressedStreamTests extends ElasticsearchTestCase {
doTest(bos.toByteArray());
}
}

public void testRepetitionsIThreads() throws Exception {
final Random r = getRandom();
int threadCount = TestUtil.nextInt(r, 2, 10);

@@ -265,7 +265,7 @@ public class CompressedStreamTests extends ElasticsearchTestCase {
t.join();
}
}

public void testRepetitionsS() throws IOException {
Random r = getRandom();
for (int i = 0; i < 10; i++) {

@@ -348,7 +348,7 @@ public class CompressedStreamTests extends ElasticsearchTestCase {
r.nextBytes(bytes);
bos.write(bytes);
}

public void testRepetitionsSThreads() throws Exception {
final Random r = getRandom();
int threadCount = TestUtil.nextInt(r, 2, 10);

@@ -387,16 +387,16 @@ public class CompressedStreamTests extends ElasticsearchTestCase {
t.join();
}
}

private void doTest(byte bytes[]) throws IOException {
ByteBuffer bb = ByteBuffer.wrap(bytes);
StreamInput rawIn = new ByteBufferStreamInput(bb);
Compressor c = CompressorFactory.defaultCompressor();

Compressor c = compressor;

ByteArrayOutputStream bos = new ByteArrayOutputStream();
OutputStreamStreamOutput rawOs = new OutputStreamStreamOutput(bos);
StreamOutput os = c.streamOutput(rawOs);

Random r = getRandom();
int bufferSize = r.nextBoolean() ? 65535 : TestUtil.nextInt(getRandom(), 1, 70000);
int prepadding = r.nextInt(70000);

@@ -409,27 +409,27 @@ public class CompressedStreamTests extends ElasticsearchTestCase {
}
os.close();
rawIn.close();

// now we have compressed byte array

byte compressed[] = bos.toByteArray();
ByteBuffer bb2 = ByteBuffer.wrap(compressed);
StreamInput compressedIn = new ByteBufferStreamInput(bb2);
StreamInput in = c.streamInput(compressedIn);

// randomize constants again
bufferSize = r.nextBoolean() ? 65535 : TestUtil.nextInt(getRandom(), 1, 70000);
prepadding = r.nextInt(70000);
postpadding = r.nextInt(70000);
buffer = new byte[prepadding + bufferSize + postpadding];
r.nextBytes(buffer); // fill block completely with junk

ByteArrayOutputStream uncompressedOut = new ByteArrayOutputStream();
while ((len = in.read(buffer, prepadding, bufferSize)) != -1) {
uncompressedOut.write(buffer, prepadding, len);
}
uncompressedOut.close();

assertArrayEquals(bytes, uncompressedOut.toByteArray());
}
}

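Note: the doTest helper above exercises the compressor through randomized buffer sizes and paddings. Stripped of that randomization, the round trip it verifies looks roughly like the sketch below, which reuses the stream classes the test itself imports (import paths and the class/method names here are assumptions for illustration):

    import org.elasticsearch.common.compress.Compressor;
    import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
    import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.util.Arrays;

    public class RoundTripSketch {
        // Compresses bytes through the given Compressor and decompresses the result.
        static byte[] roundTrip(Compressor c, byte[] bytes) throws IOException {
            // Compress: wrap a raw StreamOutput in the compressor's framing.
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            StreamOutput os = c.streamOutput(new OutputStreamStreamOutput(bos));
            os.writeBytes(bytes, 0, bytes.length);
            os.close();

            // Decompress: wrap the compressed bytes back into a StreamInput.
            StreamInput in = c.streamInput(new ByteBufferStreamInput(ByteBuffer.wrap(bos.toByteArray())));
            ByteArrayOutputStream uncompressed = new ByteArrayOutputStream();
            byte[] buffer = new byte[4096];
            int len;
            while ((len = in.read(buffer, 0, buffer.length)) != -1) {
                uncompressed.write(buffer, 0, len);
            }
            in.close();

            byte[] result = uncompressed.toByteArray();
            assert Arrays.equals(bytes, result) : "round trip must be lossless";
            return result;
        }
    }
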
@@ -23,10 +23,8 @@ import org.apache.lucene.util.TestUtil;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ElasticsearchTestCase;
import org.junit.Assert;
import org.junit.Test;

import java.io.IOException;
import java.util.Random;

@@ -37,49 +35,58 @@ import static org.hamcrest.Matchers.not;
/**
*
*/
public class CompressedStringTests extends ElasticsearchTestCase {
public abstract class AbstractCompressedXContentTests extends ElasticsearchTestCase {

@Test
public void simpleTestsLZF() throws IOException {
simpleTests("lzf");
private final Compressor compressor;

protected AbstractCompressedXContentTests(Compressor compressor) {
this.compressor = compressor;
}

private void assertEquals(CompressedString s1, CompressedString s2) {
private void assertEquals(CompressedXContent s1, CompressedXContent s2) {
Assert.assertEquals(s1, s2);
assertArrayEquals(s1.uncompressed(), s2.uncompressed());
assertEquals(s1.hashCode(), s2.hashCode());
}

public void simpleTests(String compressor) throws IOException {
CompressorFactory.configure(Settings.settingsBuilder().put("compress.default.type", compressor).build());
String str = "this is a simple string";
CompressedString cstr = new CompressedString(str);
assertThat(cstr.string(), equalTo(str));
assertThat(new CompressedString(str), equalTo(cstr));
public void simpleTests() throws IOException {
Compressor defaultCompressor = CompressorFactory.defaultCompressor();
try {
CompressorFactory.setDefaultCompressor(compressor);
String str = "---\nf:this is a simple string";
CompressedXContent cstr = new CompressedXContent(str);
assertThat(cstr.string(), equalTo(str));
assertThat(new CompressedXContent(str), equalTo(cstr));

String str2 = "this is a simple string 2";
CompressedString cstr2 = new CompressedString(str2);
assertThat(cstr2.string(), not(equalTo(str)));
assertThat(new CompressedString(str2), not(equalTo(cstr)));
assertEquals(new CompressedString(str2), cstr2);
String str2 = "---\nf:this is a simple string 2";
CompressedXContent cstr2 = new CompressedXContent(str2);
assertThat(cstr2.string(), not(equalTo(str)));
assertThat(new CompressedXContent(str2), not(equalTo(cstr)));
assertEquals(new CompressedXContent(str2), cstr2);
} finally {
CompressorFactory.setDefaultCompressor(defaultCompressor);
}
}

public void testRandom() throws IOException {
String compressor = "lzf";
CompressorFactory.configure(Settings.settingsBuilder().put("compress.default.type", compressor).build());
Random r = getRandom();
for (int i = 0; i < 1000; i++) {
String string = TestUtil.randomUnicodeString(r, 10000);
CompressedString compressedString = new CompressedString(string);
assertThat(compressedString.string(), equalTo(string));
Compressor defaultCompressor = CompressorFactory.defaultCompressor();
try {
CompressorFactory.setDefaultCompressor(compressor);
Random r = getRandom();
for (int i = 0; i < 1000; i++) {
String string = TestUtil.randomUnicodeString(r, 10000);
// hack to make it detected as YAML
string = "---\n" + string;
CompressedXContent compressedXContent = new CompressedXContent(string);
assertThat(compressedXContent.string(), equalTo(string));
}
} finally {
CompressorFactory.setDefaultCompressor(defaultCompressor);
}
}

public void testDifferentCompressedRepresentation() throws Exception {
byte[] b = "abcdefghijabcdefghij".getBytes("UTF-8");
CompressorFactory.defaultCompressor();

Compressor compressor = CompressorFactory.defaultCompressor();
byte[] b = "---\nf:abcdefghijabcdefghij".getBytes("UTF-8");
BytesStreamOutput bout = new BytesStreamOutput();
StreamOutput out = compressor.streamOutput(bout);
out.writeBytes(b);

@@ -100,14 +107,14 @@ public class CompressedStringTests extends ElasticsearchTestCase {
// of different size are being used
assertFalse(b1.equals(b2));
// we used the compressed representation directly and did not recompress
assertArrayEquals(b1.toBytes(), new CompressedString(b1).compressed());
assertArrayEquals(b2.toBytes(), new CompressedString(b2).compressed());
assertArrayEquals(b1.toBytes(), new CompressedXContent(b1).compressed());
assertArrayEquals(b2.toBytes(), new CompressedXContent(b2).compressed());
// but compressedstring instances are still equal
assertEquals(new CompressedString(b1), new CompressedString(b2));
assertEquals(new CompressedXContent(b1), new CompressedXContent(b2));
}

public void testHashCode() throws IOException {
assertFalse(new CompressedString("a").hashCode() == new CompressedString("b").hashCode());
assertFalse(new CompressedXContent("{\"a\":\"b\"}").hashCode() == new CompressedXContent("{\"a\":\"c\"}").hashCode());
}

}

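Note: testDifferentCompressedRepresentation above pins down a subtle contract: equality and hashCode of CompressedXContent are defined over the uncompressed content, so two instances whose stored compressed bytes differ still compare equal. A brief sketch of that contract, using only accessors that appear in the tests (class name illustrative):

    import java.util.Arrays;
    import org.elasticsearch.common.compress.CompressedXContent;

    public class XContentEqualitySketch {
        public static void main(String[] args) throws Exception {
            CompressedXContent a = new CompressedXContent("{\"f\":\"v\"}");
            CompressedXContent b = new CompressedXContent("{\"f\":\"v\"}");

            // equals()/hashCode() follow the uncompressed bytes ...
            assert a.equals(b) && a.hashCode() == b.hashCode();
            assert Arrays.equals(a.uncompressed(), b.uncompressed());

            // ... while compressed() exposes the stored representation,
            // which may differ between equal instances (as the test shows).
            System.out.println("stored bytes: " + a.compressed().length);
        }
    }
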
@@ -0,0 +1,30 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.compress.deflate;

import org.elasticsearch.common.compress.AbstractCompressedStreamTests;

public class DeflateCompressedStreamTests extends AbstractCompressedStreamTests {

public DeflateCompressedStreamTests() {
super(new DeflateCompressor());
}

}

@@ -0,0 +1,30 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.compress.deflate;

import org.elasticsearch.common.compress.AbstractCompressedXContentTests;

public class DeflateXContentTests extends AbstractCompressedXContentTests {

public DeflateXContentTests() {
super(new DeflateCompressor());
}

}

@@ -17,7 +17,7 @@
* under the License.
*/

package org.elasticsearch.common.compress;
package org.elasticsearch.common.compress.lzf;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -26,10 +26,9 @@ import java.io.IOException;

/**
*/
public abstract class CompressedStreamOutput<T extends CompressorContext> extends StreamOutput {
public abstract class CompressedStreamOutput extends StreamOutput {

private final StreamOutput out;
protected final T context;

protected byte[] uncompressed;
protected int uncompressedLength;

@@ -37,9 +36,8 @@ public abstract class CompressedStreamOutput<T extends CompressorContext> extends StreamOutput {

private boolean closed;

public CompressedStreamOutput(StreamOutput out, T context) throws IOException {
public CompressedStreamOutput(StreamOutput out) throws IOException {
this.out = out;
this.context = context;
super.setVersion(out.getVersion());
writeHeader(out);
}

@@ -23,18 +23,18 @@ import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.ChunkEncoder;
import com.ning.compress.lzf.LZFChunk;
import com.ning.compress.lzf.util.ChunkEncoderFactory;
import org.elasticsearch.common.compress.CompressedStreamOutput;

import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

public class LZFCompressedStreamOutput extends CompressedStreamOutput<LZFCompressorContext> {
public class LZFCompressedStreamOutput extends CompressedStreamOutput {

private final BufferRecycler recycler;
private final ChunkEncoder encoder;

public LZFCompressedStreamOutput(StreamOutput out) throws IOException {
super(out, LZFCompressorContext.INSTANCE);
super(out);
this.recycler = BufferRecycler.instance();
this.uncompressed = this.recycler.allocOutputBuffer(LZFChunk.MAX_CHUNK_LEN);
this.uncompressedLength = LZFChunk.MAX_CHUNK_LEN;

@@ -0,0 +1,30 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.compress.lzf;

import org.elasticsearch.common.compress.AbstractCompressedStreamTests;

public class LZFCompressedStreamTests extends AbstractCompressedStreamTests {

public LZFCompressedStreamTests() {
super(new LZFTestCompressor());
}

}

@@ -0,0 +1,34 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.compress.lzf;

import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

// LZF compressor with write support, for testing only
public class LZFTestCompressor extends LZFCompressor {

@Override
public StreamOutput streamOutput(StreamOutput out) throws IOException {
return new LZFCompressedStreamOutput(out);
}

}

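Note: LZFTestCompressor exists because, after this commit, production code evidently no longer writes LZF; the subclass re-adds a streamOutput implementation purely so tests can generate LZF data to read back. Installing it for a test follows the save/swap/restore idiom used elsewhere in this diff (a sketch; the helper name is illustrative):

    import org.elasticsearch.common.compress.Compressor;
    import org.elasticsearch.common.compress.CompressorFactory;
    import org.elasticsearch.common.compress.lzf.LZFTestCompressor;

    public class LZFTestSetupSketch {
        static void runWithLZF(Runnable testBody) {
            final Compressor previous = CompressorFactory.defaultCompressor();
            try {
                // Route newly written compressed data through the LZF test compressor.
                CompressorFactory.setDefaultCompressor(new LZFTestCompressor());
                testBody.run();
            } finally {
                // Restore the real default so other tests are unaffected.
                CompressorFactory.setDefaultCompressor(previous);
            }
        }
    }
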
@@ -19,11 +19,12 @@

package org.elasticsearch.common.compress.lzf;

import org.elasticsearch.common.compress.CompressorContext;
import org.elasticsearch.common.compress.AbstractCompressedXContentTests;

/**
*/
public class LZFCompressorContext implements CompressorContext {
public class LZFXContentTests extends AbstractCompressedXContentTests {

public LZFXContentTests() {
super(new LZFTestCompressor());
}

public static final LZFCompressorContext INSTANCE = new LZFCompressorContext();
}

@@ -82,7 +82,7 @@ public class XContentFactoryTests extends ElasticsearchTestCase {
// this if for {"foo" : 5} in python CBOR
bytes = new byte[] {(byte) 0xA1, (byte) 0x63, (byte) 0x66, (byte) 0x6f, (byte) 0x6f, (byte) 0x5};
assertThat(XContentFactory.xContentType(bytes), equalTo(XContentType.CBOR));
assertThat(((Number) XContentHelper.convertToMap(bytes, true).v2().get("foo")).intValue(), equalTo(5));
assertThat(((Number) XContentHelper.convertToMap(new BytesArray(bytes), true).v2().get("foo")).intValue(), equalTo(5));

// also make sure major type check doesn't collide with SMILE and JSON, just in case
assertThat(CBORConstants.hasMajorType(CBORConstants.MAJOR_TYPE_OBJECT, SmileConstants.HEADER_BYTE_1), equalTo(false));

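Note: the only change in this hunk is the argument type: XContentHelper.convertToMap now takes a BytesReference, so raw arrays get wrapped in BytesArray at the call site. In sketch form (a hypothetical helper built around the call shape used in the test):

    import java.nio.charset.StandardCharsets;
    import java.util.Map;

    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.common.xcontent.XContentHelper;

    public class ConvertToMapSketch {
        // v2() of the returned tuple is the parsed map (v1() is assumed to
        // carry the detected content type).
        static Map<String, Object> parse(byte[] raw) {
            return XContentHelper.convertToMap(new BytesArray(raw), true).v2();
        }

        public static void main(String[] args) {
            byte[] json = "{\"foo\":5}".getBytes(StandardCharsets.UTF_8);
            System.out.println(parse(json).get("foo")); // 5
        }
    }
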
@@ -19,7 +19,8 @@
package org.elasticsearch.index.aliases;

import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -47,11 +48,11 @@ public class IndexAliasesServiceTests extends ElasticsearchSingleNodeTest {
return indexService.aliasesService();
}

public static CompressedString filter(QueryBuilder filterBuilder) throws IOException {
public static CompressedXContent filter(QueryBuilder filterBuilder) throws IOException {
XContentBuilder builder = XContentFactory.jsonBuilder();
filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.close();
return new CompressedString(builder.string());
return new CompressedXContent(builder.string());
}

@Test

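Note: callers of the filter helper above are unchanged apart from the return type; a typical call site would look like this (hypothetical usage; QueryBuilders is imported by other tests in this commit):

    import java.io.IOException;

    import org.elasticsearch.common.compress.CompressedXContent;
    import org.elasticsearch.index.aliases.IndexAliasesServiceTests;
    import org.elasticsearch.index.query.QueryBuilders;

    public class FilterUsageSketch {
        static CompressedXContent userFilter() throws IOException {
            // Serializes a term query to JSON and compresses it, exactly as
            // the helper above does via XContentBuilder.
            return IndexAliasesServiceTests.filter(QueryBuilders.termQuery("user", "kimchy"));
        }
    }
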
@@ -33,7 +33,7 @@ import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource;
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
import org.elasticsearch.index.mapper.Uid;

@@ -63,10 +63,10 @@ public class ParentChildFieldDataTests extends AbstractFieldDataTests {
@Before
public void before() throws Exception {
mapperService.merge(
childType, new CompressedString(PutMappingRequest.buildFromSimplifiedDef(childType, "_parent", "type=" + parentType).string()), true
childType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(childType, "_parent", "type=" + parentType).string()), true
);
mapperService.merge(
grandChildType, new CompressedString(PutMappingRequest.buildFromSimplifiedDef(grandChildType, "_parent", "type=" + childType).string()), true
grandChildType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(grandChildType, "_parent", "type=" + childType).string()), true
);

Document d = new Document();

@@ -82,7 +82,7 @@ public class BinaryMappingTests extends ElasticsearchSingleNodeTest {
new BytesArray(binaryValue1).writeTo(compressed);
}
final byte[] binaryValue2 = out.bytes().toBytes();
assertTrue(CompressorFactory.isCompressed(binaryValue2));
assertTrue(CompressorFactory.isCompressed(new BytesArray(binaryValue2)));

for (byte[] value : Arrays.asList(binaryValue1, binaryValue2)) {
ParsedDocument doc = mapper.parse("type", "id", XContentFactory.jsonBuilder().startObject().field("field", value).endObject().bytes());

@@ -114,7 +114,7 @@ public class BinaryMappingTests extends ElasticsearchSingleNodeTest {
new BytesArray(original).writeTo(compressed);
}
final byte[] binaryValue = out.bytes().toBytes();
assertTrue(CompressorFactory.isCompressed(binaryValue));
assertTrue(CompressorFactory.isCompressed(new BytesArray(binaryValue)));

ParsedDocument doc = mapper.parse("type", "id", XContentFactory.jsonBuilder().startObject().field("field", binaryValue).endObject().bytes());
BytesRef indexedValue = doc.rootDoc().getBinaryValue("field");

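Note: these assertions depend on CompressorFactory.isCompressed recognizing the compressor's header magic inside a BytesArray. A self-contained sketch of that detection round trip (illustrative class name; stream classes as used earlier in this commit):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.common.compress.CompressorFactory;
    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    public class IsCompressedSketch {
        public static void main(String[] args) throws IOException {
            byte[] raw = "some uncompressed payload".getBytes(StandardCharsets.UTF_8);
            // Plain bytes carry no compressor header, so detection says false.
            assert !CompressorFactory.isCompressed(new BytesArray(raw));

            // Write the same bytes through the default compressor's stream.
            BytesStreamOutput out = new BytesStreamOutput();
            StreamOutput compressed = CompressorFactory.defaultCompressor().streamOutput(out);
            compressed.writeBytes(raw);
            compressed.close();

            // The framed output now starts with the compressor's header.
            assert CompressorFactory.isCompressed(new BytesArray(out.bytes().toBytes()));
        }
    }
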
@@ -21,7 +21,7 @@ package org.elasticsearch.index.mapper.merge;

import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.analysis.FieldNameAnalyzer;
import org.elasticsearch.index.analysis.NamedAnalyzer;

@@ -160,7 +160,7 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest {

public void testConcurrentMergeTest() throws Throwable {
final MapperService mapperService = createIndex("test").mapperService();
mapperService.merge("test", new CompressedString("{\"test\":{}}"), true);
mapperService.merge("test", new CompressedXContent("{\"test\":{}}"), true);
final DocumentMapper documentMapper = mapperService.documentMapper("test");

DocumentFieldMappers dfm = documentMapper.mappers();

@@ -186,7 +186,7 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest {
Mapping update = doc.dynamicMappingsUpdate();
assert update != null;
lastIntroducedFieldName.set(fieldName);
mapperService.merge("test", new CompressedString(update.toString()), false);
mapperService.merge("test", new CompressedXContent(update.toString()), false);
}
} catch (Throwable t) {
error.set(t);

@@ -432,7 +432,7 @@ public class MultiFieldTests extends ElasticsearchSingleNodeTest {
DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
Arrays.sort(multiFieldNames);

Map<String, Object> sourceAsMap = XContentHelper.convertToMap(docMapper.mappingSource().compressed(), true).v2();
Map<String, Object> sourceAsMap = XContentHelper.convertToMap(docMapper.mappingSource().compressedReference(), true).v2();
@SuppressWarnings("unchecked")
Map<String, Object> multiFields = (Map<String, Object>) XContentMapValues.extractValue("type.properties.my_field.fields", sourceAsMap);
assertThat(multiFields.size(), equalTo(multiFieldNames.length));

@@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper.source;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;

@@ -51,7 +52,7 @@ public class CompressSourceMappingTests extends ElasticsearchSingleNodeTest {
.field("field2", "value2")
.endObject().bytes());
BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
assertThat(CompressorFactory.isCompressed(bytes.bytes, bytes.offset, bytes.length), equalTo(false));
assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(false));
}

@Test

@@ -68,7 +69,7 @@ public class CompressSourceMappingTests extends ElasticsearchSingleNodeTest {
.endObject().bytes());

BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
assertThat(CompressorFactory.isCompressed(bytes.bytes, bytes.offset, bytes.length), equalTo(true));
assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(true));
}

@Test

@@ -84,7 +85,7 @@ public class CompressSourceMappingTests extends ElasticsearchSingleNodeTest {
.endObject().bytes());

BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
assertThat(CompressorFactory.isCompressed(bytes.bytes, bytes.offset, bytes.length), equalTo(false));
assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(false));

doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
.field("field1", "value1")

@@ -95,6 +96,6 @@ public class CompressSourceMappingTests extends ElasticsearchSingleNodeTest {
.endObject().bytes());

bytes = doc.rootDoc().getBinaryValue("_source");
assertThat(CompressorFactory.isCompressed(bytes.bytes, bytes.offset, bytes.length), equalTo(true));
assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(true));
}
}

@@ -23,7 +23,7 @@ import org.apache.lucene.index.IndexableField;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;

@@ -193,7 +193,7 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest {
.endObject().endObject().string();

MapperService mapperService = createIndex("test").mapperService();
mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedString(defaultMapping), true);
mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(defaultMapping), true);

DocumentMapper mapper = mapperService.documentMapperWithAutoCreate("my_type").v1();
assertThat(mapper.type(), equalTo("my_type"));

@@ -206,12 +206,12 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest {
.endObject().endObject().string();

MapperService mapperService = createIndex("test").mapperService();
mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedString(defaultMapping), true);
mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(defaultMapping), true);

String mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type")
.startObject("_source").field("enabled", true).endObject()
.endObject().endObject().string();
mapperService.merge("my_type", new CompressedString(mapping), true);
mapperService.merge("my_type", new CompressedXContent(mapping), true);

DocumentMapper mapper = mapperService.documentMapper("my_type");
assertThat(mapper.type(), equalTo("my_type"));

@@ -27,7 +27,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.joda.Joda;

@@ -450,7 +450,7 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest {
{
MappingMetaData.Timestamp timestamp = new MappingMetaData.Timestamp(true, null,
TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT, null, null);
MappingMetaData expected = new MappingMetaData("type", new CompressedString("{}".getBytes(StandardCharsets.UTF_8)),
MappingMetaData expected = new MappingMetaData("type", new CompressedXContent("{}".getBytes(StandardCharsets.UTF_8)),
new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false);

BytesStreamOutput out = new BytesStreamOutput();

@@ -467,7 +467,7 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest {
{
MappingMetaData.Timestamp timestamp = new MappingMetaData.Timestamp(true, null,
TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT, "now", null);
MappingMetaData expected = new MappingMetaData("type", new CompressedString("{}".getBytes(StandardCharsets.UTF_8)),
MappingMetaData expected = new MappingMetaData("type", new CompressedXContent("{}".getBytes(StandardCharsets.UTF_8)),
new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false);

BytesStreamOutput out = new BytesStreamOutput();

@@ -484,7 +484,7 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest {
{
MappingMetaData.Timestamp timestamp = new MappingMetaData.Timestamp(true, null,
TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT, "now", false);
MappingMetaData expected = new MappingMetaData("type", new CompressedString("{}".getBytes(StandardCharsets.UTF_8)),
MappingMetaData expected = new MappingMetaData("type", new CompressedXContent("{}".getBytes(StandardCharsets.UTF_8)),
new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false);

BytesStreamOutput out = new BytesStreamOutput();

@@ -652,7 +652,7 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest {
.endObject()
.endObject().endObject().string();
// This was causing a NPE
new MappingMetaData(new CompressedString(mapping));
new MappingMetaData(new CompressedXContent(mapping));
}

@Test

@@ -27,7 +27,8 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

@@ -196,7 +197,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest {
public void testNoConflictIfNothingSetAndDisabledLater() throws Exception {
IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type");
XContentBuilder mappingWithTtlDisabled = getMappingWithTtlDisabled("7d");
MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlDisabled.string()), true).mapping(), randomBoolean());
MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlDisabled.string()), true).mapping(), randomBoolean());
assertFalse(mergeResult.hasConflicts());
}

@@ -204,7 +205,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest {
public void testNoConflictIfNothingSetAndEnabledLater() throws Exception {
IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type");
XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled("7d");
MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), randomBoolean());
MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), randomBoolean());
assertFalse(mergeResult.hasConflicts());
}

@@ -213,23 +214,23 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest {
XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled("7d");
IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithTtlEnabled);
XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m");
MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithOnlyDefaultSet.string()), true).mapping(), false);
MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false);
assertFalse(mergeResult.hasConflicts());
CompressedString mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
assertThat(mappingAfterMerge, equalTo(new CompressedString("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":360000},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":360000},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
}

@Test
public void testMergeWithOnlyDefaultSetTtlDisabled() throws Exception {
XContentBuilder mappingWithTtlEnabled = getMappingWithTtlDisabled("7d");
IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithTtlEnabled);
CompressedString mappingAfterCreation = indexService.mapperService().documentMapper("type").mappingSource();
assertThat(mappingAfterCreation, equalTo(new CompressedString("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
CompressedXContent mappingAfterCreation = indexService.mapperService().documentMapper("type").mappingSource();
assertThat(mappingAfterCreation, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m");
MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithOnlyDefaultSet.string()), true).mapping(), false);
MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false);
assertFalse(mergeResult.hasConflicts());
CompressedString mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
assertThat(mappingAfterMerge, equalTo(new CompressedString("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
}

@Test

@@ -238,12 +239,12 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest {
//check if default ttl changed when simulate set to true
XContentBuilder mappingWithTtl = getMappingWithTtlEnabled("6d");
IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithTtl);
CompressedString mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource();
CompressedXContent mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource();
XContentBuilder mappingWithTtlDifferentDefault = getMappingWithTtlEnabled("7d");
MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlDifferentDefault.string()), true).mapping(), true);
MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlDifferentDefault.string()), true).mapping(), true);
assertFalse(mergeResult.hasConflicts());
// make sure simulate flag actually worked - no mappings applied
CompressedString mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
assertThat(mappingAfterMerge, equalTo(mappingBeforeMerge));

client().admin().indices().prepareDelete("testindex").get();

@@ -252,7 +253,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest {
indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithoutTtl);
mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource();
XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled();
mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), true);
mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), true);
assertFalse(mergeResult.hasConflicts());
// make sure simulate flag actually worked - no mappings applied
mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();

@@ -264,7 +265,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest {
indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithoutTtl);
mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource();
mappingWithTtlEnabled = getMappingWithTtlEnabled("7d");
mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), true);
mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), true);
assertFalse(mergeResult.hasConflicts());
// make sure simulate flag actually worked - no mappings applied
mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();

@@ -275,21 +276,21 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest {
mappingWithoutTtl = getMappingWithTtlDisabled("6d");
indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithoutTtl);
mappingWithTtlEnabled = getMappingWithTtlEnabled("7d");
mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), false);
mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false);
assertFalse(mergeResult.hasConflicts());
// make sure simulate flag actually worked - mappings applied
mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
assertThat(mappingAfterMerge, equalTo(new CompressedString("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":604800000},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":604800000},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));

client().admin().indices().prepareDelete("testindex").get();
// check if switching simulate flag off works if nothing was applied in the beginning
indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type");
mappingWithTtlEnabled = getMappingWithTtlEnabled("7d");
mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), false);
mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false);
assertFalse(mergeResult.hasConflicts());
// make sure simulate flag actually worked - mappings applied
mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
assertThat(mappingAfterMerge, equalTo(new CompressedString("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":604800000},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":604800000},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));

}

@@ -348,4 +349,4 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest {
.startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
.endObject().endObject();
}
}
}

@@ -20,7 +20,7 @@
package org.elasticsearch.index.mapper.update;

import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

@@ -80,11 +80,11 @@ public class UpdateMappingTests extends ElasticsearchSingleNodeTest {
private void testNoConflictWhileMergingAndMappingChanged(XContentBuilder mapping, XContentBuilder mappingUpdate, XContentBuilder expectedMapping) throws IOException {
IndexService indexService = createIndex("test", Settings.settingsBuilder().build(), "type", mapping);
// simulate like in MetaDataMappingService#putMapping
MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingUpdate.bytes()), true).mapping(), false);
MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingUpdate.bytes()), true).mapping(), false);
// assure we have no conflicts
assertThat(mergeResult.buildConflicts().length, equalTo(0));
// make sure mappings applied
CompressedString mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource();
CompressedXContent mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource();
assertThat(mappingAfterUpdate.toString(), equalTo(expectedMapping.string()));
}

@@ -102,13 +102,13 @@ public class UpdateMappingTests extends ElasticsearchSingleNodeTest {

protected void testConflictWhileMergingAndMappingUnchanged(XContentBuilder mapping, XContentBuilder mappingUpdate) throws IOException {
IndexService indexService = createIndex("test", Settings.settingsBuilder().build(), "type", mapping);
CompressedString mappingBeforeUpdate = indexService.mapperService().documentMapper("type").mappingSource();
CompressedXContent mappingBeforeUpdate = indexService.mapperService().documentMapper("type").mappingSource();
// simulate like in MetaDataMappingService#putMapping
MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingUpdate.bytes()), true).mapping(), true);
MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingUpdate.bytes()), true).mapping(), true);
// assure we have conflicts
assertThat(mergeResult.buildConflicts().length, equalTo(1));
// make sure simulate flag actually worked - no mappings applied
CompressedString mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource();
CompressedXContent mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource();
assertThat(mappingAfterUpdate, equalTo(mappingBeforeUpdate));
}

@@ -124,9 +124,9 @@ public class UpdateMappingTests extends ElasticsearchSingleNodeTest {
.endObject()
.endObject()
.endObject();
DocumentMapper documentMapper = indexService.mapperService().parse("type", new CompressedString(indexMapping.string()), true);
DocumentMapper documentMapper = indexService.mapperService().parse("type", new CompressedXContent(indexMapping.string()), true);
assertThat(documentMapper.indexMapper().enabled(), equalTo(enabled));
documentMapper = indexService.mapperService().parse("type", new CompressedString(documentMapper.mappingSource().string()), true);
documentMapper = indexService.mapperService().parse("type", new CompressedXContent(documentMapper.mappingSource().string()), true);
assertThat(documentMapper.indexMapper().enabled(), equalTo(enabled));
}

@@ -146,11 +146,11 @@ public class UpdateMappingTests extends ElasticsearchSingleNodeTest {
.endObject()
.endObject()
.endObject();
DocumentMapper documentMapper = indexService.mapperService().parse("type", new CompressedString(indexMapping.string()), true);
DocumentMapper documentMapper = indexService.mapperService().parse("type", new CompressedXContent(indexMapping.string()), true);
assertThat(documentMapper.timestampFieldMapper().enabled(), equalTo(enabled));
assertTrue(documentMapper.timestampFieldMapper().fieldType().stored());
assertTrue(documentMapper.timestampFieldMapper().hasDocValues());
documentMapper = indexService.mapperService().parse("type", new CompressedString(documentMapper.mappingSource().string()), true);
documentMapper = indexService.mapperService().parse("type", new CompressedXContent(documentMapper.mappingSource().string()), true);
assertThat(documentMapper.timestampFieldMapper().enabled(), equalTo(enabled));
assertTrue(documentMapper.timestampFieldMapper().hasDocValues());
assertTrue(documentMapper.timestampFieldMapper().fieldType().stored());

@@ -168,10 +168,10 @@ public class UpdateMappingTests extends ElasticsearchSingleNodeTest {
.endObject()
.endObject()
.endObject();
DocumentMapper documentMapper = indexService.mapperService().parse("type", new CompressedString(indexMapping.string()), true);
DocumentMapper documentMapper = indexService.mapperService().parse("type", new CompressedXContent(indexMapping.string()), true);
assertThat(documentMapper.sizeFieldMapper().enabled(), equalTo(enabled));
assertTrue(documentMapper.sizeFieldMapper().fieldType().stored());
documentMapper = indexService.mapperService().parse("type", new CompressedString(documentMapper.mappingSource().string()), true);
documentMapper = indexService.mapperService().parse("type", new CompressedXContent(documentMapper.mappingSource().string()), true);
assertThat(documentMapper.sizeFieldMapper().enabled(), equalTo(enabled));
}

@@ -179,9 +179,9 @@ public class UpdateMappingTests extends ElasticsearchSingleNodeTest {
public void testSizeTimestampIndexParsing() throws IOException {
IndexService indexService = createIndex("test", Settings.settingsBuilder().build());
String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/update/default_mapping_with_disabled_root_types.json");
DocumentMapper documentMapper = indexService.mapperService().parse("type", new CompressedString(mapping), true);
DocumentMapper documentMapper = indexService.mapperService().parse("type", new CompressedXContent(mapping), true);
assertThat(documentMapper.mappingSource().string(), equalTo(mapping));
documentMapper = indexService.mapperService().parse("type", new CompressedString(documentMapper.mappingSource().string()), true);
documentMapper = indexService.mapperService().parse("type", new CompressedXContent(documentMapper.mappingSource().string()), true);
assertThat(documentMapper.mappingSource().string(), equalTo(mapping));
}

@@ -23,7 +23,7 @@ package org.elasticsearch.index.query;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.MapperService;

@@ -57,7 +57,7 @@ public class IndexQueryParserFilterDateRangeFormatTests extends ElasticsearchSingleNodeTest {

MapperService mapperService = indexService.mapperService();
String mapping = copyToStringFromClasspath("/org/elasticsearch/index/query/mapping.json");
mapperService.merge("person", new CompressedString(mapping), true);
mapperService.merge("person", new CompressedXContent(mapping), true);
ParsedDocument doc = mapperService.documentMapper("person").parse("person", "1", new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/query/data.json")));
assertNotNull(doc.dynamicMappingsUpdate());
client().admin().indices().preparePutMapping("test").setType("person").setSource(doc.dynamicMappingsUpdate().toString()).get();

@@ -23,7 +23,7 @@ package org.elasticsearch.index.query;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.MapperService;

@@ -58,7 +58,7 @@ public class IndexQueryParserFilterDateRangeTimezoneTests extends ElasticsearchSingleNodeTest {

MapperService mapperService = indexService.mapperService();
String mapping = copyToStringFromClasspath("/org/elasticsearch/index/query/mapping.json");
mapperService.merge("person", new CompressedString(mapping), true);
mapperService.merge("person", new CompressedXContent(mapping), true);
ParsedDocument doc = mapperService.documentMapper("person").parse("person", "1", new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/query/data.json")));
assertNotNull(doc.dynamicMappingsUpdate());
client().admin().indices().preparePutMapping("test").setType("person").setSource(doc.dynamicMappingsUpdate().toString()).get();

@@ -70,7 +70,7 @@ import org.elasticsearch.action.termvectors.TermVectorsRequest;
import org.elasticsearch.action.termvectors.TermVectorsResponse;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.search.MoreLikeThisQuery;
import org.elasticsearch.common.lucene.search.Queries;

@@ -209,7 +209,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest {
MapperService mapperService = indexService.mapperService();

String mapping = copyToStringFromClasspath("/org/elasticsearch/index/query/mapping.json");
mapperService.merge("person", new CompressedString(mapping), true);
mapperService.merge("person", new CompressedXContent(mapping), true);
ParsedDocument doc = mapperService.documentMapper("person").parse("person", "1", new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/query/data.json")));
assertNotNull(doc.dynamicMappingsUpdate());
client().admin().indices().preparePutMapping("test").setType("person").setSource(doc.dynamicMappingsUpdate().toString()).get();

@@ -31,7 +31,7 @@ import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;

@@ -67,8 +67,8 @@ public abstract class AbstractChildTests extends ElasticsearchSingleNodeTest {
MapperService mapperService = indexService.mapperService();
// Parent/child parsers require that the parent and child type to be presented in mapping
// Sometimes we want a nested object field in the parent type that triggers nonNestedDocsFilter to be used
mapperService.merge(parentType, new CompressedString(PutMappingRequest.buildFromSimplifiedDef(parentType, "nested_field", random().nextBoolean() ? "type=nested" : "type=object").string()), true);
mapperService.merge(childType, new CompressedString(PutMappingRequest.buildFromSimplifiedDef(childType, "_parent", "type=" + parentType, CHILD_SCORE_NAME, "type=double,doc_values=false").string()), true);
mapperService.merge(parentType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(parentType, "nested_field", random().nextBoolean() ? "type=nested" : "type=object").string()), true);
mapperService.merge(childType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(childType, "_parent", "type=" + parentType, CHILD_SCORE_NAME, "type=double,doc_values=false").string()), true);
return createSearchContext(indexService);
}

@@ -20,6 +20,7 @@ package org.elasticsearch.indices.template;

import com.google.common.collect.Lists;
import com.google.common.collect.Sets;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionRequestValidationException;

@@ -32,6 +33,7 @@ import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.query.QueryBuilders;

@@ -42,6 +44,7 @@ import org.elasticsearch.search.SearchHit;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.junit.Test;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Set;

@@ -668,4 +671,5 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest {
assertThat(response.getItems()[0].isFailed(), equalTo(true));
assertThat(response.getItems()[0].getFailureMessage(), containsString("failed to parse filter for alias [alias4]"));
}

}

@@ -33,7 +33,7 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;

@@ -117,7 +117,7 @@ public class NestedAggregatorTest extends ElasticsearchSingleNodeTest {
IndexSearcher searcher = new IndexSearcher(directoryReader);

IndexService indexService = createIndex("test");
indexService.mapperService().merge("test", new CompressedString(PutMappingRequest.buildFromSimplifiedDef("test", "nested_field", "type=nested").string()), true);
indexService.mapperService().merge("test", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("test", "nested_field", "type=nested").string()), true);
SearchContext searchContext = createSearchContext(indexService);
AggregationContext context = new AggregationContext(searchContext);

@@ -24,8 +24,9 @@ import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.compress.Compressor;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.compress.lzf.LZFCompressor;
import org.elasticsearch.common.compress.lzf.LZFTestCompressor;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

@@ -41,10 +42,15 @@ public class SearchSourceCompressTests extends ElasticsearchSingleNodeTest {

@Test
public void testSourceCompressionLZF() throws IOException {
CompressorFactory.setDefaultCompressor(new LZFCompressor());
verifySource(true);
verifySource(false);
verifySource(null);
final Compressor defaultCompressor = CompressorFactory.defaultCompressor();
try {
CompressorFactory.setDefaultCompressor(new LZFTestCompressor());
verifySource(true);
verifySource(false);
verifySource(null);
} finally {
CompressorFactory.setDefaultCompressor(defaultCompressor);
}
}

private void verifySource(Boolean compress) throws IOException {

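Note: with CompressorFactory.configure(Settings) removed by this commit, the save/swap/restore dance above recurs in every test that needs a specific default compressor. If it spread further, a small helper could centralize it (purely a suggestion; withDefaultCompressor is not part of this commit):

    import org.elasticsearch.common.compress.Compressor;
    import org.elasticsearch.common.compress.CompressorFactory;

    public final class CompressorTestUtil {
        // Runs body with the given compressor installed as the process-wide
        // default, restoring the previous default even if body throws.
        public static void withDefaultCompressor(Compressor compressor, Runnable body) {
            final Compressor previous = CompressorFactory.defaultCompressor();
            CompressorFactory.setDefaultCompressor(compressor);
            try {
                body.run();
            } finally {
                CompressorFactory.setDefaultCompressor(previous);
            }
        }
    }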