Remove custom Base64 implementation. #18413

This replaces o.e.common.Base64 with java.util.Base64.
Adrien Grand 2016-05-17 17:16:23 +02:00
parent e7eb664c78
commit 459916f5dd
20 changed files with 73 additions and 1837 deletions
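
For context, a minimal sketch (not part of this commit; the payload value is made up) of the java.util.Base64 entry points the new code relies on:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Base64;

public class Base64MigrationSketch {
    public static void main(String[] args) {
        byte[] payload = "some payload".getBytes(StandardCharsets.UTF_8);
        // Standard alphabet with padding, replacing Base64.encodeBytes(bytes):
        String standard = Base64.getEncoder().encodeToString(payload);
        // URL-safe alphabet, replacing Base64.encodeBytes(bytes, Base64.URL_SAFE):
        String urlSafe = Base64.getUrlEncoder().encodeToString(payload);
        // URL-safe without padding, replacing the manual '=' stripping in the UUID generators:
        String unpadded = Base64.getUrlEncoder().withoutPadding().encodeToString(payload);
        // Decoding malformed input throws an unchecked IllegalArgumentException,
        // where o.e.common.Base64 threw a checked IOException:
        byte[] roundTrip = Base64.getDecoder().decode(standard);
        System.out.println(Arrays.equals(payload, roundTrip) + " " + urlSafe + " " + unpadded);
    }
}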

View File

@@ -19,8 +19,6 @@
package org.elasticsearch.action.search;
import java.util.Map;
/**
*
*/
@@ -36,13 +34,10 @@ class ParsedScrollId {
private final ScrollIdForNode[] context;
private final Map<String, String> attributes;
public ParsedScrollId(String source, String type, ScrollIdForNode[] context, Map<String, String> attributes) {
public ParsedScrollId(String source, String type, ScrollIdForNode[] context) {
this.source = source;
this.type = type;
this.context = context;
this.attributes = attributes;
}
public String getSource() {
@@ -56,8 +51,4 @@ class ParsedScrollId {
public ScrollIdForNode[] getContext() {
return context;
}
public Map<String, String> getAttributes() {
return this.attributes;
}
}

View File

@@ -123,7 +123,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
queryFetchResults);
String scrollId = null;
if (request.scroll() != null) {
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults);
}
listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
buildTookInMillis(), buildShardFailures()));

View File

@@ -200,7 +200,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
fetchResults);
String scrollId = null;
if (request.scroll() != null) {
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults);
}
listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
buildTookInMillis(), buildShardFailures()));

View File

@@ -66,7 +66,7 @@ class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetc
firstResults);
String scrollId = null;
if (request.scroll() != null) {
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults);
}
listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
buildTookInMillis(), buildShardFailures()));

View File

@@ -133,7 +133,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
fetchResults);
String scrollId = null;
if (request.scroll() != null) {
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults);
}
listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps,
successfulOps.get(), buildTookInMillis(), buildShardFailures()));

View File

@@ -19,21 +19,16 @@
package org.elasticsearch.action.search;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRefBuilder;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.RAMOutputStream;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import static java.util.Collections.emptyMap;
import java.util.Base64;
/**
*
@@ -49,79 +44,49 @@ final class TransportSearchHelper {
return new InternalScrollSearchRequest(request, id);
}
static String buildScrollId(SearchType searchType, AtomicArray<? extends SearchPhaseResult> searchPhaseResults,
@Nullable Map<String, String> attributes) throws IOException {
static String buildScrollId(SearchType searchType, AtomicArray<? extends SearchPhaseResult> searchPhaseResults) throws IOException {
if (searchType == SearchType.DFS_QUERY_THEN_FETCH || searchType == SearchType.QUERY_THEN_FETCH) {
return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults, attributes);
return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults);
} else if (searchType == SearchType.QUERY_AND_FETCH || searchType == SearchType.DFS_QUERY_AND_FETCH) {
return buildScrollId(ParsedScrollId.QUERY_AND_FETCH_TYPE, searchPhaseResults, attributes);
return buildScrollId(ParsedScrollId.QUERY_AND_FETCH_TYPE, searchPhaseResults);
} else {
throw new IllegalStateException("search_type [" + searchType + "] not supported");
}
}
static String buildScrollId(String type, AtomicArray<? extends SearchPhaseResult> searchPhaseResults,
@Nullable Map<String, String> attributes) throws IOException {
StringBuilder sb = new StringBuilder().append(type).append(';');
sb.append(searchPhaseResults.asList().size()).append(';');
for (AtomicArray.Entry<? extends SearchPhaseResult> entry : searchPhaseResults.asList()) {
SearchPhaseResult searchPhaseResult = entry.value;
sb.append(searchPhaseResult.id()).append(':').append(searchPhaseResult.shardTarget().nodeId()).append(';');
}
if (attributes == null) {
sb.append("0;");
} else {
sb.append(attributes.size()).append(";");
for (Map.Entry<String, String> entry : attributes.entrySet()) {
sb.append(entry.getKey()).append(':').append(entry.getValue()).append(';');
static String buildScrollId(String type, AtomicArray<? extends SearchPhaseResult> searchPhaseResults) throws IOException {
try (RAMOutputStream out = new RAMOutputStream()) {
out.writeString(type);
out.writeVInt(searchPhaseResults.asList().size());
for (AtomicArray.Entry<? extends SearchPhaseResult> entry : searchPhaseResults.asList()) {
SearchPhaseResult searchPhaseResult = entry.value;
out.writeLong(searchPhaseResult.id());
out.writeString(searchPhaseResult.shardTarget().nodeId());
}
byte[] bytes = new byte[(int) out.getFilePointer()];
out.writeTo(bytes, 0);
return Base64.getUrlEncoder().encodeToString(bytes);
}
BytesRef bytesRef = new BytesRef(sb);
return Base64.encodeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length, Base64.URL_SAFE);
}
static ParsedScrollId parseScrollId(String scrollId) {
CharsRefBuilder spare = new CharsRefBuilder();
try {
byte[] decode = Base64.decode(scrollId, Base64.URL_SAFE);
spare.copyUTF8Bytes(decode, 0, decode.length);
byte[] bytes = Base64.getUrlDecoder().decode(scrollId);
ByteArrayDataInput in = new ByteArrayDataInput(bytes);
String type = in.readString();
ScrollIdForNode[] context = new ScrollIdForNode[in.readVInt()];
for (int i = 0; i < context.length; ++i) {
long id = in.readLong();
String target = in.readString();
context[i] = new ScrollIdForNode(target, id);
}
if (in.getPosition() != bytes.length) {
throw new IllegalArgumentException("Not all bytes were read");
}
return new ParsedScrollId(scrollId, type, context);
} catch (Exception e) {
throw new IllegalArgumentException("Failed to decode scrollId", e);
throw new IllegalArgumentException("Cannot parse scroll id", e);
}
String[] elements = spare.get().toString().split(";");
if (elements.length < 2) {
throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]");
}
int index = 0;
String type = elements[index++];
int contextSize = Integer.parseInt(elements[index++]);
if (elements.length < contextSize + 2) {
throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]");
}
ScrollIdForNode[] context = new ScrollIdForNode[contextSize];
for (int i = 0; i < contextSize; i++) {
String element = elements[index++];
int sep = element.indexOf(':');
if (sep == -1) {
throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]");
}
context[i] = new ScrollIdForNode(element.substring(sep + 1), Long.parseLong(element.substring(0, sep)));
}
Map<String, String> attributes;
int attributesSize = Integer.parseInt(elements[index++]);
if (attributesSize == 0) {
attributes = emptyMap();
} else {
attributes = new HashMap<>(attributesSize);
for (int i = 0; i < attributesSize; i++) {
String element = elements[index++];
int sep = element.indexOf(':');
attributes.put(element.substring(0, sep), element.substring(sep + 1));
}
}
return new ParsedScrollId(scrollId, type, context, attributes);
}
private TransportSearchHelper() {
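
As a usage note, a self-contained sketch (not from the commit; it assumes the Lucene version of this era, which still ships RAMOutputStream, and uses made-up type/node values) of the round trip the new buildScrollId/parseScrollId pair performs:

import java.util.Base64;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.RAMOutputStream;

public class ScrollIdRoundTripSketch {
    public static void main(String[] args) throws Exception {
        // Encode with the same layout as buildScrollId: type, shard count, then id/node pairs.
        String scrollId;
        try (RAMOutputStream out = new RAMOutputStream()) {
            out.writeString("queryThenFetch");
            out.writeVInt(1);
            out.writeLong(42L);        // search context id
            out.writeString("node-1"); // node holding the context
            byte[] bytes = new byte[(int) out.getFilePointer()];
            out.writeTo(bytes, 0);
            scrollId = Base64.getUrlEncoder().encodeToString(bytes);
        }

        // Decode the way parseScrollId does, including the trailing-bytes check.
        byte[] bytes = Base64.getUrlDecoder().decode(scrollId);
        ByteArrayDataInput in = new ByteArrayDataInput(bytes);
        String type = in.readString();
        int shards = in.readVInt();
        long id = in.readLong();
        String node = in.readString();
        if (in.getPosition() != bytes.length) {
            throw new IllegalArgumentException("Not all bytes were read");
        }
        System.out.println(type + ": " + shards + " shard(s), context " + id + " on " + node);
    }
}

Compared with the old semicolon-delimited StringBuilder format, the binary layout is more compact and opaque, and the position check makes trailing garbage an error instead of silently ignored.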

File diff suppressed because it is too large

View File

@@ -21,6 +21,7 @@ package org.elasticsearch.common;
import java.io.IOException;
import java.util.Base64;
import java.util.Random;
class RandomBasedUUIDGenerator implements UUIDGenerator {
@@ -54,14 +55,6 @@ class RandomBasedUUIDGenerator implements UUIDGenerator {
* We set only the MSB of the variant*/
randomBytes[8] &= 0x3f; /* clear the 2 most significant bits */
randomBytes[8] |= 0x80; /* set the variant (MSB is set)*/
try {
byte[] encoded = Base64.encodeBytesToBytes(randomBytes, 0, randomBytes.length, Base64.URL_SAFE);
// we know the bytes are 16, and not a multi of 3, so remove the 2 padding chars that are added
assert encoded[encoded.length - 1] == '=';
assert encoded[encoded.length - 2] == '=';
return new String(encoded, 0, encoded.length - 2, Base64.PREFERRED_ENCODING);
} catch (IOException e) {
throw new IllegalStateException("should not be thrown");
}
return Base64.getUrlEncoder().withoutPadding().encodeToString(randomBytes);
}
}
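
A quick sketch (not from the commit) of why withoutPadding() matches the removed hand-rolled logic for 16 random bytes:

import java.util.Base64;
import java.util.Random;

public class UnpaddedUuidSketch {
    public static void main(String[] args) {
        byte[] randomBytes = new byte[16];
        new Random().nextBytes(randomBytes);
        // 16 bytes is not a multiple of 3, so the padded form is 24 chars ending in "==";
        // withoutPadding() yields the same 22 chars the old code kept after stripping.
        String padded = Base64.getUrlEncoder().encodeToString(randomBytes);
        String unpadded = Base64.getUrlEncoder().withoutPadding().encodeToString(randomBytes);
        System.out.println(padded + " -> " + unpadded); // 24 chars "...==" -> 22-char id
    }
}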

View File

@@ -19,8 +19,7 @@
package org.elasticsearch.common;
import java.io.IOException;
import java.util.Base64;
import java.util.concurrent.atomic.AtomicInteger;
/** These are essentially flake ids (http://boundary.com/blog/2012/01/12/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang) but
@@ -80,15 +79,6 @@ class TimeBasedUUIDGenerator implements UUIDGenerator {
assert 9 + SECURE_MUNGED_ADDRESS.length == uuidBytes.length;
byte[] encoded;
try {
encoded = Base64.encodeBytesToBytes(uuidBytes, 0, uuidBytes.length, Base64.URL_SAFE);
} catch (IOException e) {
throw new IllegalStateException("should not be thrown", e);
}
// We are a multiple of 3 bytes so we should not see any padding:
assert encoded[encoded.length - 1] != '=';
return new String(encoded, 0, encoded.length, Base64.PREFERRED_ENCODING);
return Base64.getUrlEncoder().withoutPadding().encodeToString(uuidBytes);
}
}
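
Likewise a sketch for the time-based ids (assuming the 6-byte munged MAC address, so 9 + 6 = 15 uuid bytes): since 15 is a multiple of 3, the encoder never emits padding, which is exactly what the deleted assert verified:

import java.util.Base64;

public class FlakeIdLengthSketch {
    public static void main(String[] args) {
        byte[] uuidBytes = new byte[15]; // 9 timestamp/sequence bytes + 6 address bytes
        String encoded = Base64.getUrlEncoder().withoutPadding().encodeToString(uuidBytes);
        // A multiple of 3 bytes encodes to exactly 20 chars with no '=' to strip,
        // so withoutPadding() is a no-op here but keeps both generators on one encoder call.
        System.out.println(encoded.length() + " " + encoded.endsWith("=")); // 20 false
    }
}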

View File

@@ -19,7 +19,6 @@
package org.elasticsearch.index.engine;
import org.apache.lucene.index.SegmentInfos;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -29,6 +28,7 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Base64;
import java.util.Map;
/** a class the returns dynamic information with respect to the last commit point of this shard */
@@ -44,9 +44,7 @@ public final class CommitStats implements Streamable, ToXContent {
userData = MapBuilder.<String, String>newMapBuilder().putAll(segmentInfos.getUserData()).immutableMap();
// lucene calls the current generation, last generation.
generation = segmentInfos.getLastGeneration();
if (segmentInfos.getId() != null) { // id is only written starting with Lucene 5.0
id = Base64.encodeBytes(segmentInfos.getId());
}
id = Base64.getEncoder().encodeToString(segmentInfos.getId());
numDocs = Lucene.getNumDocs(segmentInfos);
}

View File

@@ -28,22 +28,18 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SegmentCommitInfo;
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.SegmentReader;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.search.join.BitSetProducer;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.ImmutableOpenMap;
@@ -65,7 +61,6 @@ import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;
@@ -74,7 +69,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.NoSuchFileException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Base64;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
@@ -1092,7 +1087,7 @@ public abstract class Engine implements Closeable {
@Override
public String toString() {
return Base64.encodeBytes(id);
return Base64.getEncoder().encodeToString(id);
}
public boolean idsEqual(byte[] id) {

View File

@@ -26,8 +26,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.store.ByteArrayDataOutput;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
@@ -45,6 +43,7 @@ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;
import java.io.IOException;
import java.util.Base64;
import java.util.List;
import java.util.Map;
@@ -124,11 +123,7 @@ public class BinaryFieldMapper extends FieldMapper {
} else if (value instanceof byte[]) {
bytes = new BytesArray((byte[]) value);
} else {
try {
bytes = new BytesArray(Base64.decode(value.toString()));
} catch (IOException e) {
throw new ElasticsearchParseException("failed to convert bytes", e);
}
bytes = new BytesArray(Base64.getDecoder().decode(value.toString()));
}
return bytes;
}
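
A small sketch (not from the commit) of the behavioral difference this change relies on: java.util.Base64 decoding fails with an unchecked IllegalArgumentException, so the checked-IOException plumbing disappears:

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class DecodeBehaviorSketch {
    public static void main(String[] args) {
        // Valid input decodes as before:
        byte[] ok = Base64.getDecoder().decode("dGVzdGluZyB0ZXh0");
        System.out.println(new String(ok, StandardCharsets.UTF_8)); // "testing text"

        // Malformed input now throws IllegalArgumentException (unchecked), so the
        // old catch of IOException rethrown as ElasticsearchParseException is gone:
        try {
            Base64.getDecoder().decode("not base64!");
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}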

View File

@@ -19,7 +19,6 @@
package org.elasticsearch.ingest.core;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Strings;
import org.elasticsearch.index.mapper.internal.IdFieldMapper;
import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
@@ -30,11 +29,11 @@ import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
@@ -43,8 +42,6 @@ import java.util.Map;
import java.util.Objects;
import java.util.TimeZone;
import static java.nio.charset.StandardCharsets.UTF_8;
/**
* Represents a single document being captured before indexing and holds the source and metadata (like id, type and index).
*/
@@ -144,11 +141,7 @@ public final class IngestDocument {
if (object instanceof byte[]) {
return (byte[]) object;
} else if (object instanceof String) {
try {
return Base64.decode(object.toString().getBytes(UTF_8));
} catch (IOException e) {
throw new IllegalArgumentException("Could not base64 decode path [ " + path + "]", e);
}
return Base64.getDecoder().decode(object.toString());
} else {
throw new IllegalArgumentException("Content field [" + path + "] of unknown type [" + object.getClass().getName() +
"], must be string or byte array");
@@ -464,7 +457,6 @@ public final class IngestDocument {
private static void appendValues(List<Object> list, Object value) {
if (value instanceof List) {
@SuppressWarnings("unchecked")
List<?> valueList = (List<?>) value;
valueList.stream().forEach(list::add);
} else {

View File

@@ -1,57 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Locale;
import static org.hamcrest.Matchers.is;
/**
*
*/
public class Base64Tests extends ESTestCase {
// issue #6334
public void testBase64DecodeWithExtraCharactersAfterPadding() throws Exception {
String plain = randomAsciiOfLengthBetween(1, 20) + ":" + randomAsciiOfLengthBetween(1, 20);
String encoded = Base64.encodeBytes(plain.getBytes(StandardCharsets.UTF_8));
assertValidBase64(encoded, plain);
// lets append some trash here, if the encoded string has been padded
char lastChar = encoded.charAt(encoded.length() - 1);
if (lastChar == '=') {
assertInvalidBase64(encoded + randomAsciiOfLength(3));
}
}
private void assertValidBase64(String base64, String expected) throws IOException {
String decoded = new String(Base64.decode(base64.getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8);
assertThat(decoded, is(expected));
}
private void assertInvalidBase64(String base64) {
try {
Base64.decode(base64.getBytes(StandardCharsets.UTF_8));
fail(String.format(Locale.ROOT, "Expected IOException to be thrown for string %s (len %d)", base64, base64.length()));
} catch (IOException e) {}
}
}
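
The deleted test guarded against lenient decoding of trailing garbage after padding (issue #6334). A sketch (not from the commit) showing that java.util.Base64 enforces this strictly, which is presumably why the test could go:

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class StrictPaddingSketch {
    public static void main(String[] args) {
        // "ab" is 2 bytes, so the encoded form carries padding: "YWI="
        String padded = Base64.getEncoder().encodeToString("ab".getBytes(StandardCharsets.UTF_8));
        try {
            Base64.getDecoder().decode(padded + "xyz"); // trash appended after the padding
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}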

View File

@@ -54,7 +54,6 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@@ -111,6 +110,7 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
@@ -829,7 +829,7 @@ public class InternalEngineTests extends ESTestCase {
engine.index(new Engine.Index(newUid("1"), doc));
Engine.CommitId commitID = engine.flush();
assertThat(commitID, equalTo(new Engine.CommitId(store.readLastCommittedSegmentsInfo().getId())));
byte[] wrongBytes = Base64.decode(commitID.toString());
byte[] wrongBytes = Base64.getDecoder().decode(commitID.toString());
wrongBytes[0] = (byte) ~wrongBytes[0];
Engine.CommitId wrongId = new Engine.CommitId(wrongBytes);
assertEquals("should fail to sync flush with wrong id (but no docs)", engine.syncFlush(syncId + "1", wrongId),

View File

@@ -31,7 +31,6 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@@ -318,7 +317,7 @@ public class SearchScrollIT extends ESIntegTestCase {
public void testClearNonExistentScrollId() throws Exception {
createIndex("idx");
ClearScrollResponse response = client().prepareClearScroll()
.addScrollId("cXVlcnlUaGVuRmV0Y2g7MzsyOlpBRC1qOUhrUjhhZ0NtQWUxU2FuWlE7MjpRcjRaNEJ2R1JZV1VEMW02ZGF1LW5ROzI6S0xUal9lZDRTd3lWNUhUU2VSb01CQTswOw==")
.addScrollId("DnF1ZXJ5VGhlbkZldGNoAwAAAAAAAAABFnRtLWMyRzBqUUQyNk1uM0xDTjJ4S0EAAAAAAAAAARYzNkhxbWFTYVFVNmgxTGQyYUZVYV9nAAAAAAAAAAEWdVcxNWZmRGZSVFN2V0xMUGF2NGx1Zw==")
.get();
// Whether we actually clear a scroll, we can't know, since that information isn't serialized in the
// free search context response, which is returned from each node we want to clear a particular scroll.
@@ -330,24 +329,19 @@ public class SearchScrollIT extends ESIntegTestCase {
public void testClearIllegalScrollId() throws Exception {
createIndex("idx");
try {
client().prepareClearScroll().addScrollId("c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1").get();
fail();
} catch (IllegalArgumentException e) {
}
try {
// Fails during base64 decoding (Base64-encoded string must have at least four characters)
client().prepareClearScroll().addScrollId("a").get();
fail();
} catch (IllegalArgumentException e) {
}
try {
client().prepareClearScroll().addScrollId("abcabc").get();
fail();
// if running without -ea this will also throw ElasticsearchIllegalArgumentException
} catch (UncategorizedExecutionException e) {
assertThat(e.getRootCause(), instanceOf(AssertionError.class));
}
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> client().prepareClearScroll().addScrollId("c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1").get());
assertEquals("Cannot parse scroll id", e.getMessage());
e = expectThrows(IllegalArgumentException.class,
// Fails during base64 decoding (Base64-encoded string must have at least four characters)
() -> client().prepareClearScroll().addScrollId("a").get());
assertEquals("Cannot parse scroll id", e.getMessage());
e = expectThrows(IllegalArgumentException.class,
// Other invalid base64
() -> client().prepareClearScroll().addScrollId("abcabc").get());
assertEquals("Cannot parse scroll id", e.getMessage());
}
public void testSimpleScrollQueryThenFetchClearAllScrollIds() throws Exception {

View File

@@ -22,7 +22,6 @@ package org.elasticsearch.messy.tests;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@@ -45,8 +44,10 @@ import org.elasticsearch.test.ESIntegTestCase;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
@@ -413,7 +414,7 @@ public class SearchFieldsTests extends ESIntegTestCase {
.field("double_field", 6.0d)
.field("date_field", Joda.forPattern("dateOptionalTime").printer().print(new DateTime(2012, 3, 22, 0, 0, DateTimeZone.UTC)))
.field("boolean_field", true)
.field("binary_field", Base64.encodeBytes("testing text".getBytes("UTF8")))
.field("binary_field", Base64.getEncoder().encodeToString("testing text".getBytes("UTF-8")))
.endObject()).execute().actionGet();
client().admin().indices().prepareRefresh().execute().actionGet();

View File

@@ -99,7 +99,7 @@ public class TransportDeleteByQueryActionTests extends ESSingleNodeTestCase {
newAsyncAction(delete, listener).executeScroll("123");
waitForCompletion("scroll request should fail on malformed scroll id", listener);
assertFailure(listener, "Failed to decode scrollId");
assertFailure(listener, "Cannot parse scroll id");
assertSearchContextsClosed();
}

View File

@@ -21,7 +21,6 @@ package org.elasticsearch.ingest.attachment;
import org.apache.commons.io.IOUtils;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Base64;
import org.elasticsearch.ingest.RandomDocumentPicks;
import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.test.ESTestCase;
@@ -30,6 +29,7 @@ import org.junit.Before;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Base64;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
@@ -209,7 +209,7 @@ public class AttachmentProcessorTests extends ESTestCase {
String path = "/org/elasticsearch/ingest/attachment/test/sample-files/" + filename;
try (InputStream is = AttachmentProcessorTests.class.getResourceAsStream(path)) {
byte bytes[] = IOUtils.toByteArray(is);
return Base64.encodeBytes(bytes);
return Base64.getEncoder().encodeToString(bytes);
}
}
}

View File

@@ -19,7 +19,6 @@
package org.elasticsearch.mapper.attachments;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
@@ -33,6 +32,7 @@ import org.elasticsearch.index.mapper.core.TextFieldMapper;
import org.junit.Before;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.instanceOf;
@@ -84,7 +84,7 @@ public class MultifieldAttachmentMapperTests extends AttachmentUnitTestCase {
String originalText = "This is an elasticsearch mapper attachment test.";
String forcedName = "dummyname.txt";
String bytes = Base64.encodeBytes(originalText.getBytes(StandardCharsets.ISO_8859_1));
String bytes = Base64.getEncoder().encodeToString(originalText.getBytes(StandardCharsets.ISO_8859_1));
MapperService mapperService = MapperTestUtils.newMapperService(createTempDir(), Settings.EMPTY, getIndicesModuleWithRegisteredAttachmentMapper());
@@ -150,7 +150,7 @@
String forcedLanguage = randomAsciiOfLength(20);
String forcedContentType = randomAsciiOfLength(20);
String bytes = Base64.encodeBytes(originalText.getBytes(StandardCharsets.ISO_8859_1));
String bytes = Base64.getEncoder().encodeToString(originalText.getBytes(StandardCharsets.ISO_8859_1));
MapperService mapperService = MapperTestUtils.newMapperService(createTempDir(),
Settings.builder().put(AttachmentMapper.INDEX_ATTACHMENT_DETECT_LANGUAGE_SETTING.getKey(), true).build(),