Remove Strings#splitStringToArray

This commit removes the method Strings#splitStringToArray and replaces
the call sites with invocations to String#split. There are only two
explanations for the existence of this method. The first is that
String#split is slightly tricky in that it accepts a regular expression
rather than a character to split on. This means that if s is a string,
s.split(".") does not split on the character '.', but rather splits on
the regular expression '.' which splits on every character (of course,
this is easily fixed by invoking s.split("\\.") instead). The second
possible explanation is that (again) String#split accepts a regular
expression. This means that there could be a performance concern
compared to just splitting on a single character. However, it turns out
that String#split has a fast path for the case of splitting on a single
character and microbenchmarks show that String#split has 1.5x--2x the
throughput of Strings#splitStringToArray. There is a slight behavior
difference between Strings#splitStringToArray and String#split: namely,
the former would return an empty array in cases when the input string
was null or empty but String#split will just NPE at the call site on
null and return a one-element array containing the empty string when the
input string is empty. There was only one place relying on this behavior
and the call site has been modified accordingly.
This commit is contained in:
Jason Tedor 2016-05-03 09:12:28 -04:00
parent 85d2fc0e38
commit 2dea449949
16 changed files with 29 additions and 103 deletions

View File

@@ -24,7 +24,6 @@ import org.apache.lucene.util.CharsRefBuilder;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Base64; import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.internal.InternalScrollSearchRequest; import org.elasticsearch.search.internal.InternalScrollSearchRequest;
@@ -89,7 +88,7 @@ final class TransportSearchHelper {
} catch (Exception e) { } catch (Exception e) {
throw new IllegalArgumentException("Failed to decode scrollId", e); throw new IllegalArgumentException("Failed to decode scrollId", e);
} }
String[] elements = Strings.splitStringToArray(spare.get(), ';'); String[] elements = spare.get().toString().split(";");
if (elements.length < 2) { if (elements.length < 2) {
throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]"); throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]");
} }

View File

@@ -38,7 +38,6 @@ import java.util.Iterator;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.Properties; import java.util.Properties;
import java.util.Random;
import java.util.Set; import java.util.Set;
import java.util.StringTokenizer; import java.util.StringTokenizer;
import java.util.TreeSet; import java.util.TreeSet;
@@ -557,7 +556,8 @@ public class Strings {
} }
public static String[] splitStringByCommaToArray(final String s) { public static String[] splitStringByCommaToArray(final String s) {
return splitStringToArray(s, ','); if (s == null || s.isEmpty()) return Strings.EMPTY_ARRAY;
else return s.split(",");
} }
public static Set<String> splitStringToSet(final String s, final char c) { public static Set<String> splitStringToSet(final String s, final char c) {
@@ -588,42 +588,6 @@ public class Strings {
return result; return result;
} }
public static String[] splitStringToArray(final CharSequence s, final char c) {
if (s == null || s.length() == 0) {
return Strings.EMPTY_ARRAY;
}
int count = 1;
for (int i = 0; i < s.length(); i++) {
if (s.charAt(i) == c) {
count++;
}
}
final String[] result = new String[count];
final StringBuilder builder = new StringBuilder();
int res = 0;
for (int i = 0; i < s.length(); i++) {
if (s.charAt(i) == c) {
if (builder.length() > 0) {
result[res++] = builder.toString();
builder.setLength(0);
}
} else {
builder.append(s.charAt(i));
}
}
if (builder.length() > 0) {
result[res++] = builder.toString();
}
if (res != count) {
// we have empty strings, copy over to a new array
String[] result1 = new String[res];
System.arraycopy(result, 0, result1, 0, res);
return result1;
}
return result;
}
/** /**
* Split a String at the first occurrence of the delimiter. * Split a String at the first occurrence of the delimiter.
* Does not include the delimiter in the result. * Does not include the delimiter in the result.

View File

@@ -149,7 +149,7 @@ public class Table {
// get the attributes of the header cell we are going to add // get the attributes of the header cell we are going to add
mAttr.putAll(headers.get(currentCells.size()).attr); mAttr.putAll(headers.get(currentCells.size()).attr);
} }
String[] sAttrs = Strings.splitStringToArray(attributes, ';'); String[] sAttrs = attributes.split(";");
for (String sAttr : sAttrs) { for (String sAttr : sAttrs) {
if (sAttr.length() == 0) { if (sAttr.length() == 0) {
continue; continue;

View File

@@ -19,8 +19,6 @@
package org.elasticsearch.common.path; package org.elasticsearch.common.path;
import org.elasticsearch.common.Strings;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
@@ -38,7 +36,7 @@ public class PathTrie<T> {
private final Decoder decoder; private final Decoder decoder;
private final TrieNode root; private final TrieNode root;
private final char separator; private final String separator;
private T rootValue; private T rootValue;
public PathTrie(Decoder decoder) { public PathTrie(Decoder decoder) {
@@ -47,8 +45,9 @@ public class PathTrie<T> {
public PathTrie(char separator, String wildcard, Decoder decoder) { public PathTrie(char separator, String wildcard, Decoder decoder) {
this.decoder = decoder; this.decoder = decoder;
this.separator = separator; final String separatorAsString = new String(new char[]{separator});
root = new TrieNode(new String(new char[]{separator}), null, wildcard); this.separator = separatorAsString;
root = new TrieNode(separatorAsString, null, wildcard);
} }
public class TrieNode { public class TrieNode {
@@ -196,7 +195,7 @@ public class PathTrie<T> {
} }
public void insert(String path, T value) { public void insert(String path, T value) {
String[] strings = Strings.splitStringToArray(path, separator); String[] strings = path.split(separator);
if (strings.length == 0) { if (strings.length == 0) {
rootValue = value; rootValue = value;
return; return;
@@ -217,7 +216,7 @@ public class PathTrie<T> {
if (path.length() == 0) { if (path.length() == 0) {
return rootValue; return rootValue;
} }
String[] strings = Strings.splitStringToArray(path, separator); String[] strings = path.split(separator);
if (strings.length == 0) { if (strings.length == 0) {
return rootValue; return rootValue;
} }

View File

@@ -899,19 +899,6 @@ public final class Settings implements ToXContent {
return this; return this;
} }
public Builder loadFromDelimitedString(String value, char delimiter) {
String[] values = Strings.splitStringToArray(value, delimiter);
for (String s : values) {
int index = s.indexOf('=');
if (index == -1) {
throw new IllegalArgumentException(
"value [" + s + "] for settings loaded with delimiter [" + delimiter + "] is malformed, missing =");
}
map.put(s.substring(0, index), s.substring(index + 1));
}
return this;
}
/** /**
* Loads settings from the actual string content that represents them using the * Loads settings from the actual string content that represents them using the
* {@link SettingsLoaderFactory#loaderFromSource(String)}. * {@link SettingsLoaderFactory#loaderFromSource(String)}.

View File

@@ -24,7 +24,6 @@ import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.RamUsageEstimator; import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.hash.MurmurHash3; import org.elasticsearch.common.hash.MurmurHash3;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
@@ -75,7 +74,7 @@ public class BloomFilter {
if (config == null) { if (config == null) {
return buildDefault(); return buildDefault();
} }
String[] sEntries = Strings.splitStringToArray(config, ','); String[] sEntries = config.split(",");
if (sEntries.length == 0) { if (sEntries.length == 0) {
if (config.length() > 0) { if (config.length() > 0) {
return new Factory(new Entry[]{new Entry(0, Double.parseDouble(config))}); return new Factory(new Entry[]{new Entry(0, Double.parseDouble(config))});

View File

@@ -40,7 +40,7 @@ public class XContentMapValues {
*/ */
public static List<Object> extractRawValues(String path, Map<String, Object> map) { public static List<Object> extractRawValues(String path, Map<String, Object> map) {
List<Object> values = new ArrayList<>(); List<Object> values = new ArrayList<>();
String[] pathElements = Strings.splitStringToArray(path, '.'); String[] pathElements = path.split("\\.");
if (pathElements.length == 0) { if (pathElements.length == 0) {
return values; return values;
} }
@@ -93,7 +93,7 @@ } }
} }
public static Object extractValue(String path, Map<String, Object> map) { public static Object extractValue(String path, Map<String, Object> map) {
String[] pathElements = Strings.splitStringToArray(path, '.'); String[] pathElements = path.split("\\.");
if (pathElements.length == 0) { if (pathElements.length == 0) {
return null; return null;
} }

View File

@@ -19,13 +19,6 @@
package org.elasticsearch.index.mapper; package org.elasticsearch.index.mapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import org.apache.lucene.document.Field; import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableField;
import org.elasticsearch.Version; import org.elasticsearch.Version;
@@ -51,6 +44,13 @@ import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.mapper.object.ArrayValueMapperParser; import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;
import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.mapper.object.ObjectMapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
/** A parser for documents, given mappings from a DocumentMapper */ /** A parser for documents, given mappings from a DocumentMapper */
final class DocumentParser { final class DocumentParser {
@@ -829,8 +829,7 @@ final class DocumentParser {
// The path of the dest field might be completely different from the current one so we need to reset it // The path of the dest field might be completely different from the current one so we need to reset it
context = context.overridePath(new ContentPath(0)); context = context.overridePath(new ContentPath(0));
// TODO: why Strings.splitStringToArray instead of String.split? final String[] paths = field.split("\\.");
final String[] paths = Strings.splitStringToArray(field, '.');
final String fieldName = paths[paths.length-1]; final String fieldName = paths[paths.length-1];
ObjectMapper mapper = context.root(); ObjectMapper mapper = context.root();
ObjectMapper[] mappers = new ObjectMapper[paths.length-1]; ObjectMapper[] mappers = new ObjectMapper[paths.length-1];

View File

@@ -33,7 +33,6 @@ import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.Client; import org.elasticsearch.client.Client;
import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.ShapeRelation;
import org.elasticsearch.common.geo.ShapesAvailability; import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.geo.SpatialStrategy; import org.elasticsearch.common.geo.SpatialStrategy;
@@ -379,7 +378,7 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
throw new IllegalArgumentException("Shape with ID [" + getRequest.id() + "] in type [" + getRequest.type() + "] not found"); throw new IllegalArgumentException("Shape with ID [" + getRequest.id() + "] in type [" + getRequest.type() + "] not found");
} }
String[] pathElements = Strings.splitStringToArray(path, '.'); String[] pathElements = path.split("\\.");
int currentPathSlot = 0; int currentPathSlot = 0;
try (XContentParser parser = XContentHelper.createParser(response.getSourceAsBytesRef())) { try (XContentParser parser = XContentHelper.createParser(response.getSourceAsBytesRef())) {

View File

@@ -622,8 +622,8 @@ public final class IngestDocument {
newPath = path; newPath = path;
} }
} }
this.pathElements = Strings.splitStringToArray(newPath, '.'); this.pathElements = newPath.split("\\.");
if (pathElements.length == 0) { if (pathElements.length == 1 && pathElements[0].isEmpty()) {
throw new IllegalArgumentException("path [" + path + "] is not valid"); throw new IllegalArgumentException("path [" + path + "] is not valid");
} }
} }

View File

@@ -492,7 +492,7 @@ public class TasksIT extends ESIntegTestCase {
private void registerTaskManageListeners(String actionMasks) { private void registerTaskManageListeners(String actionMasks) {
for (String nodeName : internalCluster().getNodeNames()) { for (String nodeName : internalCluster().getNodeNames()) {
DiscoveryNode node = internalCluster().getInstance(ClusterService.class, nodeName).localNode(); DiscoveryNode node = internalCluster().getInstance(ClusterService.class, nodeName).localNode();
RecordingTaskManagerListener listener = new RecordingTaskManagerListener(node, Strings.splitStringToArray(actionMasks, ',')); RecordingTaskManagerListener listener = new RecordingTaskManagerListener(node, actionMasks.split(","));
((MockTaskManager) internalCluster().getInstance(TransportService.class, nodeName).getTaskManager()).addListener(listener); ((MockTaskManager) internalCluster().getInstance(TransportService.class, nodeName).getTaskManager()).addListener(listener);
RecordingTaskManagerListener oldListener = listeners.put(new Tuple<>(node.getName(), actionMasks), listener); RecordingTaskManagerListener oldListener = listeners.put(new Tuple<>(node.getName(), actionMasks), listener);
assertNull(oldListener); assertNull(oldListener);

View File

@@ -38,24 +38,6 @@ import static org.hamcrest.Matchers.nullValue;
public class SettingsTests extends ESTestCase { public class SettingsTests extends ESTestCase {
public void testLoadFromDelimitedString() {
Settings settings = Settings.builder()
.loadFromDelimitedString("key1=value1;key2=value2", ';')
.build();
assertThat(settings.get("key1"), equalTo("value1"));
assertThat(settings.get("key2"), equalTo("value2"));
assertThat(settings.getAsMap().size(), equalTo(2));
assertThat(settings.toDelimitedString(';'), equalTo("key1=value1;key2=value2;"));
settings = Settings.builder()
.loadFromDelimitedString("key1=value1;key2=value2;", ';')
.build();
assertThat(settings.get("key1"), equalTo("value1"));
assertThat(settings.get("key2"), equalTo("value2"));
assertThat(settings.getAsMap().size(), equalTo(2));
assertThat(settings.toDelimitedString(';'), equalTo("key1=value1;key2=value2;"));
}
public void testReplacePropertiesPlaceholderSystemProperty() { public void testReplacePropertiesPlaceholderSystemProperty() {
String value = System.getProperty("java.home"); String value = System.getProperty("java.home");
assertFalse(value.isEmpty()); assertFalse(value.isEmpty());

View File

@@ -92,7 +92,7 @@ public class GceNameResolver extends AbstractComponent implements CustomNameReso
} else if (value.startsWith(GceAddressResolverType.PRIVATE_IP.configName)) { } else if (value.startsWith(GceAddressResolverType.PRIVATE_IP.configName)) {
// We extract the network interface from gce:privateIp:XX // We extract the network interface from gce:privateIp:XX
String network = "0"; String network = "0";
String[] privateIpConfig = Strings.splitStringToArray(value, ':'); String[] privateIpConfig = value.split(":");
if (privateIpConfig != null && privateIpConfig.length == 3) { if (privateIpConfig != null && privateIpConfig.length == 3) {
network = privateIpConfig[2]; network = privateIpConfig[2];
} }

View File

@@ -116,7 +116,7 @@ public class AzureRepository extends BlobStoreRepository {
// Remove starting / if any // Remove starting / if any
basePath = Strings.trimLeadingCharacter(basePath, '/'); basePath = Strings.trimLeadingCharacter(basePath, '/');
BlobPath path = new BlobPath(); BlobPath path = new BlobPath();
for(String elem : Strings.splitStringToArray(basePath, '/')) { for(String elem : basePath.split("/")) {
path = path.add(elem); path = path.add(elem);
} }
this.basePath = path; this.basePath = path;

View File

@@ -287,7 +287,7 @@ public class S3Repository extends BlobStoreRepository {
String basePath = getValue(repositorySettings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING); String basePath = getValue(repositorySettings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING);
if (Strings.hasLength(basePath)) { if (Strings.hasLength(basePath)) {
BlobPath path = new BlobPath(); BlobPath path = new BlobPath();
for(String elem : Strings.splitStringToArray(basePath, '/')) { for(String elem : basePath.split("/")) {
path = path.add(elem); path = path.add(elem);
} }
this.basePath = path; this.basePath = path;

View File

@@ -22,8 +22,6 @@ package org.elasticsearch.ingest;
import com.carrotsearch.randomizedtesting.generators.RandomInts; import com.carrotsearch.randomizedtesting.generators.RandomInts;
import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.ingest.core.IngestDocument; import org.elasticsearch.ingest.core.IngestDocument;
import java.util.ArrayList; import java.util.ArrayList;
@@ -105,7 +103,7 @@
* that each node of the tree either doesn't exist or is a map, otherwise new fields cannot be added. * that each node of the tree either doesn't exist or is a map, otherwise new fields cannot be added.
*/ */
public static boolean canAddField(String path, IngestDocument ingestDocument) { public static boolean canAddField(String path, IngestDocument ingestDocument) {
String[] pathElements = Strings.splitStringToArray(path, '.'); String[] pathElements = path.split("\\.");
Map<String, Object> innerMap = ingestDocument.getSourceAndMetadata(); Map<String, Object> innerMap = ingestDocument.getSourceAndMetadata();
if (pathElements.length > 1) { if (pathElements.length > 1) {
for (int i = 0; i < pathElements.length - 1; i++) { for (int i = 0; i < pathElements.length - 1; i++) {