Merge pull request #19016 from jasontedor/hot-methods-redux

Hot methods redux
This commit is contained in:
Jason Tedor 2016-06-22 06:41:38 -04:00 committed by GitHub
commit 9d6d8152ee
9 changed files with 369 additions and 238 deletions

View File

@ -108,6 +108,15 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
shardsIts = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
final int shardCount = shardsIts.size();
failIfOverShardCountLimit(clusterService, shardCount);
expectedSuccessfulOps = shardCount;
// we need to add 1 for non active partition, since we count it in the total!
expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();
firstResults = new AtomicArray<>(shardsIts.size());
}
private void failIfOverShardCountLimit(ClusterService clusterService, int shardCount) {
final long shardCountLimit = clusterService.getClusterSettings().get(TransportSearchAction.SHARD_COUNT_LIMIT_SETTING);
if (shardCount > shardCountLimit) {
throw new IllegalArgumentException("Trying to query " + shardCount + " shards, which is over the limit of "
@ -116,11 +125,6 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
+ "have a smaller number of larger shards. Update [" + TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey()
+ "] to a greater value if you really want to query that many shards at the same time.");
}
expectedSuccessfulOps = shardCount;
// we need to add 1 for non active partition, since we count it in the total!
expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();
firstResults = new AtomicArray<>(shardsIts.size());
}
public void start() {

View File

@ -42,9 +42,6 @@ import java.util.Set;
import static org.elasticsearch.action.search.SearchType.QUERY_AND_FETCH;
import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH;
/**
*
*/
public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {
/** The maximum number of shards for a single search request. */
@ -96,6 +93,10 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
logger.debug("failed to optimize search type, continue as normal", e);
}
searchAsyncAction(searchRequest, listener).start();
}
private AbstractSearchAsyncAction searchAsyncAction(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
AbstractSearchAsyncAction searchAsyncAction;
switch(searchRequest.searchType()) {
case DFS_QUERY_THEN_FETCH:
@ -117,6 +118,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
default:
throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]");
}
searchAsyncAction.start();
return searchAsyncAction;
}
}

View File

@ -112,6 +112,14 @@ public class ReplicationOperation<
if (logger.isTraceEnabled()) {
logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request);
}
performOnReplicas(primaryId, replicaRequest);
successfulShards.incrementAndGet();
decPendingAndFinishIfNeeded();
}
private void performOnReplicas(ShardId primaryId, ReplicaRequest replicaRequest) {
// we have to get a new state after successfully indexing into the primary in order to honour recovery semantics.
// we have to make sure that every operation indexed into the primary after recovery start will also be replicated
// to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then.
@ -134,9 +142,6 @@ public class ReplicationOperation<
performOnReplica(shard.buildTargetRelocatingShard(), replicaRequest);
}
}
successfulShards.incrementAndGet();
decPendingAndFinishIfNeeded();
}
private void performOnReplica(final ShardRouting shard, final ReplicaRequest replicaRequest) {

View File

@ -439,45 +439,21 @@ public abstract class StreamInput extends InputStream {
case 5:
return readBoolean();
case 6:
int bytesSize = readVInt();
byte[] value = new byte[bytesSize];
readBytes(value, 0, bytesSize);
return value;
return fastReadByteArray();
case 7:
int size = readVInt();
List list = new ArrayList(size);
for (int i = 0; i < size; i++) {
list.add(readGenericValue());
}
return list;
return readArrayList();
case 8:
int size8 = readVInt();
Object[] list8 = new Object[size8];
for (int i = 0; i < size8; i++) {
list8[i] = readGenericValue();
}
return list8;
return readArray();
case 9:
int size9 = readVInt();
Map map9 = new LinkedHashMap(size9);
for (int i = 0; i < size9; i++) {
map9.put(readString(), readGenericValue());
}
return map9;
return readLinkedHashMap();
case 10:
int size10 = readVInt();
Map map10 = new HashMap(size10);
for (int i = 0; i < size10; i++) {
map10.put(readString(), readGenericValue());
}
return map10;
return readHashMap();
case 11:
return readByte();
case 12:
return new Date(readLong());
return readDate();
case 13:
final String timeZoneId = readString();
return new DateTime(readLong(), DateTimeZone.forID(timeZoneId));
return readDateTime();
case 14:
return readBytesReference();
case 15:
@ -501,6 +477,59 @@ public abstract class StreamInput extends InputStream {
}
}
/**
 * Reads a length-prefixed byte array from the stream: a vint length
 * followed by exactly that many raw bytes.
 */
private byte[] fastReadByteArray() throws IOException {
    final int length = readVInt();
    final byte[] bytes = new byte[length];
    readBytes(bytes, 0, length);
    return bytes;
}
/**
 * Reads a length-prefixed list of generic values: a vint element count
 * followed by that many {@code readGenericValue()} entries.
 */
@SuppressWarnings("unchecked")
private List readArrayList() throws IOException {
    final int length = readVInt();
    final List values = new ArrayList(length); // presized to the serialized count
    int remaining = length;
    while (remaining-- > 0) {
        values.add(readGenericValue());
    }
    return values;
}
/**
 * Reads a Joda {@code DateTime}: the time zone id string is serialized
 * first, followed by the epoch millis as a long.
 */
private DateTime readDateTime() throws IOException {
    // read order matters: zone id string precedes the millis on the wire
    final DateTimeZone timeZone = DateTimeZone.forID(readString());
    return new DateTime(readLong(), timeZone);
}
/**
 * Reads a length-prefixed {@code Object[]}: a vint length followed by
 * that many generic values.
 */
private Object[] readArray() throws IOException {
    final int length = readVInt();
    final Object[] values = new Object[length];
    for (int index = 0; index < length; index++) {
        values[index] = readGenericValue();
    }
    return values;
}
/**
 * Reads an insertion-ordered map: a vint entry count, then that many
 * (string key, generic value) pairs, preserving wire order.
 */
private Map readLinkedHashMap() throws IOException {
    final int entryCount = readVInt();
    final Map result = new LinkedHashMap(entryCount);
    for (int index = 0; index < entryCount; index++) {
        // key must be read before the value; extract it to make the order explicit
        final String key = readString();
        result.put(key, readGenericValue());
    }
    return result;
}
/**
 * Reads an unordered map: a vint entry count, then that many
 * (string key, generic value) pairs.
 */
private Map readHashMap() throws IOException {
    final int entryCount = readVInt();
    final Map result = new HashMap(entryCount);
    for (int index = 0; index < entryCount; index++) {
        // key must be read before the value; extract it to make the order explicit
        final String key = readString();
        result.put(key, readGenericValue());
    }
    return result;
}
/** Reads a {@code java.util.Date} serialized as epoch millis in a single long. */
private Date readDate() throws IOException {
    final long millis = readLong();
    return new Date(millis);
}
/**
* Reads a {@link GeoPoint} from this stream input
*/

View File

@ -48,7 +48,9 @@ import java.nio.file.FileSystemException;
import java.nio.file.FileSystemLoopException;
import java.nio.file.NoSuchFileException;
import java.nio.file.NotDirectoryException;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@ -401,100 +403,152 @@ public abstract class StreamOutput extends OutputStream {
writeGenericValue(map);
}
// Serialization callback used by the WRITERS dispatch table below: each
// implementation writes its type-marker byte followed by the encoded value.
@FunctionalInterface
interface Writer {
    void write(StreamOutput o, Object value) throws IOException;
}
// Dispatch table mapping a runtime class to its serializer. Each writer emits a
// type-marker byte (0-22) followed by the value's encoding; the markers must stay
// in sync with the switch in StreamInput#readGenericValue.
private final static Map<Class<?>, Writer> WRITERS;
static {
    Map<Class<?>, Writer> writers = new HashMap<>();
    writers.put(String.class, (o, v) -> {
        o.writeByte((byte) 0);
        o.writeString((String) v);
    });
    writers.put(Integer.class, (o, v) -> {
        o.writeByte((byte) 1);
        o.writeInt((Integer) v);
    });
    writers.put(Long.class, (o, v) -> {
        o.writeByte((byte) 2);
        o.writeLong((Long) v);
    });
    writers.put(Float.class, (o, v) -> {
        o.writeByte((byte) 3);
        o.writeFloat((float) v);
    });
    writers.put(Double.class, (o, v) -> {
        o.writeByte((byte) 4);
        o.writeDouble((double) v);
    });
    writers.put(Boolean.class, (o, v) -> {
        o.writeByte((byte) 5);
        o.writeBoolean((boolean) v);
    });
    writers.put(byte[].class, (o, v) -> {
        o.writeByte((byte) 6);
        final byte[] bytes = (byte[]) v;
        o.writeVInt(bytes.length);
        o.writeBytes(bytes);
    });
    // collections are serialized as a vint size followed by each element
    writers.put(List.class, (o, v) -> {
        o.writeByte((byte) 7);
        final List list = (List) v;
        o.writeVInt(list.size());
        for (Object item : list) {
            o.writeGenericValue(item);
        }
    });
    writers.put(Object[].class, (o, v) -> {
        o.writeByte((byte) 8);
        final Object[] list = (Object[]) v;
        o.writeVInt(list.length);
        for (Object item : list) {
            o.writeGenericValue(item);
        }
    });
    writers.put(Map.class, (o, v) -> {
        // marker 9 preserves insertion order on read (LinkedHashMap), 10 does not
        if (v instanceof LinkedHashMap) {
            o.writeByte((byte) 9);
        } else {
            o.writeByte((byte) 10);
        }
        @SuppressWarnings("unchecked")
        final Map<String, Object> map = (Map<String, Object>) v;
        o.writeVInt(map.size());
        for (Map.Entry<String, Object> entry : map.entrySet()) {
            o.writeString(entry.getKey());
            o.writeGenericValue(entry.getValue());
        }
    });
    writers.put(Byte.class, (o, v) -> {
        o.writeByte((byte) 11);
        o.writeByte((Byte) v);
    });
    writers.put(Date.class, (o, v) -> {
        o.writeByte((byte) 12);
        o.writeLong(((Date) v).getTime());
    });
    writers.put(ReadableInstant.class, (o, v) -> {
        o.writeByte((byte) 13);
        final ReadableInstant instant = (ReadableInstant) v;
        // zone id first, then millis — readDateTime consumes them in this order
        o.writeString(instant.getZone().getID());
        o.writeLong(instant.getMillis());
    });
    writers.put(BytesReference.class, (o, v) -> {
        o.writeByte((byte) 14);
        o.writeBytesReference((BytesReference) v);
    });
    writers.put(Text.class, (o, v) -> {
        o.writeByte((byte) 15);
        o.writeText((Text) v);
    });
    writers.put(Short.class, (o, v) -> {
        o.writeByte((byte) 16);
        o.writeShort((Short) v);
    });
    writers.put(int[].class, (o, v) -> {
        o.writeByte((byte) 17);
        o.writeIntArray((int[]) v);
    });
    writers.put(long[].class, (o, v) -> {
        o.writeByte((byte) 18);
        o.writeLongArray((long[]) v);
    });
    writers.put(float[].class, (o, v) -> {
        o.writeByte((byte) 19);
        o.writeFloatArray((float[]) v);
    });
    writers.put(double[].class, (o, v) -> {
        o.writeByte((byte) 20);
        o.writeDoubleArray((double[]) v);
    });
    writers.put(BytesRef.class, (o, v) -> {
        o.writeByte((byte) 21);
        o.writeBytesRef((BytesRef) v);
    });
    writers.put(GeoPoint.class, (o, v) -> {
        o.writeByte((byte) 22);
        o.writeGeoPoint((GeoPoint) v);
    });
    WRITERS = Collections.unmodifiableMap(writers);
}
public void writeGenericValue(@Nullable Object value) throws IOException {
if (value == null) {
writeByte((byte) -1);
return;
}
Class type = value.getClass();
if (type == String.class) {
writeByte((byte) 0);
writeString((String) value);
} else if (type == Integer.class) {
writeByte((byte) 1);
writeInt((Integer) value);
} else if (type == Long.class) {
writeByte((byte) 2);
writeLong((Long) value);
} else if (type == Float.class) {
writeByte((byte) 3);
writeFloat((Float) value);
} else if (type == Double.class) {
writeByte((byte) 4);
writeDouble((Double) value);
} else if (type == Boolean.class) {
writeByte((byte) 5);
writeBoolean((Boolean) value);
} else if (type == byte[].class) {
writeByte((byte) 6);
writeVInt(((byte[]) value).length);
writeBytes(((byte[]) value));
} else if (value instanceof List) {
writeByte((byte) 7);
List list = (List) value;
writeVInt(list.size());
for (Object o : list) {
writeGenericValue(o);
}
final Class type;
if (value instanceof List) {
type = List.class;
} else if (value instanceof Object[]) {
writeByte((byte) 8);
Object[] list = (Object[]) value;
writeVInt(list.length);
for (Object o : list) {
writeGenericValue(o);
}
type = Object[].class;
} else if (value instanceof Map) {
if (value instanceof LinkedHashMap) {
writeByte((byte) 9);
} else {
writeByte((byte) 10);
}
@SuppressWarnings("unchecked")
Map<String, Object> map = (Map<String, Object>) value;
writeVInt(map.size());
for (Map.Entry<String, Object> entry : map.entrySet()) {
writeString(entry.getKey());
writeGenericValue(entry.getValue());
}
} else if (type == Byte.class) {
writeByte((byte) 11);
writeByte((Byte) value);
} else if (type == Date.class) {
writeByte((byte) 12);
writeLong(((Date) value).getTime());
type = Map.class;
} else if (value instanceof ReadableInstant) {
writeByte((byte) 13);
writeString(((ReadableInstant) value).getZone().getID());
writeLong(((ReadableInstant) value).getMillis());
type = ReadableInstant.class;
} else if (value instanceof BytesReference) {
writeByte((byte) 14);
writeBytesReference((BytesReference) value);
} else if (value instanceof Text) {
writeByte((byte) 15);
writeText((Text) value);
} else if (type == Short.class) {
writeByte((byte) 16);
writeShort((Short) value);
} else if (type == int[].class) {
writeByte((byte) 17);
writeIntArray((int[]) value);
} else if (type == long[].class) {
writeByte((byte) 18);
writeLongArray((long[]) value);
} else if (type == float[].class) {
writeByte((byte) 19);
writeFloatArray((float[]) value);
} else if (type == double[].class) {
writeByte((byte) 20);
writeDoubleArray((double[]) value);
} else if (value instanceof BytesRef) {
writeByte((byte) 21);
writeBytesRef((BytesRef) value);
} else if (type == GeoPoint.class) {
writeByte((byte) 22);
writeGeoPoint((GeoPoint) value);
type = BytesReference.class;
} else {
throw new IOException("Can't write type [" + type + "]");
type = value.getClass();
}
final Writer writer = WRITERS.get(type);
if (writer != null) {
writer.write(this, value);
} else {
throw new IOException("can not write type [" + type + "]");
}
}

View File

@ -840,6 +840,10 @@ public abstract class Engine implements Closeable {
public long endTime() {
return this.endTime;
}
abstract String type();
abstract String id();
}
public static class Index extends Operation {
@ -863,10 +867,12 @@ public abstract class Engine implements Closeable {
return this.doc;
}
@Override
public String type() {
    // the type is taken from the parsed document rather than stored on the operation
    return this.doc.type();
}
@Override
public String id() {
    // the id is taken from the parsed document rather than stored on the operation
    return this.doc.id();
}
@ -929,10 +935,12 @@ public abstract class Engine implements Closeable {
this(template.type(), template.id(), template.uid(), template.version(), versionType, template.origin(), template.startTime(), template.found());
}
@Override
public String type() {
    // backed directly by the field captured at construction time
    return this.type;
}
@Override
public String id() {
    // backed directly by the field captured at construction time
    return this.id;
}

View File

@ -314,7 +314,7 @@ public class InternalEngine extends Engine {
try (ReleasableLock lock = readLock.acquire()) {
ensureOpen();
if (get.realtime()) {
VersionValue versionValue = versionMap.getUnderLock(get.uid().bytes());
VersionValue versionValue = versionMap.getUnderLock(get.uid());
if (versionValue != null) {
if (versionValue.delete()) {
return GetResult.NOT_EXISTS;
@ -336,6 +336,59 @@ public class InternalEngine extends Engine {
}
}
/**
 * Checks the operation's expected version against the current version.
 *
 * @return {@code true} if there is a conflict but the operation originates from
 *         recovery (the caller should silently skip the operation);
 *         {@code false} if there is no conflict at all
 * @throws VersionConflictEngineException if there is a conflict and the
 *         operation did not originate from recovery
 */
private boolean checkVersionConflict(
    final Operation op,
    final long currentVersion,
    final long expectedVersion,
    final boolean deleted) {
    if (op.versionType().isVersionConflictForWrites(currentVersion, expectedVersion, deleted) == false) {
        // no conflict at all
        return false;
    }
    if (op.origin().isRecovery() == false) {
        // fatal version conflict
        throw new VersionConflictEngineException(shardId, op.type(), op.id(),
            op.versionType().explainConflictForWrites(currentVersion, expectedVersion, deleted));
    }
    // version conflict, but okay during recovery
    return true;
}
/**
 * Resolves the current version from a version-map entry, treating a delete whose
 * GC window has elapsed (and with GC of deletes enabled) as if the document were
 * never found.
 */
private long checkDeletedAndGCed(VersionValue versionValue) {
    // short-circuit order matters: only consult the clock for delete entries with GC enabled
    final boolean deletedAndGCed = engineConfig.isEnableGcDeletes()
        && versionValue.delete()
        && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > getGcDeletesInMillis();
    return deletedAndGCed ? Versions.NOT_FOUND : versionValue.version();
}
// Factory for live (non-delete) version-map entries. Note the time argument is
// deliberately ignored: only delete entries need a timestamp (for GC of deletes).
// Made final: this is a constant and must not be reassignable.
private static final VersionValueSupplier NEW_VERSION_VALUE = (u, t, l) -> new VersionValue(u, l);

// Abstracts construction of VersionValue vs DeleteVersionValue so that
// maybeAddToTranslog can be shared between the index and delete paths.
@FunctionalInterface
private interface VersionValueSupplier {
    VersionValue apply(long updatedVersion, long time, Translog.Location location);
}
/**
 * Records the operation in the translog (unless we are replaying from the
 * translog itself) and publishes the updated version to the version map.
 *
 * @param op             the completed index or delete operation
 * @param updatedVersion the version to publish for this uid
 * @param toTranslogOp   converts the operation to its translog representation
 * @param toVersionValue builds the version-map entry (live or delete flavor)
 */
private <T extends Engine.Operation> void maybeAddToTranslog(
    final T op,
    final long updatedVersion,
    final Function<T, Translog.Operation> toTranslogOp,
    final VersionValueSupplier toVersionValue) throws IOException {
    if (op.origin() == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
        // we do not replay in to the translog, so there is no translog location;
        // that is okay because real-time gets are not possible during recovery
        // and we will flush when the recovery is complete
        versionMap.putUnderLock(op.uid().bytes(),
            toVersionValue.apply(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), null));
    } else {
        op.setTranslogLocation(translog.add(toTranslogOp.apply(op)));
        versionMap.putUnderLock(op.uid().bytes(),
            toVersionValue.apply(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), op.getTranslogLocation()));
    }
}
@Override
public boolean index(Index index) {
final boolean created;
@ -361,58 +414,56 @@ public class InternalEngine extends Engine {
lastWriteNanos = index.startTime();
final long currentVersion;
final boolean deleted;
VersionValue versionValue = versionMap.getUnderLock(index.uid().bytes());
final VersionValue versionValue = versionMap.getUnderLock(index.uid());
if (versionValue == null) {
currentVersion = loadCurrentVersionFromIndex(index.uid());
deleted = currentVersion == Versions.NOT_FOUND;
} else {
currentVersion = checkDeletedAndGCed(versionValue);
deleted = versionValue.delete();
if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > getGcDeletesInMillis()) {
currentVersion = Versions.NOT_FOUND; // deleted, and GC
} else {
currentVersion = versionValue.version();
}
}
long expectedVersion = index.version();
if (isVersionConflictForWrites(index, currentVersion, deleted, expectedVersion)) {
if (!index.origin().isRecovery()) {
throw new VersionConflictEngineException(shardId, index.type(), index.id(),
index.versionType().explainConflictForWrites(currentVersion, expectedVersion, deleted));
}
return false;
}
long updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion);
final long expectedVersion = index.version();
if (checkVersionConflict(index, currentVersion, expectedVersion, deleted)) return false;
final boolean created;
index.updateVersion(updatedVersion);
final long updatedVersion = updateVersion(index, currentVersion, expectedVersion);
if (currentVersion == Versions.NOT_FOUND) {
// document does not exists, we can optimize for create
created = true;
index(index, indexWriter);
} else {
created = update(index, versionValue, indexWriter);
}
final boolean created = indexOrUpdate(index, currentVersion, versionValue);
if (index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
final Translog.Location translogLocation = translog.add(new Translog.Index(index));
index.setTranslogLocation(translogLocation);
versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, index.getTranslogLocation()));
} else {
// we do not replay in to the translog, so there is no
// translog location; that is okay because real-time
// gets are not possible during recovery and we will
// flush when the recovery is complete
versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, null));
}
maybeAddToTranslog(index, updatedVersion, Translog.Index::new, NEW_VERSION_VALUE);
return created;
}
}
private static boolean update(Index index, VersionValue versionValue, IndexWriter indexWriter) throws IOException {
boolean created;
/**
 * Computes the new version for the operation from the current and expected
 * versions, stamps it onto the operation, and returns it.
 */
private long updateVersion(Engine.Operation op, long currentVersion, long expectedVersion) {
    final long newVersion = op.versionType().updateVersion(currentVersion, expectedVersion);
    op.updateVersion(newVersion);
    return newVersion;
}
/**
 * Writes the document to Lucene, choosing the create-optimized path when no
 * prior version exists.
 *
 * @return {@code true} if the document was newly created, {@code false} if it
 *         replaced an existing one
 */
private boolean indexOrUpdate(final Index index, final long currentVersion, final VersionValue versionValue) throws IOException {
    if (currentVersion == Versions.NOT_FOUND) {
        // document does not exist, we can optimize for create
        index(index, indexWriter);
        return true;
    }
    return update(index, versionValue, indexWriter);
}
// Adds the parsed document(s) to the Lucene writer. A multi-document operation
// is added as a block via addDocuments; a single document via addDocument.
// NOTE(review): assumes docs() is never empty — get(0) would throw otherwise.
private static void index(final Index index, final IndexWriter indexWriter) throws IOException {
    if (index.docs().size() > 1) {
        indexWriter.addDocuments(index.docs());
    } else {
        indexWriter.addDocument(index.docs().get(0));
    }
}
private static boolean update(final Index index, final VersionValue versionValue, final IndexWriter indexWriter) throws IOException {
final boolean created;
if (versionValue != null) {
created = versionValue.delete(); // we have a delete which is not GC'ed...
} else {
@ -426,18 +477,6 @@ public class InternalEngine extends Engine {
return created;
}
private static void index(Index index, IndexWriter indexWriter) throws IOException {
if (index.docs().size() > 1) {
indexWriter.addDocuments(index.docs());
} else {
indexWriter.addDocument(index.docs().get(0));
}
}
private boolean isVersionConflictForWrites(Index index, long currentVersion, boolean deleted, long expectedVersion) {
return index.versionType().isVersionConflictForWrites(currentVersion, expectedVersion, deleted);
}
@Override
public void delete(Delete delete) throws EngineException {
try (ReleasableLock lock = readLock.acquire()) {
@ -465,59 +504,44 @@ public class InternalEngine extends Engine {
lastWriteNanos = delete.startTime();
final long currentVersion;
final boolean deleted;
VersionValue versionValue = versionMap.getUnderLock(delete.uid().bytes());
final VersionValue versionValue = versionMap.getUnderLock(delete.uid());
if (versionValue == null) {
currentVersion = loadCurrentVersionFromIndex(delete.uid());
deleted = currentVersion == Versions.NOT_FOUND;
} else {
currentVersion = checkDeletedAndGCed(versionValue);
deleted = versionValue.delete();
if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > getGcDeletesInMillis()) {
currentVersion = Versions.NOT_FOUND; // deleted, and GC
} else {
currentVersion = versionValue.version();
}
}
long updatedVersion;
long expectedVersion = delete.version();
if (delete.versionType().isVersionConflictForWrites(currentVersion, expectedVersion, deleted)) {
if (delete.origin().isRecovery()) {
return;
} else {
throw new VersionConflictEngineException(shardId, delete.type(), delete.id(),
delete.versionType().explainConflictForWrites(currentVersion, expectedVersion, deleted));
}
}
updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion);
final boolean found;
if (currentVersion == Versions.NOT_FOUND) {
// doc does not exist and no prior deletes
found = false;
} else if (versionValue != null && versionValue.delete()) {
// a "delete on delete", in this case, we still increment the version, log it, and return that version
found = false;
} else {
// we deleted a currently existing document
indexWriter.deleteDocuments(delete.uid());
found = true;
}
final long expectedVersion = delete.version();
if (checkVersionConflict(delete, currentVersion, expectedVersion, deleted)) return;
final long updatedVersion = updateVersion(delete, currentVersion, expectedVersion);
final boolean found = deleteIfFound(delete, currentVersion, deleted, versionValue);
delete.updateVersion(updatedVersion, found);
if (delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
final Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
delete.setTranslogLocation(translogLocation);
versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), delete.getTranslogLocation()));
} else {
// we do not replay in to the translog, so there is no
// translog location; that is okay because real-time
// gets are not possible during recovery and we will
// flush when the recovery is complete
versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), null));
}
maybeAddToTranslog(delete, updatedVersion, Translog.Delete::new, DeleteVersionValue::new);
}
}
/**
 * Removes the document from Lucene if it currently exists.
 *
 * @return {@code true} if a live document was actually deleted, {@code false}
 *         if there was nothing to delete (missing doc or delete-on-delete)
 */
private boolean deleteIfFound(Delete delete, long currentVersion, boolean deleted, VersionValue versionValue) throws IOException {
    if (currentVersion == Versions.NOT_FOUND) {
        // doc does not exist and no prior deletes
        return false;
    }
    if (versionValue != null && deleted) {
        // a "delete on delete", in this case, we still increment the version, log it, and return that version
        return false;
    }
    // we deleted a currently existing document
    indexWriter.deleteDocuments(delete.uid());
    return true;
}
@Override
public void refresh(String source) throws EngineException {
// we obtain a read lock here, since we don't want a flush to happen while we are refreshing

View File

@ -19,6 +19,7 @@
package org.elasticsearch.index.engine;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.ReferenceManager;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.BytesRef;
@ -126,21 +127,21 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
}
/** Returns the live version (add or delete) for this uid. */
VersionValue getUnderLock(BytesRef uid) {
/**
 * Returns the live version (add or delete) for this uid, consulting the
 * current map, then the old map, then the tombstones.
 *
 * <p>Hot path: the uid bytes are extracted once instead of on every lookup.
 */
VersionValue getUnderLock(final Term uid) {
    final BytesRef uidBytes = uid.bytes();
    final Maps currentMaps = maps;
    // First try to get the "live" value:
    VersionValue value = currentMaps.current.get(uidBytes);
    if (value != null) {
        return value;
    }
    value = currentMaps.old.get(uidBytes);
    if (value != null) {
        return value;
    }
    return tombstones.get(uidBytes);
}
/** Adds this uid/version to the pending adds map. */

View File

@ -92,23 +92,7 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
}
if (task instanceof CancellableTask) {
CancellableTask cancellableTask = (CancellableTask) task;
CancellableTaskHolder holder = new CancellableTaskHolder(cancellableTask);
CancellableTaskHolder oldHolder = cancellableTasks.put(task.getId(), holder);
assert oldHolder == null;
// Check if this task was banned before we start it
if (task.getParentTaskId().isSet() && banedParents.isEmpty() == false) {
String reason = banedParents.get(task.getParentTaskId());
if (reason != null) {
try {
holder.cancel(reason);
throw new IllegalStateException("Task cancelled before it started: " + reason);
} finally {
// let's clean up the registration
unregister(task);
}
}
}
registerCancellableTask(task);
} else {
Task previousTask = tasks.put(task.getId(), task);
assert previousTask == null;
@ -116,6 +100,26 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
return task;
}
/**
 * Registers a cancellable task and, if a ban for its parent was issued before
 * registration, immediately cancels and unregisters it.
 *
 * @throws IllegalStateException if the task's parent was banned before the task started
 */
private void registerCancellableTask(Task task) {
    final CancellableTaskHolder holder = new CancellableTaskHolder((CancellableTask) task);
    final CancellableTaskHolder previousHolder = cancellableTasks.put(task.getId(), holder);
    assert previousHolder == null;
    // Check if this task was banned before we start it
    if (task.getParentTaskId().isSet() == false || banedParents.isEmpty()) {
        return;
    }
    final String reason = banedParents.get(task.getParentTaskId());
    if (reason == null) {
        return;
    }
    try {
        holder.cancel(reason);
        throw new IllegalStateException("Task cancelled before it started: " + reason);
    } finally {
        // let's clean up the registration
        unregister(task);
    }
}
/**
* Cancels a task
* <p>