Merge branch 'master' into close-index-api-refactoring

Tanguy Leroux 2019-01-09 10:52:46 +01:00
commit 096a83183e
6 changed files with 39 additions and 27 deletions


@@ -51,10 +51,6 @@ public class SnapshotException extends ElasticsearchException {
         }
     }
 
-    public SnapshotException(final String repositoryName, final SnapshotId snapshotId, final String msg) {
-        this(repositoryName, snapshotId, msg, null);
-    }
-
     public SnapshotException(final String repositoryName, final SnapshotId snapshotId, final String msg, final Throwable cause) {
         super("[" + repositoryName + ":" + snapshotId + "] " + msg, cause);
         this.repositoryName = repositoryName;
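The dropped overload only delegated to the constructor that takes a cause. A minimal sketch of a hypothetical call site (not taken from this change; repositoryName, snapshotId and the message are placeholders), assuming callers now pass the cause explicitly:

    // Hypothetical call site, for illustration only: the remaining four-argument
    // constructor is used directly, with null where no cause is available.
    throw new SnapshotException(repositoryName, snapshotId, "snapshot is missing", null);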


@@ -131,25 +131,17 @@ public final class SnapshotId implements Comparable<SnapshotId>, Writeable, ToXC
     }
 
     public static SnapshotId fromXContent(XContentParser parser) throws IOException {
-        // the new format from 5.0 which contains the snapshot name and uuid
-        if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
-            String name = null;
-            String uuid = null;
-            while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
-                String currentFieldName = parser.currentName();
-                parser.nextToken();
-                if (NAME.equals(currentFieldName)) {
-                    name = parser.text();
-                } else if (UUID.equals(currentFieldName)) {
-                    uuid = parser.text();
-                }
+        String name = null;
+        String uuid = null;
+        while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+            String currentFieldName = parser.currentName();
+            parser.nextToken();
+            if (NAME.equals(currentFieldName)) {
+                name = parser.text();
+            } else if (UUID.equals(currentFieldName)) {
+                uuid = parser.text();
             }
-            return new SnapshotId(name, uuid);
-        } else {
-            // the old format pre 5.0 that only contains the snapshot name, use the name as the uuid too
-            final String name = parser.text();
-            return new SnapshotId(name, name);
         }
+        return new SnapshotId(name, uuid);
     }
 }
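With the pre-5.0 fallback gone, fromXContent only understands the object form that carries both name and uuid. A hedged sketch of a caller (not part of this change; the JSON literal and the empty registry are assumptions, and the fragment belongs inside a method that throws IOException):

    String json = "{\"name\":\"my_snapshot\",\"uuid\":\"sOmE-uUiD\"}";   // illustrative 5.0+ object form
    try (XContentParser parser = JsonXContent.jsonXContent.createParser(
            NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
        parser.nextToken();                                   // position the parser on START_OBJECT
        SnapshotId snapshotId = SnapshotId.fromXContent(parser);
        assert "my_snapshot".equals(snapshotId.getName());    // a bare pre-5.0 string would no longer parse
    }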


@@ -94,7 +94,7 @@ import static org.elasticsearch.transport.EmptyTransportResponseHandler.INSTANCE
 public class SnapshotShardsService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener {
 
     private static final Logger logger = LogManager.getLogger(SnapshotShardsService.class);
 
-    public static final String UPDATE_SNAPSHOT_STATUS_ACTION_NAME = "internal:cluster/snapshot/update_snapshot_status";
+    private static final String UPDATE_SNAPSHOT_STATUS_ACTION_NAME = "internal:cluster/snapshot/update_snapshot_status";
 
     private final ClusterService clusterService;


@@ -515,7 +515,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
             cleanupAfterError(e);
         }
 
-        public void onNoLongerMaster(String source) {
+        public void onNoLongerMaster() {
             userCreateSnapshotListener.onFailure(e);
         }
@@ -1073,7 +1073,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
         @Override
         public void onNoLongerMaster(String source) {
             if (listener != null) {
-                listener.onNoLongerMaster(source);
+                listener.onNoLongerMaster();
             }
         }
@@ -1423,8 +1423,6 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
                 builder.put(shardId,
                     new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "primary shard is not allocated"));
             } else if (primary.relocating() || primary.initializing()) {
-                // The WAITING state was introduced in V1.2.0 -
-                // don't use it if there are nodes with older version in the cluster
                 builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), State.WAITING));
             } else if (!primary.started()) {
                 builder.put(shardId,


@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.cluster.coordination;
 
+import com.carrotsearch.randomizedtesting.RandomizedContext;
 import org.apache.logging.log4j.CloseableThreadContext;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -70,6 +71,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
+import java.util.concurrent.Callable;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Consumer;
 import java.util.function.Function;
@@ -128,6 +130,24 @@ public class CoordinatorTests extends ESTestCase {
         resetPortCounter();
     }
 
+    // check that runRandomly leads to reproducible results
+    public void testRepeatableTests() throws Exception {
+        final Callable<Long> test = () -> {
+            final Cluster cluster = new Cluster(randomIntBetween(1, 5));
+            cluster.runRandomly();
+            final long afterRunRandomly = value(cluster.getAnyNode().getLastAppliedClusterState());
+            cluster.stabilise();
+            final long afterStabilisation = value(cluster.getAnyNode().getLastAppliedClusterState());
+            return afterRunRandomly ^ afterStabilisation;
+        };
+        final long seed = randomLong();
+        logger.info("First run with seed [{}]", seed);
+        final long result1 = RandomizedContext.current().runWithPrivateRandomness(seed, test);
+        logger.info("Second run with seed [{}]", seed);
+        final long result2 = RandomizedContext.current().runWithPrivateRandomness(seed, test);
+        assertEquals(result1, result2);
+    }
+
     public void testCanUpdateClusterStateAfterStabilisation() {
         final Cluster cluster = new Cluster(randomIntBetween(1, 5));
         cluster.runRandomly();
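testRepeatableTests works because runWithPrivateRandomness re-seeds the test's random source for the duration of the Callable, so the same seed must drive the same sequence of random choices in both runs. A stripped-down sketch of the same pattern (hypothetical test, not part of this change), replacing the Cluster with a trivial randomness-driven value:

    public void testPrivateRandomnessIsRepeatable() throws Exception {
        // Two runs under the same private seed see the same random stream,
        // so any value derived purely from that stream must match.
        final Callable<Long> draw = () -> (randomLong() * 31) ^ randomIntBetween(0, 1000);
        final long seed = randomLong();
        final long first = RandomizedContext.current().runWithPrivateRandomness(seed, draw);
        final long second = RandomizedContext.current().runWithPrivateRandomness(seed, draw);
        assertEquals(first, second);
    }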


@@ -72,6 +72,9 @@ public class TimeoutCheckerTests extends FileStructureTestCase {
             } finally {
                 TimeoutChecker.watchdog.unregister();
             }
+        } finally {
+            // ensure the interrupted flag is cleared to stop it making subsequent tests fail
+            Thread.interrupted();
         }
     }
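The added finally block works because Thread.interrupted() both reports and clears the calling thread's interrupt status, whereas Thread.currentThread().isInterrupted() only reports it. A small standalone illustration of that JDK behaviour (not part of this change):

    public class InterruptFlagDemo {
        public static void main(String[] args) {
            Thread.currentThread().interrupt();                          // simulate an interrupt left over from a timeout
            System.out.println(Thread.currentThread().isInterrupted());  // true  - flag is still set
            System.out.println(Thread.interrupted());                    // true  - and the flag is cleared as a side effect
            System.out.println(Thread.interrupted());                    // false - later code no longer sees the interrupt
        }
    }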
@@ -89,6 +92,9 @@ public class TimeoutCheckerTests extends FileStructureTestCase {
                 assertEquals("Aborting grok captures test during [should timeout] as it has taken longer than the timeout of [" +
                     timeout + "]", e.getMessage());
             });
+        } finally {
+            // ensure the interrupted flag is cleared to stop it making subsequent tests fail
+            Thread.interrupted();
         }
     }
}