SOLR-13742: Allow optional redaction of data saved by 'bin/solr autoscaling -save'.

Fix some unwanted side-effects in snapshots + add more robust unit tests.
Andrzej Bialecki 2019-09-07 12:09:09 +02:00
parent da158ab229
commit 6f22bf0964
15 changed files with 13107 additions and 71 deletions

View File: solr/CHANGES.txt

@@ -153,6 +153,8 @@ Improvements
* SOLR-13728: If a partial update (aka atomic update) is attempted on a document that has child docs, then ensure
the schema supports it (_root_ stored/docValues) by throwing an exception. (David Smiley)
* SOLR-13742: Allow optional redaction of data saved by 'bin/solr autoscaling -save'. (ab)
Bug Fixes
----------------------

View File: solr/core/src/java/org/apache/solr/cloud/autoscaling/sim/SimUtils.java

@@ -16,6 +16,9 @@
*/
package org.apache.solr.cloud.autoscaling.sim;
import java.lang.invoke.MethodHandles;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
@@ -25,11 +28,13 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
import org.apache.solr.client.solrj.cloud.autoscaling.Cell;
import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
import org.apache.solr.client.solrj.cloud.autoscaling.ReplicaInfo;
import org.apache.solr.client.solrj.cloud.autoscaling.Row;
@@ -41,11 +46,17 @@ import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.params.CollectionAdminParams;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.Utils;
import org.apache.solr.util.RedactionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Various utility methods useful for autoscaling simulations and snapshots.
*/
public class SimUtils {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final Set<String> COMMON_REPLICA_TAGS = new HashSet<>(Arrays.asList(
Variable.Type.CORE_IDX.metricsAttribute,
@@ -231,10 +242,13 @@ public class SimUtils {
for (Row row : rows) {
Map<String, Object> nodeStat = nodeStats.computeIfAbsent(row.node, n -> new LinkedHashMap<>());
nodeStat.put("isLive", row.isLive());
nodeStat.put("freedisk", row.getVal("freedisk", 0));
nodeStat.put("totaldisk", row.getVal("totaldisk", 0));
for (Cell cell : row.getCells()) {
nodeStat.put(cell.getName(), cell.getValue());
}
// nodeStat.put("freedisk", row.getVal("freedisk", 0));
// nodeStat.put("totaldisk", row.getVal("totaldisk", 0));
int cores = ((Number)row.getVal("cores", 0)).intValue();
nodeStat.put("cores", cores);
// nodeStat.put("cores", cores);
coreStats.computeIfAbsent(cores, num -> new AtomicInteger()).incrementAndGet();
Map<String, Map<String, Map<String, Object>>> collReplicas = new TreeMap<>();
// check consistency
@@ -351,4 +365,32 @@ public class SimUtils {
params.add(CoreAdminParams.ACTION, a);
return params;
}
/**
* Prepare collection and node / host names for redaction.
* @param clusterState cluster state
*/
public static RedactionUtils.RedactionContext getRedactionContext(ClusterState clusterState) {
RedactionUtils.RedactionContext ctx = new RedactionUtils.RedactionContext();
TreeSet<String> names = new TreeSet<>(clusterState.getLiveNodes());
for (String nodeName : names) {
String urlString = Utils.getBaseUrlForNodeName(nodeName, "http");
try {
URL u = new URL(urlString);
// protocol format
String hostPort = u.getHost() + ":" + u.getPort();
ctx.addName(u.getHost() + ":" + u.getPort(), RedactionUtils.NODE_REDACTION_PREFIX);
// node name format
ctx.addEquivalentName(hostPort, u.getHost() + "_" + u.getPort() + "_", RedactionUtils.NODE_REDACTION_PREFIX);
} catch (MalformedURLException e) {
log.warn("Invalid URL for node name " + nodeName + ", replacing including protocol and path", e);
ctx.addName(urlString, RedactionUtils.NODE_REDACTION_PREFIX);
ctx.addEquivalentName(urlString, Utils.getBaseUrlForNodeName(nodeName, "https"), RedactionUtils.NODE_REDACTION_PREFIX);
}
}
names.clear();
names.addAll(clusterState.getCollectionStates().keySet());
names.forEach(n -> ctx.addName(n, RedactionUtils.COLL_REDACTION_PREFIX));
return ctx;
}
}
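For orientation, the context built above is consumed by RedactionUtils.redactNames (changed later in this commit): getRedactionContext builds the name-to-token map, redactNames applies it to serialized text. A minimal usage sketch, assuming a SolrCloudManager named cloudManager and an arbitrary Map<String, Object> named snapshotData (both placeholders, not part of this commit):

    ClusterState clusterState = cloudManager.getClusterStateProvider().getClusterState();
    // Maps each live node (both the host:port and host_port_ spellings) and each
    // collection name to a stable token such as N_1f or COLL_2.
    RedactionUtils.RedactionContext ctx = SimUtils.getRedactionContext(clusterState);
    String json = Utils.toJSONString(snapshotData);
    String redacted = RedactionUtils.redactNames(ctx.getRedactions(), json);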

View File: solr/core/src/java/org/apache/solr/cloud/autoscaling/sim/SnapshotCloudManager.java

@@ -46,11 +46,13 @@ import org.apache.solr.client.solrj.cloud.autoscaling.ReplicaInfo;
import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
import org.apache.solr.client.solrj.impl.ClusterStateProvider;
import org.apache.solr.client.solrj.request.V2Request;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.params.CollectionAdminParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.ObjectCache;
import org.apache.solr.common.util.TimeSource;
import org.apache.solr.common.util.Utils;
import org.apache.solr.util.RedactionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -71,8 +73,9 @@ public class SnapshotCloudManager implements SolrCloudManager {
public static final String DISTRIB_STATE_KEY = "distribState";
public static final String AUTOSCALING_STATE_KEY = "autoscalingState";
public static final String STATISTICS_STATE_KEY = "statistics";
public static final String AUTOSCALING_JSON_KEY = "autoscaling";
private static final List<String> REQUIRED_KEYS = Arrays.asList(
public static final List<String> REQUIRED_KEYS = Arrays.asList(
MANAGER_STATE_KEY,
CLUSTER_STATE_KEY,
NODE_STATE_KEY,
@@ -93,16 +96,25 @@
(Map<String, Object>)snapshot.getOrDefault(MANAGER_STATE_KEY, Collections.emptyMap()),
(Map<String, Object>)snapshot.getOrDefault(CLUSTER_STATE_KEY, Collections.emptyMap()),
(Map<String, Object>)snapshot.getOrDefault(NODE_STATE_KEY, Collections.emptyMap()),
(Map<String, Object>)snapshot.getOrDefault(DISTRIB_STATE_KEY, Collections.emptyMap())
(Map<String, Object>)snapshot.getOrDefault(DISTRIB_STATE_KEY, Collections.emptyMap()),
(Map<String, Object>)snapshot.getOrDefault(AUTOSCALING_JSON_KEY, Collections.emptyMap())
);
}
public void saveSnapshot(File targetDir, boolean withAutoscaling) throws Exception {
Map<String, Object> snapshot = getSnapshot(withAutoscaling);
public void saveSnapshot(File targetDir, boolean withAutoscaling, boolean redact) throws Exception {
Map<String, Object> snapshot = getSnapshot(withAutoscaling, redact);
ClusterState clusterState = getClusterStateProvider().getClusterState();
RedactionUtils.RedactionContext ctx = SimUtils.getRedactionContext(clusterState);
targetDir.mkdirs();
for (Map.Entry<String, Object> e : snapshot.entrySet()) {
FileOutputStream out = new FileOutputStream(new File(targetDir, e.getKey() + ".json"));
IOUtils.write(Utils.toJSON(e.getValue()), out);
if (redact) {
String data = Utils.toJSONString(e.getValue());
data = RedactionUtils.redactNames(ctx.getRedactions(), data);
IOUtils.write(data.getBytes("UTF-8"), out);
} else {
IOUtils.write(Utils.toJSON(e.getValue()), out);
}
out.flush();
out.close();
}
@@ -116,15 +128,19 @@
throw new Exception("Source path is not a directory: " + sourceDir);
}
Map<String, Object> snapshot = new HashMap<>();
List<String> allKeys = new ArrayList<>(REQUIRED_KEYS);
allKeys.add(AUTOSCALING_JSON_KEY);
int validData = 0;
for (String key : REQUIRED_KEYS) {
for (String key : allKeys) {
File src = new File(sourceDir, key + ".json");
if (src.exists()) {
InputStream is = new FileInputStream(src);
Map<String, Object> data = (Map<String, Object>)Utils.fromJSON(is);
is.close();
snapshot.put(key, data);
validData++;
if (REQUIRED_KEYS.contains(key)) {
validData++;
}
}
}
if (validData < REQUIRED_KEYS.size()) {
@@ -134,7 +150,7 @@
}
private void init(Map<String, Object> managerState, Map<String, Object> clusterState, Map<String, Object> nodeState,
Map<String, Object> distribState) throws Exception {
Map<String, Object> distribState, Map<String, Object> autoscalingJson) throws Exception {
Objects.requireNonNull(managerState);
Objects.requireNonNull(clusterState);
Objects.requireNonNull(nodeState);
@@ -142,20 +158,24 @@
this.timeSource = TimeSource.get((String)managerState.getOrDefault("timeSource", "simTime:50"));
this.clusterStateProvider = new SnapshotClusterStateProvider(clusterState);
this.nodeStateProvider = new SnapshotNodeStateProvider(nodeState);
this.distribStateManager = new SnapshotDistribStateManager(distribState);
if (autoscalingJson == null || autoscalingJson.isEmpty()) {
this.distribStateManager = new SnapshotDistribStateManager(distribState);
} else {
this.distribStateManager = new SnapshotDistribStateManager(distribState, new AutoScalingConfig(autoscalingJson));
}
SimUtils.checkConsistency(this, null);
}
public Map<String, Object> getSnapshot(boolean withAutoscaling) throws Exception {
public Map<String, Object> getSnapshot(boolean withAutoscaling, boolean redact) throws Exception {
Map<String, Object> snapshot = new LinkedHashMap<>(4);
Map<String, Object> managerState = new HashMap<>();
managerState.put("timeSource", timeSource.toString());
snapshot.put(MANAGER_STATE_KEY, managerState);
RedactionUtils.RedactionContext ctx = redact ? SimUtils.getRedactionContext(clusterStateProvider.getClusterState()) : null;
snapshot.put(CLUSTER_STATE_KEY, clusterStateProvider.getSnapshot());
snapshot.put(NODE_STATE_KEY, nodeStateProvider.getSnapshot());
snapshot.put(DISTRIB_STATE_KEY, distribStateManager.getSnapshot());
snapshot.put(DISTRIB_STATE_KEY, distribStateManager.getSnapshot(ctx));
if (withAutoscaling) {
AutoScalingConfig config = distribStateManager.getAutoScalingConfig();
Policy.Session session = config.getPolicy().createSession(this);
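For reference, the new call shape on these entry points, as exercised by TestSnapshotCloudManager further down (the target directory here is illustrative):

    SnapshotCloudManager mgr = new SnapshotCloudManager(realManager, null);
    // Writes one <key>.json file per snapshot section; with redact=true, node and
    // collection names are replaced before the bytes hit disk.
    mgr.saveSnapshot(new File("/tmp/snapshot"), true /* withAutoscaling */, true /* redact */);
    // In-memory form; the same flag threads a RedactionContext down to
    // SnapshotDistribStateManager.getSnapshot(ctx) so ZK node contents are redacted too.
    Map<String, Object> snapshot = mgr.getSnapshot(true /* withAutoscaling */, true /* redact */);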

View File: solr/core/src/java/org/apache/solr/cloud/autoscaling/sim/SnapshotDistribStateManager.java

@@ -18,11 +18,15 @@ package org.apache.solr.cloud.autoscaling.sim;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.solr.client.solrj.cloud.DistribStateManager;
@@ -35,6 +39,7 @@ import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.AutoScalingParams;
import org.apache.solr.common.util.Base64;
import org.apache.solr.common.util.Utils;
import org.apache.solr.util.RedactionUtils;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Op;
@@ -73,6 +78,14 @@ public class SnapshotDistribStateManager implements DistribStateManager {
* @param snapshot previous snapshot created using this class.
*/
public SnapshotDistribStateManager(Map<String, Object> snapshot) {
this(snapshot, null);
}
/**
* Populate this instance from a previously generated snapshot.
* @param snapshot previous snapshot created using this class.
* @param config optional config to override the one from snapshot, may be null
*/
public SnapshotDistribStateManager(Map<String, Object> snapshot, AutoScalingConfig config) {
snapshot.forEach((path, value) -> {
Map<String, Object> map = (Map<String, Object>)value;
Number version = (Number)map.getOrDefault("version", 0);
@@ -85,16 +98,34 @@
}
dataMap.put(path, new VersionedData(version.intValue(), bytes, mode, owner));
});
if (config != null) { // overwrite existing
VersionedData vd = new VersionedData(config.getZkVersion(), Utils.toJSON(config), CreateMode.PERSISTENT, "0");
dataMap.put(ZkStateReader.SOLR_AUTOSCALING_CONF_PATH, vd);
}
log.debug("- loaded snapshot of {} resources", dataMap.size());
}
// content of these nodes is a UTF-8 String and it needs to be redacted
private static final Set<Pattern> REDACTED = new HashSet<>() {{
add(Pattern.compile("/aliases\\.json"));
add(Pattern.compile("/autoscaling\\.json"));
add(Pattern.compile("/clusterstate\\.json"));
add(Pattern.compile("/collections/.*?/state\\.json"));
add(Pattern.compile("/collections/.*?/leaders/shard.*?/leader"));
add(Pattern.compile("/overseer_elect/leader"));
}};
/**
* Create a snapshot of all content in this instance.
*/
public Map<String, Object> getSnapshot() {
public Map<String, Object> getSnapshot(RedactionUtils.RedactionContext ctx) {
Map<String, Object> snapshot = new LinkedHashMap<>();
dataMap.forEach((path, vd) -> {
Map<String, Object> data = new HashMap<>();
if (vd.getData() != null && ctx != null && REDACTED.stream().anyMatch(p -> p.matcher(path).matches())) {
String str = new String(vd.getData(), Charset.forName("UTF-8"));
str = RedactionUtils.redactNames(ctx.getRedactions(), str);
vd = new VersionedData(vd.getVersion(), str.getBytes(Charset.forName("UTF-8")), vd.getMode(), vd.getOwner());
}
vd.toMap(data);
snapshot.put(path, data);
});
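To make the gating above concrete: matches() anchors the whole path, so only the listed well-known UTF-8 ZK nodes are rewritten and everything else passes through byte-for-byte. A small sketch (paths invented for illustration, evaluated against the REDACTED set above):

    boolean a = REDACTED.stream()
        .anyMatch(p -> p.matcher("/collections/coll1/state.json").matches());   // true
    boolean b = REDACTED.stream()
        .anyMatch(p -> p.matcher("/live_nodes/127.0.0.1:8983_solr").matches()); // false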

View File: solr/core/src/java/org/apache/solr/cloud/autoscaling/sim/SnapshotNodeStateProvider.java

@@ -158,12 +158,26 @@ public class SnapshotNodeStateProvider implements NodeStateProvider {
@Override
public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
return nodeValues.getOrDefault(node, Collections.emptyMap());
return new LinkedHashMap<>(nodeValues.getOrDefault(node, Collections.emptyMap()));
}
@Override
public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
return replicaInfos.getOrDefault(node, Collections.emptyMap());
Map<String, Map<String, List<ReplicaInfo>>> result = new LinkedHashMap<>();
Map<String, Map<String, List<ReplicaInfo>>> infos = replicaInfos.getOrDefault(node, Collections.emptyMap());
// deep copy
infos.forEach((coll, shards) -> {
shards.forEach((shard, replicas) -> {
replicas.forEach(ri -> {
List<ReplicaInfo> myReplicas = result
.computeIfAbsent(coll, c -> new LinkedHashMap<>())
.computeIfAbsent(shard, s -> new ArrayList<>());
ReplicaInfo myReplica = (ReplicaInfo)ri.clone();
myReplicas.add(myReplica);
});
});
});
return result;
}
public ReplicaInfo getReplicaInfo(String collection, String coreNode) {
@@ -171,7 +185,7 @@
for (List<ReplicaInfo> perShard : perNode.getOrDefault(collection, Collections.emptyMap()).values()) {
for (ReplicaInfo ri : perShard) {
if (ri.getName().equals(coreNode)) {
return ri;
return (ReplicaInfo)ri.clone();
}
}
}

View File: solr/core/src/java/org/apache/solr/util/RedactionUtils.java

@@ -17,16 +17,20 @@
package org.apache.solr.util;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.TreeMap;
import java.util.regex.Pattern;
public class RedactionUtils {
public static final String SOLR_REDACTION_SYSTEM_PATTERN_PROP = "solr.redaction.system.pattern";
private static Pattern pattern = Pattern.compile(System.getProperty(SOLR_REDACTION_SYSTEM_PATTERN_PROP, ".*password.*"), Pattern.CASE_INSENSITIVE);
private static final String REDACT_STRING = "--REDACTED--";
public static final String NODE_REDACTION_PREFIX = "N_";
public static final String COLL_REDACTION_PREFIX = "COLL_";
private static boolean redactSystemProperty = Boolean.parseBoolean(System.getProperty("solr.redaction.system.enabled", "true"));
@@ -52,27 +56,72 @@
}
/**
* Replace actual names found in a string with meaningless randomized names.
* @param names actual names
* @param redactionPrefix prefix to use for redacted names
* @param data string to redact
* @return redacted string where all actual names have been replaced.
* A helper class to build unique mappings from original to redacted names.
*/
public static String redactNames(Collection<String> names, String redactionPrefix, String data) {
Set<String> uniqueNames = new TreeSet<>(names);
Set<Integer> uniqueCode = new HashSet<>();
// minimal(ish) hash
int codeShift = 0;
int codeSpace = names.size();
for (String name : uniqueNames) {
public static final class RedactionContext {
private Map<String, String> redactions = new HashMap<>();
Map<String, Set<Integer>> uniqueCodes = new HashMap<>();
// minimal(ish) hash per prefix
Map<String, Integer> codeSpaces = new HashMap<>();
/**
* Add a name to be redacted.
* @param name original name
* @param redactionPrefix prefix for the redacted name
*/
public void addName(String name, String redactionPrefix) {
if (redactions.containsKey(name)) {
return;
}
int codeSpace = codeSpaces.computeIfAbsent(redactionPrefix, p -> 4);
int code = Math.abs(name.hashCode() % codeSpace);
Set<Integer> uniqueCode = uniqueCodes.computeIfAbsent(redactionPrefix, p -> new HashSet<>());
while (uniqueCode.contains(code)) {
codeShift++;
codeSpace = names.size() << codeShift;
codeSpace = codeSpace << 1;
codeSpaces.put(redactionPrefix, codeSpace);
code = Math.abs(name.hashCode() % codeSpace);
}
uniqueCode.add(code);
data = data.replaceAll("\\Q" + name + "\\E", redactionPrefix + Integer.toString(code, Character.MAX_RADIX));
redactions.put(name, redactionPrefix + Integer.toString(code, Character.MAX_RADIX));
}
/**
* Add a name that needs to be mapped to the same redacted format as another one.
* @param original original name already mapped (will be added automatically if missing)
* @param equivalent another name that needs to be mapped to the same redacted name
* @param redactionPrefix prefix for the redacted name
*/
public void addEquivalentName(String original, String equivalent, String redactionPrefix) {
if (!redactions.containsKey(original)) {
addName(original, redactionPrefix);
}
String redaction = redactions.get(original);
redactions.put(equivalent, redaction);
}
/**
* Get a map of original to redacted names.
*/
public Map<String, String> getRedactions() {
return redactions;
}
}
/**
* Replace actual names found in a string with redacted names.
* @param redactions a map of original to redacted names
* @param data string to redact
* @return redacted string where all actual names have been replaced.
*/
public static String redactNames(Map<String, String> redactions, String data) {
// replace the longest first to avoid partial replacements
Map<String, String> sorted = new TreeMap<>(Comparator
.comparing(String::length)
.reversed()
.thenComparing(String::compareTo));
sorted.putAll(redactions);
for (Map.Entry<String, String> entry : sorted.entrySet()) {
data = data.replaceAll("\\Q" + entry.getKey() + "\\E", entry.getValue());
}
return data;
}
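A self-contained sketch of the new mapping API (all names invented for illustration):

    RedactionUtils.RedactionContext ctx = new RedactionUtils.RedactionContext();
    ctx.addName("host1.example.com:8983", RedactionUtils.NODE_REDACTION_PREFIX);
    // The node-name spelling of the same endpoint maps to the same token.
    ctx.addEquivalentName("host1.example.com:8983", "host1.example.com_8983_",
        RedactionUtils.NODE_REDACTION_PREFIX);
    ctx.addName("orders", RedactionUtils.COLL_REDACTION_PREFIX);
    String out = RedactionUtils.redactNames(ctx.getRedactions(),
        "{\"base_url\":\"http://host1.example.com:8983/solr\",\"collection\":\"orders\"}");
    // out is now of the form {"base_url":"http://N_x/solr","collection":"COLL_y"},
    // where x and y are short base-36 codes. Longer originals are replaced first,
    // so a name like coll10 is never half-replaced by coll1 (cf. the coll1/coll10
    // collections created in the test below).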

View File: solr/core/src/java/org/apache/solr/util/SolrCLI.java

@@ -26,7 +26,6 @@ import java.io.InputStream;
import java.io.PrintStream;
import java.lang.invoke.MethodHandles;
import java.net.ConnectException;
import java.net.MalformedURLException;
import java.net.Socket;
import java.net.SocketException;
import java.net.URL;
@@ -863,8 +862,6 @@ public class SolrCLI implements CLIO {
public static class AutoscalingTool extends ToolBase {
static final String NODE_REDACTION_PREFIX = "N_";
static final String COLL_REDACTION_PREFIX = "COLL_";
public AutoscalingTool() {
this(CLIO.getOutStream());
@@ -987,9 +984,10 @@
}
}
}
boolean redact = cli.hasOption("r");
if (cli.hasOption("save")) {
File targetDir = new File(cli.getOptionValue("save"));
cloudManager.saveSnapshot(targetDir, true);
cloudManager.saveSnapshot(targetDir, true, redact);
CLIO.err("- saved autoscaling snapshot to " + targetDir.getAbsolutePath());
}
HashSet<String> liveNodes = new HashSet<>();
@@ -999,7 +997,6 @@
boolean withSortedNodes = cli.hasOption("n");
boolean withClusterState = cli.hasOption("c");
boolean withStats = cli.hasOption("stats");
boolean redact = cli.hasOption("r");
if (cli.hasOption("all")) {
withSuggestions = true;
withDiagnostics = true;
@@ -1008,25 +1005,11 @@
withStats = true;
}
// prepare to redact also host names / IPs in base_url and other properties
Set<String> redactNames = new HashSet<>();
for (String nodeName : liveNodes) {
String urlString = Utils.getBaseUrlForNodeName(nodeName, "http");
try {
URL u = new URL(urlString);
// protocol format
redactNames.add(u.getHost() + ":" + u.getPort());
// node name format
redactNames.add(u.getHost() + "_" + u.getPort() + "_");
} catch (MalformedURLException e) {
log.warn("Invalid URL for node name " + nodeName + ", replacing including protocol and path", e);
redactNames.add(urlString);
redactNames.add(Utils.getBaseUrlForNodeName(nodeName, "https"));
}
}
// redact collection names too
Set<String> redactCollections = new HashSet<>();
ClusterState clusterState = cloudManager.getClusterStateProvider().getClusterState();
clusterState.forEachCollection(coll -> redactCollections.add(coll.getName()));
RedactionUtils.RedactionContext ctx = null;
if (redact) {
ctx = SimUtils.getRedactionContext(clusterState);
}
if (!withSuggestions && !withDiagnostics) {
withSuggestions = true;
}
@@ -1044,13 +1027,12 @@
}
Map<String, Object> simulationResults = new HashMap<>();
simulate(cloudManager, config, simulationResults, saveSimulated, withClusterState,
withStats, withSuggestions, withSortedNodes, withDiagnostics, iterations);
withStats, withSuggestions, withSortedNodes, withDiagnostics, iterations, redact);
results.put("simulation", simulationResults);
}
String data = Utils.toJSONString(results);
if (redact) {
data = RedactionUtils.redactNames(redactCollections, COLL_REDACTION_PREFIX, data);
data = RedactionUtils.redactNames(redactNames, NODE_REDACTION_PREFIX, data);
data = RedactionUtils.redactNames(ctx.getRedactions(), data);
}
stdout.println(data);
}
@@ -1115,7 +1097,7 @@
boolean withStats,
boolean withSuggestions,
boolean withSortedNodes,
boolean withDiagnostics, int iterations) throws Exception {
boolean withDiagnostics, int iterations, boolean redact) throws Exception {
File saveDir = null;
if (saveSimulated != null) {
saveDir = new File(saveSimulated);
@@ -1146,10 +1128,10 @@
SnapshotCloudManager snapshotCloudManager = new SnapshotCloudManager(simCloudManager, config);
if (saveDir != null) {
File target = new File(saveDir, "step" + loop + "_start");
snapshotCloudManager.saveSnapshot(target, true);
snapshotCloudManager.saveSnapshot(target, true, redact);
}
if (verbose) {
Map<String, Object> snapshot = snapshotCloudManager.getSnapshot(false);
Map<String, Object> snapshot = snapshotCloudManager.getSnapshot(false, redact);
snapshot.remove(SnapshotCloudManager.DISTRIB_STATE_KEY);
snapshot.remove(SnapshotCloudManager.MANAGER_STATE_KEY);
perStep.put("snapshotStart", snapshot);
@@ -1215,10 +1197,10 @@
snapshotCloudManager = new SnapshotCloudManager(simCloudManager, config);
if (saveDir != null) {
File target = new File(saveDir, "step" + loop + "_stop");
snapshotCloudManager.saveSnapshot(target, true);
snapshotCloudManager.saveSnapshot(target, true, redact);
}
if (verbose) {
Map<String, Object> snapshot = snapshotCloudManager.getSnapshot(false);
Map<String, Object> snapshot = snapshotCloudManager.getSnapshot(false, redact);
snapshot.remove(SnapshotCloudManager.DISTRIB_STATE_KEY);
snapshot.remove(SnapshotCloudManager.MANAGER_STATE_KEY);
perStep.put("snapshotStop", snapshot);

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File: solr/core/src/test-files/solr/simSnapshot/distribState.json

@@ -0,0 +1,206 @@
{
"/clusterstate.json":{
"owner":"0",
"mode":"PERSISTENT",
"version":0},
"/live_nodes":{
"owner":"0",
"mode":"PERSISTENT",
"version":0},
"/live_nodes/N_11_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_65p_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_8_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_74_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_1i_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_4g_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_2_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_a_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_1c_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_16_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_6c_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_v_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_3a_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_1d_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_1_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_u_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_7_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_t_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_6i_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_d4_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_17_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_1f_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_do_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_3_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_303_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_29_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_9o_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_7e_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_1m_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_4_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_m_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_dj_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_e_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_13_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_g_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_2w_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_z_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_cs_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_3a7_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_aw_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_0_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_3to_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_4f_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_1h_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_2u_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_b9_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_6_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/live_nodes/N_5_solr":{
"owner":"0",
"mode":"EPHEMERAL",
"version":0},
"/autoscaling.json":{
"owner":"0",
"mode":"PERSISTENT",
"data":"ewogICJjbHVzdGVyLXByZWZlcmVuY2VzIjpbCiAgICB7CiAgICAgICJtaW5pbWl6ZSI6ImNvcmVzIiwKICAgICAgInByZWNpc2lvbiI6MX0sCiAgICB7CiAgICAgICJtYXhpbWl6ZSI6ImZyZWVkaXNrIiwKICAgICAgInByZWNpc2lvbiI6MTB9XSwKICAiY2x1c3Rlci1wb2xpY3kiOlsKICAgIHsKICAgICAgInJlcGxpY2EiOiIjQUxMIiwKICAgICAgImNvbGxlY3Rpb24iOiJDT0xMXzIiLAogICAgICAic3lzcHJvcC5wb29sIjoicG9vbC0wMSJ9LAogICAgewogICAgICAicmVwbGljYSI6IiNBTEwiLAogICAgICAiY29sbGVjdGlvbiI6IkNPTExfMSIsCiAgICAgICJzeXNwcm9wLnBvb2wiOiJwb29sLTAyIn0sCiAgICB7CiAgICAgICJyZXBsaWNhIjoiI0FMTCIsCiAgICAgICJjb2xsZWN0aW9uIjoiQ09MTF8wIiwKICAgICAgInN5c3Byb3AucG9vbCI6InBvb2wtMDIifSwKICAgIHsKICAgICAgInJlcGxpY2EiOiI8MiIsCiAgICAgICJzaGFyZCI6IiNFQUNIIiwKICAgICAgIm5vZGUiOiIjQU5ZIn0sCiAgICB7CiAgICAgICJyZXBsaWNhIjoiI0VRVUFMIiwKICAgICAgInNoYXJkIjoiI0VBQ0giLAogICAgICAic3lzcHJvcC5heiI6IiNFQUNIIn1dLAogICJ0cmlnZ2VycyI6e30sCiAgImxpc3RlbmVycyI6e30sCiAgInByb3BlcnRpZXMiOnt9fQ==",
"version":0}}

View File: solr/core/src/test-files/solr/simSnapshot/managerState.json

@@ -0,0 +1 @@
{"timeSource":"SimTimeSource:50.0"}

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File: solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSnapshotCloudManager.java

@@ -17,11 +17,14 @@
package org.apache.solr.cloud.autoscaling.sim;
import java.io.File;
import java.io.FileInputStream;
import java.lang.invoke.MethodHandles;
import java.nio.charset.Charset;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -31,10 +34,14 @@ import java.util.function.Predicate;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.commons.io.IOUtils;
import org.apache.solr.client.solrj.cloud.DistribStateManager;
import org.apache.solr.client.solrj.cloud.NodeStateProvider;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
import org.apache.solr.client.solrj.cloud.autoscaling.ReplicaInfo;
import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
import org.apache.solr.client.solrj.cloud.autoscaling.Suggestion;
import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.SolrCloudTestCase;
@@ -66,6 +73,10 @@ public class TestSnapshotCloudManager extends SolrCloudTestCase {
.configure();
CollectionAdminRequest.createCollection(CollectionAdminParams.SYSTEM_COLL, null, 1, 2, 0, 1)
.process(cluster.getSolrClient());
CollectionAdminRequest.createCollection("coll1", null, 1, 1)
.process(cluster.getSolrClient());
CollectionAdminRequest.createCollection("coll10", null, 1, 1)
.process(cluster.getSolrClient());
realManager = cluster.getJettySolrRunner(cluster.getJettySolrRunners().size() - 1).getCoreContainer()
.getZkController().getSolrCloudManager();
}
@@ -73,7 +84,7 @@
@Test
public void testSnapshots() throws Exception {
SnapshotCloudManager snapshotCloudManager = new SnapshotCloudManager(realManager, null);
Map<String, Object> snapshot = snapshotCloudManager.getSnapshot(true);
Map<String, Object> snapshot = snapshotCloudManager.getSnapshot(true, false);
SnapshotCloudManager snapshotCloudManager1 = new SnapshotCloudManager(snapshot);
SimSolrCloudTestCase.assertClusterStateEquals(realManager.getClusterStateProvider().getClusterState(), snapshotCloudManager.getClusterStateProvider().getClusterState());
SimSolrCloudTestCase.assertClusterStateEquals(realManager.getClusterStateProvider().getClusterState(), snapshotCloudManager1.getClusterStateProvider().getClusterState());
@@ -88,19 +99,52 @@
Path tmpPath = createTempDir();
File tmpDir = tmpPath.toFile();
SnapshotCloudManager snapshotCloudManager = new SnapshotCloudManager(realManager, null);
snapshotCloudManager.saveSnapshot(tmpDir, true);
snapshotCloudManager.saveSnapshot(tmpDir, true, false);
SnapshotCloudManager snapshotCloudManager1 = SnapshotCloudManager.readSnapshot(tmpDir);
SimSolrCloudTestCase.assertClusterStateEquals(snapshotCloudManager.getClusterStateProvider().getClusterState(), snapshotCloudManager1.getClusterStateProvider().getClusterState());
assertNodeStateProvider(snapshotCloudManager, snapshotCloudManager1);
assertDistribStateManager(snapshotCloudManager.getDistribStateManager(), snapshotCloudManager1.getDistribStateManager());
}
@Test
public void testRedaction() throws Exception {
Path tmpPath = createTempDir();
File tmpDir = tmpPath.toFile();
SnapshotCloudManager snapshotCloudManager = new SnapshotCloudManager(realManager, null);
Set<String> redacted = new HashSet<>(realManager.getClusterStateProvider().getLiveNodes());
redacted.addAll(realManager.getClusterStateProvider().getClusterState().getCollectionStates().keySet());
snapshotCloudManager.saveSnapshot(tmpDir, true, true);
for (String key : SnapshotCloudManager.REQUIRED_KEYS) {
File src = new File(tmpDir, key + ".json");
assertTrue(src.toString() + " doesn't exist", src.exists());
String data = IOUtils.toString(new FileInputStream(src), Charset.forName("UTF-8"));
assertFalse("empty data in " + src, data.trim().isEmpty());
for (String redactedName : redacted) {
assertFalse("redacted name " + redactedName + " found in " + src, data.contains(redactedName));
}
}
}
@Test
public void testComplexSnapshot() throws Exception {
File snapshotDir = new File(TEST_HOME(), "simSnapshot");
SnapshotCloudManager snapshotCloudManager = SnapshotCloudManager.readSnapshot(snapshotDir);
assertEquals(48, snapshotCloudManager.getClusterStateProvider().getLiveNodes().size());
assertEquals(16, snapshotCloudManager.getClusterStateProvider().getClusterState().getCollectionStates().size());
try (SimCloudManager simCloudManager = SimCloudManager.createCluster(snapshotCloudManager, null, TimeSource.get("simTime:50"))) {
List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(simCloudManager.getDistribStateManager().getAutoScalingConfig(), simCloudManager);
assertEquals(1, suggestions.size());
Suggester.SuggestionInfo suggestion = suggestions.get(0);
assertEquals(Suggestion.Type.improvement.toString(), suggestion.toMap(new HashMap<>()).get("type").toString());
}
}
@Test
public void testSimulatorFromSnapshot() throws Exception {
Path tmpPath = createTempDir();
File tmpDir = tmpPath.toFile();
SnapshotCloudManager snapshotCloudManager = new SnapshotCloudManager(realManager, null);
snapshotCloudManager.saveSnapshot(tmpDir, true);
snapshotCloudManager.saveSnapshot(tmpDir, true, false);
SnapshotCloudManager snapshotCloudManager1 = SnapshotCloudManager.readSnapshot(tmpDir);
try (SimCloudManager simCloudManager = SimCloudManager.createCluster(snapshotCloudManager1, null, TimeSource.get("simTime:50"))) {
SimSolrCloudTestCase.assertClusterStateEquals(snapshotCloudManager.getClusterStateProvider().getClusterState(), simCloudManager.getClusterStateProvider().getClusterState());

View File: solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java

@@ -88,7 +88,7 @@ public class ReplicaInfo implements MapWriter {
}
public Object clone() {
return new ReplicaInfo(name, core, collection, shard, type, node, variables);
return new ReplicaInfo(name, core, collection, shard, type, node, new HashMap<>(variables));
}
@Override