SOLR-8995: Use lambda to simplify CollectionsHandler

This commit is contained in:
Noble Paul 2016-07-20 22:00:55 +05:30
parent 9aa639d45e
commit fb4de6adb1
1 changed file with 380 additions and 479 deletions

View File

@@ -37,7 +37,6 @@ import org.apache.commons.lang.StringUtils;
import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
import org.apache.solr.client.solrj.request.CoreAdminRequest;
import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestSyncShard;
import org.apache.solr.client.solrj.response.RequestStatusState;
import org.apache.solr.client.solrj.util.SolrIdentifierValidator;
@@ -204,7 +203,7 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
log.info("Invoked Collection Action :{} with params {} and sendToOCPQueue={}", action.toLower(), req.getParamString(), operation.sendToOCPQueue);
SolrResponse response = null;
Map<String, Object> props = operation.call(req, rsp, this);
Map<String, Object> props = operation.execute(req, rsp, this);
String asyncId = req.getParams().get(ASYNC);
if (props != null) {
if (asyncId != null) {
@@ -335,7 +334,34 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
public static final String SYSTEM_COLL = ".system";
enum CollectionOperation {
/**
 * Creates the config set for the {@code .system} collection in ZooKeeper: ensures the
 * configs znode and the .system config znode exist, then uploads the bundled
 * SystemCollectionSchema.xml and SystemCollectionSolrConfig.xml classpath resources
 * as schema.xml and solrconfig.xml under that config znode.
 *
 * @param coreContainer used to reach the ZK client via the ZkController/ZkStateReader
 * @throws KeeperException      on ZooKeeper failures while creating znodes
 * @throws InterruptedException if the ZK operation is interrupted
 * @throws SolrException        (SERVER_ERROR) wrapping any IOException while reading resources
 */
private static void createSysConfigSet(CoreContainer coreContainer) throws KeeperException, InterruptedException {
  SolrZkClient zk = coreContainer.getZkController().getZkStateReader().getZkClient();
  ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zk.getZkClientTimeout());
  cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE, zk);
  cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE + "/" + SYSTEM_COLL, zk);
  try {
    uploadSysConfigResource(cmdExecutor, zk, "schema.xml", "SystemCollectionSchema.xml");
    uploadSysConfigResource(cmdExecutor, zk, "solrconfig.xml", "SystemCollectionSolrConfig.xml");
  } catch (IOException e) {
    throw new SolrException(ErrorCode.SERVER_ERROR, e);
  }
}

/**
 * Reads a classpath resource and writes it under the .system config znode.
 * Closes the resource stream (the original code leaked it — commons-io
 * toByteArray does not close its argument) and fails with a clear message
 * instead of an NPE when the resource is missing from the classpath.
 */
private static void uploadSysConfigResource(ZkCmdExecutor cmdExecutor, SolrZkClient zk,
                                            String fileName, String resourceName)
    throws IOException, KeeperException, InterruptedException {
  String path = ZkStateReader.CONFIGS_ZKNODE + "/" + SYSTEM_COLL + "/" + fileName;
  try (InputStream in = Thread.currentThread().getContextClassLoader().getResourceAsStream(resourceName)) {
    if (in == null) {
      throw new IOException("Classpath resource not found: " + resourceName);
    }
    byte[] data = IOUtils.toByteArray(in);
    cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
  }
}
/**
 * Appends a {@code status} entry to the given results describing the state of an
 * async collection request: an ordered map holding the state key and a human-readable message.
 *
 * @param results response section the status entry is appended to
 * @param state   request lifecycle state whose key is reported
 * @param msg     human-readable explanation of how the state was determined
 */
private static void addStatusToResponse(NamedList<Object> results, RequestStatusState state, String msg) {
  SimpleOrderedMap<String> stateInfo = new SimpleOrderedMap<>();
  stateInfo.add("state", state.getKey());
  stateInfo.add("msg", msg);
  results.add("status", stateInfo);
}
enum CollectionOperation implements CollectionOp {
/**
* very simple currently, you can pass a template collection, and the new collection is created on
* every node the template collection is on
@@ -343,284 +369,194 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
* we might also want to think about error handling (add the request to a zk queue and involve overseer?)
* as well as specific replicas= options
*/
CREATE_OP(CREATE) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h)
throws KeeperException, InterruptedException {
Map<String, Object> props = req.getParams().required().getAll(null, NAME);
props.put("fromApi", "true");
req.getParams().getAll(props,
REPLICATION_FACTOR,
COLL_CONF,
NUM_SLICES,
MAX_SHARDS_PER_NODE,
CREATE_NODE_SET, CREATE_NODE_SET_SHUFFLE,
SHARDS_PROP,
STATE_FORMAT,
AUTO_ADD_REPLICAS,
RULE,
SNITCH);
CREATE_OP(CREATE, (req, rsp, h) -> {
Map<String, Object> props = req.getParams().required().getAll(null, NAME);
props.put("fromApi", "true");
req.getParams().getAll(props,
REPLICATION_FACTOR,
COLL_CONF,
NUM_SLICES,
MAX_SHARDS_PER_NODE,
CREATE_NODE_SET, CREATE_NODE_SET_SHUFFLE,
SHARDS_PROP,
STATE_FORMAT,
AUTO_ADD_REPLICAS,
RULE,
SNITCH);
if (props.get(STATE_FORMAT) == null) {
props.put(STATE_FORMAT, "2");
}
addMapObject(props, RULE);
addMapObject(props, SNITCH);
verifyRuleParams(h.coreContainer, props);
final String collectionName = SolrIdentifierValidator.validateCollectionName((String)props.get(NAME));
final String shardsParam = (String) props.get(SHARDS_PROP);
if (StringUtils.isNotEmpty(shardsParam)) {
verifyShardsParam(shardsParam);
}
if (SYSTEM_COLL.equals(collectionName)) {
//We must always create a .system collection with only a single shard
props.put(NUM_SLICES, 1);
props.remove(SHARDS_PROP);
createSysConfigSet(h.coreContainer);
}
copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX);
return copyPropertiesWithPrefix(req.getParams(), props, "router.");
if (props.get(STATE_FORMAT) == null) {
props.put(STATE_FORMAT, "2");
}
addMapObject(props, RULE);
addMapObject(props, SNITCH);
verifyRuleParams(h.coreContainer, props);
final String collectionName = SolrIdentifierValidator.validateCollectionName((String) props.get(NAME));
final String shardsParam = (String) props.get(SHARDS_PROP);
if (StringUtils.isNotEmpty(shardsParam)) {
verifyShardsParam(shardsParam);
}
if (SYSTEM_COLL.equals(collectionName)) {
//We must always create a .system collection with only a single shard
props.put(NUM_SLICES, 1);
props.remove(SHARDS_PROP);
createSysConfigSet(h.coreContainer);
}
copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX);
return copyPropertiesWithPrefix(req.getParams(), props, "router.");
private void createSysConfigSet(CoreContainer coreContainer) throws KeeperException, InterruptedException {
SolrZkClient zk = coreContainer.getZkController().getZkStateReader().getZkClient();
ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zk.getZkClientTimeout());
cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE, zk);
cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE + "/" + SYSTEM_COLL, zk);
}),
DELETE_OP(DELETE, (req, rsp, h) -> req.getParams().required().getAll(null, NAME)),
try {
String path = ZkStateReader.CONFIGS_ZKNODE + "/" + SYSTEM_COLL + "/schema.xml";
byte[] data = IOUtils.toByteArray(Thread.currentThread().getContextClassLoader().getResourceAsStream("SystemCollectionSchema.xml"));
cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
path = ZkStateReader.CONFIGS_ZKNODE + "/" + SYSTEM_COLL + "/solrconfig.xml";
data = IOUtils.toByteArray(Thread.currentThread().getContextClassLoader().getResourceAsStream("SystemCollectionSolrConfig.xml"));
cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
} catch (IOException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, e);
RELOAD_OP(RELOAD, (req, rsp, h) -> req.getParams().required().getAll(null, NAME)),
}
SYNCSHARD_OP(SYNCSHARD, (req, rsp, h) -> {
String collection = req.getParams().required().get("collection");
String shard = req.getParams().required().get("shard");
ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
DocCollection docCollection = clusterState.getCollection(collection);
ZkNodeProps leaderProps = docCollection.getLeader(shard);
ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
try (HttpSolrClient client = new Builder(nodeProps.getBaseUrl()).build()) {
client.setConnectionTimeout(15000);
client.setSoTimeout(60000);
RequestSyncShard reqSyncShard = new RequestSyncShard();
reqSyncShard.setCollection(collection);
reqSyncShard.setShard(shard);
reqSyncShard.setCoreName(nodeProps.getCoreName());
client.request(reqSyncShard);
}
},
DELETE_OP(DELETE) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler)
throws Exception {
return req.getParams().required().getAll(null, NAME);
return null;
}),
CREATEALIAS_OP(CREATEALIAS, (req, rsp, h) -> {
final String aliasName = SolrIdentifierValidator.validateAliasName(req.getParams().get(NAME));
return req.getParams().required().getAll(null, NAME, "collections");
}),
DELETEALIAS_OP(DELETEALIAS, (req, rsp, h) -> req.getParams().required().getAll(null, NAME)),
SPLITSHARD_OP(SPLITSHARD, DEFAULT_COLLECTION_OP_TIMEOUT * 5, true, (req, rsp, h) -> {
String name = req.getParams().required().get(COLLECTION_PROP);
// TODO : add support for multiple shards
String shard = req.getParams().get(SHARD_ID_PROP);
String rangesStr = req.getParams().get(CoreAdminParams.RANGES);
String splitKey = req.getParams().get("split.key");
if (splitKey == null && shard == null) {
throw new SolrException(ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified.");
}
},
RELOAD_OP(RELOAD) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler)
throws Exception {
return req.getParams().required().getAll(null, NAME);
if (splitKey != null && shard != null) {
throw new SolrException(ErrorCode.BAD_REQUEST,
"Only one of 'shard' or 'split.key' should be specified");
}
},
SYNCSHARD_OP(SYNCSHARD) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h)
throws Exception {
String collection = req.getParams().required().get("collection");
String shard = req.getParams().required().get("shard");
ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
DocCollection docCollection = clusterState.getCollection(collection);
ZkNodeProps leaderProps = docCollection.getLeader(shard);
ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
try (HttpSolrClient client = new Builder(nodeProps.getBaseUrl()).build()) {
client.setConnectionTimeout(15000);
client.setSoTimeout(60000);
RequestSyncShard reqSyncShard = new CoreAdminRequest.RequestSyncShard();
reqSyncShard.setCollection(collection);
reqSyncShard.setShard(shard);
reqSyncShard.setCoreName(nodeProps.getCoreName());
client.request(reqSyncShard);
}
return null;
if (splitKey != null && rangesStr != null) {
throw new SolrException(ErrorCode.BAD_REQUEST,
"Only one of 'ranges' or 'split.key' should be specified");
}
},
CREATEALIAS_OP(CREATEALIAS) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler)
throws Exception {
final String aliasName = SolrIdentifierValidator.validateAliasName(req.getParams().get(NAME));
return req.getParams().required().getAll(null, NAME, "collections");
}
},
DELETEALIAS_OP(DELETEALIAS) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler)
throws Exception {
return req.getParams().required().getAll(null, NAME);
Map<String, Object> map = req.getParams().getAll(null,
COLLECTION_PROP,
SHARD_ID_PROP,
"split.key",
CoreAdminParams.RANGES);
return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX);
}),
DELETESHARD_OP(DELETESHARD, (req, rsp, h) -> {
Map<String, Object> map = req.getParams().required().getAll(null,
COLLECTION_PROP,
SHARD_ID_PROP);
req.getParams().getAll(map,
DELETE_INDEX,
DELETE_DATA_DIR,
DELETE_INSTANCE_DIR);
return map;
}),
FORCELEADER_OP(FORCELEADER, (req, rsp, h) -> {
forceLeaderElection(req, h);
return null;
}),
CREATESHARD_OP(CREATESHARD, (req, rsp, h) -> {
Map<String, Object> map = req.getParams().required().getAll(null,
COLLECTION_PROP,
SHARD_ID_PROP);
ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
final String newShardName = SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP));
if (!ImplicitDocRouter.NAME.equals(((Map) clusterState.getCollection(req.getParams().get(COLLECTION_PROP)).get(DOC_ROUTER)).get(NAME)))
throw new SolrException(ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
req.getParams().getAll(map,
REPLICATION_FACTOR,
CREATE_NODE_SET);
return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX);
}),
DELETEREPLICA_OP(DELETEREPLICA, (req, rsp, h) -> {
Map<String, Object> map = req.getParams().required().getAll(null,
COLLECTION_PROP,
SHARD_ID_PROP,
REPLICA_PROP);
req.getParams().getAll(map,
DELETE_INDEX,
DELETE_DATA_DIR,
DELETE_INSTANCE_DIR);
return req.getParams().getAll(map, ONLY_IF_DOWN);
}),
MIGRATE_OP(MIGRATE, (req, rsp, h) -> {
Map<String, Object> map = req.getParams().required().getAll(null, COLLECTION_PROP, "split.key", "target.collection");
return req.getParams().getAll(map, "forward.timeout");
}),
ADDROLE_OP(ADDROLE, (req, rsp, h) -> {
Map<String, Object> map = req.getParams().required().getAll(null, "role", "node");
if (!KNOWN_ROLES.contains(map.get("role")))
throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
return map;
}),
REMOVEROLE_OP(REMOVEROLE, (req, rsp, h) -> {
Map<String, Object> map = req.getParams().required().getAll(null, "role", "node");
if (!KNOWN_ROLES.contains(map.get("role")))
throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
return map;
}),
CLUSTERPROP_OP(CLUSTERPROP, (req, rsp, h) -> {
String name = req.getParams().required().get(NAME);
String val = req.getParams().get(VALUE_LONG);
ClusterProperties cp = new ClusterProperties(h.coreContainer.getZkController().getZkClient());
cp.setClusterProperty(name, val);
return null;
}),
REQUESTSTATUS_OP(REQUESTSTATUS, (req, rsp, h) -> {
req.getParams().required().check(REQUESTID);
final CoreContainer coreContainer1 = h.coreContainer;
final String requestId = req.getParams().get(REQUESTID);
final ZkController zkController = coreContainer1.getZkController();
final NamedList<Object> results = new NamedList<>();
if (zkController.getOverseerCompletedMap().contains(requestId)) {
final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId);
rsp.getValues().addAll(SolrResponse.deserialize(mapEntry).getResponse());
addStatusToResponse(results, COMPLETED, "found [" + requestId + "] in completed tasks");
} else if (zkController.getOverseerFailureMap().contains(requestId)) {
final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId);
rsp.getValues().addAll(SolrResponse.deserialize(mapEntry).getResponse());
addStatusToResponse(results, FAILED, "found [" + requestId + "] in failed tasks");
} else if (zkController.getOverseerRunningMap().contains(requestId)) {
addStatusToResponse(results, RUNNING, "found [" + requestId + "] in running tasks");
} else if (h.overseerCollectionQueueContains(requestId)) {
addStatusToResponse(results, SUBMITTED, "found [" + requestId + "] in submitted tasks");
} else {
addStatusToResponse(results, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue");
}
},
SPLITSHARD_OP(SPLITSHARD, DEFAULT_COLLECTION_OP_TIMEOUT * 5, true) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h)
throws Exception {
String name = req.getParams().required().get(COLLECTION_PROP);
// TODO : add support for multiple shards
String shard = req.getParams().get(SHARD_ID_PROP);
String rangesStr = req.getParams().get(CoreAdminParams.RANGES);
String splitKey = req.getParams().get("split.key");
if (splitKey == null && shard == null) {
throw new SolrException(ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified.");
}
if (splitKey != null && shard != null) {
throw new SolrException(ErrorCode.BAD_REQUEST,
"Only one of 'shard' or 'split.key' should be specified");
}
if (splitKey != null && rangesStr != null) {
throw new SolrException(ErrorCode.BAD_REQUEST,
"Only one of 'ranges' or 'split.key' should be specified");
}
Map<String, Object> map = req.getParams().getAll(null,
COLLECTION_PROP,
SHARD_ID_PROP,
"split.key",
CoreAdminParams.RANGES);
return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX);
}
},
DELETESHARD_OP(DELETESHARD) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler) throws Exception {
Map<String, Object> map = req.getParams().required().getAll(null,
COLLECTION_PROP,
SHARD_ID_PROP);
req.getParams().getAll(map,
DELETE_INDEX,
DELETE_DATA_DIR,
DELETE_INSTANCE_DIR);
return map;
}
},
FORCELEADER_OP(FORCELEADER) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler) throws Exception {
forceLeaderElection(req, handler);
return null;
}
},
CREATESHARD_OP(CREATESHARD) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler) throws Exception {
Map<String, Object> map = req.getParams().required().getAll(null,
COLLECTION_PROP,
SHARD_ID_PROP);
ClusterState clusterState = handler.coreContainer.getZkController().getClusterState();
final String newShardName = SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP));
if (!ImplicitDocRouter.NAME.equals(((Map) clusterState.getCollection(req.getParams().get(COLLECTION_PROP)).get(DOC_ROUTER)).get(NAME)))
throw new SolrException(ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
req.getParams().getAll(map,
REPLICATION_FACTOR,
CREATE_NODE_SET);
return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX);
}
},
DELETEREPLICA_OP(DELETEREPLICA) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler) throws Exception {
Map<String, Object> map = req.getParams().required().getAll(null,
COLLECTION_PROP,
SHARD_ID_PROP,
REPLICA_PROP);
req.getParams().getAll(map,
DELETE_INDEX,
DELETE_DATA_DIR,
DELETE_INSTANCE_DIR);
return req.getParams().getAll(map, ONLY_IF_DOWN);
}
},
MIGRATE_OP(MIGRATE) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
Map<String, Object> map = req.getParams().required().getAll(null, COLLECTION_PROP, "split.key", "target.collection");
return req.getParams().getAll(map, "forward.timeout");
}
},
ADDROLE_OP(ADDROLE) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler) throws Exception {
Map<String, Object> map = req.getParams().required().getAll(null, "role", "node");
if (!KNOWN_ROLES.contains(map.get("role")))
throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
return map;
}
},
REMOVEROLE_OP(REMOVEROLE) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
Map<String, Object> map = req.getParams().required().getAll(null, "role", "node");
if (!KNOWN_ROLES.contains(map.get("role")))
throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
return map;
}
},
CLUSTERPROP_OP(CLUSTERPROP) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
String name = req.getParams().required().get(NAME);
String val = req.getParams().get(VALUE_LONG);
ClusterProperties cp = new ClusterProperties(h.coreContainer.getZkController().getZkClient());
cp.setClusterProperty(name, val);
return null;
}
},
REQUESTSTATUS_OP(REQUESTSTATUS) {
final SolrResponse response = new OverseerSolrResponse(results);
rsp.getValues().addAll(response.getResponse());
return null;
}),
DELETESTATUS_OP(DELETESTATUS, new CollectionOp() {
@SuppressWarnings("unchecked")
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
req.getParams().required().check(REQUESTID);
final CoreContainer coreContainer = h.coreContainer;
final String requestId = req.getParams().get(REQUESTID);
final ZkController zkController = coreContainer.getZkController();
final NamedList<Object> results = new NamedList<>();
if (zkController.getOverseerCompletedMap().contains(requestId)) {
final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId);
rsp.getValues().addAll(SolrResponse.deserialize(mapEntry).getResponse());
addStatusToResponse(results, COMPLETED, "found [" + requestId + "] in completed tasks");
} else if (zkController.getOverseerFailureMap().contains(requestId)) {
final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId);
rsp.getValues().addAll(SolrResponse.deserialize(mapEntry).getResponse());
addStatusToResponse(results, FAILED, "found [" + requestId + "] in failed tasks");
} else if (zkController.getOverseerRunningMap().contains(requestId)) {
addStatusToResponse(results, RUNNING, "found [" + requestId + "] in running tasks");
} else if (h.overseerCollectionQueueContains(requestId)) {
addStatusToResponse(results, SUBMITTED, "found [" + requestId + "] in submitted tasks");
} else {
addStatusToResponse(results, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue");
}
final SolrResponse response = new OverseerSolrResponse(results);
rsp.getValues().addAll(response.getResponse());
return null;
}
private void addStatusToResponse(NamedList<Object> results, RequestStatusState state, String msg) {
SimpleOrderedMap<String> status = new SimpleOrderedMap<>();
status.add("state", state.getKey());
status.add("msg", msg);
results.add("status", status);
}
},
DELETESTATUS_OP(DELETESTATUS) {
@SuppressWarnings("unchecked")
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
public Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
final CoreContainer coreContainer = h.coreContainer;
final String requestId = req.getParams().get(REQUESTID);
final ZkController zkController = coreContainer.getZkController();
@@ -652,263 +588,217 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
}
return null;
}
},
ADDREPLICA_OP(ADDREPLICA) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h)
throws Exception {
Map<String, Object> props = req.getParams().getAll(null,
COLLECTION_PROP,
"node",
SHARD_ID_PROP,
_ROUTE_,
CoreAdminParams.NAME,
INSTANCE_DIR,
DATA_DIR);
return copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX);
}
},
OVERSEERSTATUS_OP(OVERSEERSTATUS) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
return new LinkedHashMap<>();
}
},
}),
ADDREPLICA_OP(ADDREPLICA, (req, rsp, h) -> {
Map<String, Object> props = req.getParams().getAll(null,
COLLECTION_PROP,
"node",
SHARD_ID_PROP,
_ROUTE_,
CoreAdminParams.NAME,
INSTANCE_DIR,
DATA_DIR);
return copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX);
}),
OVERSEERSTATUS_OP(OVERSEERSTATUS, (req, rsp, h) -> (Map) new LinkedHashMap<>()),
/**
* Handle list collection request.
* Do list collection request to zk host
*/
LIST_OP(LIST) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler) throws Exception {
NamedList<Object> results = new NamedList<>();
Map<String, DocCollection> collections = handler.coreContainer.getZkController().getZkStateReader().getClusterState().getCollectionsMap();
List<String> collectionList = new ArrayList<>(collections.keySet());
results.add("collections", collectionList);
SolrResponse response = new OverseerSolrResponse(results);
rsp.getValues().addAll(response.getResponse());
return null;
}
},
LIST_OP(LIST, (req, rsp, h) -> {
NamedList<Object> results = new NamedList<>();
Map<String, DocCollection> collections = h.coreContainer.getZkController().getZkStateReader().getClusterState().getCollectionsMap();
List<String> collectionList = new ArrayList<>(collections.keySet());
results.add("collections", collectionList);
SolrResponse response = new OverseerSolrResponse(results);
rsp.getValues().addAll(response.getResponse());
return null;
}),
/**
* Handle cluster status request.
* Can return status per specific collection/shard or per all collections.
*/
CLUSTERSTATUS_OP(CLUSTERSTATUS) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler)
throws KeeperException, InterruptedException {
Map<String, Object> all = req.getParams().getAll(null,
COLLECTION_PROP,
SHARD_ID_PROP,
_ROUTE_);
new ClusterStatus(handler.coreContainer.getZkController().getZkStateReader(),
new ZkNodeProps(all)).getClusterStatus(rsp.getValues());
return null;
CLUSTERSTATUS_OP(CLUSTERSTATUS, (req, rsp, h) -> {
Map<String, Object> all = req.getParams().getAll(null,
COLLECTION_PROP,
SHARD_ID_PROP,
_ROUTE_);
new ClusterStatus(h.coreContainer.getZkController().getZkStateReader(),
new ZkNodeProps(all)).getClusterStatus(rsp.getValues());
return null;
}),
ADDREPLICAPROP_OP(ADDREPLICAPROP, (req, rsp, h) -> {
Map<String, Object> map = req.getParams().required().getAll(null,
COLLECTION_PROP,
PROPERTY_PROP,
SHARD_ID_PROP,
REPLICA_PROP,
PROPERTY_VALUE_PROP);
req.getParams().getAll(map, SHARD_UNIQUE);
String property = (String) map.get(PROPERTY_PROP);
if (!property.startsWith(COLL_PROP_PREFIX)) {
property = COLL_PROP_PREFIX + property;
}
},
ADDREPLICAPROP_OP(ADDREPLICAPROP) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
Map<String, Object> map = req.getParams().required().getAll(null,
COLLECTION_PROP,
PROPERTY_PROP,
SHARD_ID_PROP,
REPLICA_PROP,
PROPERTY_VALUE_PROP);
req.getParams().getAll(map, SHARD_UNIQUE);
String property = (String) map.get(PROPERTY_PROP);
if (!property.startsWith(COLL_PROP_PREFIX)) {
property = COLL_PROP_PREFIX + property;
}
boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
// Check if we're trying to set a property with parameters that allow us to set the property on multiple replicas
// in a slice on properties that are known to only be one-per-slice and error out if so.
if (StringUtils.isNotBlank((String) map.get(SHARD_UNIQUE)) &&
SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(property.toLowerCase(Locale.ROOT)) &&
uniquePerSlice == false) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Overseer replica property command received for property " + property +
" with the " + SHARD_UNIQUE +
" parameter set to something other than 'true'. No action taken.");
}
return map;
// Check if we're trying to set a property with parameters that allow us to set the property on multiple replicas
// in a slice on properties that are known to only be one-per-slice and error out if so.
if (StringUtils.isNotBlank((String) map.get(SHARD_UNIQUE)) &&
SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(property.toLowerCase(Locale.ROOT)) &&
uniquePerSlice == false) {
throw new SolrException(ErrorCode.BAD_REQUEST,
"Overseer replica property command received for property " + property +
" with the " + SHARD_UNIQUE +
" parameter set to something other than 'true'. No action taken.");
}
},
DELETEREPLICAPROP_OP(DELETEREPLICAPROP) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
Map<String, Object> map = req.getParams().required().getAll(null,
COLLECTION_PROP,
PROPERTY_PROP,
SHARD_ID_PROP,
REPLICA_PROP);
return req.getParams().getAll(map, PROPERTY_PROP);
return map;
}),
DELETEREPLICAPROP_OP(DELETEREPLICAPROP, (req, rsp, h) -> {
Map<String, Object> map = req.getParams().required().getAll(null,
COLLECTION_PROP,
PROPERTY_PROP,
SHARD_ID_PROP,
REPLICA_PROP);
return req.getParams().getAll(map, PROPERTY_PROP);
}),
BALANCESHARDUNIQUE_OP(BALANCESHARDUNIQUE, (req, rsp, h) -> {
Map<String, Object> map = req.getParams().required().getAll(null,
COLLECTION_PROP,
PROPERTY_PROP);
Boolean shardUnique = Boolean.parseBoolean(req.getParams().get(SHARD_UNIQUE));
String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT);
if (!StringUtils.startsWith(prop, COLL_PROP_PREFIX)) {
prop = COLL_PROP_PREFIX + prop;
}
},
BALANCESHARDUNIQUE_OP(BALANCESHARDUNIQUE) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
Map<String, Object> map = req.getParams().required().getAll(null,
COLLECTION_PROP,
PROPERTY_PROP);
Boolean shardUnique = Boolean.parseBoolean(req.getParams().get(SHARD_UNIQUE));
String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT);
if (!StringUtils.startsWith(prop, COLL_PROP_PREFIX)) {
prop = COLL_PROP_PREFIX + prop;
}
if (!shardUnique && !SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(prop)) {
throw new SolrException(ErrorCode.BAD_REQUEST, "Balancing properties amongst replicas in a slice requires that"
+ " the property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'. " +
" Property: " + prop + " shardUnique: " + Boolean.toString(shardUnique));
}
return req.getParams().getAll(map, ONLY_ACTIVE_NODES, SHARD_UNIQUE);
if (!shardUnique && !SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(prop)) {
throw new SolrException(ErrorCode.BAD_REQUEST, "Balancing properties amongst replicas in a slice requires that"
+ " the property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'. " +
" Property: " + prop + " shardUnique: " + Boolean.toString(shardUnique));
}
},
REBALANCELEADERS_OP(REBALANCELEADERS) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
new RebalanceLeaders(req,rsp,h).execute();
return null;
return req.getParams().getAll(map, ONLY_ACTIVE_NODES, SHARD_UNIQUE);
}),
REBALANCELEADERS_OP(REBALANCELEADERS, (req, rsp, h) -> {
new RebalanceLeaders(req, rsp, h).execute();
return null;
}),
MODIFYCOLLECTION_OP(MODIFYCOLLECTION, (req, rsp, h) -> {
Map<String, Object> m = req.getParams().getAll(null, MODIFIABLE_COLL_PROPS);
if (m.isEmpty()) throw new SolrException(ErrorCode.BAD_REQUEST,
formatString("no supported values provided rule, snitch, maxShardsPerNode, replicationFactor, collection.configName"));
req.getParams().required().getAll(m, COLLECTION_PROP);
addMapObject(m, RULE);
addMapObject(m, SNITCH);
for (String prop : MODIFIABLE_COLL_PROPS) DocCollection.verifyProp(m, prop);
verifyRuleParams(h.coreContainer, m);
return m;
}),
MIGRATESTATEFORMAT_OP(MIGRATESTATEFORMAT, (req, rsp, h) -> req.getParams().required().getAll(null, COLLECTION_PROP)),
BACKUP_OP(BACKUP, (req, rsp, h) -> {
req.getParams().required().check(NAME, COLLECTION_PROP);
String collectionName = req.getParams().get(COLLECTION_PROP);
ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
if (!clusterState.hasCollection(collectionName)) {
throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken.");
}
},
MODIFYCOLLECTION_OP(MODIFYCOLLECTION) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
Map<String, Object> m = req.getParams().getAll(null, MODIFIABLE_COLL_PROPS);
if (m.isEmpty()) throw new SolrException(ErrorCode.BAD_REQUEST,
formatString("no supported values provided rule, snitch, maxShardsPerNode, replicationFactor, collection.configName"));
req.getParams().required().getAll(m, COLLECTION_PROP);
addMapObject(m, RULE);
addMapObject(m, SNITCH);
for (String prop : MODIFIABLE_COLL_PROPS) DocCollection.verifyProp(m, prop);
verifyRuleParams(h.coreContainer, m);
return m;
}
},
MIGRATESTATEFORMAT_OP(MIGRATESTATEFORMAT) {
@Override
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler)
throws Exception {
return req.getParams().required().getAll(null, COLLECTION_PROP);
}
},
// Validates a collection backup request: the collection must exist and a backup
// 'location' must be resolvable (query param -> repository default -> cluster property)
// and must exist in the chosen repository.
BACKUP_OP(BACKUP, (req, rsp, h) -> {
  req.getParams().required().check(NAME, COLLECTION_PROP);

  String collectionName = req.getParams().get(COLLECTION_PROP);
  ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
  if (!clusterState.hasCollection(collectionName)) {
    throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken.");
  }

  CoreContainer cc = h.coreContainer;
  String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
  BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));

  String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
  if (location == null) {
    //Refresh the cluster property file to make sure the value set for location is the latest
    h.coreContainer.getZkController().getZkStateReader().forceUpdateClusterProperties();
    // Check if the location is specified in the cluster property.
    location = h.coreContainer.getZkController().getZkStateReader().getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null);
    if (location == null) {
      throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
          + " parameter or as a default repository property or as a cluster property.");
    }
  }

  // Check if the specified location is valid for this repository.
  URI uri = repository.createURI(location);
  try {
    if (!repository.exists(uri)) {
      throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
    }
  } catch (IOException ex) {
    throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existance of " + uri + ". Is it valid?", ex);
  }

  Map<String, Object> params = req.getParams().getAll(null, NAME, COLLECTION_PROP);
  params.put(CoreAdminParams.BACKUP_LOCATION, location);
  return params;
}),
// Validates a collection restore request: the target collection must NOT exist yet,
// the backup 'location' is resolved the same way as in BACKUP_OP and must exist in
// the repository; create-time params (configName, replicationFactor, ...) are passed through.
RESTORE_OP(RESTORE, (req, rsp, h) -> {
  req.getParams().required().check(NAME, COLLECTION_PROP);

  String collectionName = SolrIdentifierValidator.validateCollectionName(req.getParams().get(COLLECTION_PROP));
  ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
  //We always want to restore into an collection name which doesn't exist yet.
  if (clusterState.hasCollection(collectionName)) {
    throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' exists, no action taken.");
  }

  CoreContainer cc = h.coreContainer;
  String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
  BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));

  String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
  if (location == null) {
    //Refresh the cluster property file to make sure the value set for location is the latest
    h.coreContainer.getZkController().getZkStateReader().forceUpdateClusterProperties();
    // Check if the location is specified in the cluster property.
    location = h.coreContainer.getZkController().getZkStateReader().getClusterProperty("location", null);
    if (location == null) {
      throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
          + " parameter or as a default repository property or as a cluster property.");
    }
  }

  // Check if the specified location is valid for this repository.
  URI uri = repository.createURI(location);
  try {
    if (!repository.exists(uri)) {
      throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
    }
  } catch (IOException ex) {
    throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existance of " + uri + ". Is it valid?", ex);
  }

  Map<String, Object> params = req.getParams().getAll(null, NAME, COLLECTION_PROP);
  params.put(CoreAdminParams.BACKUP_LOCATION, location);
  // from CREATE_OP:
  req.getParams().getAll(params, COLL_CONF, REPLICATION_FACTOR, MAX_SHARDS_PER_NODE, STATE_FORMAT, AUTO_ADD_REPLICAS);
  copyPropertiesWithPrefix(req.getParams(), params, COLL_PROP_PREFIX);
  return params;
});
// The validation/message-building function for this action (invoked by execute()).
public final CollectionOp fun;
CollectionAction action;
long timeOut;
boolean sendToOCPQueue;

// Convenience constructor: default timeout, routed through the overseer queue.
CollectionOperation(CollectionAction action, CollectionOp fun) {
  this(action, DEFAULT_COLLECTION_OP_TIMEOUT, true, fun);
}

/**
 * @param action         the collection action this operation handles
 * @param timeOut        timeout used when waiting on the overseer response (consumed by the
 *                       request dispatcher — not visible in this chunk)
 * @param sendToOCPQueue whether the built message is sent to the overseer collection-processor queue
 * @param fun            validates the request and returns the properties to forward (null returns immediately)
 */
CollectionOperation(CollectionAction action, long timeOut, boolean sendToOCPQueue, CollectionOp fun) {
  this.action = action;
  this.timeOut = timeOut;
  this.sendToOCPQueue = sendToOCPQueue;
  this.fun = fun;
}
/**
 * Looks up the operation registered for the given action.
 *
 * @throws SolrException (SERVER_ERROR) if no operation handles the action
 */
public static CollectionOperation get(CollectionAction action) {
  for (CollectionOperation op : values()) {
    if (op.action == action) return op;
  }
  throw new SolrException(ErrorCode.SERVER_ERROR, "No such action" + action);
}

/**
 * All actions must implement this method. If a non null map is returned , the action name is added to
 * the map and sent to overseer for processing. If it returns a null, the call returns immediately
 */
@Override
public Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h)
    throws Exception {
  return fun.execute(req, rsp, h);
}
}
private static void forceLeaderElection(SolrQueryRequest req, CollectionsHandler handler) {
@ -1096,6 +992,11 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
}
}
/**
 * Validates a collection-admin request and computes the properties to forward to the overseer.
 * A non-null return value is sent to the overseer for processing; a null return makes the
 * call return immediately (see the dispatch in handleRequestBody).
 */
@FunctionalInterface
interface CollectionOp {
  Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception;
}
public static final List<String> MODIFIABLE_COLL_PROPS = Arrays.asList(
RULE,
SNITCH,