mirror of https://github.com/apache/lucene.git
SOLR-13292: Provide extended per-segment status of a collection.
commit f6f5f995ef (parent 83ab355772)
@@ -53,6 +53,8 @@ New Features

 * SOLR-13271: Read-only mode for SolrCloud collections (ab, shalin)

+* SOLR-13292: Provide extended per-segment status of a collection. (ab)
+
 Bug Fixes
 ----------------------
@@ -0,0 +1,197 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.solr.handler.admin;

import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;

import org.apache.http.client.HttpClient;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.io.SolrClientCache;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.RoutingRule;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Reports low-level details of a collection.
 */
public class ColStatus {
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  private final ClusterState clusterState;
  private final ZkNodeProps props;
  private final SolrClientCache solrClientCache;

  public static final String CORE_INFO_PROP = SegmentsInfoRequestHandler.WITH_CORE_INFO;
  public static final String FIELD_INFO_PROP = SegmentsInfoRequestHandler.WITH_FIELD_INFO;
  public static final String SIZE_INFO_PROP = SegmentsInfoRequestHandler.WITH_SIZE_INFO;
  public static final String SEGMENTS_PROP = "segments";

  public ColStatus(HttpClient httpClient, ClusterState clusterState, ZkNodeProps props) {
    this.props = props;
    this.solrClientCache = new SolrClientCache(httpClient);
    this.clusterState = clusterState;
  }

  public void getColStatus(NamedList<Object> results) {
    Collection<String> collections;
    String col = props.getStr(ZkStateReader.COLLECTION_PROP);
    if (col == null) {
      collections = new HashSet<>(clusterState.getCollectionsMap().keySet());
    } else {
      collections = Collections.singleton(col);
    }
    boolean withFieldInfo = props.getBool(FIELD_INFO_PROP, false);
    boolean withSegments = props.getBool(SEGMENTS_PROP, false);
    boolean withCoreInfo = props.getBool(CORE_INFO_PROP, false);
    boolean withSizeInfo = props.getBool(SIZE_INFO_PROP, false);
    if (withFieldInfo || withSizeInfo) {
      withSegments = true; // these details are reported per segment
    }
    for (String collection : collections) {
      DocCollection coll = clusterState.getCollectionOrNull(collection);
      if (coll == null) {
        continue;
      }
      SimpleOrderedMap<Object> colMap = new SimpleOrderedMap<>();
      colMap.add("stateFormat", coll.getStateFormat());
      colMap.add("znodeVersion", coll.getZNodeVersion());
      Map<String, Object> props = new TreeMap<>(coll.getProperties());
      props.remove("shards");
      colMap.add("properties", props);
      colMap.add("activeShards", coll.getActiveSlices().size());
      colMap.add("inactiveShards", coll.getSlices().size() - coll.getActiveSlices().size());
      results.add(collection, colMap);

      Set<String> nonCompliant = new TreeSet<>();

      SimpleOrderedMap<Object> shards = new SimpleOrderedMap<>();
      for (Slice s : coll.getSlices()) {
        SimpleOrderedMap<Object> sliceMap = new SimpleOrderedMap<>();
        shards.add(s.getName(), sliceMap);
        SimpleOrderedMap<Object> replicaMap = new SimpleOrderedMap<>();
        int totalReplicas = s.getReplicas().size();
        int activeReplicas = 0;
        int downReplicas = 0;
        int recoveringReplicas = 0;
        int recoveryFailedReplicas = 0;
        for (Replica r : s.getReplicas()) {
          switch (r.getState()) {
            case ACTIVE:
              activeReplicas++;
              break;
            case DOWN:
              downReplicas++;
              break;
            case RECOVERING:
              recoveringReplicas++;
              break;
            case RECOVERY_FAILED:
              recoveryFailedReplicas++;
              break;
          }
        }
        replicaMap.add("total", totalReplicas);
        replicaMap.add("active", activeReplicas);
        replicaMap.add("down", downReplicas);
        replicaMap.add("recovering", recoveringReplicas);
        replicaMap.add("recovery_failed", recoveryFailedReplicas);
        sliceMap.add("state", s.getState().toString());
        sliceMap.add("range", s.getRange().toString());
        Map<String, RoutingRule> rules = s.getRoutingRules();
        if (rules != null && !rules.isEmpty()) {
          sliceMap.add("routingRules", rules);
        }
        sliceMap.add("replicas", replicaMap);
        Replica leader = s.getLeader();
        if (leader == null) { // pick the first one
          leader = s.getReplicas().size() > 0 ? s.getReplicas().iterator().next() : null;
        }
        if (leader == null) {
          continue;
        }
        SimpleOrderedMap<Object> leaderMap = new SimpleOrderedMap<>();
        sliceMap.add("leader", leaderMap);
        leaderMap.add("coreNode", leader.getName());
        leaderMap.addAll(leader.getProperties());
        String url = ZkCoreNodeProps.getCoreUrl(leader);
        try (SolrClient client = solrClientCache.getHttpSolrClient(url)) {
          ModifiableSolrParams params = new ModifiableSolrParams();
          params.add(CommonParams.QT, "/admin/segments");
          // always request field info - it's needed below to collect non-compliant fields
          params.add(FIELD_INFO_PROP, "true");
          params.add(CORE_INFO_PROP, String.valueOf(withCoreInfo));
          params.add(SIZE_INFO_PROP, String.valueOf(withSizeInfo));
          QueryRequest req = new QueryRequest(params);
          NamedList<Object> rsp = client.request(req);
          rsp.remove("responseHeader");
          leaderMap.add("segInfos", rsp);
          NamedList<Object> segs = (NamedList<Object>)rsp.get("segments");
          if (segs != null) {
            for (Map.Entry<String, Object> entry : segs) {
              NamedList<Object> fields = (NamedList<Object>)((NamedList<Object>)entry.getValue()).get("fields");
              if (fields != null) {
                for (Map.Entry<String, Object> fEntry : fields) {
                  Object nc = ((NamedList<Object>)fEntry.getValue()).get("nonCompliant");
                  if (nc != null) {
                    nonCompliant.add(fEntry.getKey());
                  }
                }
              }
              if (!withFieldInfo) {
                ((NamedList<Object>)entry.getValue()).remove("fields");
              }
            }
          }
          if (!withSegments) {
            rsp.remove("segments");
          }
          if (!withFieldInfo) {
            rsp.remove("fieldInfoLegend");
          }
        } catch (SolrServerException | IOException e) {
          log.warn("Error getting details of replica segments from " + url, e);
        }
      }
      if (nonCompliant.isEmpty()) {
        nonCompliant.add("(NONE)");
      }
      colMap.add("schemaNonCompliant", nonCompliant);
      colMap.add("shards", shards);
    }
  }
}
@@ -520,6 +520,22 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
      return copyPropertiesWithPrefix(req.getParams(), props, "router.");

    }),
+    COLSTATUS_OP(COLSTATUS, (req, rsp, h) -> {
+      Map<String, Object> props = copy(req.getParams(), null,
+          COLLECTION_PROP,
+          ColStatus.CORE_INFO_PROP,
+          ColStatus.SEGMENTS_PROP,
+          ColStatus.FIELD_INFO_PROP,
+          ColStatus.SIZE_INFO_PROP);
+      // make sure we can get the name if there's "name" but not "collection"
+      if (props.containsKey(CoreAdminParams.NAME) && !props.containsKey(COLLECTION_PROP)) {
+        props.put(COLLECTION_PROP, props.get(CoreAdminParams.NAME));
+      }
+      new ColStatus(h.coreContainer.getUpdateShardHandler().getDefaultHttpClient(),
+          h.coreContainer.getZkController().getZkStateReader().getClusterState(), new ZkNodeProps(props))
+          .getColStatus(rsp.getValues());
+      return null;
+    }),
    DELETE_OP(DELETE, (req, rsp, h) -> copy(req.getParams().required(), null, NAME)),

    RELOAD_OP(RELOAD, (req, rsp, h) -> copy(req.getParams().required(), null, NAME)),
@@ -17,72 +17,187 @@
package org.apache.solr.handler.admin;

import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.stream.Collectors;

import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafMetaData;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.MergePolicy.MergeSpecification;
import org.apache.lucene.index.MergePolicy.OneMerge;
import org.apache.lucene.index.MergeTrigger;
import org.apache.lucene.index.SegmentCommitInfo;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.SegmentReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.util.Version;
import org.apache.solr.common.luke.FieldFlag;
import org.apache.solr.common.util.Pair;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.core.SolrCore;
import org.apache.solr.handler.RequestHandlerBase;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.update.SolrIndexWriter;
import org.apache.solr.util.RefCounted;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.lucene.index.IndexOptions.DOCS;
import static org.apache.lucene.index.IndexOptions.DOCS_AND_FREQS;
import static org.apache.lucene.index.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
import static org.apache.solr.common.params.CommonParams.NAME;

/**
 * This handler exposes information about the segments of the last commit generation.
 */
public class SegmentsInfoRequestHandler extends RequestHandlerBase {
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  public static final String WITH_FIELD_INFO = "fieldInfo";
  public static final String WITH_CORE_INFO = "coreInfo";
  public static final String WITH_SIZE_INFO = "sizeInfo";

  private static final List<String> FI_LEGEND;

  static {
    FI_LEGEND = Arrays.asList(
        FieldFlag.INDEXED.toString(),
        FieldFlag.DOC_VALUES.toString(),
        "xxx - DocValues type",
        FieldFlag.TERM_VECTOR_STORED.toString(),
        FieldFlag.OMIT_NORMS.toString(),
        FieldFlag.OMIT_TF.toString(),
        FieldFlag.OMIT_POSITIONS.toString(),
        FieldFlag.STORE_OFFSETS_WITH_POSITIONS.toString(),
        "p - field has payloads",
        "s - field uses soft deletes",
        ":x:x:x - point data dim : index dim : num bytes");
  }
  @Override
  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp)
      throws Exception {
-    rsp.add("segments", getSegmentsInfo(req, rsp));
+    getSegmentsInfo(req, rsp);
    rsp.setHttpCaching(false);
  }

-  private SimpleOrderedMap<Object> getSegmentsInfo(SolrQueryRequest req, SolrQueryResponse rsp)
+  private static final double GB = 1024.0 * 1024.0 * 1024.0;
+
+  private void getSegmentsInfo(SolrQueryRequest req, SolrQueryResponse rsp)
      throws Exception {
    boolean withFieldInfo = req.getParams().getBool(WITH_FIELD_INFO, false);
    boolean withCoreInfo = req.getParams().getBool(WITH_CORE_INFO, false);
    boolean withSizeInfo = req.getParams().getBool(WITH_SIZE_INFO, false);
    SolrIndexSearcher searcher = req.getSearcher();

    SegmentInfos infos =
        SegmentInfos.readLatestCommit(searcher.getIndexReader().directory());

-    List<String> mergeCandidates = getMergeCandidatesNames(req, infos);

    SimpleOrderedMap<Object> segmentInfos = new SimpleOrderedMap<>();

    SolrCore core = req.getCore();
    RefCounted<IndexWriter> iwRef = core.getSolrCoreState().getIndexWriter(core);
    SimpleOrderedMap<Object> infosInfo = new SimpleOrderedMap<>();
    Version minVersion = infos.getMinSegmentLuceneVersion();
    if (minVersion != null) {
      infosInfo.add("minSegmentLuceneVersion", minVersion.toString());
    }
    Version commitVersion = infos.getCommitLuceneVersion();
    if (commitVersion != null) {
      infosInfo.add("commitLuceneVersion", commitVersion.toString());
    }
    infosInfo.add("numSegments", infos.size());
    infosInfo.add("segmentsFileName", infos.getSegmentsFileName());
    infosInfo.add("totalMaxDoc", infos.totalMaxDoc());
    infosInfo.add("userData", infos.userData);
    if (withCoreInfo) {
      SimpleOrderedMap<Object> coreInfo = new SimpleOrderedMap<>();
      infosInfo.add("core", coreInfo);
      coreInfo.add("startTime", core.getStartTimeStamp().getTime() + "(" + core.getStartTimeStamp() + ")");
      coreInfo.add("dataDir", core.getDataDir());
      coreInfo.add("indexDir", core.getIndexDir());
      coreInfo.add("sizeInGB", (double)core.getIndexSize() / GB);

      if (iwRef != null) {
        try {
          IndexWriter iw = iwRef.get();
          String iwConfigStr = iw.getConfig().toString();
          SimpleOrderedMap<Object> iwConfig = new SimpleOrderedMap<>();
          // crude but sufficient: the config's toString() is one key=value pair per line
          String[] lines = iwConfigStr.split("\\n");
          for (String line : lines) {
            String[] parts = line.split("=");
            if (parts.length < 2) {
              continue;
            }
            iwConfig.add(parts[0], parts[1]);
          }
          coreInfo.add("indexWriterConfig", iwConfig);
        } finally {
          iwRef.decref();
        }
      }
    }
    SimpleOrderedMap<Object> segmentInfo = null;
    List<SegmentCommitInfo> sortable = new ArrayList<>();
    sortable.addAll(infos.asList());
    // Order by the number of live docs. The display is logarithmic so it is a little jumbled visually
-    sortable.sort((s1, s2) -> {
-      return (s2.info.maxDoc() - s2.getDelCount()) - (s1.info.maxDoc() - s1.getDelCount());
-    });
+    sortable.sort((s1, s2) ->
+      (s2.info.maxDoc() - s2.getDelCount()) - (s1.info.maxDoc() - s1.getDelCount())
+    );

+    List<String> mergeCandidates = new ArrayList<>();
+    SimpleOrderedMap<Object> runningMerges = getMergeInformation(req, infos, mergeCandidates);
    List<LeafReaderContext> leafContexts = searcher.getIndexReader().leaves();
    IndexSchema schema = req.getSchema();
    for (SegmentCommitInfo segmentCommitInfo : sortable) {
-      segmentInfo = getSegmentInfo(segmentCommitInfo);
+      segmentInfo = getSegmentInfo(segmentCommitInfo, withSizeInfo, withFieldInfo, leafContexts, schema);
      if (mergeCandidates.contains(segmentCommitInfo.info.name)) {
        segmentInfo.add("mergeCandidate", true);
      }
      segmentInfos.add((String) segmentInfo.get(NAME), segmentInfo);
    }

-    return segmentInfos;
+    rsp.add("info", infosInfo);
+    if (runningMerges.size() > 0) {
+      rsp.add("runningMerges", runningMerges);
+    }
+    if (withFieldInfo) {
+      rsp.add("fieldInfoLegend", FI_LEGEND);
+    }
+    rsp.add("segments", segmentInfos);
  }

  private SimpleOrderedMap<Object> getSegmentInfo(
-      SegmentCommitInfo segmentCommitInfo) throws IOException {
+      SegmentCommitInfo segmentCommitInfo, boolean withSizeInfo, boolean withFieldInfos,
+      List<LeafReaderContext> leafContexts, IndexSchema schema) throws IOException {
    SimpleOrderedMap<Object> segmentInfoMap = new SimpleOrderedMap<>();

    segmentInfoMap.add(NAME, segmentCommitInfo.info.name);
    segmentInfoMap.add("delCount", segmentCommitInfo.getDelCount());
    segmentInfoMap.add("softDelCount", segmentCommitInfo.getSoftDelCount());
    segmentInfoMap.add("hasFieldUpdates", segmentCommitInfo.hasFieldUpdates());
    segmentInfoMap.add("sizeInBytes", segmentCommitInfo.sizeInBytes());
    segmentInfoMap.add("size", segmentCommitInfo.info.maxDoc());
    Long timestamp = Long.parseLong(segmentCommitInfo.info.getDiagnostics()
@@ -91,15 +206,224 @@ public class SegmentsInfoRequestHandler extends RequestHandlerBase {
    segmentInfoMap.add("source",
        segmentCommitInfo.info.getDiagnostics().get("source"));
    segmentInfoMap.add("version", segmentCommitInfo.info.getVersion().toString());
    // don't open a new SegmentReader - try to find the right one from the leaf contexts
    SegmentReader seg = null;
    for (LeafReaderContext lrc : leafContexts) {
      LeafReader leafReader = lrc.reader();
      // unwrap
      while (leafReader instanceof FilterLeafReader) {
        leafReader = ((FilterLeafReader)leafReader).getDelegate();
      }
      if (leafReader instanceof SegmentReader) {
        SegmentReader sr = (SegmentReader)leafReader;
        if (sr.getSegmentInfo().info.equals(segmentCommitInfo.info)) {
          seg = sr;
          break;
        }
      }
    }
    if (seg != null) {
      LeafMetaData metaData = seg.getMetaData();
      if (metaData != null) {
        segmentInfoMap.add("createdVersionMajor", metaData.getCreatedVersionMajor());
        segmentInfoMap.add("minVersion", metaData.getMinVersion().toString());
        if (metaData.getSort() != null) {
          segmentInfoMap.add("sort", metaData.getSort().toString());
        }
      }
    }
    if (!segmentCommitInfo.info.getDiagnostics().isEmpty()) {
      segmentInfoMap.add("diagnostics", segmentCommitInfo.info.getDiagnostics());
    }
    if (!segmentCommitInfo.info.getAttributes().isEmpty()) {
      segmentInfoMap.add("attributes", segmentCommitInfo.info.getAttributes());
    }
    if (withSizeInfo) {
      Directory dir = segmentCommitInfo.info.dir;
      List<Pair<String, Long>> files = segmentCommitInfo.files().stream()
          .map(f -> {
            long size = -1;
            try {
              size = dir.fileLength(f);
            } catch (IOException e) {
              // ignore - the file size is simply reported as -1
            }
            return new Pair<String, Long>(f, size);
          }).sorted((p1, p2) -> {
            if (p1.second() > p2.second()) {
              return -1;
            } else if (p1.second() < p2.second()) {
              return 1;
            } else {
              return 0;
            }
          }).collect(Collectors.toList());
      if (!files.isEmpty()) {
        SimpleOrderedMap<Object> topFiles = new SimpleOrderedMap<>();
        for (int i = 0; i < Math.min(files.size(), 5); i++) {
          Pair<String, Long> p = files.get(i);
          topFiles.add(p.first(), RamUsageEstimator.humanReadableUnits(p.second()));
        }
        segmentInfoMap.add("largestFiles", topFiles);
      }
    }
    if (seg != null && withSizeInfo) {
      SimpleOrderedMap<Object> ram = new SimpleOrderedMap<>();
      ram.add("total", seg.ramBytesUsed());
      for (Accountable ac : seg.getChildResources()) {
        accountableToMap(ac, ram::add);
      }
      segmentInfoMap.add("ramBytesUsed", ram);
    }
    if (withFieldInfos) {
      if (seg == null) {
        log.debug("Skipping field infos - segment not available as a SegmentReader: " + segmentCommitInfo);
      } else {
        FieldInfos fis = seg.getFieldInfos();
        SimpleOrderedMap<Object> fields = new SimpleOrderedMap<>();
        for (FieldInfo fi : fis) {
          fields.add(fi.name, getFieldInfo(seg, fi, schema));
        }
        segmentInfoMap.add("fields", fields);
      }
    }

    return segmentInfoMap;
  }

-  private List<String> getMergeCandidatesNames(SolrQueryRequest req, SegmentInfos infos) throws IOException {
-    List<String> result = new ArrayList<String>();
+  private void accountableToMap(Accountable accountable, BiConsumer<String, Object> consumer) {
+    Collection<Accountable> children = accountable.getChildResources();
+    if (children != null && !children.isEmpty()) {
+      LinkedHashMap<String, Object> map = new LinkedHashMap<>();
+      map.put("total", accountable.ramBytesUsed());
+      for (Accountable child : children) {
+        accountableToMap(child, map::put);
+      }
+      consumer.accept(accountable.toString(), map);
+    } else {
+      consumer.accept(accountable.toString(), accountable.ramBytesUsed());
+    }
+  }
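For illustration only (not part of the commit, names and sizes invented), a minimal sketch of how `accountableToMap()` flattens nested `Accountable` resources into the nested `ramBytesUsed` structure shown later in the COLSTATUS example output. `Accountables.namedAccountable()` is an existing Lucene helper whose `toString()` returns the description:

    // Hypothetical usage - resource names and byte counts are made up.
    Accountable terms = Accountables.namedAccountable("term index", 1024);
    Accountable postings = Accountables.namedAccountable("postings", Arrays.asList(terms), 4096);
    SimpleOrderedMap<Object> ram = new SimpleOrderedMap<>();
    accountableToMap(postings, ram::add);
    // ram now contains: "postings" -> { "total": 4096, "term index": 1024 }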
  private SimpleOrderedMap<Object> getFieldInfo(SegmentReader reader, FieldInfo fi, IndexSchema schema) {
    SimpleOrderedMap<Object> fieldFlags = new SimpleOrderedMap<>();
    StringBuilder flags = new StringBuilder();
    IndexOptions opts = fi.getIndexOptions();
    flags.append((opts != IndexOptions.NONE) ? FieldFlag.INDEXED.getAbbreviation() : '-');
    DocValuesType dvt = fi.getDocValuesType();
    if (dvt != DocValuesType.NONE) {
      flags.append(FieldFlag.DOC_VALUES.getAbbreviation());
      switch (dvt) {
        case NUMERIC:
          flags.append("num");
          break;
        case BINARY:
          flags.append("bin");
          break;
        case SORTED:
          flags.append("srt");
          break;
        case SORTED_NUMERIC:
          flags.append("srn");
          break;
        case SORTED_SET:
          flags.append("srs");
          break;
        default:
          flags.append("???"); // should not happen
      }
    } else {
      flags.append("----");
    }
    flags.append((fi.hasVectors()) ? FieldFlag.TERM_VECTOR_STORED.getAbbreviation() : '-');
    flags.append((fi.omitsNorms()) ? FieldFlag.OMIT_NORMS.getAbbreviation() : '-');

    flags.append((DOCS == opts) ?
        FieldFlag.OMIT_TF.getAbbreviation() : '-');

    flags.append((DOCS_AND_FREQS == opts) ?
        FieldFlag.OMIT_POSITIONS.getAbbreviation() : '-');

    flags.append((DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS == opts) ?
        FieldFlag.STORE_OFFSETS_WITH_POSITIONS.getAbbreviation() : '-');

    flags.append((fi.hasPayloads() ? "p" : "-"));
    flags.append((fi.isSoftDeletesField() ? "s" : "-"));
    if (fi.getPointDataDimensionCount() > 0 || fi.getPointIndexDimensionCount() > 0) {
      flags.append(":");
      flags.append(fi.getPointDataDimensionCount() + ":");
      flags.append(fi.getPointIndexDimensionCount() + ":");
      flags.append(fi.getPointNumBytes());
    }

    fieldFlags.add("flags", flags.toString());
    try {
      Terms terms = reader.terms(fi.name);
      if (terms != null) {
        fieldFlags.add("docCount", terms.getDocCount());
        fieldFlags.add("sumDocFreq", terms.getSumDocFreq());
        fieldFlags.add("sumTotalTermFreq", terms.getSumTotalTermFreq());
      }
    } catch (Exception e) {
      log.debug("Exception retrieving term stats for field " + fi.name, e);
    }

    // probably too much detail?
    // Map<String, String> attributes = fi.attributes();
    // if (!attributes.isEmpty()) {
    //   fieldFlags.add("attributes", attributes);
    // }

    // check compliance of the index with the current schema
    SchemaField sf = schema.getFieldOrNull(fi.name);
    boolean hasPoints = fi.getPointDataDimensionCount() > 0 || fi.getPointIndexDimensionCount() > 0;

    if (sf != null) {
      fieldFlags.add("schemaType", sf.getType().getTypeName());
      SimpleOrderedMap<Object> nonCompliant = new SimpleOrderedMap<>();
      if (sf.hasDocValues() &&
          fi.getDocValuesType() == DocValuesType.NONE &&
          fi.getIndexOptions() != IndexOptions.NONE) {
        nonCompliant.add("docValues", "schema=" + sf.getType().getUninversionType(sf) + ", segment=false");
      }
      if (!sf.hasDocValues() &&
          fi.getDocValuesType() != DocValuesType.NONE &&
          fi.getIndexOptions() != IndexOptions.NONE) {
        nonCompliant.add("docValues", "schema=false, segment=" + fi.getDocValuesType().toString());
      }
      if (!sf.isPolyField()) { // difficult to find all sub-fields in a general way
        if (sf.indexed() != ((fi.getIndexOptions() != IndexOptions.NONE) || hasPoints)) {
          nonCompliant.add("indexed", "schema=" + sf.indexed() + ", segment=" + fi.getIndexOptions());
        }
      }
      if (sf.omitNorms() != (fi.omitsNorms() || hasPoints)) {
        nonCompliant.add("omitNorms", "schema=" + sf.omitNorms() + ", segment=" + fi.omitsNorms());
      }
      if (sf.storeTermVector() != fi.hasVectors()) {
        nonCompliant.add("termVectors", "schema=" + sf.storeTermVector() + ", segment=" + fi.hasVectors());
      }
      if (sf.storeOffsetsWithPositions() != (fi.getIndexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS)) {
        nonCompliant.add("storeOffsetsWithPositions", "schema=" + sf.storeOffsetsWithPositions() + ", segment=" + fi.getIndexOptions());
      }

      if (nonCompliant.size() > 0) {
        nonCompliant.add("schemaField", sf.toString());
        fieldFlags.add("nonCompliant", nonCompliant);
      }
    } else {
      fieldFlags.add("schemaType", "(UNKNOWN)");
    }
    return fieldFlags;
  }
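As a reading aid (not part of the commit), the fixed-position flags string assembled above can be decoded with a small client-side sketch. It assumes the layout documented in FI_LEGEND: indexed marker, DocValues marker plus a three-letter type (or "----"), term vector, omit norms, omit tf, omit positions, offsets, payloads, soft deletes, and an optional ":dataDim:indexDim:numBytes" suffix for point fields:

    // Hypothetical helper - decodes a flags string such as "-Dsrn-------:1:1:8"
    // produced by getFieldInfo() above.
    static void decodeFlags(String flags) {
      String[] parts = flags.split(":", 2);
      String f = parts[0]; // the 12 fixed positions
      System.out.println("indexed=" + (f.charAt(0) == 'I'));
      boolean hasDocValues = f.charAt(1) == 'D';
      System.out.println("docValues=" + (hasDocValues ? f.substring(2, 5) : "none"));
      System.out.println("termVector=" + (f.charAt(5) == 'V'));
      System.out.println("omitNorms=" + (f.charAt(6) == 'O'));
      System.out.println("omitTermFreqAndPositions=" + (f.charAt(7) == 'F'));
      System.out.println("omitPositions=" + (f.charAt(8) == 'P'));
      System.out.println("storeOffsetsWithPositions=" + (f.charAt(9) == 'H'));
      System.out.println("payloads=" + (f.charAt(10) == 'p'));
      System.out.println("softDeletes=" + (f.charAt(11) == 's'));
      if (parts.length > 1) {
        System.out.println("points(dataDim:indexDim:numBytes)=" + parts[1]);
      }
    }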
  // returns a map of currently running merges, and populates a list of candidate segments for merge
  private SimpleOrderedMap<Object> getMergeInformation(SolrQueryRequest req, SegmentInfos infos, List<String> mergeCandidates) throws IOException {
    SimpleOrderedMap<Object> result = new SimpleOrderedMap<>();
    RefCounted<IndexWriter> refCounted = req.getCore().getSolrCoreState().getIndexWriter(req.getCore());
    try {
      IndexWriter indexWriter = refCounted.get();
      if (indexWriter instanceof SolrIndexWriter) {
        result.addAll(((SolrIndexWriter)indexWriter).getRunningMerges());
      }
      //get chosen merge policy
      MergePolicy mp = indexWriter.getConfig().getMergePolicy();
      //Find merges

@@ -108,7 +432,7 @@ public class SegmentsInfoRequestHandler extends RequestHandlerBase {
      for (OneMerge merge : findMerges.merges) {
        //TODO: add merge grouping
        for (SegmentCommitInfo mergeSegmentInfo : merge.segments) {
-          result.add(mergeSegmentInfo.info.name);
+          mergeCandidates.add(mergeSegmentInfo.info.name);
        }
      }
    }
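When merges are in flight, `getMergeInformation()` surfaces them in the response under `runningMerges` (via `SolrIndexWriter.getRunningMerges()`, added in the SolrIndexWriter changes below), keyed by the merge's `segString()` with its total (non-deleted) document count as the value. A hypothetical response fragment, with invented segment names and counts:

    "runningMerges": {
      "_a(9.0.0):C10000/500 _b(9.0.0):C8000/200": 17300
    }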
@@ -18,8 +18,10 @@ package org.apache.solr.update;

import java.io.IOException;
import java.lang.invoke.MethodHandles;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
@@ -88,6 +90,8 @@ public class SolrIndexWriter extends IndexWriter {

  private final SolrMetricManager metricManager;
  private final String registryName;
+  // merge diagnostics.
+  private final Map<String, Long> runningMerges = new ConcurrentHashMap<>();

  public static SolrIndexWriter create(SolrCore core, String name, String path, DirectoryFactory directoryFactory, boolean create, IndexSchema schema, SolrIndexConfig config, IndexDeletionPolicy delPolicy, Codec codec) throws IOException {
@@ -192,12 +196,18 @@ public class SolrIndexWriter extends IndexWriter {
  // we override this method to collect metrics for merges.
  @Override
  public void merge(MergePolicy.OneMerge merge) throws IOException {
+    String segString = merge.segString();
+    long totalNumDocs = merge.totalNumDocs();
+    runningMerges.put(segString, totalNumDocs);
    if (!mergeTotals) {
-      super.merge(merge);
+      try {
+        super.merge(merge);
+      } finally {
+        runningMerges.remove(segString);
+      }
      return;
    }
    long deletedDocs = 0;
-    long totalNumDocs = merge.totalNumDocs();
    for (SegmentCommitInfo info : merge.segments) {
      totalNumDocs -= info.getDelCount();
      deletedDocs += info.getDelCount();
@@ -226,6 +236,7 @@ public class SolrIndexWriter extends IndexWriter {
      mergeErrors.inc();
      throw t;
    } finally {
+      runningMerges.remove(segString);
      context.stop();
      if (major) {
        runningMajorMerges.decrementAndGet();
@@ -239,6 +250,10 @@ public class SolrIndexWriter extends IndexWriter {
    }
  }

+  public Map<String, Object> getRunningMerges() {
+    return Collections.unmodifiableMap(runningMerges);
+  }
+
  @Override
  protected void doAfterFlush() throws IOException {
    if (flushMeter != null) { // this is null when writer is used only for snapshot cleanup
@@ -39,6 +39,7 @@ import java.util.concurrent.atomic.AtomicReference;

import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
@@ -601,6 +602,35 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
    fail("Timed out waiting for cluster property value");
  }

+  @Test
+  public void testColStatus() throws Exception {
+    final String collectionName = "collectionStatusTest";
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
+        .process(cluster.getSolrClient());
+
+    cluster.waitForActiveCollection(collectionName, 2, 4);
+
+    SolrClient client = cluster.getSolrClient();
+    // index some docs
+    for (int i = 0; i < 10; i++) {
+      client.add(collectionName, new SolrInputDocument("id", String.valueOf(i)));
+    }
+    client.commit(collectionName);
+
+    CollectionAdminRequest.ColStatus req = CollectionAdminRequest.collectionStatus(collectionName);
+    req.setWithFieldInfo(true);
+    req.setWithCoreInfo(true);
+    req.setWithSegments(true);
+    req.setWithSizeInfo(true);
+    CollectionAdminResponse rsp = req.process(cluster.getSolrClient());
+    assertEquals(0, rsp.getStatus());
+    NamedList<Object> segInfos = (NamedList<Object>) rsp.getResponse().findRecursive(collectionName, "shards", "shard1", "leader", "segInfos");
+    assertNotNull(Utils.toJSONString(rsp), segInfos.findRecursive("info", "core", "startTime"));
+    assertNotNull(Utils.toJSONString(rsp), segInfos.get("fieldInfoLegend"));
+    assertNotNull(Utils.toJSONString(rsp), segInfos.findRecursive("segments", "_0", "fields", "id", "flags"));
+    assertNotNull(Utils.toJSONString(rsp), segInfos.findRecursive("segments", "_0", "ramBytesUsed"));
+  }
+
  private static final int NUM_DOCS = 10;

  @Test
@@ -120,4 +120,31 @@ public class SegmentsInfoRequestHandlerTest extends SolrTestCaseJ4 {
        //#Deletes
        DEL_COUNT+"=sum(//lst[@name='segments']/lst/int[@name='delCount'])");
  }

+  @Test
+  public void testCoreInfo() {
+    assertQ("Missing core info",
+        req("qt", "/admin/segments", "coreInfo", "true"),
+        "boolean(//lst[@name='info']/lst[@name='core'])");
+  }
+
+  @Test
+  public void testFieldInfo() throws Exception {
+    String[] segmentNamePatterns = new String[NUM_SEGMENTS];
+    h.getCore().withSearcher((searcher) -> {
+      int i = 0;
+      for (SegmentCommitInfo sInfo : SegmentInfos.readLatestCommit(searcher.getIndexReader().directory())) {
+        assertTrue("Unexpected number of segments in the index: " + i, i < NUM_SEGMENTS);
+        segmentNamePatterns[i] = "boolean(//lst[@name='segments']/lst[@name='" + sInfo.info.name + "']/lst[@name='fields']/lst[@name='id']/str[@name='flags'])";
+        i++;
+      }
+      return null;
+    });
+    assertQ("Unexpected field infos returned",
+        req("qt","/admin/segments", "fieldInfo", "true"),
+        segmentNamePatterns);
+  }
+
}
@@ -1334,6 +1334,192 @@ http://localhost:8983/solr/admin/collections?action=COLLECTIONPROP&name=coll&pro
</response>
----

[[colstatus]]
== COLSTATUS: Detailed Low-level Status of Collection Indexes

The COLSTATUS command provides a detailed description of the collection status, including low-level index
information about segments and field data.

This command also checks the compliance of Lucene index field types with the current Solr collection
schema, and reports the names of non-compliant fields, i.e., Lucene fields with types incompatible with
(or different from) the corresponding Solr field types declared in the current schema. Such incompatibilities
may result from incompatible schema changes, or from migration of data to a different major Solr release.

`/admin/collections?action=COLSTATUS&collection=coll&coreInfo=true&segments=true&fieldInfo=true&sizeInfo=true`

=== COLSTATUS Parameters

`collection`::
Collection name (optional). If omitted, the status of all collections is reported.

`coreInfo`::
Optional boolean. If true, additional information about the SolrCore of each shard leader is provided.

`segments`::
Optional boolean. If true, per-segment information is provided.

`fieldInfo`::
Optional boolean. If true, detailed Lucene field information and the corresponding Solr schema types are provided.

`sizeInfo`::
Optional boolean. If true, additional information about the size of index files and their RAM usage is provided.

=== COLSTATUS Response
The response includes an overview of the collection status, the number of
active and inactive shards and replicas, and additional index information
from each shard leader.
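The same request can be issued from SolrJ via the `CollectionAdminRequest.ColStatus` helper added in this commit. A minimal sketch, mirroring the test in this commit; the collection name and the pre-built `solrClient` are assumptions:

[source,java]
----
CollectionAdminRequest.ColStatus req = CollectionAdminRequest.collectionStatus("gettingstarted");
req.setWithSegments(true);
req.setWithFieldInfo(true);
req.setWithCoreInfo(true);
req.setWithSizeInfo(true);
CollectionAdminResponse rsp = req.process(solrClient); // e.g. a CloudSolrClient
NamedList<Object> segInfos = (NamedList<Object>) rsp.getResponse()
    .findRecursive("gettingstarted", "shards", "shard1", "leader", "segInfos");
----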
=== Examples using COLSTATUS

*Input*

[source,text]
----
http://localhost:8983/solr/admin/collections?action=COLSTATUS&collection=gettingstarted&fieldInfo=true&sizeInfo=true
----

*Output*

[source,json]
----
{
  "responseHeader": {
    "status": 0,
    "QTime": 50
  },
  "gettingstarted": {
    "stateFormat": 2,
    "znodeVersion": 16,
    "properties": {
      "autoAddReplicas": "false",
      "maxShardsPerNode": "-1",
      "nrtReplicas": "2",
      "pullReplicas": "0",
      "replicationFactor": "2",
      "router": {
        "name": "compositeId"
      },
      "tlogReplicas": "0"
    },
    "activeShards": 2,
    "inactiveShards": 0,
    "schemaNonCompliant": [
      "(NONE)"
    ],
    "shards": {
      "shard1": {
        "state": "active",
        "range": "80000000-ffffffff",
        "replicas": {
          "total": 2,
          "active": 2,
          "down": 0,
          "recovering": 0,
          "recovery_failed": 0
        },
        "leader": {
          "coreNode": "core_node4",
          "core": "gettingstarted_shard1_replica_n1",
          "base_url": "http://192.168.0.80:8983/solr",
          "node_name": "192.168.0.80:8983_solr",
          "state": "active",
          "type": "NRT",
          "force_set_state": "false",
          "leader": "true",
          "segInfos": {
            "info": {
              "minSegmentLuceneVersion": "9.0.0",
              "commitLuceneVersion": "9.0.0",
              "numSegments": 40,
              "segmentsFileName": "segments_w",
              "totalMaxDoc": 686953,
              "userData": {
                "commitCommandVer": "1627350608019193856",
                "commitTimeMSec": "1551962478819"
              }
            },
            "fieldInfoLegend": [
              "I - Indexed",
              "D - DocValues",
              "xxx - DocValues type",
              "V - TermVector Stored",
              "O - Omit Norms",
              "F - Omit Term Frequencies & Positions",
              "P - Omit Positions",
              "H - Store Offsets with Positions",
              "p - field has payloads",
              "s - field uses soft deletes",
              ":x:x:x - point data dim : index dim : num bytes"
            ],
            "segments": {
              "_i": {
                "name": "_i",
                "delCount": 738,
                "softDelCount": 0,
                "hasFieldUpdates": false,
                "sizeInBytes": 109398213,
                "size": 70958,
                "age": "2019-03-07T12:34:24.761Z",
                "source": "merge",
                "version": "9.0.0",
                "createdVersionMajor": 9,
                "minVersion": "9.0.0",
                "diagnostics": {
                  "os": "Mac OS X",
                  "java.vendor": "Oracle Corporation",
                  "java.version": "1.8.0_191",
                  "java.vm.version": "25.191-b12",
                  "lucene.version": "9.0.0",
                  "mergeMaxNumSegments": "-1",
                  "os.arch": "x86_64",
                  "java.runtime.version": "1.8.0_191-b12",
                  "source": "merge",
                  "mergeFactor": "10",
                  "os.version": "10.14.3",
                  "timestamp": "1551962064761"
                },
                "attributes": {
                  "Lucene50StoredFieldsFormat.mode": "BEST_SPEED"
                },
                "largestFiles": {
                  "_i.fdt": "42.5 MB",
                  "_i_Lucene80_0.dvd": "35.3 MB",
                  "_i_Lucene50_0.pos": "11.1 MB",
                  "_i_Lucene50_0.doc": "10 MB",
                  "_i_Lucene50_0.tim": "4.3 MB"
                },
                "ramBytesUsed": {
                  "total": 49153,
                  "postings [PerFieldPostings(segment=_i formats=1)]": {
                    "total": 31023,
                    ...
                "fields": {
                  "dc": {
                    "flags": "I-----------",
                    "schemaType": "text_general"
                  },
                  "dc_str": {
                    "flags": "-Dsrs-------",
                    "schemaType": "strings"
                  },
                  "dc.title": {
                    "flags": "I-----------",
                    "docCount": 70958,
                    "sumDocFreq": 646756,
                    "sumTotalTermFreq": 671817,
                    "schemaType": "text_general"
                  },
                  "dc.date": {
                    "flags": "-Dsrn-------:1:1:8",
                    "schemaType": "pdates"
                  },
                  ...
----
[[migrate]]
== MIGRATE: Migrate Documents to Another Collection
@@ -783,6 +783,54 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>

  }

+  /**
+   * Returns a SolrRequest for the low-level detailed status of a collection.
+   */
+  public static ColStatus collectionStatus(String collection) {
+    return new ColStatus(collection);
+  }
+
+  public static class ColStatus extends AsyncCollectionSpecificAdminRequest {
+    protected Boolean withSegments = null;
+    protected Boolean withFieldInfo = null;
+    protected Boolean withCoreInfo = null;
+    protected Boolean withSizeInfo = null;
+
+    private ColStatus(String collection) {
+      super(CollectionAction.COLSTATUS, collection);
+    }
+
+    public ColStatus setWithSegments(boolean withSegments) {
+      this.withSegments = withSegments;
+      return this;
+    }
+
+    public ColStatus setWithFieldInfo(boolean withFieldInfo) {
+      this.withFieldInfo = withFieldInfo;
+      return this;
+    }
+
+    public ColStatus setWithCoreInfo(boolean withCoreInfo) {
+      this.withCoreInfo = withCoreInfo;
+      return this;
+    }
+
+    public ColStatus setWithSizeInfo(boolean withSizeInfo) {
+      this.withSizeInfo = withSizeInfo;
+      return this;
+    }
+
+    @Override
+    public SolrParams getParams() {
+      ModifiableSolrParams params = (ModifiableSolrParams)super.getParams();
+      // pass the Boolean values themselves - setNonNull skips unset (null) flags,
+      // whereas calling toString() on an unset flag would throw an NPE
+      params.setNonNull("segments", withSegments);
+      params.setNonNull("fieldInfo", withFieldInfo);
+      params.setNonNull("coreInfo", withCoreInfo);
+      params.setNonNull("sizeInfo", withSizeInfo);
+      return params;
+    }
+  }
+
  /**
   * Returns a SolrRequest to delete a collection
   */
@@ -67,4 +67,8 @@ public enum FieldFlag {
  public String getDisplay() {
    return display;
  }
+
+  public String toString() {
+    return abbreviation + " - " + display;
+  }
}
@@ -121,7 +121,8 @@ public interface CollectionParams {
    MOCK_REPLICA_TASK(false, LockLevel.REPLICA),
    NONE(false, LockLevel.NONE),
    // TODO: not implemented yet
-    MERGESHARDS(true, LockLevel.SHARD)
+    MERGESHARDS(true, LockLevel.SHARD),
+    COLSTATUS(true, LockLevel.NONE)
    ;
    public final boolean isWrite;