mirror of https://github.com/apache/lucene.git
SOLR-14485: Fix or suppress 11 resource leak warnings in apache/solr/cloud
parent 98ef96ccbb
commit 34e5e6c127

@@ -222,9 +222,11 @@ Other Changes
 * SOLR-7880: Update commons-cli to 1.4 (Erick Erickson)
 
-* SOLR-14226Fix or suppress 14 resource leak warnings in apache/solr/core (Andras Salaman via
+* SOLR-14226: Fix or suppress 14 resource leak warnings in apache/solr/core (Andras Salaman via
   Erick Erickson)
 
+* SOLR-14485: Fix or suppress 11 resource leak warnings in apache/solr/cloud (Andras Salaman via Erick Erickson)
+
 ================== 8.5.1 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
 

@@ -641,9 +641,11 @@ public class RecoveryStrategy implements Runnable, Closeable {
       }
       // System.out.println("Attempting to PeerSync from " + leaderUrl
       // + " i am:" + zkController.getNodeName());
-      PeerSyncWithLeader peerSyncWithLeader = new PeerSyncWithLeader(core,
-          leader.getCoreUrl(), ulog.getNumRecordsToKeep());
-      boolean syncSuccess = peerSyncWithLeader.sync(recentVersions).isSuccess();
+      boolean syncSuccess;
+      try (PeerSyncWithLeader peerSyncWithLeader = new PeerSyncWithLeader(core,
+          leader.getCoreUrl(), ulog.getNumRecordsToKeep())) {
+        syncSuccess = peerSyncWithLeader.sync(recentVersions).isSuccess();
+      }
       if (syncSuccess) {
         SolrQueryRequest req = new LocalSolrQueryRequest(core,
             new ModifiableSolrParams());
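
The change above is the basic try-with-resources shape used throughout this commit: PeerSyncWithLeader is declared in the try header (it evidently implements Closeable, since it compiles there), so its close() runs whether sync() returns normally or throws, and the boolean result is hoisted into a variable declared before the block so it remains visible afterwards. A minimal, self-contained sketch of the same shape; the Resource class and all names below are hypothetical stand-ins, not Solr API:

import java.io.Closeable;

public class TryWithResourcesSketch {

  // Hypothetical resource standing in for PeerSyncWithLeader.
  static class Resource implements Closeable {
    boolean doWork() {
      return true;
    }
    @Override
    public void close() {
      // release sockets, buffers, etc.
    }
  }

  public static void main(String[] args) {
    boolean success;                     // declared outside so the result survives the block
    try (Resource r = new Resource()) {  // close() runs automatically when the block exits
      success = r.doWork();
    }
    System.out.println("success = " + success);
  }
}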

@@ -154,7 +154,7 @@ public class SyncStrategy {
   }
 
   private PeerSync.PeerSyncResult syncWithReplicas(ZkController zkController, SolrCore core,
-      ZkNodeProps props, String collection, String shardId, boolean peerSyncOnlyWithActive) {
+      ZkNodeProps props, String collection, String shardId, boolean peerSyncOnlyWithActive) throws Exception {
     List<ZkCoreNodeProps> nodes = zkController.getZkStateReader()
         .getReplicaProps(collection, shardId,core.getCoreDescriptor().getCloudDescriptor().getCoreNodeName());
 

@@ -179,8 +179,9 @@ public class SyncStrategy {
     // Fingerprinting here is off because the we currently rely on having at least one of the nodes return "true", and if replicas are out-of-sync
     // we still need to pick one as leader. A followup sync from the replica to the new leader (with fingerprinting on) should then fail and
     // initiate recovery-by-replication.
-    PeerSync peerSync = new PeerSync(core, syncWith, core.getUpdateHandler().getUpdateLog().getNumRecordsToKeep(), true, peerSyncOnlyWithActive, false);
-    return peerSync.sync();
+    try (PeerSync peerSync = new PeerSync(core, syncWith, core.getUpdateHandler().getUpdateLog().getNumRecordsToKeep(), true, peerSyncOnlyWithActive, false)) {
+      return peerSync.sync();
+    }
   }
 
   private void syncToMe(ZkController zkController, String collection,
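
The two SyncStrategy hunks above belong together. Wrapping PeerSync in try-with-resources means the generated close() call can throw; AutoCloseable.close() is declared to throw Exception, and unless the resource type narrows that signature, the enclosing method has to declare or catch the checked exception, which is presumably why syncWithReplicas now declares throws Exception. Returning from inside the block is safe: close() still runs before the caller receives the value. A hedged sketch with hypothetical names:

public class AutoCloseableThrowsSketch {

  // Hypothetical resource; like AutoCloseable itself, its close() may throw a checked Exception.
  static class Syncer implements AutoCloseable {
    boolean sync() {
      return true;
    }
    @Override
    public void close() throws Exception {
      // releasing the resource may itself fail
    }
  }

  // close() can throw a checked Exception, so the method must declare it,
  // mirroring the "throws Exception" added to syncWithReplicas.
  static boolean syncOnce() throws Exception {
    try (Syncer s = new Syncer()) {
      return s.sync();  // close() runs after sync() but before the caller receives the result
    }
  }

  public static void main(String[] args) throws Exception {
    System.out.println("synced = " + syncOnce());
  }
}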

@@ -790,12 +790,14 @@ public class SimCloudManager implements SolrCloudManager {
       if (metricsHistoryHandler != null) {
         metricsHistoryHandler.handleRequest(queryRequest, queryResponse);
       } else {
+        queryRequest.close();
         throw new UnsupportedOperationException("must add at least 1 node first");
       }
     } else {
       if (metricsHandler != null) {
         metricsHandler.handleRequest(queryRequest, queryResponse);
       } else {
+        queryRequest.close();
         throw new UnsupportedOperationException("must add at least 1 node first");
       }
     }
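
SimCloudManager takes a different route: an explicit close() on the error path. The queryRequest is created earlier in the method (outside this hunk), and on the branch where no handler exists nothing else would ever consume or close it, so it is closed immediately before the exception is thrown. A small sketch of that pattern; the Request class and names are hypothetical:

import java.io.Closeable;

public class CloseOnErrorPathSketch {

  // Hypothetical stand-in for the query request object.
  static class Request implements Closeable {
    @Override
    public void close() {
      // free whatever the request holds
    }
  }

  static void handle(Request request, boolean handlerAvailable) {
    if (handlerAvailable) {
      System.out.println("handled " + request);  // normal path: the handler consumes the request
    } else {
      request.close();  // error path: nothing else will close it, so close before throwing
      throw new UnsupportedOperationException("must add at least 1 node first");
    }
  }

  public static void main(String[] args) {
    handle(new Request(), true);
  }
}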

@@ -405,8 +405,9 @@ public class SimScenario implements AutoCloseable {
         throw new IOException(SimAction.SAVE_SNAPSHOT + " must specify 'path'");
       }
       boolean redact = Boolean.parseBoolean(params.get("redact", "false"));
-      SnapshotCloudManager snapshotCloudManager = new SnapshotCloudManager(scenario.cluster, null);
-      snapshotCloudManager.saveSnapshot(new File(path), true, redact);
+      try (SnapshotCloudManager snapshotCloudManager = new SnapshotCloudManager(scenario.cluster, null)) {
+        snapshotCloudManager.saveSnapshot(new File(path), true, redact);
+      }
     }
   }
 

@@ -738,10 +739,10 @@ public class SimScenario implements AutoCloseable {
         }
       }
       final AutoScalingConfig.TriggerListenerConfig listenerConfig = new AutoScalingConfig.TriggerListenerConfig(name, cfgMap);
-      TriggerListener listener = new SimWaitListener(scenario.cluster.getTimeSource(), listenerConfig);
       if (scenario.context.containsKey("_sim_waitListener_" + trigger)) {
         throw new IOException("currently only one listener can be set per trigger. Trigger name: " + trigger);
       }
+      TriggerListener listener = new SimWaitListener(scenario.cluster.getTimeSource(), listenerConfig);
       scenario.context.put("_sim_waitListener_" + trigger, listener);
       scenario.cluster.getOverseerTriggerThread().getScheduledTriggers().addAdditionalListener(listener);
     }
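
This hunk fixes the warning by reordering rather than closing: the SimWaitListener is now constructed only after the duplicate-trigger check, so the early IOException can no longer strand a freshly created listener that was never registered or closed. A hedged sketch of the same allocate-after-validation idea, using hypothetical names:

import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class AllocateAfterValidationSketch {

  // Hypothetical listener standing in for SimWaitListener.
  static class Listener implements Closeable {
    @Override
    public void close() {
      // release timers, queues, etc.
    }
  }

  static final Map<String, Listener> registry = new HashMap<>();

  static void register(String trigger) throws IOException {
    // Validate first: if this throws, nothing has been allocated yet, so nothing can leak.
    if (registry.containsKey(trigger)) {
      throw new IOException("currently only one listener can be set per trigger. Trigger name: " + trigger);
    }
    // Allocate only after validation; the registry now owns the listener.
    registry.put(trigger, new Listener());
  }

  public static void main(String[] args) throws IOException {
    register("node_lost_trigger");
  }
}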

@@ -977,6 +978,7 @@ public class SimScenario implements AutoCloseable {
         RedactionUtils.RedactionContext ctx = SimUtils.getRedactionContext(snapshotCloudManager.getClusterStateProvider().getClusterState());
         data = RedactionUtils.redactNames(ctx.getRedactions(), data);
       }
+      snapshotCloudManager.close();
       scenario.console.println(data);
     }
   }

@@ -988,6 +990,7 @@ public class SimScenario implements AutoCloseable {
    * @throws Exception on syntax errors
    */
   public static SimScenario load(String data) throws Exception {
+    @SuppressWarnings("resource")
     SimScenario scenario = new SimScenario();
     String[] lines = data.split("\\r?\\n");
     for (int i = 0; i < lines.length; i++) {
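
The @SuppressWarnings("resource") covers the remaining case: load() creates the SimScenario precisely in order to return it, so ownership, and the duty to call close(), passes to the caller, and the annotation records that the apparent leak is intentional rather than an oversight. A small sketch of that ownership transfer; names are hypothetical, and the annotation sits on the factory method here while the commit puts it on the local variable declaration:

public class OwnershipTransferSketch {

  // Hypothetical AutoCloseable standing in for SimScenario.
  static class Scenario implements AutoCloseable {
    void addOp(String line) {
      // record one parsed operation
    }
    @Override
    public void close() {
      // release whatever the scenario holds
    }
  }

  // The factory deliberately does not close what it creates: the caller owns the result.
  @SuppressWarnings("resource")
  static Scenario load(String data) {
    Scenario scenario = new Scenario();
    for (String line : data.split("\\r?\\n")) {
      scenario.addOp(line);
    }
    return scenario;
  }

  public static void main(String[] args) {
    // The caller closes the returned object, here via try-with-resources.
    try (Scenario s = load("create_collection\nwait_collection")) {
      System.out.println("loaded " + s);
    }
  }
}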

@@ -260,14 +260,15 @@ public class ChaosMonkeyShardSplitTest extends ShardSplitTest {
     ZkStateReader reader = new ZkStateReader(zkClient);
     LeaderElector overseerElector = new LeaderElector(zkClient);
     UpdateShardHandler updateShardHandler = new UpdateShardHandler(UpdateShardHandlerConfig.DEFAULT);
-    // TODO: close Overseer
-    Overseer overseer = new Overseer((HttpShardHandler) new HttpShardHandlerFactory().getShardHandler(), updateShardHandler, "/admin/cores",
-        reader, null, new CloudConfig.CloudConfigBuilder("127.0.0.1", 8983, "solr").build());
-    overseer.close();
-    ElectionContext ec = new OverseerElectionContext(zkClient, overseer,
-        address.replaceAll("/", "_"));
-    overseerElector.setup(ec);
-    overseerElector.joinElection(ec, false);
+    try (HttpShardHandlerFactory hshf = new HttpShardHandlerFactory()) {
+      Overseer overseer = new Overseer((HttpShardHandler) hshf.getShardHandler(), updateShardHandler, "/admin/cores",
+          reader, null, new CloudConfig.CloudConfigBuilder("127.0.0.1", 8983, "solr").build());
+      overseer.close();
+      ElectionContext ec = new OverseerElectionContext(zkClient, overseer,
+          address.replaceAll("/", "_"));
+      overseerElector.setup(ec);
+      overseerElector.joinElection(ec, false);
+    }
     reader.close();
     return zkClient;
   }

@@ -45,7 +45,9 @@ public class ZkNodePropsTest extends SolrTestCaseJ4 {
 
     props.forEach((s, o) -> assertEquals(o, props2.get(s)));
     SimplePostTool.BAOS baos = new SimplePostTool.BAOS();
-    new JavaBinCodec().marshal(zkProps.getProperties(), baos);
+    try (JavaBinCodec jbc = new JavaBinCodec()) {
+      jbc.marshal(zkProps.getProperties(), baos);
+    }
     bytes = baos.toByteArray();
     System.out.println("BIN size : " + bytes.length);
     ZkNodeProps props3 = ZkNodeProps.load(bytes);

@@ -121,10 +121,11 @@ public class TestSnapshotCloudManager extends SolrCloudTestCase {
   public void testRedaction() throws Exception {
     Path tmpPath = createTempDir();
     File tmpDir = tmpPath.toFile();
-    SnapshotCloudManager snapshotCloudManager = new SnapshotCloudManager(realManager, null);
     Set<String> redacted = new HashSet<>(realManager.getClusterStateProvider().getLiveNodes());
-    redacted.addAll(realManager.getClusterStateProvider().getClusterState().getCollectionStates().keySet());
-    snapshotCloudManager.saveSnapshot(tmpDir, true, true);
+    try (SnapshotCloudManager snapshotCloudManager = new SnapshotCloudManager(realManager, null)) {
+      redacted.addAll(realManager.getClusterStateProvider().getClusterState().getCollectionStates().keySet());
+      snapshotCloudManager.saveSnapshot(tmpDir, true, true);
+    }
     for (String key : SnapshotCloudManager.REQUIRED_KEYS) {
       File src = new File(tmpDir, key + ".json");
       assertTrue(src.toString() + " doesn't exist", src.exists());