mirror of https://github.com/apache/nifi.git
NIFI-9847: Switched LifecycleState to use a WeakHashMap to track ActiveProcessSessionFactory instances (#5917)

* NIFI-9847: Switched LifecycleState to use a WeakHashMap to track ActiveProcessSessionFactory instances, instead of a regular Set that removed the instance after calling onTrigger. This was necessary for processors such as MergeRecord that may stash away an ActiveProcessSessionFactory for later use, as we need to be able to force rollback on processor termination.

* NIFI-9847: Fixed checkstyle violation
parent 0f8183dd95
commit 940fd8e81c
@@ -1818,7 +1818,7 @@ public class StandardProcessorNode extends ProcessorNode implements Connectable
                 deactivateThread();
             }

-            scheduleState.decrementActiveThreadCount(null);
+            scheduleState.decrementActiveThreadCount();
             hasActiveThreads = false;
             scheduledState.set(ScheduledState.STOPPED);
             future.complete(null);

@@ -1329,11 +1329,11 @@ public class StandardProcessSession implements ProcessSession, ProvenanceEventEn
                 if (repoRecord.getOriginalQueue() != null && repoRecord.getOriginalQueue().getIdentifier() != null) {
                     details.append("queue=")
                         .append(repoRecord.getOriginalQueue().getIdentifier())
-                        .append("/");
+                        .append(", ");
                 }
                 details.append("filename=")
                     .append(repoRecord.getCurrent().getAttribute(CoreAttributes.FILENAME.key()))
-                    .append("/uuid=")
+                    .append(", uuid=")
                     .append(repoRecord.getCurrent().getAttribute(CoreAttributes.UUID.key()));
             }
             if (records.size() > MAX_ROLLBACK_FLOWFILES_TO_LOG) {

@@ -1341,7 +1341,7 @@ public class StandardProcessSession implements ProcessSession, ProvenanceEventEn
                 details.append(", ");
             }
             details.append(records.size() - MAX_ROLLBACK_FLOWFILES_TO_LOG)
-                .append(" additional Flowfiles not listed");
+                .append(" additional FlowFiles not listed");
         } else if (filesListed == 0) {
             details.append("none");
         }
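Note: the two hunks above only change the separators in the rollback summary ("/" becomes ", "; "Flowfiles" becomes "FlowFiles"). For context, the surrounding logic caps how many FlowFiles are listed; a minimal standalone sketch of that shape (not the NiFi source; the helper name and cap value are illustrative):

    import java.util.List;

    class RollbackSummarySketch {
        private static final int MAX_ROLLBACK_FLOWFILES_TO_LOG = 5; // illustrative value

        // Builds "queue=<id>, filename=<name>, uuid=<uuid>" style entries, then a
        // trailing "N additional FlowFiles not listed" suffix once the cap is hit.
        static String summarize(final List<String> flowFileDescriptions) {
            final StringBuilder details = new StringBuilder();
            final int listed = Math.min(flowFileDescriptions.size(), MAX_ROLLBACK_FLOWFILES_TO_LOG);
            for (int i = 0; i < listed; i++) {
                if (i > 0) {
                    details.append(", ");
                }
                details.append(flowFileDescriptions.get(i));
            }
            if (flowFileDescriptions.size() > MAX_ROLLBACK_FLOWFILES_TO_LOG) {
                details.append(", ")
                    .append(flowFileDescriptions.size() - MAX_ROLLBACK_FLOWFILES_TO_LOG)
                    .append(" additional FlowFiles not listed");
            } else if (listed == 0) {
                details.append("none");
            }
            return details.toString();
        }
    }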
@@ -1440,8 +1440,6 @@ public class StandardProcessSession implements ProcessSession, ProvenanceEventEn

     @Override
     public void migrate(final ProcessSession newOwner, final Collection<FlowFile> flowFiles) {
-        verifyTaskActive();
-
         if (Objects.requireNonNull(newOwner) == this) {
             throw new IllegalArgumentException("Cannot migrate FlowFiles from a Process Session to itself");
         }

@@ -1457,7 +1455,18 @@ public class StandardProcessSession implements ProcessSession, ProvenanceEventEn
         migrate((StandardProcessSession) newOwner, flowFiles);
     }

-    private void migrate(final StandardProcessSession newOwner, Collection<FlowFile> flowFiles) {
+    private synchronized void migrate(final StandardProcessSession newOwner, Collection<FlowFile> flowFiles) {
+        // This method will update many member variables/internal state of both `this` and `newOwner`. These member variables may also be updated during
+        // session rollback, such as when a Processor is terminated. As such, we need to ensure that we synchronize on both `this` and `newOwner` so that
+        // neither can be rolled back while we are in the process of migrating FlowFiles from one session to another.
+        //
+        // We must also ensure that we verify that both sessions are in an amenable state to perform this transference after obtaining the synchronization lock.
+        // We synchronize on 'this' by marking the method synchronized. Because the only way in which one Process Session will call into another is via this migrate() method,
+        // we do not need to worry about the order in which the synchronized lock is obtained.
+        synchronized (newOwner) {
+            verifyTaskActive();
+            newOwner.verifyTaskActive();
+
             // We don't call validateRecordState() here because we want to allow migration of FlowFiles that have already been marked as removed or transferred, etc.
             flowFiles = flowFiles.stream().map(this::getMostRecent).collect(Collectors.toList());

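Note: the locking scheme described in the new comments reduces to the following shape (a simplified sketch, not the NiFi source; verifyTaskActive stands in for the real session-state checks):

    // Sketch of migrate()'s two-session locking: the method is synchronized (locking
    // `this`), then the destination session is locked. Lock ordering cannot deadlock
    // here because migrate() is the only path by which one session calls into another.
    class SessionSketch {
        void verifyTaskActive() {
            // throw if the owning task has been terminated
        }

        synchronized void migrate(final SessionSketch newOwner) {
            synchronized (newOwner) {
                // Re-check both sessions only after both locks are held, so that
                // neither session can be rolled back mid-migration.
                verifyTaskActive();
                newOwner.verifyTaskActive();
                // ... move FlowFile state from this session into newOwner ...
            }
        }
    }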
@@ -1507,8 +1516,8 @@ public class StandardProcessSession implements ProcessSession, ProvenanceEventEn
             final ProvenanceEventBuilder eventBuilder = entry.getValue();
             for (final String childId : eventBuilder.getChildFlowFileIds()) {
                 if (flowFileIds.contains(childId)) {
-                    throw new FlowFileHandlingException("Cannot migrate " + eventFlowFile + " to a new session because it was forked from a Parent FlowFile, but the parent is not being migrated. "
-                        + "If any FlowFile is forked, the parent and all children must be migrated at the same time.");
+                    throw new FlowFileHandlingException("Cannot migrate " + eventFlowFile + " to a new session because it was forked from a Parent FlowFile, " +
+                        "but the parent is not being migrated. If any FlowFile is forked, the parent and all children must be migrated at the same time.");
                 }
             }
         }

@@ -1640,6 +1649,7 @@ public class StandardProcessSession implements ProcessSession, ProvenanceEventEn

             provenanceReporter.migrate(newOwner.provenanceReporter, flowFileIds);
         }
+    }


     private String summarizeEvents(final Checkpoint checkpoint) {

@@ -1793,11 +1803,7 @@ public class StandardProcessSession implements ProcessSession, ProvenanceEventEn
             flowFilesIn++;
             contentSizeIn += flowFile.getSize();

-            Set<FlowFileRecord> set = unacknowledgedFlowFiles.get(connection.getFlowFileQueue());
-            if (set == null) {
-                set = new HashSet<>();
-                unacknowledgedFlowFiles.put(connection.getFlowFileQueue(), set);
-            }
+            final Set<FlowFileRecord> set = unacknowledgedFlowFiles.computeIfAbsent(connection.getFlowFileQueue(), k -> new HashSet<>());
             set.add(flowFile);

             incrementConnectionOutputCounts(connection, flowFile);
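Note: the replaced get/null-check/put sequence is the common map-of-collections idiom; computeIfAbsent collapses it into one call. In isolation (placeholder generic types, not the NiFi source):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    class BucketSketch<K, V> {
        private final Map<K, Set<V>> buckets = new HashMap<>();

        // Before: look up the bucket, create and register it if absent, then add.
        void addVerbose(final K key, final V value) {
            Set<V> set = buckets.get(key);
            if (set == null) {
                set = new HashSet<>();
                buckets.put(key, set);
            }
            set.add(value);
        }

        // After: computeIfAbsent creates and registers the bucket in one call. On a
        // plain HashMap this is a readability win, not a thread-safety guarantee.
        void addConcise(final K key, final V value) {
            buckets.computeIfAbsent(key, k -> new HashSet<>()).add(value);
        }
    }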
@@ -23,7 +23,9 @@ import org.apache.nifi.processor.exception.TerminatedTaskException;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
+import java.util.WeakHashMap;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;

@@ -36,7 +38,7 @@ public class LifecycleState {
     private final AtomicBoolean mustCallOnStoppedMethods = new AtomicBoolean(false);
     private volatile long lastStopTime = -1;
     private volatile boolean terminated = false;
-    private final Set<ActiveProcessSessionFactory> activeProcessSessionFactories = Collections.synchronizedSet(new HashSet<>());
+    private final Map<ActiveProcessSessionFactory, Object> activeProcessSessionFactories = new WeakHashMap<>();

     public synchronized int incrementActiveThreadCount(final ActiveProcessSessionFactory sessionFactory) {
         if (terminated) {

@@ -44,21 +46,29 @@ public class LifecycleState {
         }

         if (sessionFactory != null) {
-            activeProcessSessionFactories.add(sessionFactory);
+            // If a session factory is provided, add it to our WeakHashMap. The value that we use is not relevant,
+            // as this just serves, essentially, as a WeakHashSet, but there is no WeakHashSet implementation.
+            // We need to keep track of any ActiveProcessSessionFactory that has been created for this component,
+            // as long as the session factory has not been garbage collected. This is important because when we offload
+            // a node, we will terminate all active processors and we need the ability to terminate any active sessions
+            // at that time. We cannot simply store a Set of all ActiveProcessSessionFactories and then remove them in the
+            // decrementActiveThreadCount because a Processor may choose to continue using the ProcessSessionFactory even after
+            // returning from its onTrigger method.
+            //
+            // For example, it may stash the ProcessSessionFactory away in a member variable in order to aggregate FlowFiles across
+            // many onTrigger invocations. In this case, we need the ability to force the rollback of any created session upon Processor
+            // termination.
+            activeProcessSessionFactories.put(sessionFactory, null);
         }

         return activeThreadCount.incrementAndGet();
     }

-    public synchronized int decrementActiveThreadCount(final ActiveProcessSessionFactory sessionFactory) {
+    public synchronized int decrementActiveThreadCount() {
         if (terminated) {
             return activeThreadCount.get();
         }

-        if (sessionFactory != null) {
-            activeProcessSessionFactories.remove(sessionFactory);
-        }
-
         return activeThreadCount.decrementAndGet();
     }

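Note: as the new comment says, java.util offers no WeakHashSet, so the commit uses a WeakHashMap whose values are ignored. An entry disappears once its key is only weakly reachable, so a factory that a processor stashes in a member variable stays tracked exactly as long as the processor could still use it. A minimal standalone sketch of the idiom (not the NiFi source):

    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;
    import java.util.WeakHashMap;

    // A weak "set" built from a WeakHashMap; the stored value is irrelevant (null).
    // WeakHashMap is not thread-safe, hence the synchronized methods here, mirroring
    // the synchronized increment/decrement methods of LifecycleState.
    class WeakSetSketch<T> {
        private final Map<T, Object> backing = new WeakHashMap<>();

        synchronized void add(final T element) {
            backing.put(element, null);
        }

        synchronized Set<T> snapshot() {
            // keySet() reflects only keys that have not yet been garbage collected.
            return new HashSet<>(backing.keySet());
        }
    }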
@@ -85,8 +95,7 @@ public class LifecycleState {

     @Override
     public String toString() {
-        return new StringBuilder().append("activeThreads:").append(activeThreadCount.get()).append("; ")
-            .append("scheduled:").append(scheduled.get()).append("; ").toString();
+        return "LifecycleState[activeThreads= " + activeThreadCount.get() + ", scheduled=" + scheduled.get() + "]";
     }

     /**

@@ -123,7 +132,8 @@ public class LifecycleState {
         this.terminated = true;
         activeThreadCount.set(0);

-        for (final ActiveProcessSessionFactory factory : activeProcessSessionFactories) {
+        // Terminate any active sessions.
+        for (final ActiveProcessSessionFactory factory : activeProcessSessionFactories.keySet()) {
             factory.terminateActiveSessions();
         }
     }

@@ -43,4 +43,9 @@ public class FlowFileQueueContents {
     public QueueSize getSwapSize() {
         return swapSize;
     }
+
+    @Override
+    public String toString() {
+        return "FlowFileQueueContents[swapLocations=" + swapLocations + ", swapSize=" + swapSize + ", activeFlowFiles=" + activeFlowFiles + "]";
+    }
 }
@@ -501,7 +501,12 @@ public class SwappablePriorityQueue {
     }

     public void acknowledge(final Collection<FlowFileRecord> flowFiles) {
-        logger.trace("{} Acknowledging {}", this, flowFiles);
+        if (logger.isTraceEnabled()) {
+            for (final FlowFileRecord flowFile : flowFiles) {
+                logger.trace("{} Acknowledging {}", this, flowFile);
+            }
+        }
+
         final long totalSize = flowFiles.stream().mapToLong(FlowFileRecord::getSize).sum();
         incrementUnacknowledgedQueueSize(-flowFiles.size(), -totalSize);
     }

@@ -627,8 +632,10 @@ public class SwappablePriorityQueue {
             writeLock.unlock("poll(int, Set)");
         }

-        if (!records.isEmpty()) {
-            logger.trace("{} poll() returning {}", this, records);
+        if (!records.isEmpty() && logger.isTraceEnabled()) {
+            for (final FlowFileRecord flowFile : records) {
+                logger.trace("{} poll() returning {}", this, flowFile);
+            }
         }

         return records;

@@ -690,8 +697,10 @@ public class SwappablePriorityQueue {
         this.activeQueue.addAll(unselected);
         incrementActiveQueueSize(-flowFilesPulled, -bytesPulled);

-        if (!selectedFlowFiles.isEmpty()) {
-            logger.trace("{} poll() returning {}", this, selectedFlowFiles);
+        if (!selectedFlowFiles.isEmpty() && logger.isTraceEnabled()) {
+            for (final FlowFileRecord flowFile : selectedFlowFiles) {
+                logger.trace("{} poll() returning {}", this, flowFile);
+            }
         }

         return selectedFlowFiles;
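Note: all three logging changes above follow one pattern: guard with isTraceEnabled() and log one FlowFile per statement instead of passing the whole collection, so the collection's toString() is never built when tracing is off and no single log line grows unbounded. The shape in isolation (a sketch, not the NiFi source):

    import java.util.List;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class TraceGuardSketch {
        private static final Logger logger = LoggerFactory.getLogger(TraceGuardSketch.class);

        void logPolled(final List<Object> records) {
            // The isTraceEnabled() check skips the loop entirely when tracing is off;
            // logging per element keeps each individual statement small.
            if (!records.isEmpty() && logger.isTraceEnabled()) {
                for (final Object record : records) {
                    logger.trace("{} poll() returning {}", this, record);
                }
            }
        }
    }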
@@ -244,7 +244,10 @@ public class SocketLoadBalancedFlowFileQueue extends AbstractFlowFileQueue imple
             return;
         }

-        logger.debug("Setting queue {} on node {} as offloaded", this, clusterCoordinator.getLocalNodeIdentifier());
+        if (logger.isDebugEnabled()) {
+            logger.debug("Setting queue {} on node {} as offloaded. Current size: {}, Partition Sizes: {}", this, clusterCoordinator.getLocalNodeIdentifier(), size(), getPartitionSizes());
+        }

         offloaded = true;

         partitionWriteLock.lock();

@@ -271,11 +274,30 @@ public class SocketLoadBalancedFlowFileQueue extends AbstractFlowFileQueue imple

             // Update our partitioner so that we don't keep any data on the local partition
             setFlowFilePartitioner(new NonLocalPartitionPartitioner());
+
+            if (logger.isDebugEnabled()) {
+                logger.debug("Queue {} has now updated Partition on node {} for offload. Current size: {}, Partition Sizes: {}",
+                    this, clusterCoordinator.getLocalNodeIdentifier(), size(), getPartitionSizes());
+            }
         } finally {
             partitionWriteLock.unlock();
         }
     }

+    private Map<QueuePartition, QueueSize> getPartitionSizes() {
+        partitionReadLock.lock();
+        try {
+            final Map<QueuePartition, QueueSize> sizeMap = new HashMap<>();
+            for (final QueuePartition partition : queuePartitions) {
+                sizeMap.put(partition, partition.size());
+            }
+
+            return sizeMap;
+        } finally {
+            partitionReadLock.unlock();
+        }
+    }
+
     @Override
     public void resetOffloadedQueue() {
         if (clusterCoordinator == null) {
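Note: unlike parameterized trace calls, these debug statements compute size() and getPartitionSizes() eagerly just to build their arguments, and getPartitionSizes() takes the partition read lock. The isDebugEnabled() guard is therefore what keeps the offload path cheap when debug logging is off. The general pattern, in isolation (method bodies hypothetical, not the NiFi source):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class DebugGuardSketch {
        private static final Logger logger = LoggerFactory.getLogger(DebugGuardSketch.class);

        void markOffloaded() {
            // size() and getPartitionSizes() may take locks and walk partitions, so
            // they are evaluated only when debug logging is actually enabled.
            if (logger.isDebugEnabled()) {
                logger.debug("Setting queue {} as offloaded. Current size: {}, Partition Sizes: {}",
                    this, size(), getPartitionSizes());
            }
        }

        private Object size() {
            return 0; // placeholder
        }

        private Object getPartitionSizes() {
            return "{}"; // placeholder
        }
    }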
@@ -899,7 +921,7 @@ public class SocketLoadBalancedFlowFileQueue extends AbstractFlowFileQueue imple
             final List<FlowFileRecord> flowFileList = (flowFiles instanceof List) ? (List<FlowFileRecord>) flowFiles : new ArrayList<>(flowFiles);
             partitionMap = Collections.singletonMap(partition, flowFileList);

-            logger.debug("Partitioner is static so Partitioned FlowFiles as: {}", partitionMap);
+            logger.debug("Partitioner {} is static so Partitioned FlowFiles as: {}", partitioner, partitionMap);
             return partitionMap;
         }

@@ -30,6 +30,8 @@ import org.apache.nifi.controller.repository.FlowFileSwapManager;
 import org.apache.nifi.controller.repository.SwapSummary;
 import org.apache.nifi.events.EventReporter;
 import org.apache.nifi.flowfile.FlowFilePrioritizer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 import java.util.ArrayList;
 import java.util.Collection;

@@ -39,7 +41,9 @@ import java.util.Optional;
 import java.util.Set;

 public class StandardRebalancingPartition implements RebalancingPartition {
-    private final String SWAP_PARTITION_NAME = "rebalance";
+    private static final Logger logger = LoggerFactory.getLogger(StandardRebalancingPartition.class);
+    private static final String SWAP_PARTITION_NAME = "rebalance";

     private final String queueIdentifier;
     private final BlockingSwappablePriorityQueue queue;
     private final LoadBalancedFlowFileQueue flowFileQueue;
@@ -127,11 +131,13 @@ public class StandardRebalancingPartition implements RebalancingPartition {

     private synchronized void rebalanceFromQueue() {
         if (stopped) {
+            logger.debug("Will not rebalance from queue because {} is stopped", this);
             return;
         }

         // If a task is already defined, do nothing. There's already a thread running.
         if (rebalanceTask != null) {
+            logger.debug("Rebalance Task already exists for {}", this);
             return;
         }

@@ -140,6 +146,7 @@ public class StandardRebalancingPartition implements RebalancingPartition {
         final Thread rebalanceThread = new Thread(this.rebalanceTask);
         rebalanceThread.setName("Rebalance queued data for Connection " + queueIdentifier);
         rebalanceThread.start();
+        logger.debug("No Rebalance Task currently exists for {}. Starting new Rebalance Thread {}", this, rebalanceThread);
     }

     @Override

@@ -148,12 +155,16 @@ public class StandardRebalancingPartition implements RebalancingPartition {
             return;
         }

+        logger.debug("Adding {} to Rebalance queue for {}", queueContents, this);
+
         queue.inheritQueueContents(queueContents);
         rebalanceFromQueue();
     }

     @Override
     public void rebalance(final Collection<FlowFileRecord> flowFiles) {
+        logger.debug("Adding {} to Rebalance queue for {}", flowFiles, this);
+
         queue.putAll(flowFiles);
         rebalanceFromQueue();
     }

@@ -163,7 +174,7 @@ public class StandardRebalancingPartition implements RebalancingPartition {
         return queue.packageForRebalance(newPartitionName);
     }

-    private synchronized boolean complete() {
+    private synchronized boolean isComplete() {
         if (!queue.isEmpty()) {
             return false;
         }

@@ -201,7 +212,8 @@ public class StandardRebalancingPartition implements RebalancingPartition {
                 if (polled == null) {
                     flowFileQueue.handleExpiredRecords(expiredRecords);

-                    if (complete()) {
+                    if (isComplete()) {
+                        logger.debug("Rebalance Task completed for {}", this);
                         return;
                     } else {
                         continue;
@@ -217,6 +229,8 @@ public class StandardRebalancingPartition implements RebalancingPartition {

                 flowFileQueue.handleExpiredRecords(expiredRecords);

+                logger.debug("{} Rebalancing {}", this, toDistribute);
+
                 // Transfer all of the FlowFiles that we got back to the FlowFileQueue itself. This will cause the data to be
                 // re-partitioned and binned appropriately. We also then need to ensure that we acknowledge the data from our
                 // own SwappablePriorityQueue to ensure that the sizes are kept in check.
@@ -316,7 +316,7 @@ public class EventDrivenSchedulingAgent extends AbstractSchedulingAgent {
                 // reaching the maximum number of threads. we won't know this until we atomically increment the thread count
                 // on the Schedule State, so we check it here. in this case, we cannot trigger the Processor, as doing so would
                 // result in using more than the maximum number of defined threads
-                scheduleState.decrementActiveThreadCount(sessionFactory);
+                scheduleState.decrementActiveThreadCount();
                 return;
             }

@@ -344,7 +344,7 @@ public class EventDrivenSchedulingAgent extends AbstractSchedulingAgent {
                     }
                 }

-                scheduleState.decrementActiveThreadCount(sessionFactory);
+                scheduleState.decrementActiveThreadCount();
             }
         }

@@ -357,7 +357,7 @@ public class EventDrivenSchedulingAgent extends AbstractSchedulingAgent {
                 // reaching the maximum number of threads. we won't know this until we atomically increment the thread count
                 // on the Schedule State, so we check it here. in this case, we cannot trigger the Processor, as doing so would
                 // result in using more than the maximum number of defined threads
-                scheduleState.decrementActiveThreadCount(sessionFactory);
+                scheduleState.decrementActiveThreadCount();
                 return;
             }

@@ -386,7 +386,7 @@ public class EventDrivenSchedulingAgent extends AbstractSchedulingAgent {
                     }
                 }

-                scheduleState.decrementActiveThreadCount(sessionFactory);
+                scheduleState.decrementActiveThreadCount();
             }
         }
     }

@@ -343,7 +343,7 @@ public final class StandardProcessScheduler implements ProcessScheduler {

             @Override
             public void onTaskComplete() {
-                lifecycleState.decrementActiveThreadCount(null);
+                lifecycleState.decrementActiveThreadCount();
             }
         };

@@ -383,7 +383,7 @@ public final class StandardProcessScheduler implements ProcessScheduler {

             @Override
             public void onTaskComplete() {
-                lifecycleState.decrementActiveThreadCount(null);
+                lifecycleState.decrementActiveThreadCount();
             }
         };

@@ -283,7 +283,7 @@ public class ConnectableTask {
                 logger.error("", e);
             }
         } finally {
-            scheduleState.decrementActiveThreadCount(activeSessionFactory);
+            scheduleState.decrementActiveThreadCount();
             Thread.currentThread().setName(originalThreadName);
         }
     }

@@ -58,7 +58,7 @@ public class ReportingTaskWrapper implements Runnable {
                 }
             }
         } finally {
-            lifecycleState.decrementActiveThreadCount(null);
+            lifecycleState.decrementActiveThreadCount();
         }
     }
 }

@@ -88,7 +88,7 @@ public class StatelessSchedulingAgent implements SchedulingAgent {
             try {
                 taskNode.getReportingTask().onTrigger(taskNode.getReportingContext());
             } finally {
-                scheduleState.decrementActiveThreadCount(null);
+                scheduleState.decrementActiveThreadCount();
             }

         } catch (final Throwable t) {
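Note: every call-site hunk above makes the same mechanical change: decrementActiveThreadCount no longer takes the session factory, because factories now age out of the WeakHashMap instead of being removed on decrement. The surrounding increment/try/finally pattern is unchanged; a sketch of a caller under this commit's LifecycleState API (trigger body elided):

    // Increment registers the factory (weakly) and bumps the thread count; the
    // finally block guarantees the count is decremented even if onTrigger throws.
    void triggerSketch(final LifecycleState lifecycleState, final ActiveProcessSessionFactory sessionFactory) {
        lifecycleState.incrementActiveThreadCount(sessionFactory);
        try {
            // ... invoke the component's onTrigger ...
        } finally {
            lifecycleState.decrementActiveThreadCount();
        }
    }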