mirror of https://github.com/apache/nifi.git
NIFI-8021: Fixed a bug in ConsumeKafka_2_6 and ConsumeKafkaRecord_2_6 where explicit partition assignment caused issues with more than one concurrent task. Also fixed a bug that prevented running more NiFi nodes than there are partitions, because an empty string for the list of partitions was not handled properly.

Signed-off-by: Pierre Villard <pierre.villard.fr@gmail.com>

This closes #4744.
commit 783633cac5
parent ea80dad16a
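The second part of the commit message concerns nodes whose explicit partition list resolves to an empty string (a cluster with more NiFi nodes than Kafka partitions). As a rough illustration of the behaviour the fix implies, not the actual ConsumerPartitionsUtil code, a blank list can be counted as zero assigned partitions rather than treated as malformed; the property name and helper below are made up for the sketch.

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative sketch only: treats a missing or blank partition list as
    // "no partitions assigned on this node". The property name "partitions" and
    // this helper are assumptions, not the real ConsumerPartitionsUtil API.
    public class PartitionListSketch {

        static int countAssignedPartitions(final Map<String, String> properties) {
            final String value = properties.get("partitions"); // hypothetical property name
            if (value == null || value.trim().isEmpty()) {
                return 0;
            }
            return value.split(",").length;
        }

        public static void main(final String[] args) {
            final Map<String, String> props = new HashMap<>();
            props.put("partitions", ""); // a node that was given no partitions
            System.out.println(countAssignedPartitions(props)); // prints 0 rather than failing
        }
    }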
ConsumeKafkaRecord_2_6.java

@@ -344,8 +344,10 @@ public class ConsumeKafkaRecord_2_6 extends AbstractProcessor {
         final ConsumerPool consumerPool = createConsumerPool(context, getLogger());
 
-        final int numAssignedPartitions = ConsumerPartitionsUtil.getPartitionAssignmentCount(context.getAllProperties());
-        if (numAssignedPartitions > 0) {
+        final boolean explicitAssignment = ConsumerPartitionsUtil.isPartitionAssignmentExplicit(context.getAllProperties());
+        if (explicitAssignment) {
+            final int numAssignedPartitions = ConsumerPartitionsUtil.getPartitionAssignmentCount(context.getAllProperties());
+
             // Request from Kafka the number of partitions for the topics that we are consuming from. Then ensure that we have
             // all of the partitions assigned.
             final int partitionCount = consumerPool.getPartitionCount();

ConsumeKafka_2_6.java

@@ -318,8 +318,10 @@ public class ConsumeKafka_2_6 extends AbstractProcessor {
         final ConsumerPool consumerPool = createConsumerPool(context, getLogger());
 
-        final int numAssignedPartitions = ConsumerPartitionsUtil.getPartitionAssignmentCount(context.getAllProperties());
-        if (numAssignedPartitions > 0) {
+        final boolean explicitAssignment = ConsumerPartitionsUtil.isPartitionAssignmentExplicit(context.getAllProperties());
+        if (explicitAssignment) {
+            final int numAssignedPartitions = ConsumerPartitionsUtil.getPartitionAssignmentCount(context.getAllProperties());
+
             // Request from Kafka the number of partitions for the topics that we are consuming from. Then ensure that we have
             // all of the partitions assigned.
             final int partitionCount = consumerPool.getPartitionCount();
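Both processor hunks above make the same change: the partition-coverage check is now gated on whether partition assignment is explicit at all, rather than on whether this node's own list is non-empty. The sketch below restates that gating change with plain parameters and a placeholder verification step; it is a simplification, not the processors' actual code.

    // Simplified stand-in for the gating change; the verification body is a placeholder.
    public class ExplicitAssignmentGate {

        // Old shape: verification only ran when this node had at least one assigned
        // partition, so a node with an empty (but still explicit) list skipped it.
        static void oldGate(final int numAssignedPartitions, final Runnable verifyPartitionCoverage) {
            if (numAssignedPartitions > 0) {
                verifyPartitionCoverage.run();
            }
        }

        // New shape: verification runs whenever assignment is explicit, and the
        // per-node partition count is looked up inside the guarded block.
        static void newGate(final boolean explicitAssignment, final Runnable verifyPartitionCoverage) {
            if (explicitAssignment) {
                verifyPartitionCoverage.run();
            }
        }

        public static void main(final String[] args) {
            final Runnable verify = () -> System.out.println("checking that all partitions are assigned");
            oldGate(0, verify);    // empty list on this node: the old gate silently skips the check
            newGate(true, verify); // the new gate still runs it because assignment is explicit
        }
    }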
ConsumerPool.java

@@ -34,8 +34,8 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.regex.Pattern;
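The import swap above pairs with the constructor changes that follow: once the pool pre-creates one lease per explicitly assigned partition, the number of pooled leases can exceed maxConcurrentLeases, so a capacity-bounded ArrayBlockingQueue no longer fits; that appears to be the reason for moving to an unbounded LinkedBlockingQueue. A minimal, stand-alone demonstration of the difference:

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    // A bounded ArrayBlockingQueue rejects offers beyond its capacity, while a default
    // LinkedBlockingQueue accepts them. Capacity 1 stands in for "maxConcurrentLeases";
    // the extra offers stand in for pre-created per-partition leases.
    public class QueueCapacityDemo {
        public static void main(final String[] args) {
            final BlockingQueue<String> bounded = new ArrayBlockingQueue<>(1);
            System.out.println(bounded.offer("lease-0")); // true
            System.out.println(bounded.offer("lease-1")); // false: capacity exhausted

            final BlockingQueue<String> unbounded = new LinkedBlockingQueue<>();
            System.out.println(unbounded.offer("lease-0")); // true
            System.out.println(unbounded.offer("lease-1")); // true
            System.out.println(unbounded.offer("lease-2")); // true: no fixed capacity
        }
    }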
@@ -102,7 +102,7 @@ public class ConsumerPool implements Closeable {
             final Charset headerCharacterSet,
             final Pattern headerNamePattern,
             final int[] partitionsToConsume) {
-        this.pooledLeases = new ArrayBlockingQueue<>(maxConcurrentLeases);
+        this.pooledLeases = new LinkedBlockingQueue<>();
         this.maxWaitMillis = maxWaitMillis;
         this.logger = logger;
         this.demarcatorBytes = demarcator;
@@ -119,6 +119,7 @@ public class ConsumerPool implements Closeable {
         this.headerNamePattern = headerNamePattern;
         this.separateByKey = separateByKey;
         this.partitionsToConsume = partitionsToConsume;
+        enqueueLeases(partitionsToConsume);
     }
 
     public ConsumerPool(
@@ -136,7 +137,7 @@ public class ConsumerPool implements Closeable {
             final Charset headerCharacterSet,
             final Pattern headerNamePattern,
             final int[] partitionsToConsume) {
-        this.pooledLeases = new ArrayBlockingQueue<>(maxConcurrentLeases);
+        this.pooledLeases = new LinkedBlockingQueue<>();
         this.maxWaitMillis = maxWaitMillis;
         this.logger = logger;
         this.demarcatorBytes = demarcator;
@@ -153,6 +154,7 @@ public class ConsumerPool implements Closeable {
         this.headerNamePattern = headerNamePattern;
         this.separateByKey = separateByKey;
         this.partitionsToConsume = partitionsToConsume;
+        enqueueLeases(partitionsToConsume);
     }
 
     public ConsumerPool(
@@ -171,7 +173,7 @@ public class ConsumerPool implements Closeable {
             final boolean separateByKey,
             final String keyEncoding,
             final int[] partitionsToConsume) {
-        this.pooledLeases = new ArrayBlockingQueue<>(maxConcurrentLeases);
+        this.pooledLeases = new LinkedBlockingQueue<>();
         this.maxWaitMillis = maxWaitMillis;
         this.logger = logger;
         this.demarcatorBytes = null;
@@ -188,6 +190,7 @@ public class ConsumerPool implements Closeable {
         this.separateByKey = separateByKey;
         this.keyEncoding = keyEncoding;
         this.partitionsToConsume = partitionsToConsume;
+        enqueueLeases(partitionsToConsume);
     }
 
     public ConsumerPool(
@@ -206,7 +209,7 @@ public class ConsumerPool implements Closeable {
             final boolean separateByKey,
             final String keyEncoding,
             final int[] partitionsToConsume) {
-        this.pooledLeases = new ArrayBlockingQueue<>(maxConcurrentLeases);
+        this.pooledLeases = new LinkedBlockingQueue<>();
         this.maxWaitMillis = maxWaitMillis;
         this.logger = logger;
         this.demarcatorBytes = null;
@@ -223,6 +226,7 @@ public class ConsumerPool implements Closeable {
         this.separateByKey = separateByKey;
         this.keyEncoding = keyEncoding;
         this.partitionsToConsume = partitionsToConsume;
+        enqueueLeases(partitionsToConsume);
     }
 
     public int getPartitionCount() {
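All four constructors now end by calling enqueueLeases(partitionsToConsume), so when partitions are explicitly assigned the pool is filled up front with one lease per partition. The stand-alone sketch below mirrors that pattern with a bare-bones Lease type in place of SimpleConsumerLease and no real Kafka consumer; it only shows why concurrent tasks drawing from such a pool cannot end up on the same partition.

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    // Pre-populated lease pool sketch: one lease per explicitly assigned partition,
    // created up front instead of on demand.
    public class LeasePoolSketch {

        static final class Lease {
            final int partition;
            Lease(final int partition) {
                this.partition = partition;
            }
        }

        private final BlockingQueue<Lease> pooledLeases = new LinkedBlockingQueue<>();

        LeasePoolSketch(final int[] partitionsToConsume) {
            if (partitionsToConsume != null) {
                for (final int partition : partitionsToConsume) {
                    pooledLeases.add(new Lease(partition));
                }
            }
        }

        // Each concurrent task polls its own lease; every lease is bound to a distinct
        // partition, so two tasks never share one.
        Lease obtain() {
            return pooledLeases.poll(); // null when all leases are checked out
        }

        public static void main(final String[] args) {
            final LeasePoolSketch pool = new LeasePoolSketch(new int[] {0, 1});
            System.out.println(pool.obtain().partition); // 0
            System.out.println(pool.obtain().partition); // 1
            System.out.println(pool.obtain());           // null: no spare leases
        }
    }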
@@ -282,16 +286,8 @@ public class ConsumerPool implements Closeable {
                     consumer.subscribe(topicPattern, lease);
                 }
             } else {
-                final List<TopicPartition> topicPartitions = new ArrayList<>();
-
-                for (final String topic : topics) {
-                    for (final int partition : partitionsToConsume) {
-                        final TopicPartition topicPartition = new TopicPartition(topic, partition);
-                        topicPartitions.add(topicPartition);
-                    }
-                }
-
-                consumer.assign(topicPartitions);
+                logger.debug("Cannot obtain lease to communicate with Kafka. Since partitions are explicitly assigned, cannot create a new lease.");
+                return null;
             }
         }
         lease.setProcessSession(session, processContext);
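With the change above, the lease-obtaining path no longer builds a fresh consumer when partitions are explicitly assigned and every pooled lease is busy; it logs at debug level and returns null. The snippet below sketches how a caller might cope with that null result by simply doing no work until a lease is returned to the pool. The class, method, and pool contents are stand-ins, not the NiFi ProcessContext/ProcessSession API.

    import java.util.Optional;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    // Caller-side sketch: when no lease is available, skip this trigger instead of
    // creating a duplicate consumer for an already-assigned partition.
    public class NullLeaseCaller {

        private final BlockingQueue<String> pooledLeases = new LinkedBlockingQueue<>();

        Optional<String> obtainLease() {
            return Optional.ofNullable(pooledLeases.poll());
        }

        void onTrigger() {
            final Optional<String> lease = obtainLease();
            if (!lease.isPresent()) {
                System.out.println("No lease available; yielding until one is returned to the pool");
                return;
            }
            System.out.println("Consuming with " + lease.get());
        }

        public static void main(final String[] args) {
            new NullLeaseCaller().onTrigger(); // pool is empty here, so the task just yields
        }
    }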
@@ -300,6 +296,32 @@
         return lease;
     }
 
+    private SimpleConsumerLease createConsumerLease(final int partition) {
+        final List<TopicPartition> topicPartitions = new ArrayList<>();
+        for (final String topic : topics) {
+            final TopicPartition topicPartition = new TopicPartition(topic, partition);
+            topicPartitions.add(topicPartition);
+        }
+
+        final Consumer<byte[], byte[]> consumer = createKafkaConsumer();
+        consumerCreatedCountRef.incrementAndGet();
+        consumer.assign(topicPartitions);
+
+        final SimpleConsumerLease lease = new SimpleConsumerLease(consumer);
+        return lease;
+    }
+
+    private void enqueueLeases(final int[] partitionsToConsume) {
+        if (partitionsToConsume == null) {
+            return;
+        }
+
+        for (final int partition : partitionsToConsume) {
+            final SimpleConsumerLease lease = createConsumerLease(partition);
+            pooledLeases.add(lease);
+        }
+    }
+
     /**
      * Exposed as protected method for easier unit testing
      *
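The new createConsumerLease method uses Kafka's manual assignment API (Consumer#assign with explicit TopicPartitions) instead of the group-managed subscribe path. For context, a minimal stand-alone use of that API with the plain Kafka client is shown below; the broker address and topic name are placeholders, and running it requires a reachable broker.

    import java.time.Duration;
    import java.util.Collections;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    // Manual partition assignment with the Kafka client: assign() pins the consumer to
    // exactly the listed partitions, and no consumer-group rebalancing takes place.
    public class ManualAssignExample {
        public static void main(final String[] args) {
            final Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
            props.put("key.deserializer", ByteArrayDeserializer.class.getName());
            props.put("value.deserializer", ByteArrayDeserializer.class.getName());

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.assign(Collections.singletonList(new TopicPartition("example-topic", 0)));
                final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
                System.out.println("Fetched " + records.count() + " records from partition 0");
            }
        }
    }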