diff --git a/nifi-registry/nifi-registry-core/nifi-registry-bundle-utils/src/test/java/org/apache/nifi/registry/bundle/extract/nar/docs/TestJacksonExtensionManifestParser.java b/nifi-registry/nifi-registry-core/nifi-registry-bundle-utils/src/test/java/org/apache/nifi/registry/bundle/extract/nar/docs/TestJacksonExtensionManifestParser.java
index eff01081bc..52a2800865 100644
--- a/nifi-registry/nifi-registry-core/nifi-registry-bundle-utils/src/test/java/org/apache/nifi/registry/bundle/extract/nar/docs/TestJacksonExtensionManifestParser.java
+++ b/nifi-registry/nifi-registry-core/nifi-registry-bundle-utils/src/test/java/org/apache/nifi/registry/bundle/extract/nar/docs/TestJacksonExtensionManifestParser.java
@@ -16,10 +16,16 @@
  */
 package org.apache.nifi.registry.bundle.extract.nar.docs;
 
+import org.apache.nifi.registry.extension.component.manifest.Cardinality;
+import org.apache.nifi.registry.extension.component.manifest.Dependency;
+import org.apache.nifi.registry.extension.component.manifest.DependentValues;
 import org.apache.nifi.registry.extension.component.manifest.Extension;
 import org.apache.nifi.registry.extension.component.manifest.ExtensionManifest;
 import org.apache.nifi.registry.extension.component.manifest.ExtensionType;
+import org.apache.nifi.registry.extension.component.manifest.Property;
 import org.apache.nifi.registry.extension.component.manifest.ProvidedServiceAPI;
+import org.apache.nifi.registry.extension.component.manifest.ResourceDefinition;
+import org.apache.nifi.registry.extension.component.manifest.ResourceType;
 import org.apache.nifi.registry.extension.component.manifest.Restriction;
 import org.junit.Before;
 import org.junit.Test;
@@ -164,6 +170,83 @@ public class TestJacksonExtensionManifestParser {
 
     }
 
+    @Test
+    public void testDocsWithDependentProperties() throws IOException {
+        final ExtensionManifest extensionManifest = parse("src/test/resources/descriptors/extension-manifest-kafka-2-6-nar.xml");
+        assertNotNull(extensionManifest);
+        assertEquals("1.16.0-SNAPSHOT", extensionManifest.getSystemApiVersion());
+
+        final List<Extension> extensionDetails = extensionManifest.getExtensions();
+
+        final Extension consumeKafkaExtension = extensionDetails.stream()
+                .filter(e -> e.getName().equals("org.apache.nifi.processors.kafka.pubsub.ConsumeKafka_2_6"))
+                .findFirst()
+                .orElse(null);
+
+        assertNotNull(consumeKafkaExtension);
+        assertEquals(ExtensionType.PROCESSOR, consumeKafkaExtension.getType());
+
+        final List<Property> properties = consumeKafkaExtension.getProperties();
+        assertNotNull(properties);
+
+        final Property maxUncommittedOffsetWaitProperty = properties.stream()
+                .filter(p -> p.getName().equals("max-uncommit-offset-wait"))
+                .findFirst()
+                .orElse(null);
+        assertNotNull(maxUncommittedOffsetWaitProperty);
+
+        final List<Dependency> dependencies = maxUncommittedOffsetWaitProperty.getDependencies();
+        assertNotNull(dependencies);
+        assertEquals(1, dependencies.size());
+
+        final Dependency dependency = dependencies.get(0);
+        assertEquals("Commit Offsets", dependency.getPropertyName());
+        assertEquals("Commit Offsets", dependency.getPropertyDisplayName());
+
+        final DependentValues dependentValues = dependency.getDependentValues();
+        assertNotNull(dependentValues);
+
+        final List<String> dependentValuesList = dependentValues.getValues();
+        assertNotNull(dependentValuesList);
+        assertEquals(1, dependentValuesList.size());
+        assertEquals("true", dependentValuesList.get(0));
+    }
+
+    @Test
+    public void testDocsWithResourceDefinitions() throws IOException {
parse("src/test/resources/descriptors/extension-manifest-kafka-2-6-nar.xml"); + assertNotNull(extensionManifest); + assertEquals("1.16.0-SNAPSHOT", extensionManifest.getSystemApiVersion()); + + final List extensionDetails = extensionManifest.getExtensions(); + + final Extension consumeKafkaExtension = extensionDetails.stream() + .filter(e -> e.getName().equals("org.apache.nifi.processors.kafka.pubsub.ConsumeKafka_2_6")) + .findFirst() + .orElse(null); + + assertNotNull(consumeKafkaExtension); + assertEquals(ExtensionType.PROCESSOR, consumeKafkaExtension.getType()); + + final List properties = consumeKafkaExtension.getProperties(); + assertNotNull(properties); + + final Property keytabProperty = properties.stream() + .filter(p -> p.getName().equals("sasl.kerberos.keytab")) + .findFirst() + .orElse(null); + assertNotNull(keytabProperty); + + final ResourceDefinition resourceDefinition = keytabProperty.getResourceDefinition(); + assertNotNull(resourceDefinition); + assertEquals(Cardinality.SINGLE, resourceDefinition.getCardinality()); + + final List resourceTypes = resourceDefinition.getResourceTypes(); + assertNotNull(resourceTypes); + assertEquals(1, resourceTypes.size()); + assertEquals(ResourceType.FILE, resourceTypes.get(0)); + } + private ExtensionManifest parse(final String file) throws IOException { try (final InputStream inputStream = new FileInputStream(file)) { return parser.parse(inputStream); diff --git a/nifi-registry/nifi-registry-core/nifi-registry-bundle-utils/src/test/resources/descriptors/extension-manifest-kafka-2-6-nar.xml b/nifi-registry/nifi-registry-core/nifi-registry-bundle-utils/src/test/resources/descriptors/extension-manifest-kafka-2-6-nar.xml new file mode 100644 index 0000000000..382df76096 --- /dev/null +++ b/nifi-registry/nifi-registry-core/nifi-registry-bundle-utils/src/test/resources/descriptors/extension-manifest-kafka-2-6-nar.xml @@ -0,0 +1,3176 @@ + + 1.16.0-SNAPSHOT + + + org.apache.nifi.processors.kafka.pubsub.ConsumeKafka_2_6 + PROCESSOR + + Consumes messages from Apache Kafka specifically built against the Kafka 2.6 Consumer API. The + complementary NiFi processor for sending messages is PublishKafka_2_6. + + + Kafka + Get + Ingest + Ingress + Topic + PubSub + Consume + 2.6 + + + + bootstrap.servers + Kafka Brokers + A comma-separated list of known Kafka Brokers in the format <host>:<port> + + localhost:9092 + + true + false + true + VARIABLE_REGISTRY + false + false + + + + topic + Topic Name(s) + The name of the Kafka Topic(s) to pull from. More than one can be supplied if comma + separated. + + + + true + false + true + VARIABLE_REGISTRY + false + false + + + + topic_type + Topic Name Format + Specifies whether the Topic(s) provided are a comma separated list of names or a single + regular expression + + names + + + names + names + Topic is a full topic name or comma separated list of names + + + pattern + pattern + Topic is a regex using the Java Pattern syntax + + + true + false + false + NONE + false + false + + + + group.id + Group ID + A Group ID is used to identify consumers that are within the same consumer group. + Corresponds to Kafka's 'group.id' property. + + + + true + false + true + VARIABLE_REGISTRY + false + false + + + + Commit Offsets + Commit Offsets + Specifies whether or not this Processor should commit the offsets to Kafka after + receiving messages. Typically, we want this value set to true so that messages that are received + are not duplicated. 
+                        However, in certain scenarios, we may want to avoid committing the offsets,
+                        that the data can be processed and later acknowledged by PublishKafkaRecord in order to provide
+                        Exactly Once semantics. See Processor's Usage / Additional Details for more information.
+                    </description>
+                    <defaultValue>true</defaultValue>
+                    <allowableValues>
+                        <allowableValue>
+                            <displayName>true</displayName>
+                            <value>true</value>
+                        </allowableValue>
+                        <allowableValue>
+                            <displayName>false</displayName>
+                            <value>false</value>
+                        </allowableValue>
+                    </allowableValues>
+                    <required>false</required>
+                    <sensitive>false</sensitive>
+                    <expressionLanguageSupported>false</expressionLanguageSupported>
+                    <expressionLanguageScope>NONE</expressionLanguageScope>
+                    <dynamicallyModifiesClasspath>false</dynamicallyModifiesClasspath>
+                    <dynamic>false</dynamic>
+                </property>
+                <property>
+                    <name>max-uncommit-offset-wait</name>
+                    <displayName>Max Uncommitted Time</displayName>
+                    <description>Specifies the maximum amount of time allowed to pass before offsets must be committed.
+                        This value impacts how often offsets will be committed. Committing offsets less often increases
+                        throughput but also increases the window of potential data duplication in the event of a
+                        rebalance or JVM restart between commits. This value is also related to maximum poll records and
+                        the use of a message demarcator. When using a message demarcator we can have far more
+                        uncommitted messages than when we're not as there is much less for us to keep track of in
+                        memory.
+                    </description>
+                    <defaultValue>1 secs</defaultValue>
+                    <required>false</required>
+                    <sensitive>false</sensitive>
+                    <expressionLanguageSupported>false</expressionLanguageSupported>
+                    <expressionLanguageScope>NONE</expressionLanguageScope>
+                    <dynamicallyModifiesClasspath>false</dynamicallyModifiesClasspath>
+                    <dynamic>false</dynamic>
+                    <dependencies>
+                        <dependency>
+                            <propertyName>Commit Offsets</propertyName>
+                            <propertyDisplayName>Commit Offsets</propertyDisplayName>
+                            <dependentValues>
+                                <dependentValue>true</dependentValue>
+                            </dependentValues>
+                        </dependency>
+                    </dependencies>
+                </property>
+                <property>
+                    <name>honor-transactions</name>
+                    <displayName>Honor Transactions</displayName>
+                    <description>Specifies whether or not NiFi should honor transactional guarantees when communicating
+                        with Kafka. If false, the Processor will use an "isolation level" of read_uncomitted. This means
+                        that messages will be received as soon as they are written to Kafka but will be pulled, even if
+                        the producer cancels the transactions. If this value is true, NiFi will not receive any messages
+                        for which the producer's transaction was canceled, but this can result in some latency since the
+                        consumer must wait for the producer to finish its entire transaction instead of pulling as the
+                        messages become available.
+                    </description>
+                    <defaultValue>true</defaultValue>
+                    <allowableValues>
+                        <allowableValue>
+                            <displayName>true</displayName>
+                            <value>true</value>
+                        </allowableValue>
+                        <allowableValue>
+                            <displayName>false</displayName>
+                            <value>false</value>
+                        </allowableValue>
+                    </allowableValues>
+                    <required>true</required>
+                    <sensitive>false</sensitive>
+                    <expressionLanguageSupported>false</expressionLanguageSupported>
+                    <expressionLanguageScope>NONE</expressionLanguageScope>
+                    <dynamicallyModifiesClasspath>false</dynamicallyModifiesClasspath>
+                    <dynamic>false</dynamic>
+                </property>
+                <property>
+                    <name>message-demarcator</name>
+                    <displayName>Message Demarcator</displayName>
+                    <description>Since KafkaConsumer receives messages in batches, you have an option to output
+                        FlowFiles which contains all Kafka messages in a single batch for a given topic and partition
+                        and this property allows you to provide a string (interpreted as UTF-8) to use for demarcating
+                        apart multiple Kafka messages. This is an optional property and if not provided each Kafka
+                        message received will result in a single FlowFile which time it is triggered. To enter special
+                        character such as 'new line' use CTRL+Enter or Shift+Enter depending on the OS
+                    </description>
+                    <required>false</required>
+                    <sensitive>false</sensitive>
+                    <expressionLanguageSupported>true</expressionLanguageSupported>
+                    <expressionLanguageScope>VARIABLE_REGISTRY</expressionLanguageScope>
+                    <dynamicallyModifiesClasspath>false</dynamicallyModifiesClasspath>
+                    <dynamic>false</dynamic>
+                </property>
+                <property>
+                    <name>separate-by-key</name>
+                    <displayName>Separate By Key</displayName>
+                    <description>If true, and the &lt;Message Demarcator&gt; property is set, two messages will only be
+                        added to the same FlowFile if both of the Kafka Messages have identical keys.
+                    </description>
+                    <defaultValue>false</defaultValue>
+                    <allowableValues>
+                        <allowableValue>
+                            <displayName>true</displayName>
+                            <value>true</value>
+                        </allowableValue>
+                        <allowableValue>
+                            <displayName>false</displayName>
+                            <value>false</value>
+                        </allowableValue>
+                    </allowableValues>
+                    <required>false</required>
+                    <sensitive>false</sensitive>
+                    <expressionLanguageSupported>false</expressionLanguageSupported>
+                    <expressionLanguageScope>NONE</expressionLanguageScope>
+                    <dynamicallyModifiesClasspath>false</dynamicallyModifiesClasspath>
+                    <dynamic>false</dynamic>
+                </property>
+                <property>
+                    <name>security.protocol</name>
+                    <displayName>Security Protocol</displayName>
+                    <description>Protocol used to communicate with brokers. Corresponds to Kafka's 'security.protocol'
+                        property.
+                    </description>
+                    <defaultValue>PLAINTEXT</defaultValue>
+                    <allowableValues>
+                        <allowableValue>
+                            <displayName>PLAINTEXT</displayName>
+                            <value>PLAINTEXT</value>
+                            <description>PLAINTEXT</description>
+                        </allowableValue>
+                        <allowableValue>
+                            <displayName>SSL</displayName>
+                            <value>SSL</value>
+                            <description>SSL</description>
+                        </allowableValue>
+                        <allowableValue>
+                            <displayName>SASL_PLAINTEXT</displayName>
+                            <value>SASL_PLAINTEXT</value>
+                            <description>SASL_PLAINTEXT</description>
+                        </allowableValue>
+                        <allowableValue>
+                            <displayName>SASL_SSL</displayName>
+                            <value>SASL_SSL</value>
+                            <description>SASL_SSL</description>
+                        </allowableValue>
+                    </allowableValues>
+                    <required>true</required>
+                    <sensitive>false</sensitive>
+                    <expressionLanguageSupported>false</expressionLanguageSupported>
+                    <expressionLanguageScope>NONE</expressionLanguageScope>
+                    <dynamicallyModifiesClasspath>false</dynamicallyModifiesClasspath>
+                    <dynamic>false</dynamic>
+                </property>
+                <property>
+                    <name>sasl.mechanism</name>
+                    <displayName>SASL Mechanism</displayName>
+                    <description>The SASL mechanism to use for authentication. Corresponds to Kafka's 'sasl.mechanism'
+                        property.
+                    </description>
+                    <defaultValue>GSSAPI</defaultValue>
+                    <allowableValues>
+                        <allowableValue>
+                            <displayName>GSSAPI</displayName>
+                            <value>GSSAPI</value>
+                            <description>The mechanism for authentication via Kerberos.
+                                The principal and keytab must be
+                                provided to the processor by using a Keytab Credential service, or by specifying the
+                                properties directly in the processor.
+                            </description>
+                        </allowableValue>
+                        <allowableValue>
+                            <displayName>PLAIN</displayName>
+                            <value>PLAIN</value>
+                            <description>The mechanism for authentication via username and password. The username and
+                                password properties must be populated when using this mechanism.
+                            </description>
+                        </allowableValue>
+                        <allowableValue>
+                            <displayName>SCRAM-SHA-256</displayName>
+                            <value>SCRAM-SHA-256</value>
+                            <description>The Salted Challenge Response Authentication Mechanism using SHA-256. The
+                                username and password properties must be set when using this mechanism.
+                            </description>
+                        </allowableValue>
+                        <allowableValue>
+                            <displayName>SCRAM-SHA-512</displayName>
+                            <value>SCRAM-SHA-512</value>
+                            <description>The Salted Challenge Response Authentication Mechanism using SHA-512. The
+                                username and password properties must be set when using this mechanism.
+                            </description>
+                        </allowableValue>
+                    </allowableValues>
+                    <required>true</required>
+                    <sensitive>false</sensitive>
+                    <expressionLanguageSupported>false</expressionLanguageSupported>
+                    <expressionLanguageScope>NONE</expressionLanguageScope>
+                    <dynamicallyModifiesClasspath>false</dynamicallyModifiesClasspath>
+                    <dynamic>false</dynamic>
+                </property>
+                <property>
+                    <name>kerberos-credentials-service</name>
+                    <displayName>Kerberos Credentials Service</displayName>
+                    <description>Specifies the Kerberos Credentials Controller Service that should be used for
+                        authenticating with Kerberos
+                    </description>
+                    <controllerServiceDefinition>
+                        <className>org.apache.nifi.kerberos.KerberosCredentialsService</className>
+                        <groupId>org.apache.nifi</groupId>
+                        <artifactId>nifi-standard-services-api-nar</artifactId>
+                        <version>1.16.0-SNAPSHOT</version>
+                    </controllerServiceDefinition>
+                    <required>false</required>
+                    <sensitive>false</sensitive>
+                    <expressionLanguageSupported>false</expressionLanguageSupported>
+                    <expressionLanguageScope>NONE</expressionLanguageScope>
+                    <dynamicallyModifiesClasspath>false</dynamicallyModifiesClasspath>
+                    <dynamic>false</dynamic>
+                </property>
+                <property>
+                    <name>kerberos-user-service</name>
+                    <displayName>Kerberos User Service</displayName>
+                    <description>Specifies the Kerberos User Controller Service that should be used for authenticating
+                        with Kerberos
+                    </description>
+                    <controllerServiceDefinition>
+                        <className>org.apache.nifi.kerberos.SelfContainedKerberosUserService</className>
+                        <groupId>org.apache.nifi</groupId>
+                        <artifactId>nifi-standard-services-api-nar</artifactId>
+                        <version>1.16.0-SNAPSHOT</version>
+                    </controllerServiceDefinition>
+                    <required>false</required>
+                    <sensitive>false</sensitive>
+                    <expressionLanguageSupported>false</expressionLanguageSupported>
+                    <expressionLanguageScope>NONE</expressionLanguageScope>
+                    <dynamicallyModifiesClasspath>false</dynamicallyModifiesClasspath>
+                    <dynamic>false</dynamic>
+                </property>
+                <property>
+                    <name>sasl.kerberos.service.name</name>
+                    <displayName>Kerberos Service Name</displayName>
+                    <description>The service name that matches the primary name of the Kafka server configured in the
+                        broker JAAS file.This can be defined either in Kafka's JAAS config or in Kafka's config.
+                        Corresponds to Kafka's 'security.protocol' property.It is ignored unless one of the SASL options
+                        of the &lt;Security Protocol&gt; are selected.
+                    </description>
+                    <required>false</required>
+                    <sensitive>false</sensitive>
+                    <expressionLanguageSupported>true</expressionLanguageSupported>
+                    <expressionLanguageScope>VARIABLE_REGISTRY</expressionLanguageScope>
+                    <dynamicallyModifiesClasspath>false</dynamicallyModifiesClasspath>
+                    <dynamic>false</dynamic>
+                </property>
+                <property>
+                    <name>sasl.kerberos.principal</name>
+                    <displayName>Kerberos Principal</displayName>
+                    <description>The Kerberos principal that will be used to connect to brokers. If not set, it is
+                        expected to set a JAAS configuration file in the JVM properties defined in the bootstrap.conf
+                        file. This principal will be set into 'sasl.jaas.config' Kafka's property.
+                    </description>
+                    <required>false</required>
+                    <sensitive>false</sensitive>
+                    <expressionLanguageSupported>true</expressionLanguageSupported>
+                    <expressionLanguageScope>VARIABLE_REGISTRY</expressionLanguageScope>
+                    <dynamicallyModifiesClasspath>false</dynamicallyModifiesClasspath>
+                    <dynamic>false</dynamic>
+                </property>
+                <property>
+                    <name>sasl.kerberos.keytab</name>
+                    <displayName>Kerberos Keytab</displayName>
+                    <description>The Kerberos keytab that will be used to connect to brokers. If not set, it is expected
+                        to set a JAAS configuration file in the JVM properties defined in the bootstrap.conf file. This
+                        principal will be set into 'sasl.jaas.config' Kafka's property.
+                    </description>
+                    <required>false</required>
+                    <sensitive>false</sensitive>
+                    <expressionLanguageSupported>true</expressionLanguageSupported>
+                    <expressionLanguageScope>VARIABLE_REGISTRY</expressionLanguageScope>
+                    <dynamicallyModifiesClasspath>false</dynamicallyModifiesClasspath>
+                    <dynamic>false</dynamic>
+                    <resourceDefinition>
+                        <cardinality>SINGLE</cardinality>
+                        <resourceTypes>
+                            <resourceType>FILE</resourceType>
+                        </resourceTypes>
+                    </resourceDefinition>
+                </property>
+                <property>
+                    <name>sasl.username</name>
+                    <displayName>Username</displayName>
+                    <description>The username when the SASL Mechanism is PLAIN or SCRAM-SHA-256/SCRAM-SHA-512
+                    </description>
+                    <required>false</required>
+                    <sensitive>false</sensitive>
+                    <expressionLanguageSupported>true</expressionLanguageSupported>
+                    <expressionLanguageScope>VARIABLE_REGISTRY</expressionLanguageScope>
+                    <dynamicallyModifiesClasspath>false</dynamicallyModifiesClasspath>
+                    <dynamic>false</dynamic>
+                </property>
+                <property>
+                    <name>sasl.password</name>
+                    <displayName>Password</displayName>
+                    <description>The password for the given username when the SASL Mechanism is PLAIN or
+                        SCRAM-SHA-256/SCRAM-SHA-512
+                    </description>
+                    <required>false</required>
+                    <sensitive>true</sensitive>
+                    <expressionLanguageSupported>true</expressionLanguageSupported>
+                    <expressionLanguageScope>VARIABLE_REGISTRY</expressionLanguageScope>
+                    <dynamicallyModifiesClasspath>false</dynamicallyModifiesClasspath>
+                    <dynamic>false</dynamic>
+                </property>
+                <property>
+                    <name>sasl.token.auth</name>
+                    <displayName>Token Auth</displayName>
+                    <description>When SASL Mechanism is SCRAM-SHA-256 or SCRAM-SHA-512, this property indicates if token
+                        authentication should be used.
+                    </description>
+                    <defaultValue>false</defaultValue>
+                    <allowableValues>
+                        <allowableValue>
+                            <displayName>true</displayName>
+                            <value>true</value>
+                        </allowableValue>
+                        <allowableValue>
+                            <displayName>false</displayName>
+                            <value>false</value>
+                        </allowableValue>
+                    </allowableValues>
+                    <required>false</required>
+                    <sensitive>false</sensitive>
+                    <expressionLanguageSupported>false</expressionLanguageSupported>
+                    <expressionLanguageScope>NONE</expressionLanguageScope>
+                    <dynamicallyModifiesClasspath>false</dynamicallyModifiesClasspath>
+                    <dynamic>false</dynamic>
+                </property>
+                <property>
+                    <name>ssl.context.service</name>
+                    <displayName>SSL Context Service</displayName>
+                    <description>Specifies the SSL Context Service to use for communicating with Kafka.
+ + + org.apache.nifi.ssl.SSLContextService + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + false + false + false + NONE + false + false + + + + key-attribute-encoding + Key Attribute Encoding + FlowFiles that are emitted have an attribute named 'kafka.key'. This property dictates + how the value of the attribute should be encoded. + + utf-8 + + + UTF-8 Encoded + utf-8 + The key is interpreted as a UTF-8 Encoded string. + + + Hex Encoded + hex + The key is interpreted as arbitrary binary data and is encoded using + hexadecimal characters with uppercase letters + + + + true + false + false + NONE + false + false + + + + auto.offset.reset + Offset Reset + Allows you to manage the condition when there is no initial offset in Kafka or if the + current offset does not exist any more on the server (e.g. because that data has been deleted). + Corresponds to Kafka's 'auto.offset.reset' property. + + latest + + + earliest + earliest + Automatically reset the offset to the earliest offset + + + latest + latest + Automatically reset the offset to the latest offset + + + none + none + Throw exception to the consumer if no previous offset is found for the + consumer's group + + + + true + false + false + NONE + false + false + + + + message-header-encoding + Message Header Encoding + Any message header that is found on a Kafka message will be added to the outbound + FlowFile as an attribute. This property indicates the Character Encoding to use for + deserializing the headers. + + UTF-8 + + false + false + false + NONE + false + false + + + + header-name-regex + Headers to Add as Attributes (Regex) + A Regular Expression that is matched against all message headers. Any message header + whose name matches the regex will be added to the FlowFile as an Attribute. If not specified, no + Header values will be added as FlowFile attributes. If two messages have a different value for + the same header and that header is selected by the provided regex, then those two messages must + be added to different FlowFiles. As a result, users should be cautious about using a regex like + ".*" if messages are expected to have header values that are unique per message, such as an + identifier or timestamp, because it will prevent NiFi from bundling the messages together + efficiently. + + + + false + false + false + NONE + false + false + + + + max.poll.records + Max Poll Records + Specifies the maximum number of records Kafka should return in a single poll. + + 10000 + + false + false + false + NONE + false + false + + + + Communications Timeout + Communications Timeout + Specifies the timeout that the consumer should use when communicating with the Kafka + Broker + + 60 secs + + true + false + false + NONE + false + false + + + + + + The name of a Kafka configuration property. + The value of a given Kafka configuration property. + These properties will be added on the Kafka configuration after loading any provided + configuration properties. In the event a dynamic property represents a property that was already + set, its value will be ignored and WARN message logged. For the list of available Kafka + properties please refer to: http://kafka.apache.org/documentation.html#configuration. + + false + VARIABLE_REGISTRY + + + + + success + FlowFiles received from Kafka. Depending on demarcation strategy it is a flow file per + message or a bundle of messages grouped by topic and partition. 
+ + false + + + + + + + kafka.count + The number of messages written if more than one + + + kafka.key + The key of message if present and if single message. How the key is encoded depends on + the value of the 'Key Attribute Encoding' property. + + + + kafka.offset + The offset of the message in the partition of the topic. + + + kafka.timestamp + The timestamp of the message in the partition of the topic. + + + kafka.partition + The partition of the topic the message or message bundle is from + + + kafka.topic + The topic the message or message bundle is from + + + + + INPUT_FORBIDDEN + + + + + org.apache.nifi.processors.kafka.pubsub.PublishKafka_2_6 + PROCESSOR + + Sends the contents of a FlowFile as a message to Apache Kafka using the Kafka 2.6 Producer + API.The messages to send may be individual FlowFiles or may be delimited, using a user-specified + delimiter, such as a new-line. The complementary NiFi processor for fetching messages is + ConsumeKafka_2_6. + + + Apache + Kafka + Put + Send + Message + PubSub + 2.6 + + + + bootstrap.servers + Kafka Brokers + A comma-separated list of known Kafka Brokers in the format <host>:<port> + + localhost:9092 + + true + false + true + VARIABLE_REGISTRY + false + false + + + + topic + Topic Name + The name of the Kafka Topic to publish to. + + + true + false + true + FLOWFILE_ATTRIBUTES + false + false + + + + use-transactions + Use Transactions + Specifies whether or not NiFi should provide Transactional guarantees when + communicating with Kafka. If there is a problem sending data to Kafka, and this property is set + to false, then the messages that have already been sent to Kafka will continue on and be + delivered to consumers. If this is set to true, then the Kafka transaction will be rolled back + so that those messages are not available to consumers. Setting this to true requires that the + <Delivery Guarantee> property be set to "Guarantee Replicated Delivery." + + true + + + true + true + + + + false + false + + + + true + false + false + NONE + false + false + + + + transactional-id-prefix + Transactional Id Prefix + When Use Transaction is set to true, KafkaProducer config 'transactional.id' will be a + generated UUID and will be prefixed with this string. + + + + false + false + true + VARIABLE_REGISTRY + false + false + + + + use-transactions + Use Transactions + + true + + + + + + message-demarcator + Message Demarcator + Specifies the string (interpreted as UTF-8) to use for demarcating multiple messages + within a single FlowFile. If not specified, the entire content of the FlowFile will be used as a + single message. If specified, the contents of the FlowFile will be split on this delimiter and + each section sent as a separate Kafka message. To enter special character such as 'new line' use + CTRL+Enter or Shift+Enter, depending on your OS. + + + + false + false + true + FLOWFILE_ATTRIBUTES + false + false + + + + Failure Strategy + Failure Strategy + Dictates how the processor handles a FlowFile if it is unable to publish the data to + Kafka + + Route to Failure + + + Route to Failure + Route to Failure + When unable to publish a FlowFile to Kafka, the FlowFile will be routed to the + 'failure' relationship. + + + + Rollback + Rollback + When unable to publish a FlowFile to Kafka, the FlowFile will be placed back on + the top of its queue so that it will be the next FlowFile tried again. 
For dataflows + where ordering of FlowFiles is important, this strategy can be used along with ensuring + that the each processor in the dataflow uses only a single Concurrent Task. + + + + true + false + false + NONE + false + false + + + + acks + Delivery Guarantee + Specifies the requirement for guaranteeing that a message is sent to Kafka. Corresponds + to Kafka's 'acks' property. + + 0 + + + Best Effort + 0 + FlowFile will be routed to success after successfully writing the content to a + Kafka node, without waiting for a response. This provides the best performance but may + result in data loss. + + + + Guarantee Single Node Delivery + 1 + FlowFile will be routed to success if the message is received by a single Kafka + node, whether or not it is replicated. This is faster than <Guarantee Replicated + Delivery> but can result in data loss if a Kafka node crashes + + + + Guarantee Replicated Delivery + all + FlowFile will be routed to failure unless the message is replicated to the + appropriate number of Kafka Nodes according to the Topic configuration + + + + true + false + false + NONE + false + false + + + + attribute-name-regex + Attributes to Send as Headers (Regex) + A Regular Expression that is matched against all FlowFile attribute names. Any + attribute whose name matches the regex will be added to the Kafka messages as a Header. If not + specified, no FlowFile attributes will be added as headers. + + + + false + false + false + NONE + false + false + + + + message-header-encoding + Message Header Encoding + For any attribute that is added as a message header, as configured via the <Attributes + to Send as Headers> property, this property indicates the Character Encoding to use for + serializing the headers. + + UTF-8 + + false + false + false + NONE + false + false + + + + security.protocol + Security Protocol + Protocol used to communicate with brokers. Corresponds to Kafka's 'security.protocol' + property. + + PLAINTEXT + + + PLAINTEXT + PLAINTEXT + PLAINTEXT + + + SSL + SSL + SSL + + + SASL_PLAINTEXT + SASL_PLAINTEXT + SASL_PLAINTEXT + + + SASL_SSL + SASL_SSL + SASL_SSL + + + true + false + false + NONE + false + false + + + + sasl.mechanism + SASL Mechanism + The SASL mechanism to use for authentication. Corresponds to Kafka's 'sasl.mechanism' + property. + + GSSAPI + + + GSSAPI + GSSAPI + The mechanism for authentication via Kerberos. The principal and keytab must be + provided to the processor by using a Keytab Credential service, or by specifying the + properties directly in the processor. + + + + PLAIN + PLAIN + The mechanism for authentication via username and password. The username and + password properties must be populated when using this mechanism. + + + + SCRAM-SHA-256 + SCRAM-SHA-256 + The Salted Challenge Response Authentication Mechanism using SHA-256. The + username and password properties must be set when using this mechanism. + + + + SCRAM-SHA-512 + SCRAM-SHA-512 + The Salted Challenge Response Authentication Mechanism using SHA-512. The + username and password properties must be set when using this mechanism. 
+ + + + true + false + false + NONE + false + false + + + + kerberos-credentials-service + Kerberos Credentials Service + Specifies the Kerberos Credentials Controller Service that should be used for + authenticating with Kerberos + + + + org.apache.nifi.kerberos.KerberosCredentialsService + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + false + false + false + NONE + false + false + + + + kerberos-user-service + Kerberos User Service + Specifies the Kerberos User Controller Service that should be used for authenticating + with Kerberos + + + + org.apache.nifi.kerberos.SelfContainedKerberosUserService + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + false + false + false + NONE + false + false + + + + sasl.kerberos.service.name + Kerberos Service Name + The service name that matches the primary name of the Kafka server configured in the + broker JAAS file.This can be defined either in Kafka's JAAS config or in Kafka's config. + Corresponds to Kafka's 'security.protocol' property.It is ignored unless one of the SASL options + of the <Security Protocol> are selected. + + + + false + false + true + VARIABLE_REGISTRY + false + false + + + + sasl.kerberos.principal + Kerberos Principal + The Kerberos principal that will be used to connect to brokers. If not set, it is + expected to set a JAAS configuration file in the JVM properties defined in the bootstrap.conf + file. This principal will be set into 'sasl.jaas.config' Kafka's property. + + + + false + false + true + VARIABLE_REGISTRY + false + false + + + + sasl.kerberos.keytab + Kerberos Keytab + The Kerberos keytab that will be used to connect to brokers. If not set, it is expected + to set a JAAS configuration file in the JVM properties defined in the bootstrap.conf file. This + principal will be set into 'sasl.jaas.config' Kafka's property. + + + + false + false + true + VARIABLE_REGISTRY + false + false + + SINGLE + + FILE + + + + + sasl.username + Username + The username when the SASL Mechanism is PLAIN or SCRAM-SHA-256/SCRAM-SHA-512 + + + + false + false + true + VARIABLE_REGISTRY + false + false + + + + sasl.password + Password + The password for the given username when the SASL Mechanism is PLAIN or + SCRAM-SHA-256/SCRAM-SHA-512 + + + + false + true + true + VARIABLE_REGISTRY + false + false + + + + sasl.token.auth + Token Auth + When SASL Mechanism is SCRAM-SHA-256 or SCRAM-SHA-512, this property indicates if token + authentication should be used. + + false + + + true + true + + + + false + false + + + + false + false + false + NONE + false + false + + + + ssl.context.service + SSL Context Service + Specifies the SSL Context Service to use for communicating with Kafka. + + + org.apache.nifi.ssl.SSLContextService + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + false + false + false + NONE + false + false + + + + kafka-key + Kafka Key + The Key to use for the Message. If not specified, the flow file attribute 'kafka.key' + is used as the message key, if it is present.Beware that setting Kafka key and demarcating at + the same time may potentially lead to many Kafka messages with the same key.Normally this is not + a problem as Kafka does not enforce or assume message and key uniqueness. Still, setting the + demarcator and Kafka key at the same time poses a risk of data loss on Kafka. During a topic + compaction on Kafka, messages will be deduplicated based on this key. 
+ + + + false + false + true + FLOWFILE_ATTRIBUTES + false + false + + + + key-attribute-encoding + Key Attribute Encoding + FlowFiles that are emitted have an attribute named 'kafka.key'. This property dictates + how the value of the attribute should be encoded. + + utf-8 + + + UTF-8 Encoded + utf-8 + The key is interpreted as a UTF-8 Encoded string. + + + Hex Encoded + hex + The key is interpreted as arbitrary binary data that is encoded using + hexadecimal characters with uppercase letters. + + + + true + false + false + NONE + false + false + + + + max.request.size + Max Request Size + The maximum size of a request in bytes. Corresponds to Kafka's 'max.request.size' + property and defaults to 1 MB (1048576). + + 1 MB + + true + false + false + NONE + false + false + + + + ack.wait.time + Acknowledgment Wait Time + After sending a message to Kafka, this indicates the amount of time that we are willing + to wait for a response from Kafka. If Kafka does not acknowledge the message within this time + period, the FlowFile will be routed to 'failure'. + + 5 secs + + true + false + false + NONE + false + false + + + + max.block.ms + Max Metadata Wait Time + The amount of time publisher will wait to obtain metadata or wait for the buffer to + flush during the 'send' call before failing the entire 'send' call. Corresponds to Kafka's + 'max.block.ms' property + + 5 sec + + true + false + true + VARIABLE_REGISTRY + false + false + + + + partitioner.class + Partitioner class + Specifies which class to use to compute a partition id for a message. Corresponds to + Kafka's 'partitioner.class' property. + + org.apache.kafka.clients.producer.internals.DefaultPartitioner + + + RoundRobinPartitioner + org.apache.nifi.processors.kafka.pubsub.Partitioners$RoundRobinPartitioner + Messages will be assigned partitions in a round-robin fashion, sending the + first message to Partition 1, the next Partition to Partition 2, and so on, wrapping as + necessary. + + + + DefaultPartitioner + org.apache.kafka.clients.producer.internals.DefaultPartitioner + Messages will be assigned to random partitions. + + + Expression Language Partitioner + org.apache.nifi.processors.kafka.pubsub.Partitioners$ExpressionLanguagePartitioner + + Interprets the <Partition> property as Expression Language that will be + evaluated against each FlowFile. This Expression will be evaluated once against the + FlowFile, so all Records in a given FlowFile will go to the same partition. + + + + false + false + false + NONE + false + false + + + + partition + Partition + Specifies which Partition Records will go to. + + + false + false + true + FLOWFILE_ATTRIBUTES + false + false + + + + compression.type + Compression Type + This parameter allows you to specify the compression codec for all data generated by + this producer. + + none + + + none + none + + + + gzip + gzip + + + + snappy + snappy + + + + lz4 + lz4 + + + + true + false + false + NONE + false + false + + + + + + The name of a Kafka configuration property. + The value of a given Kafka configuration property. + These properties will be added on the Kafka configuration after loading any provided + configuration properties. In the event a dynamic property represents a property that was already + set, its value will be ignored and WARN message logged. For the list of available Kafka + properties please refer to: http://kafka.apache.org/documentation.html#configuration. + + false + VARIABLE_REGISTRY + + + + + success + FlowFiles for which all content was sent to Kafka. 
+ false + + + failure + Any FlowFile that cannot be sent to Kafka will be routed to this Relationship + + false + + + + + + + msg.count + The number of messages that were sent to Kafka for this FlowFile. This attribute is + added only to FlowFiles that are routed to success. If the <Message Demarcator> Property + is not set, this will always be 1, but if the Property is set, it may be greater than 1. + + + + + + INPUT_REQUIRED + + + + + org.apache.nifi.processors.kafka.pubsub.PublishKafkaRecord_2_6 + PROCESSOR + + Sends the contents of a FlowFile as individual records to Apache Kafka using the Kafka 2.6 + Producer API. The contents of the FlowFile are expected to be record-oriented data that can be read by + the configured Record Reader. The complementary NiFi processor for fetching messages is + ConsumeKafkaRecord_2_6. + + + Apache + Kafka + Record + csv + json + avro + logs + Put + Send + Message + PubSub + 2.6 + + + + bootstrap.servers + Kafka Brokers + A comma-separated list of known Kafka Brokers in the format <host>:<port> + + localhost:9092 + + true + false + true + VARIABLE_REGISTRY + false + false + + + + topic + Topic Name + The name of the Kafka Topic to publish to. + + + true + false + true + FLOWFILE_ATTRIBUTES + false + false + + + + record-reader + Record Reader + The Record Reader to use for incoming FlowFiles + + + org.apache.nifi.serialization.RecordReaderFactory + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + true + false + false + NONE + false + false + + + + record-writer + Record Writer + The Record Writer to use in order to serialize the data before sending to Kafka + + + + org.apache.nifi.serialization.RecordSetWriterFactory + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + true + false + false + NONE + false + false + + + + use-transactions + Use Transactions + Specifies whether or not NiFi should provide Transactional guarantees when + communicating with Kafka. If there is a problem sending data to Kafka, and this property is set + to false, then the messages that have already been sent to Kafka will continue on and be + delivered to consumers. If this is set to true, then the Kafka transaction will be rolled back + so that those messages are not available to consumers. Setting this to true requires that the + <Delivery Guarantee> property be set to "Guarantee Replicated Delivery." + + true + + + true + true + + + + false + false + + + + true + false + false + NONE + false + false + + + + transactional-id-prefix + Transactional Id Prefix + When Use Transaction is set to true, KafkaProducer config 'transactional.id' will be a + generated UUID and will be prefixed with this string. + + + + false + false + true + VARIABLE_REGISTRY + false + false + + + + use-transactions + Use Transactions + + true + + + + + + Failure Strategy + Failure Strategy + Dictates how the processor handles a FlowFile if it is unable to publish the data to + Kafka + + Route to Failure + + + Route to Failure + Route to Failure + When unable to publish a FlowFile to Kafka, the FlowFile will be routed to the + 'failure' relationship. + + + + Rollback + Rollback + When unable to publish a FlowFile to Kafka, the FlowFile will be placed back on + the top of its queue so that it will be the next FlowFile tried again. For dataflows + where ordering of FlowFiles is important, this strategy can be used along with ensuring + that the each processor in the dataflow uses only a single Concurrent Task. 
+ + + + true + false + false + NONE + false + false + + + + acks + Delivery Guarantee + Specifies the requirement for guaranteeing that a message is sent to Kafka. Corresponds + to Kafka's 'acks' property. + + 0 + + + Best Effort + 0 + FlowFile will be routed to success after successfully writing the content to a + Kafka node, without waiting for a response. This provides the best performance but may + result in data loss. + + + + Guarantee Single Node Delivery + 1 + FlowFile will be routed to success if the message is received by a single Kafka + node, whether or not it is replicated. This is faster than <Guarantee Replicated + Delivery> but can result in data loss if a Kafka node crashes + + + + Guarantee Replicated Delivery + all + FlowFile will be routed to failure unless the message is replicated to the + appropriate number of Kafka Nodes according to the Topic configuration + + + + true + false + false + NONE + false + false + + + + attribute-name-regex + Attributes to Send as Headers (Regex) + A Regular Expression that is matched against all FlowFile attribute names. Any + attribute whose name matches the regex will be added to the Kafka messages as a Header. If not + specified, no FlowFile attributes will be added as headers. + + + + false + false + false + NONE + false + false + + + + message-header-encoding + Message Header Encoding + For any attribute that is added as a message header, as configured via the <Attributes + to Send as Headers> property, this property indicates the Character Encoding to use for + serializing the headers. + + UTF-8 + + false + false + false + NONE + false + false + + + + security.protocol + Security Protocol + Protocol used to communicate with brokers. Corresponds to Kafka's 'security.protocol' + property. + + PLAINTEXT + + + PLAINTEXT + PLAINTEXT + PLAINTEXT + + + SSL + SSL + SSL + + + SASL_PLAINTEXT + SASL_PLAINTEXT + SASL_PLAINTEXT + + + SASL_SSL + SASL_SSL + SASL_SSL + + + true + false + false + NONE + false + false + + + + sasl.mechanism + SASL Mechanism + The SASL mechanism to use for authentication. Corresponds to Kafka's 'sasl.mechanism' + property. + + GSSAPI + + + GSSAPI + GSSAPI + The mechanism for authentication via Kerberos. The principal and keytab must be + provided to the processor by using a Keytab Credential service, or by specifying the + properties directly in the processor. + + + + PLAIN + PLAIN + The mechanism for authentication via username and password. The username and + password properties must be populated when using this mechanism. + + + + SCRAM-SHA-256 + SCRAM-SHA-256 + The Salted Challenge Response Authentication Mechanism using SHA-256. The + username and password properties must be set when using this mechanism. + + + + SCRAM-SHA-512 + SCRAM-SHA-512 + The Salted Challenge Response Authentication Mechanism using SHA-512. The + username and password properties must be set when using this mechanism. 
+ + + + true + false + false + NONE + false + false + + + + kerberos-credentials-service + Kerberos Credentials Service + Specifies the Kerberos Credentials Controller Service that should be used for + authenticating with Kerberos + + + + org.apache.nifi.kerberos.KerberosCredentialsService + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + false + false + false + NONE + false + false + + + + kerberos-user-service + Kerberos User Service + Specifies the Kerberos User Controller Service that should be used for authenticating + with Kerberos + + + + org.apache.nifi.kerberos.SelfContainedKerberosUserService + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + false + false + false + NONE + false + false + + + + sasl.kerberos.service.name + Kerberos Service Name + The service name that matches the primary name of the Kafka server configured in the + broker JAAS file.This can be defined either in Kafka's JAAS config or in Kafka's config. + Corresponds to Kafka's 'security.protocol' property.It is ignored unless one of the SASL options + of the <Security Protocol> are selected. + + + + false + false + true + VARIABLE_REGISTRY + false + false + + + + sasl.kerberos.principal + Kerberos Principal + The Kerberos principal that will be used to connect to brokers. If not set, it is + expected to set a JAAS configuration file in the JVM properties defined in the bootstrap.conf + file. This principal will be set into 'sasl.jaas.config' Kafka's property. + + + + false + false + true + VARIABLE_REGISTRY + false + false + + + + sasl.kerberos.keytab + Kerberos Keytab + The Kerberos keytab that will be used to connect to brokers. If not set, it is expected + to set a JAAS configuration file in the JVM properties defined in the bootstrap.conf file. This + principal will be set into 'sasl.jaas.config' Kafka's property. + + + + false + false + true + VARIABLE_REGISTRY + false + false + + SINGLE + + FILE + + + + + sasl.username + Username + The username when the SASL Mechanism is PLAIN or SCRAM-SHA-256/SCRAM-SHA-512 + + + + false + false + true + VARIABLE_REGISTRY + false + false + + + + sasl.password + Password + The password for the given username when the SASL Mechanism is PLAIN or + SCRAM-SHA-256/SCRAM-SHA-512 + + + + false + true + true + VARIABLE_REGISTRY + false + false + + + + sasl.token.auth + Token Auth + When SASL Mechanism is SCRAM-SHA-256 or SCRAM-SHA-512, this property indicates if token + authentication should be used. + + false + + + true + true + + + + false + false + + + + false + false + false + NONE + false + false + + + + ssl.context.service + SSL Context Service + Specifies the SSL Context Service to use for communicating with Kafka. + + + org.apache.nifi.ssl.SSLContextService + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + false + false + false + NONE + false + false + + + + message-key-field + Message Key Field + The name of a field in the Input Records that should be used as the Key for the Kafka + message. + + + + false + false + true + FLOWFILE_ATTRIBUTES + false + false + + + + max.request.size + Max Request Size + The maximum size of a request in bytes. Corresponds to Kafka's 'max.request.size' + property and defaults to 1 MB (1048576). + + 1 MB + + true + false + false + NONE + false + false + + + + ack.wait.time + Acknowledgment Wait Time + After sending a message to Kafka, this indicates the amount of time that we are willing + to wait for a response from Kafka. 
If Kafka does not acknowledge the message within this time + period, the FlowFile will be routed to 'failure'. + + 5 secs + + true + false + false + NONE + false + false + + + + max.block.ms + Max Metadata Wait Time + The amount of time publisher will wait to obtain metadata or wait for the buffer to + flush during the 'send' call before failing the entire 'send' call. Corresponds to Kafka's + 'max.block.ms' property + + 5 sec + + true + false + true + VARIABLE_REGISTRY + false + false + + + + partitioner.class + Partitioner class + Specifies which class to use to compute a partition id for a message. Corresponds to + Kafka's 'partitioner.class' property. + + org.apache.kafka.clients.producer.internals.DefaultPartitioner + + + RoundRobinPartitioner + org.apache.nifi.processors.kafka.pubsub.Partitioners$RoundRobinPartitioner + Messages will be assigned partitions in a round-robin fashion, sending the + first message to Partition 1, the next Partition to Partition 2, and so on, wrapping as + necessary. + + + + DefaultPartitioner + org.apache.kafka.clients.producer.internals.DefaultPartitioner + Messages will be assigned to random partitions. + + + RecordPath Partitioner + org.apache.nifi.processors.kafka.pubsub.Partitioners$RecordPathPartitioner + Interprets the <Partition> property as a RecordPath that will be + evaluated against each Record to determine which partition the Record will go to. All + Records that have the same value for the given RecordPath will go to the same Partition. + + + + Expression Language Partitioner + org.apache.nifi.processors.kafka.pubsub.Partitioners$ExpressionLanguagePartitioner + + Interprets the <Partition> property as Expression Language that will be + evaluated against each FlowFile. This Expression will be evaluated once against the + FlowFile, so all Records in a given FlowFile will go to the same partition. + + + + false + false + false + NONE + false + false + + + + partition + Partition + Specifies which Partition Records will go to. How this value is interpreted is dictated + by the <Partitioner class> property. + + + + false + false + true + FLOWFILE_ATTRIBUTES + false + false + + + + compression.type + Compression Type + This parameter allows you to specify the compression codec for all data generated by + this producer. + + none + + + none + none + + + + gzip + gzip + + + + snappy + snappy + + + + lz4 + lz4 + + + + true + false + false + NONE + false + false + + + + + + The name of a Kafka configuration property. + The value of a given Kafka configuration property. + These properties will be added on the Kafka configuration after loading any provided + configuration properties. In the event a dynamic property represents a property that was already + set, its value will be ignored and WARN message logged. For the list of available Kafka + properties please refer to: http://kafka.apache.org/documentation.html#configuration. + + false + VARIABLE_REGISTRY + + + + + success + FlowFiles for which all content was sent to Kafka. + false + + + failure + Any FlowFile that cannot be sent to Kafka will be routed to this Relationship + + false + + + + + + + msg.count + The number of messages that were sent to Kafka for this FlowFile. This attribute is + added only to FlowFiles that are routed to success. 
+ + + + + + INPUT_REQUIRED + + + org.apache.nifi.processors.kafka.pubsub.PublishKafka_2_6 + org.apache.nifi.processors.kafka.pubsub.ConsumeKafka_2_6 + org.apache.nifi.processors.kafka.pubsub.ConsumeKafkaRecord_2_6 + + + + org.apache.nifi.processors.kafka.pubsub.ConsumeKafkaRecord_2_6 + PROCESSOR + + Consumes messages from Apache Kafka specifically built against the Kafka 2.6 Consumer API. The + complementary NiFi processor for sending messages is PublishKafkaRecord_2_6. Please note that, at this + time, the Processor assumes that all records that are retrieved from a given partition have the same + schema. If any of the Kafka messages are pulled but cannot be parsed or written with the configured + Record Reader or Record Writer, the contents of the message will be written to a separate FlowFile, and + that FlowFile will be transferred to the 'parse.failure' relationship. Otherwise, each FlowFile is sent + to the 'success' relationship and may contain many individual messages within the single FlowFile. A + 'record.count' attribute is added to indicate how many messages are contained in the FlowFile. No two + Kafka messages will be placed into the same FlowFile if they have different schemas, or if they have + different values for a message header that is included by the <Headers to Add as Attributes> + property. + + + Kafka + Get + Record + csv + avro + json + Ingest + Ingress + Topic + PubSub + Consume + 2.6 + + + + bootstrap.servers + Kafka Brokers + A comma-separated list of known Kafka Brokers in the format <host>:<port> + + localhost:9092 + + true + false + true + VARIABLE_REGISTRY + false + false + + + + topic + Topic Name(s) + The name of the Kafka Topic(s) to pull from. More than one can be supplied if comma + separated. + + + + true + false + true + VARIABLE_REGISTRY + false + false + + + + topic_type + Topic Name Format + Specifies whether the Topic(s) provided are a comma separated list of names or a single + regular expression + + names + + + names + names + Topic is a full topic name or comma separated list of names + + + pattern + pattern + Topic is a regex using the Java Pattern syntax + + + true + false + false + NONE + false + false + + + + record-reader + Record Reader + The Record Reader to use for incoming FlowFiles + + + org.apache.nifi.serialization.RecordReaderFactory + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + true + false + false + NONE + false + false + + + + record-writer + Record Writer + The Record Writer to use in order to serialize the data before sending to Kafka + + + + org.apache.nifi.serialization.RecordSetWriterFactory + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + true + false + false + NONE + false + false + + + + group.id + Group ID + A Group ID is used to identify consumers that are within the same consumer group. + Corresponds to Kafka's 'group.id' property. + + + + true + false + true + VARIABLE_REGISTRY + false + false + + + + Commit Offsets + Commit Offsets + Specifies whether or not this Processor should commit the offsets to Kafka after + receiving messages. This value should be false when a PublishKafkaRecord processor is expected + to commit the offsets using Exactly Once semantics, and should be reserved for dataflows that + are designed to run within Stateless NiFi. See Processor's Usage / Additional Details for more + information. 
Note that setting this value to false can lead to significant data duplication or + potentially even data loss if the dataflow is not properly configured. + + true + + + true + true + + + + false + false + + + + false + false + false + NONE + false + false + + + + max-uncommit-offset-wait + Max Uncommitted Time + Specifies the maximum amount of time allowed to pass before offsets must be committed. + This value impacts how often offsets will be committed. Committing offsets less often increases + throughput but also increases the window of potential data duplication in the event of a + rebalance or JVM restart between commits. This value is also related to maximum poll records and + the use of a message demarcator. When using a message demarcator we can have far more + uncommitted messages than when we're not as there is much less for us to keep track of in + memory. + + 1 secs + + false + false + false + NONE + false + false + + + + Commit Offsets + Commit Offsets + + true + + + + + + honor-transactions + Honor Transactions + Specifies whether or not NiFi should honor transactional guarantees when communicating + with Kafka. If false, the Processor will use an "isolation level" of read_uncomitted. This means + that messages will be received as soon as they are written to Kafka but will be pulled, even if + the producer cancels the transactions. If this value is true, NiFi will not receive any messages + for which the producer's transaction was canceled, but this can result in some latency since the + consumer must wait for the producer to finish its entire transaction instead of pulling as the + messages become available. + + true + + + true + true + + + + false + false + + + + true + false + false + NONE + false + false + + + + security.protocol + Security Protocol + Protocol used to communicate with brokers. Corresponds to Kafka's 'security.protocol' + property. + + PLAINTEXT + + + PLAINTEXT + PLAINTEXT + PLAINTEXT + + + SSL + SSL + SSL + + + SASL_PLAINTEXT + SASL_PLAINTEXT + SASL_PLAINTEXT + + + SASL_SSL + SASL_SSL + SASL_SSL + + + true + false + false + NONE + false + false + + + + sasl.mechanism + SASL Mechanism + The SASL mechanism to use for authentication. Corresponds to Kafka's 'sasl.mechanism' + property. + + GSSAPI + + + GSSAPI + GSSAPI + The mechanism for authentication via Kerberos. The principal and keytab must be + provided to the processor by using a Keytab Credential service, or by specifying the + properties directly in the processor. + + + + PLAIN + PLAIN + The mechanism for authentication via username and password. The username and + password properties must be populated when using this mechanism. + + + + SCRAM-SHA-256 + SCRAM-SHA-256 + The Salted Challenge Response Authentication Mechanism using SHA-256. The + username and password properties must be set when using this mechanism. + + + + SCRAM-SHA-512 + SCRAM-SHA-512 + The Salted Challenge Response Authentication Mechanism using SHA-512. The + username and password properties must be set when using this mechanism. 
+ + + + true + false + false + NONE + false + false + + + + kerberos-credentials-service + Kerberos Credentials Service + Specifies the Kerberos Credentials Controller Service that should be used for + authenticating with Kerberos + + + + org.apache.nifi.kerberos.KerberosCredentialsService + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + false + false + false + NONE + false + false + + + + sasl.kerberos.service.name + Kerberos Service Name + The service name that matches the primary name of the Kafka server configured in the + broker JAAS file.This can be defined either in Kafka's JAAS config or in Kafka's config. + Corresponds to Kafka's 'security.protocol' property.It is ignored unless one of the SASL options + of the <Security Protocol> are selected. + + + + false + false + true + VARIABLE_REGISTRY + false + false + + + + sasl.kerberos.principal + Kerberos Principal + The Kerberos principal that will be used to connect to brokers. If not set, it is + expected to set a JAAS configuration file in the JVM properties defined in the bootstrap.conf + file. This principal will be set into 'sasl.jaas.config' Kafka's property. + + + + false + false + true + VARIABLE_REGISTRY + false + false + + + + sasl.kerberos.keytab + Kerberos Keytab + The Kerberos keytab that will be used to connect to brokers. If not set, it is expected + to set a JAAS configuration file in the JVM properties defined in the bootstrap.conf file. This + principal will be set into 'sasl.jaas.config' Kafka's property. + + + + false + false + true + VARIABLE_REGISTRY + false + false + + SINGLE + + FILE + + + + + sasl.username + Username + The username when the SASL Mechanism is PLAIN or SCRAM-SHA-256/SCRAM-SHA-512 + + + + false + false + true + VARIABLE_REGISTRY + false + false + + + + sasl.password + Password + The password for the given username when the SASL Mechanism is PLAIN or + SCRAM-SHA-256/SCRAM-SHA-512 + + + + false + true + true + VARIABLE_REGISTRY + false + false + + + + sasl.token.auth + Token Auth + When SASL Mechanism is SCRAM-SHA-256 or SCRAM-SHA-512, this property indicates if token + authentication should be used. + + false + + + true + true + + + + false + false + + + + false + false + false + NONE + false + false + + + + ssl.context.service + SSL Context Service + Specifies the SSL Context Service to use for communicating with Kafka. + + + org.apache.nifi.ssl.SSLContextService + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + false + false + false + NONE + false + false + + + + separate-by-key + Separate By Key + If true, two Records will only be added to the same FlowFile if both of the Kafka + Messages have identical keys. + + false + + + true + true + + + + false + false + + + + false + false + false + NONE + false + false + + + + key-attribute-encoding + Key Attribute Encoding + If the <Separate By Key> property is set to true, FlowFiles that are emitted have + an attribute named 'kafka.key'. This property dictates how the value of the attribute should be + encoded. + + utf-8 + + + UTF-8 Encoded + utf-8 + The key is interpreted as a UTF-8 Encoded string. 
+ + + Hex Encoded + hex + The key is interpreted as arbitrary binary data and is encoded using + hexadecimal characters with uppercase letters + + + + Do Not Add Key as Attribute + do-not-add + The key will not be added as an Attribute + + + true + false + false + NONE + false + false + + + + auto.offset.reset + Offset Reset + Allows you to manage the condition when there is no initial offset in Kafka or if the + current offset does not exist any more on the server (e.g. because that data has been deleted). + Corresponds to Kafka's 'auto.offset.reset' property. + + latest + + + earliest + earliest + Automatically reset the offset to the earliest offset + + + latest + latest + Automatically reset the offset to the latest offset + + + none + none + Throw exception to the consumer if no previous offset is found for the + consumer's group + + + + true + false + false + NONE + false + false + + + + message-header-encoding + Message Header Encoding + Any message header that is found on a Kafka message will be added to the outbound + FlowFile as an attribute. This property indicates the Character Encoding to use for + deserializing the headers. + + UTF-8 + + false + false + false + NONE + false + false + + + + header-name-regex + Headers to Add as Attributes (Regex) + A Regular Expression that is matched against all message headers. Any message header + whose name matches the regex will be added to the FlowFile as an Attribute. If not specified, no + Header values will be added as FlowFile attributes. If two messages have a different value for + the same header and that header is selected by the provided regex, then those two messages must + be added to different FlowFiles. As a result, users should be cautious about using a regex like + ".*" if messages are expected to have header values that are unique per message, such as an + identifier or timestamp, because it will prevent NiFi from bundling the messages together + efficiently. + + + + false + false + false + NONE + false + false + + + + max.poll.records + Max Poll Records + Specifies the maximum number of records Kafka should return in a single poll. + + 10000 + + false + false + false + NONE + false + false + + + + Communications Timeout + Communications Timeout + Specifies the timeout that the consumer should use when communicating with the Kafka + Broker + + 60 secs + + true + false + false + NONE + false + false + + + + + + The name of a Kafka configuration property. + The value of a given Kafka configuration property. + These properties will be added on the Kafka configuration after loading any provided + configuration properties. In the event a dynamic property represents a property that was already + set, its value will be ignored and WARN message logged. For the list of available Kafka + properties please refer to: http://kafka.apache.org/documentation.html#configuration. + + false + VARIABLE_REGISTRY + + + + + success + FlowFiles received from Kafka. Depending on demarcation strategy it is a flow file per + message or a bundle of messages grouped by topic and partition. + + false + + + parse.failure + If a message from Kafka cannot be parsed using the configured Record Reader, the + contents of the message will be routed to this Relationship as its own individual FlowFile. 
+ + false + + + + + + + record.count + The number of records received + + + mime.type + The MIME Type that is provided by the configured Record Writer + + + kafka.partition + The partition of the topic the records are from + + + kafka.timestamp + The timestamp of the message in the partition of the topic. + + + kafka.topic + The topic records are from + + + + + INPUT_FORBIDDEN + + + org.apache.nifi.processors.kafka.pubsub.ConsumeKafka_2_6 + org.apache.nifi.processors.kafka.pubsub.PublishKafka_2_6 + org.apache.nifi.processors.kafka.pubsub.PublishKafkaRecord_2_6 + + + + org.apache.nifi.record.sink.kafka.KafkaRecordSink_2_6 + CONTROLLER_SERVICE + + Provides a service to write records to a Kafka 2.6+ topic. + + kafka + record + sink + + + + bootstrap.servers + Kafka Brokers + A comma-separated list of known Kafka Brokers in the format <host>:<port> + + localhost:9092 + + true + false + true + VARIABLE_REGISTRY + false + false + + + + topic + Topic Name + The name of the Kafka Topic to publish to. + + + true + false + true + VARIABLE_REGISTRY + false + false + + + + record-sink-record-writer + Record Writer + Specifies the Controller Service to use for writing out the records. + + + org.apache.nifi.serialization.RecordSetWriterFactory + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + true + false + false + NONE + false + false + + + + acks + Delivery Guarantee + Specifies the requirement for guaranteeing that a message is sent to Kafka. Corresponds + to Kafka's 'acks' property. + + 0 + + + Best Effort + 0 + Records are considered 'transmitted successfully' after successfully writing + the content to a Kafka node, without waiting for a response. This provides the best + performance but may result in data loss. + + + + Guarantee Single Node Delivery + 1 + Records are considered 'transmitted successfully' if the message is received by + a single Kafka node, whether or not it is replicated. This is faster than <Guarantee + Replicated Delivery> but can result in data loss if a Kafka node crashes. + + + + Guarantee Replicated Delivery + all + Records are considered 'transmitted unsuccessfully' unless the message is + replicated to the appropriate number of Kafka Nodes according to the Topic + configuration. + + + + true + false + false + NONE + false + false + + + + message-header-encoding + Message Header Encoding + For any attribute that is added as a message header, as configured via the <Attributes + to Send as Headers> property, this property indicates the Character Encoding to use for + serializing the headers. + + UTF-8 + + false + false + false + NONE + false + false + + + + security.protocol + Security Protocol + Protocol used to communicate with brokers. Corresponds to Kafka's 'security.protocol' + property. 
+ + PLAINTEXT + + + PLAINTEXT + PLAINTEXT + PLAINTEXT + + + SSL + SSL + SSL + + + SASL_PLAINTEXT + SASL_PLAINTEXT + SASL_PLAINTEXT + + + SASL_SSL + SASL_SSL + SASL_SSL + + + true + false + false + NONE + false + false + + + + kerberos-credentials-service + Kerberos Credentials Service + Specifies the Kerberos Credentials Controller Service that should be used for + authenticating with Kerberos + + + + org.apache.nifi.kerberos.KerberosCredentialsService + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + false + false + false + NONE + false + false + + + + sasl.kerberos.service.name + Kerberos Service Name + The service name that matches the primary name of the Kafka server configured in the + broker JAAS file.This can be defined either in Kafka's JAAS config or in Kafka's config. + Corresponds to Kafka's 'security.protocol' property.It is ignored unless one of the SASL options + of the <Security Protocol> are selected. + + + + false + false + true + VARIABLE_REGISTRY + false + false + + + + ssl.context.service + SSL Context Service + Specifies the SSL Context Service to use for communicating with Kafka. + + + org.apache.nifi.ssl.SSLContextService + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + false + false + false + NONE + false + false + + + + max.request.size + Max Request Size + The maximum size of a request in bytes. Corresponds to Kafka's 'max.request.size' + property and defaults to 1 MB (1048576). + + 1 MB + + true + false + false + NONE + false + false + + + + ack.wait.time + Acknowledgment Wait Time + After sending a message to Kafka, this indicates the amount of time that we are willing + to wait for a response from Kafka. If Kafka does not acknowledge the message within this time + period, the FlowFile will be routed to 'failure'. + + 5 secs + + true + false + false + NONE + false + false + + + + max.block.ms + Max Metadata Wait Time + The amount of time publisher will wait to obtain metadata or wait for the buffer to + flush during the 'send' call before failing the entire 'send' call. Corresponds to Kafka's + 'max.block.ms' property + + 5 sec + + true + false + true + VARIABLE_REGISTRY + false + false + + + + compression.type + Compression Type + This parameter allows you to specify the compression codec for all data generated by + this producer. + + none + + + none + none + + + + gzip + gzip + + + + snappy + snappy + + + + lz4 + lz4 + + + + true + false + false + NONE + false + false + + + + + + + + + + + + org.apache.nifi.record.sink.RecordSinkService + org.apache.nifi + nifi-standard-services-api-nar + 1.16.0-SNAPSHOT + + + + + \ No newline at end of file diff --git a/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/Cardinality.java b/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/Cardinality.java new file mode 100644 index 0000000000..dac545bcf8 --- /dev/null +++ b/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/Cardinality.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.registry.extension.component.manifest;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public enum Cardinality {
+
+    /**
+     * Exactly one resource must be specified
+     */
+    SINGLE,
+
+    /**
+     * One or more resources may be supplied, as a comma-separated list
+     */
+    MULTIPLE;
+
+}
diff --git a/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/Dependency.java b/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/Dependency.java
new file mode 100644
index 0000000000..d7e9af294a
--- /dev/null
+++ b/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/Dependency.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.registry.extension.component.manifest;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+
+@ApiModel
+@XmlAccessorType(XmlAccessType.FIELD)
+public class Dependency {
+
+    private String propertyName;
+    private String propertyDisplayName;
+    private DependentValues dependentValues;
+
+    @ApiModelProperty(value = "The name of the dependent property")
+    public String getPropertyName() {
+        return propertyName;
+    }
+
+    public void setPropertyName(String propertyName) {
+        this.propertyName = propertyName;
+    }
+
+    @ApiModelProperty(value = "The display name of the dependent property")
+    public String getPropertyDisplayName() {
+        return propertyDisplayName;
+    }
+
+    public void setPropertyDisplayName(String propertyDisplayName) {
+        this.propertyDisplayName = propertyDisplayName;
+    }
+
+    @ApiModelProperty(value = "The values of the dependent property that enable the depending property")
+    public DependentValues getDependentValues() {
+        return dependentValues;
+    }
+
+    public void setDependentValues(DependentValues dependentValues) {
+        this.dependentValues = dependentValues;
+    }
+}
diff --git a/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/DependentValues.java b/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/DependentValues.java
new file mode 100644
index 0000000000..f7aa78855e
--- /dev/null
+++ b/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/DependentValues.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.registry.extension.component.manifest;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import java.util.List;
+
+@ApiModel
+@XmlAccessorType(XmlAccessType.FIELD)
+public class DependentValues {
+
+    @XmlElement(name = "dependentValue")
+    private List<String> values;
+
+    @ApiModelProperty(value = "The dependent values")
+    public List<String> getValues() {
+        return values;
+    }
+
+    public void setValues(List<String> values) {
+        this.values = values;
+    }
+
+}
diff --git a/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/Property.java b/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/Property.java
index f3723f3413..74bbcdbb1d 100644
--- a/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/Property.java
+++ b/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/Property.java
@@ -48,6 +48,11 @@ public class Property {
     private boolean dynamicallyModifiesClasspath;
     private boolean dynamic;
 
+    @XmlElementWrapper
+    @XmlElement(name = "dependency")
+    private List<Dependency> dependencies;
+
+    private ResourceDefinition resourceDefinition;
 
     @ApiModelProperty(value = "The name of the property")
     public String getName() {
@@ -156,4 +161,22 @@ public class Property {
     public void setDynamic(boolean dynamic) {
         this.dynamic = dynamic;
     }
+
+    @ApiModelProperty(value = "The properties that this property depends on")
+    public List<Dependency> getDependencies() {
+        return dependencies;
+    }
+
+    public void setDependencies(List<Dependency> dependencies) {
+        this.dependencies = dependencies;
+    }
+
+    @ApiModelProperty(value = "The optional resource definition")
+    public ResourceDefinition getResourceDefinition() {
+        return resourceDefinition;
+    }
+
+    public void setResourceDefinition(ResourceDefinition resourceDefinition) {
+        this.resourceDefinition = resourceDefinition;
+    }
 }
diff --git a/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/ResourceDefinition.java b/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/ResourceDefinition.java
new file mode 100644
index 0000000000..1e82e48290
--- /dev/null
+++ b/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/ResourceDefinition.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.registry.extension.component.manifest;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlElementWrapper;
+import java.util.List;
+
+@ApiModel
+@XmlAccessorType(XmlAccessType.FIELD)
+public class ResourceDefinition {
+
+    private Cardinality cardinality;
+
+    @XmlElementWrapper
+    @XmlElement(name = "resourceType")
+    private List<ResourceType> resourceTypes;
+
+    @ApiModelProperty(value = "The cardinality of the resource definition")
+    public Cardinality getCardinality() {
+        return cardinality;
+    }
+
+    public void setCardinality(Cardinality cardinality) {
+        this.cardinality = cardinality;
+    }
+
+    @ApiModelProperty(value = "The types of resources")
+    public List<ResourceType> getResourceTypes() {
+        return resourceTypes;
+    }
+
+    public void setResourceTypes(List<ResourceType> resourceTypes) {
+        this.resourceTypes = resourceTypes;
+    }
+
+}
diff --git a/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/ResourceType.java b/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/ResourceType.java
new file mode 100644
index 0000000000..061f7e86cc
--- /dev/null
+++ b/nifi-registry/nifi-registry-core/nifi-registry-data-model/src/main/java/org/apache/nifi/registry/extension/component/manifest/ResourceType.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.registry.extension.component.manifest;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public enum ResourceType {
+
+    /**
+     * Referenced Resource is a file on a local (or mounted) file system
+     */
+    FILE,
+
+    /**
+     * Referenced Resource is a directory on a local (or mounted) file system
+     */
+    DIRECTORY,
+
+    /**
+     * Referenced Resource is UTF-8 text, rather than an external entity
+     */
+    TEXT,
+
+    /**
+     * Referenced Resource is a URL that uses the HTTP, HTTPS, or file protocol
+     * (i.e., http://..., https://..., or file:...)
+     */
+    URL
+
+}
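A quick way to sanity-check the new model classes is to marshal a populated Property through JAXB and confirm the XML shape matches the manifest entries above (for example, the sasl.kerberos.keytab property with its SINGLE cardinality and FILE resource type). The sketch below is illustrative only and not part of this change: the class ManifestModelSketch and the names example-keytab, example-toggle, Example Keytab, and Example Toggle are hypothetical, and it assumes the javax.xml.bind (JAXB 2.x) API that these classes already compile against.

    import org.apache.nifi.registry.extension.component.manifest.Cardinality;
    import org.apache.nifi.registry.extension.component.manifest.Dependency;
    import org.apache.nifi.registry.extension.component.manifest.DependentValues;
    import org.apache.nifi.registry.extension.component.manifest.Property;
    import org.apache.nifi.registry.extension.component.manifest.ResourceDefinition;
    import org.apache.nifi.registry.extension.component.manifest.ResourceType;

    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.JAXBElement;
    import javax.xml.bind.Marshaller;
    import javax.xml.namespace.QName;
    import java.io.StringWriter;
    import java.util.Collections;

    public class ManifestModelSketch {

        public static void main(final String[] args) throws Exception {
            // Dependency: the property is only active when the (hypothetical)
            // "example-toggle" property is set to "true".
            final DependentValues dependentValues = new DependentValues();
            dependentValues.setValues(Collections.singletonList("true"));

            final Dependency dependency = new Dependency();
            dependency.setPropertyName("example-toggle");
            dependency.setPropertyDisplayName("Example Toggle");
            dependency.setDependentValues(dependentValues);

            // Resource definition: exactly one resource, which must be a file,
            // mirroring the sasl.kerberos.keytab entry in the manifest above.
            final ResourceDefinition resourceDefinition = new ResourceDefinition();
            resourceDefinition.setCardinality(Cardinality.SINGLE);
            resourceDefinition.setResourceTypes(Collections.singletonList(ResourceType.FILE));

            final Property property = new Property();
            property.setName("example-keytab");
            property.setDisplayName("Example Keytab");
            property.setDependencies(Collections.singletonList(dependency));
            property.setResourceDefinition(resourceDefinition);

            // Property is not an @XmlRootElement, so a JAXBElement supplies the root name.
            final Marshaller marshaller = JAXBContext.newInstance(Property.class).createMarshaller();
            marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);

            final StringWriter writer = new StringWriter();
            marshaller.marshal(new JAXBElement<>(new QName("property"), Property.class, property), writer);

            // Expected shape, given the default FIELD access and wrapper annotations:
            //   <dependencies><dependency><propertyName>example-toggle</propertyName>...
            //     <dependentValues><dependentValue>true</dependentValue></dependentValues>...
            //   <resourceDefinition><cardinality>SINGLE</cardinality>
            //     <resourceTypes><resourceType>FILE</resourceType></resourceTypes></resourceDefinition>
            System.out.println(writer);
        }
    }

Within a real extension manifest the enclosing properties element comes from the parent Extension type rather than from Property itself. Note also that modeling DependentValues as a wrapper around a List<String>, rather than giving Property a bare list, lets the manifest distinguish an empty dependentValues element from an absent one.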