NIFI-12920 Removed nifi-cassandra-bundle

Signed-off-by: Pierre Villard <pierre.villard.fr@gmail.com>

This closes #8531.
Authored by exceptionfactory on 2024-03-18 16:17:21 -05:00; committed by Pierre Villard
parent f5e44a99ac
commit d9bcc8b496
42 changed files with 0 additions and 8171 deletions


@@ -2040,11 +2040,6 @@ The following binary components are provided under the Apache Software License v
the terms of a BSD style license.
The original software and related information is available
at http://www.jcraft.com/jsch/.
(ASLv2) DataStax Java Driver for Apache Cassandra - Core
The following NOTICE information applies:
DataStax Java Driver for Apache Cassandra - Core
Copyright (C) 2012-2017 DataStax Inc.
(ASLv2) bytebuffer-collections
The following NOTICE information applies:
bytebuffer-collections


@@ -530,24 +530,6 @@ language governing permissions and limitations under the License. -->
<version>2.0.0-SNAPSHOT</version>
<type>nar</type>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-nar</artifactId>
<version>2.0.0-SNAPSHOT</version>
<type>nar</type>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-services-api-nar</artifactId>
<version>2.0.0-SNAPSHOT</version>
<type>nar</type>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-services-nar</artifactId>
<version>2.0.0-SNAPSHOT</version>
<type>nar</type>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-registry-nar</artifactId>


@@ -838,26 +838,6 @@
<artifactId>nifi-box-services-api</artifactId>
<version>2.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-distributedmapcache-service</artifactId>
<version>2.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-processors</artifactId>
<version>2.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-services</artifactId>
<version>2.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-services-api</artifactId>
<version>2.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cdc-api</artifactId>


@@ -404,26 +404,6 @@
<packageUrl regex="true">^pkg:maven/com\.google\.guava/guava@.*$</packageUrl>
<cve>CVE-2020-8908</cve>
</suppress>
<suppress>
<notes>CVE-2021-44521 applies to Apache Cassandra Server</notes>
<packageUrl regex="true">^pkg:maven/com\.datastax\.cassandra/cassandra\-driver\-extras@.*$</packageUrl>
<cve>CVE-2021-44521</cve>
</suppress>
<suppress>
<notes>CVE-2020-17516 applies to Apache Cassandra Server</notes>
<packageUrl regex="true">^pkg:maven/com\.datastax\.cassandra/cassandra\-driver\-extras@.*$</packageUrl>
<cve>CVE-2020-17516</cve>
</suppress>
<suppress>
<notes>CVE-2019-2684 applies to Apache Cassandra Server</notes>
<packageUrl regex="true">^pkg:maven/com\.datastax\.cassandra/cassandra\-driver\-extras@.*$</packageUrl>
<cve>CVE-2019-2684</cve>
</suppress>
<suppress>
<notes>CVE-2020-13946 applies to Apache Cassandra Server</notes>
<packageUrl regex="true">^pkg:maven/com\.datastax\.cassandra/cassandra\-driver\-extras@.*$</packageUrl>
<cve>CVE-2020-13946</cve>
</suppress>
<suppress>
<notes>Bundled versions of jQuery DataTables are not used</notes>
<packageUrl regex="true">^pkg:javascript/jquery\.datatables@.*$</packageUrl>


@@ -1,102 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-bundle</artifactId>
<version>2.0.0-SNAPSHOT</version>
</parent>
<artifactId>nifi-cassandra-distributedmapcache-service</artifactId>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-api</artifactId>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-record-serialization-service-api</artifactId>
<version>2.0.0-SNAPSHOT</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-utils</artifactId>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-record</artifactId>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-avro-record-utils</artifactId>
<version>2.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-mock</artifactId>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-mock-record-utils</artifactId>
<version>2.0.0-SNAPSHOT</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-distributed-cache-client-service-api</artifactId>
<version>2.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-services-api</artifactId>
<version>2.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-services</artifactId>
<version>2.0.0-SNAPSHOT</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-ssl-context-service-api</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>cassandra</artifactId>
<version>${testcontainers.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>junit-jupiter</artifactId>
<version>${testcontainers.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>


@@ -1,241 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.controller.cassandra;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import org.apache.nifi.annotation.documentation.CapabilityDescription;
import org.apache.nifi.annotation.documentation.Tags;
import org.apache.nifi.annotation.lifecycle.OnDisabled;
import org.apache.nifi.annotation.lifecycle.OnEnabled;
import org.apache.nifi.cassandra.CassandraSessionProviderService;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.controller.AbstractControllerService;
import org.apache.nifi.controller.ConfigurationContext;
import org.apache.nifi.distributed.cache.client.Deserializer;
import org.apache.nifi.distributed.cache.client.DistributedMapCacheClient;
import org.apache.nifi.distributed.cache.client.Serializer;
import org.apache.nifi.expression.ExpressionLanguageScope;
import org.apache.nifi.processor.util.StandardValidators;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.TimeUnit;
import static org.apache.nifi.controller.cassandra.QueryUtils.createDeleteStatement;
import static org.apache.nifi.controller.cassandra.QueryUtils.createExistsQuery;
import static org.apache.nifi.controller.cassandra.QueryUtils.createFetchQuery;
import static org.apache.nifi.controller.cassandra.QueryUtils.createInsertStatement;
@Tags({"map", "cache", "distributed", "cassandra"})
@CapabilityDescription("Provides a DistributedMapCache client that is based on Apache Cassandra.")
public class CassandraDistributedMapCache extends AbstractControllerService implements DistributedMapCacheClient {
public static final PropertyDescriptor SESSION_PROVIDER = new PropertyDescriptor.Builder()
.name("cassandra-dmc-session-provider")
.displayName("Session Provider")
.description("The client service that will configure the cassandra client connection.")
.required(true)
.identifiesControllerService(CassandraSessionProviderService.class)
.build();
public static final PropertyDescriptor TABLE_NAME = new PropertyDescriptor.Builder()
.name("cassandra-dmc-table-name")
.displayName("Table Name")
.description("The name of the table where the cache will be stored.")
.required(true)
.addValidator(StandardValidators.NON_EMPTY_EL_VALIDATOR)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.build();
public static final PropertyDescriptor KEY_FIELD_NAME = new PropertyDescriptor.Builder()
.name("cassandra-dmc-key-field-name")
.displayName("Key Field Name")
.description("The name of the field that acts as the unique key. (The CQL type should be \"blob\")")
.required(true)
.addValidator(StandardValidators.NON_EMPTY_EL_VALIDATOR)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.build();
public static final PropertyDescriptor VALUE_FIELD_NAME = new PropertyDescriptor.Builder()
.name("cassandra-dmc-value-field-name")
.displayName("Value Field Name")
.description("The name of the field that will store the value. (The CQL type should be \"blob\")")
.required(true)
.addValidator(StandardValidators.NON_EMPTY_EL_VALIDATOR)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.build();
public static final PropertyDescriptor TTL = new PropertyDescriptor.Builder()
.name("cassandra-dmc-ttl")
.displayName("TTL")
.description("If configured, this will set a TTL (Time to Live) for each row inserted into the table so that " +
"old cache items expire after a certain period of time.")
.addValidator(StandardValidators.TIME_PERIOD_VALIDATOR)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.required(false)
.build();
public static final List<PropertyDescriptor> DESCRIPTORS = Collections.unmodifiableList(Arrays.asList(
SESSION_PROVIDER, TABLE_NAME, KEY_FIELD_NAME, VALUE_FIELD_NAME, TTL
));
private CassandraSessionProviderService sessionProviderService;
private String tableName;
private String keyField;
private String valueField;
private Long ttl;
private Session session;
private PreparedStatement deleteStatement;
private PreparedStatement existsStatement;
private PreparedStatement fetchStatement;
private PreparedStatement insertStatement;
@Override
protected List<PropertyDescriptor> getSupportedPropertyDescriptors() {
return DESCRIPTORS;
}
@OnEnabled
public void onEnabled(ConfigurationContext context) {
sessionProviderService = context.getProperty(SESSION_PROVIDER).asControllerService(CassandraSessionProviderService.class);
tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions().getValue();
keyField = context.getProperty(KEY_FIELD_NAME).evaluateAttributeExpressions().getValue();
valueField = context.getProperty(VALUE_FIELD_NAME).evaluateAttributeExpressions().getValue();
if (context.getProperty(TTL).isSet()) {
ttl = context.getProperty(TTL).evaluateAttributeExpressions().asTimePeriod(TimeUnit.SECONDS);
}
session = sessionProviderService.getCassandraSession();
deleteStatement = session.prepare(createDeleteStatement(keyField, tableName));
existsStatement = session.prepare(createExistsQuery(keyField, tableName));
fetchStatement = session.prepare(createFetchQuery(keyField, valueField, tableName));
insertStatement = session.prepare(createInsertStatement(keyField, valueField, tableName, ttl));
}
@OnDisabled
public void onDisabled() {
session = null;
deleteStatement = null;
existsStatement = null;
fetchStatement = null;
insertStatement = null;
}
@Override
public <K, V> boolean putIfAbsent(K k, V v, Serializer<K> keySerializer, Serializer<V> valueSerializer) throws IOException {
if (containsKey(k, keySerializer)) {
return false;
} else {
put(k, v, keySerializer, valueSerializer);
return true;
}
}
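// Note: the get() followed by putIfAbsent() below runs as separate queries,
// so this read-then-write sequence is not atomic under concurrent access.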
@Override
public <K, V> V getAndPutIfAbsent(K k, V v, Serializer<K> keySerializer, Serializer<V> valueSerializer, Deserializer<V> deserializer) throws IOException {
V got = get(k, keySerializer, deserializer);
boolean wasAbsent = putIfAbsent(k, v, keySerializer, valueSerializer);
return !wasAbsent ? got : null;
}
@Override
public <K> boolean containsKey(K k, Serializer<K> serializer) throws IOException {
byte[] key = serializeKey(k, serializer);
BoundStatement statement = existsStatement.bind();
ByteBuffer buffer = ByteBuffer.wrap(key);
statement.setBytes(0, buffer);
ResultSet rs = session.execute(statement);
Iterator<Row> iterator = rs.iterator();
if (iterator.hasNext()) {
Row row = iterator.next();
long value = row.getLong("exist_count");
return value > 0;
} else {
return false;
}
}
@Override
public <K, V> void put(K k, V v, Serializer<K> keySerializer, Serializer<V> valueSerializer) throws IOException {
BoundStatement statement = insertStatement.bind();
statement.setBytes(0, ByteBuffer.wrap(serializeKey(k, keySerializer)));
statement.setBytes(1, ByteBuffer.wrap(serializeValue(v, valueSerializer)));
session.execute(statement);
}
@Override
public <K, V> V get(K k, Serializer<K> serializer, Deserializer<V> deserializer) throws IOException {
BoundStatement boundStatement = fetchStatement.bind();
boundStatement.setBytes(0, ByteBuffer.wrap(serializeKey(k, serializer)));
ResultSet rs = session.execute(boundStatement);
Iterator<Row> iterator = rs.iterator();
if (!iterator.hasNext()) {
return null;
}
Row fetched = iterator.next();
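// Note: ByteBuffer.array() below exposes the full backing array; with a
// non-zero arrayOffset this could include bytes beyond the column value.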
ByteBuffer buffer = fetched.getBytes(valueField);
byte[] content = buffer.array();
return deserializer.deserialize(content);
}
@Override
public void close() throws IOException {
}
@Override
public <K> boolean remove(K k, Serializer<K> serializer) throws IOException {
BoundStatement delete = deleteStatement.bind();
delete.setBytes(0, ByteBuffer.wrap(serializeKey(k, serializer)));
session.execute(delete);
return true;
}
private <K> byte[] serializeKey(K k, Serializer<K> keySerializer) throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
keySerializer.serialize(k, out);
out.close();
return out.toByteArray();
}
private <V> byte[] serializeValue(V v, Serializer<V> valueSerializer) throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
valueSerializer.serialize(v, out);
out.close();
return out.toByteArray();
}
}
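For reference, this service expects a pre-created table whose key and value columns both use the CQL blob type, per the Key Field Name and Value Field Name descriptions above. The following sketch is illustrative only (the keyspace, table, and column names are hypothetical, not part of the bundle) and shows a compatible schema plus the kind of Serializer a caller would pass to put():
import java.nio.charset.StandardCharsets;
import org.apache.nifi.distributed.cache.client.Serializer;
public class CassandraDmcUsageSketch {
    // Hypothetical schema: key and value fields must both be "blob" columns,
    // matching the Key Field Name / Value Field Name property descriptions.
    static final String CREATE_TABLE =
            "CREATE TABLE IF NOT EXISTS nifi.dmc (id blob PRIMARY KEY, value blob)";
    // A caller-supplied serializer as expected by put() and get(); writes UTF-8 bytes.
    static final Serializer<String> STRING_SERIALIZER =
            (value, output) -> output.write(value.getBytes(StandardCharsets.UTF_8));
}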


@@ -1,44 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.controller.cassandra;
public class QueryUtils {
private QueryUtils() {}
public static String createDeleteStatement(String keyField, String table) {
return String.format("DELETE FROM %s WHERE %s = ?", table, keyField);
}
public static String createExistsQuery(String keyField, String table) {
return String.format("SELECT COUNT(*) as exist_count FROM %s WHERE %s = ?", table, keyField);
}
public static String createFetchQuery(String keyField, String valueField, String table) {
return String.format("SELECT %s FROM %s WHERE %s = ?", valueField, table, keyField);
}
public static String createInsertStatement(String keyField, String valueField, String table, Long ttl) {
String retVal = String.format("INSERT INTO %s (%s, %s) VALUES(?, ?)", table, keyField, valueField);
if (ttl != null) {
retVal += String.format(" using ttl %d", ttl);
}
return retVal;
}
}
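To make the generated CQL concrete, here is a small illustration (the table and column names are hypothetical) of what each helper above returns:
import org.apache.nifi.controller.cassandra.QueryUtils;
public class QueryUtilsExample {
    public static void main(String[] args) {
        System.out.println(QueryUtils.createDeleteStatement("id", "dmc"));
        // -> DELETE FROM dmc WHERE id = ?
        System.out.println(QueryUtils.createExistsQuery("id", "dmc"));
        // -> SELECT COUNT(*) as exist_count FROM dmc WHERE id = ?
        System.out.println(QueryUtils.createFetchQuery("id", "value", "dmc"));
        // -> SELECT value FROM dmc WHERE id = ?
        System.out.println(QueryUtils.createInsertStatement("id", "value", "dmc", 3600L));
        // -> INSERT INTO dmc (id, value) VALUES(?, ?) using ttl 3600
    }
}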


@@ -1,15 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
org.apache.nifi.controller.cassandra.CassandraDistributedMapCache


@@ -1,51 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-bundle</artifactId>
<version>2.0.0-SNAPSHOT</version>
</parent>
<artifactId>nifi-cassandra-nar</artifactId>
<packaging>nar</packaging>
<dependencyManagement>
<dependencies>
<!-- Provided in nifi-cassandra-services-api-nar -->
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<scope>provided</scope>
</dependency>
</dependencies>
</dependencyManagement>
<dependencies>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-services-api-nar</artifactId>
<version>2.0.0-SNAPSHOT</version>
<type>nar</type>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-processors</artifactId>
</dependency>
</dependencies>
</project>


@@ -1,342 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
APACHE NIFI SUBCOMPONENTS:
The Apache NiFi project contains subcomponents with separate copyright
notices and license terms. Your use of the source code for these
subcomponents is subject to the terms and conditions of the following
licenses.
This product bundles 'libffi' which is available under an MIT style license.
libffi - Copyright (c) 1996-2014 Anthony Green, Red Hat, Inc and others.
see https://github.com/java-native-access/jna/blob/master/native/libffi/LICENSE
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This product bundles 'asm' which is available under a 3-Clause BSD style license.
For details see http://asm.ow2.org/asmdex-license.html
Copyright (c) 2012 France Télécom
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
The binary distribution of this product bundles 'Bouncy Castle JDK 1.5'
under an MIT style license.
Copyright (c) 2000 - 2015 The Legion of the Bouncy Castle Inc. (http://www.bouncycastle.org)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
The binary distribution of this product bundles 'JNR x86asm' under an MIT
style license.
Copyright (C) 2010 Wayne Meissner
Copyright (c) 2008-2009, Petr Kobalicek <kobalicek.petr@gmail.com>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
The binary distribution of this product bundles 'ParaNamer' and 'Paranamer Core'
which is available under a BSD style license.
Copyright (c) 2006 Paul Hammant & ThoughtWorks Inc
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,328 +0,0 @@
nifi-cassandra-nar
Copyright 2016-2020 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
******************
Apache Software License v2
******************
The following binary components are provided under the Apache Software License v2
(ASLv2) DataStax Java Driver for Apache Cassandra - Core
The following NOTICE information applies:
DataStax Java Driver for Apache Cassandra - Core
Copyright (C) 2012-2017 DataStax Inc.
(ASLv2) Apache Avro
The following NOTICE information applies:
Apache Avro
Copyright 2009-2017 The Apache Software Foundation
(ASLv2) Jackson JSON processor
The following NOTICE information applies:
# Jackson JSON processor
Jackson is a high-performance, Free/Open Source JSON processing library.
It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has
been in development since 2007.
It is currently developed by a community of developers, as well as supported
commercially by FasterXML.com.
## Licensing
Jackson core and extension components may be licensed under different licenses.
To find the details that apply to this artifact see the accompanying LICENSE file.
For more information, including possible other licensing options, contact
FasterXML.com (http://fasterxml.com).
## Credits
A list of contributors may be found from CREDITS file, which is included
in some artifacts (usually source distributions); but is always available
from the source code management (SCM) system the project uses.
(ASLv2) Apache Commons Codec
The following NOTICE information applies:
Apache Commons Codec
Copyright 2002-2014 The Apache Software Foundation
src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java
contains test data from http://aspell.net/test/orig/batch0.tab.
Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org)
===============================================================================
The content of package org.apache.commons.codec.language.bm has been translated
from the original php source code available at http://stevemorse.org/phoneticinfo.htm
with permission from the original authors.
Original source copyright:
Copyright (c) 2008 Alexander Beider & Stephen P. Morse.
(ASLv2) Apache Commons Compress
The following NOTICE information applies:
Apache Commons Compress
Copyright 2002-2017 The Apache Software Foundation
The files in the package org.apache.commons.compress.archivers.sevenz
were derived from the LZMA SDK, version 9.20 (C/ and CPP/7zip/),
which has been placed in the public domain:
"LZMA SDK is placed in the public domain." (http://www.7-zip.org/sdk.html)
(ASLv2) Apache Commons IO
The following NOTICE information applies:
Apache Commons IO
Copyright 2002-2016 The Apache Software Foundation
(ASLv2) Apache Commons Lang
The following NOTICE information applies:
Apache Commons Lang
Copyright 2001-2017 The Apache Software Foundation
This product includes software from the Spring Framework,
under the Apache License 2.0 (see: StringUtils.containsWhitespace())
(ASLv2) Guava
The following NOTICE information applies:
Guava
Copyright 2015 The Guava Authors
(ASLv2) Dropwizard Metrics
The following NOTICE information applies:
Copyright (c) 2010-2013 Coda Hale, Yammer.com
This product includes software developed by Coda Hale and Yammer, Inc.
This product includes code derived from the JSR-166 project (ThreadLocalRandom, Striped64,
LongAdder), which was released with the following comments:
Written by Doug Lea with assistance from members of JCP JSR-166
Expert Group and released to the public domain, as explained at
http://creativecommons.org/publicdomain/zero/1.0/
(ASLv2) The Netty Project
The following NOTICE information applies:
Copyright 2014 The Netty Project
-------------------------------------------------------------------------------
This product contains the extensions to Java Collections Framework which has
been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
* LICENSE:
* license/LICENSE.jsr166y.txt (Public Domain)
* HOMEPAGE:
* http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
* http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
This product contains a modified version of Robert Harder's Public Domain
Base64 Encoder and Decoder, which can be obtained at:
* LICENSE:
* license/LICENSE.base64.txt (Public Domain)
* HOMEPAGE:
* http://iharder.sourceforge.net/current/java/base64/
This product contains a modified portion of 'Webbit', an event based
WebSocket and HTTP server, which can be obtained at:
* LICENSE:
* license/LICENSE.webbit.txt (BSD License)
* HOMEPAGE:
* https://github.com/joewalnes/webbit
This product contains a modified portion of 'SLF4J', a simple logging
facade for Java, which can be obtained at:
* LICENSE:
* license/LICENSE.slf4j.txt (MIT License)
* HOMEPAGE:
* http://www.slf4j.org/
This product contains a modified portion of 'Apache Harmony', an open source
Java SE, which can be obtained at:
* LICENSE:
* license/LICENSE.harmony.txt (Apache License 2.0)
* HOMEPAGE:
* http://archive.apache.org/dist/harmony/
This product contains a modified portion of 'jbzip2', a Java bzip2 compression
and decompression library written by Matthew J. Francis. It can be obtained at:
* LICENSE:
* license/LICENSE.jbzip2.txt (MIT License)
* HOMEPAGE:
* https://code.google.com/p/jbzip2/
This product contains a modified portion of 'libdivsufsort', a C API library to construct
the suffix array and the Burrows-Wheeler transformed string for any input string of
a constant-size alphabet written by Yuta Mori. It can be obtained at:
* LICENSE:
* license/LICENSE.libdivsufsort.txt (MIT License)
* HOMEPAGE:
* https://github.com/y-256/libdivsufsort
This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM,
which can be obtained at:
* LICENSE:
* license/LICENSE.jctools.txt (ASL2 License)
* HOMEPAGE:
* https://github.com/JCTools/JCTools
This product optionally depends on 'JZlib', a re-implementation of zlib in
pure Java, which can be obtained at:
* LICENSE:
* license/LICENSE.jzlib.txt (BSD style License)
* HOMEPAGE:
* http://www.jcraft.com/jzlib/
This product optionally depends on 'Compress-LZF', a Java library for encoding and
decoding data in LZF format, written by Tatu Saloranta. It can be obtained at:
* LICENSE:
* license/LICENSE.compress-lzf.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/ning/compress
This product optionally depends on 'lz4', a LZ4 Java compression
and decompression library written by Adrien Grand. It can be obtained at:
* LICENSE:
* license/LICENSE.lz4.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/jpountz/lz4-java
This product optionally depends on 'lzma-java', a LZMA Java compression
and decompression library, which can be obtained at:
* LICENSE:
* license/LICENSE.lzma-java.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/jponge/lzma-java
This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression
and decompression library written by William Kinney. It can be obtained at:
* LICENSE:
* license/LICENSE.jfastlz.txt (MIT License)
* HOMEPAGE:
* https://code.google.com/p/jfastlz/
This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data
interchange format, which can be obtained at:
* LICENSE:
* license/LICENSE.protobuf.txt (New BSD License)
* HOMEPAGE:
* https://github.com/google/protobuf
This product optionally depends on 'Bouncy Castle Crypto APIs' to generate
a temporary self-signed X.509 certificate when the JVM does not provide the
equivalent functionality. It can be obtained at:
* LICENSE:
* license/LICENSE.bouncycastle.txt (MIT License)
* HOMEPAGE:
* http://www.bouncycastle.org/
This product optionally depends on 'Snappy', a compression library produced
by Google Inc, which can be obtained at:
* LICENSE:
* license/LICENSE.snappy.txt (New BSD License)
* HOMEPAGE:
* https://github.com/google/snappy
This product optionally depends on 'JBoss Marshalling', an alternative Java
serialization API, which can be obtained at:
* LICENSE:
* license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1)
* HOMEPAGE:
* http://www.jboss.org/jbossmarshalling
This product optionally depends on 'Caliper', Google's micro-
benchmarking framework, which can be obtained at:
* LICENSE:
* license/LICENSE.caliper.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/google/caliper
This product optionally depends on 'Apache Log4J', a logging framework, which
can be obtained at:
* LICENSE:
* license/LICENSE.log4j.txt (Apache License 2.0)
* HOMEPAGE:
* http://logging.apache.org/log4j/
This product optionally depends on 'Aalto XML', an ultra-high performance
non-blocking XML processor, which can be obtained at:
* LICENSE:
* license/LICENSE.aalto-xml.txt (Apache License 2.0)
* HOMEPAGE:
* http://wiki.fasterxml.com/AaltoHome
This product contains a modified version of 'HPACK', a Java implementation of
the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at:
* LICENSE:
* license/LICENSE.hpack.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/twitter/hpack
This product contains a modified portion of 'Apache Commons Lang', a Java library
provides utilities for the java.lang API, which can be obtained at:
* LICENSE:
* license/LICENSE.commons-lang.txt (Apache License 2.0)
* HOMEPAGE:
* https://commons.apache.org/proper/commons-lang/
This product contains a forked and modified version of Tomcat Native
* LICENSE:
* ASL2
* HOMEPAGE:
* http://tomcat.apache.org/native-doc/
* https://svn.apache.org/repos/asf/tomcat/native/
(ASLv2) Objenesis
The following NOTICE information applies:
Objenesis
Copyright 2006-2013 Joe Walnes, Henri Tremblay, Leonardo Mesquita
(ASLv2) Snappy Java
The following NOTICE information applies:
This product includes software developed by Google
Snappy: http://code.google.com/p/snappy/ (New BSD License)
This product includes software developed by Apache
PureJavaCrc32C from apache-hadoop-common http://hadoop.apache.org/
(Apache 2.0 license)
This library contains statically linked libstdc++. This inclusion is allowed by
"GCC Runtime Library Exception"
http://gcc.gnu.org/onlinedocs/libstdc++/manual/license.html
************************
Eclipse Public License 1.0
************************
The following binary components are provided under the Eclipse Public License 1.0. See project link for details.
(EPL 1.0) JNR Posix ( jnr.posix ) https://github.com/jnr/jnr-posix/blob/master/LICENSE.txt
*****************
Public Domain
*****************
The following binary components are provided to the 'Public Domain'. See project link for details.
(Public Domain) XZ for Java (org.tukaani:xz:jar:1.5 - http://tukaani.org/xz/java.html)


@@ -1,108 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-bundle</artifactId>
<version>2.0.0-SNAPSHOT</version>
</parent>
<artifactId>nifi-cassandra-processors</artifactId>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-api</artifactId>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-utils</artifactId>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-properties</artifactId>
</dependency>
<dependency>
<groupId>com.datastax.cassandra</groupId>
<artifactId>cassandra-driver-core</artifactId>
<version>${cassandra.sdk.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.datastax.cassandra</groupId>
<artifactId>cassandra-driver-extras</artifactId>
<version>${cassandra.sdk.version}</version>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-ssl-context-service-api</artifactId>
</dependency>
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-services-api</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-services</artifactId>
<version>2.0.0-SNAPSHOT</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-record-serialization-service-api</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-record</artifactId>
<scope>compile</scope>
</dependency>
<!-- test scoped in nar-bundles dependency management -->
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-mock</artifactId>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-mock-record-utils</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-text</artifactId>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>cassandra</artifactId>
<version>${testcontainers.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>junit-jupiter</artifactId>
<version>${testcontainers.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>


@@ -1,533 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.cassandra;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.ProtocolOptions;
import com.datastax.driver.core.RemoteEndpointAwareJdkSSLOptions;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.SSLOptions;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TypeCodec;
import com.datastax.driver.core.exceptions.AuthenticationException;
import com.datastax.driver.core.exceptions.NoHostAvailableException;
import com.datastax.driver.extras.codecs.arrays.ObjectArrayCodec;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import javax.net.ssl.SSLContext;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.commons.lang3.StringUtils;
import org.apache.nifi.annotation.lifecycle.OnScheduled;
import org.apache.nifi.cassandra.CassandraSessionProviderService;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.PropertyValue;
import org.apache.nifi.components.ValidationContext;
import org.apache.nifi.components.ValidationResult;
import org.apache.nifi.expression.ExpressionLanguageScope;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;
import org.apache.nifi.processor.util.StandardValidators;
import org.apache.nifi.security.util.ClientAuth;
import org.apache.nifi.ssl.SSLContextService;
/**
* AbstractCassandraProcessor is a base class for Cassandra processors and contains logic and variables common to most
* processors integrating with Apache Cassandra.
*/
public abstract class AbstractCassandraProcessor extends AbstractProcessor {
public static final int DEFAULT_CASSANDRA_PORT = 9042;
// Common descriptors
static final PropertyDescriptor CONNECTION_PROVIDER_SERVICE = new PropertyDescriptor.Builder()
.name("cassandra-connection-provider")
.displayName("Cassandra Connection Provider")
.description("Specifies the Cassandra connection providing controller service to be used to connect to Cassandra cluster.")
.required(false)
.identifiesControllerService(CassandraSessionProviderService.class)
.build();
static final PropertyDescriptor CONTACT_POINTS = new PropertyDescriptor.Builder()
.name("Cassandra Contact Points")
.description("Contact points are addresses of Cassandra nodes. The list of contact points should be "
+ "comma-separated and in hostname:port format. Example node1:port,node2:port,...."
+ " The default client port for Cassandra is 9042, but the port(s) must be explicitly specified.")
.required(false)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.addValidator(StandardValidators.HOSTNAME_PORT_LIST_VALIDATOR)
.build();
static final PropertyDescriptor KEYSPACE = new PropertyDescriptor.Builder()
.name("Keyspace")
.description("The Cassandra Keyspace to connect to. If no keyspace is specified, the query will need to " +
"include the keyspace name before any table reference, in case of 'query' native processors or " +
"if the processor exposes the 'Table' property, the keyspace name has to be provided with the " +
"table name in the form of <KEYSPACE>.<TABLE>")
.required(false)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
.build();
static final PropertyDescriptor PROP_SSL_CONTEXT_SERVICE = new PropertyDescriptor.Builder()
.name("SSL Context Service")
.description("The SSL Context Service used to provide client certificate information for TLS/SSL "
+ "connections.")
.required(false)
.identifiesControllerService(SSLContextService.class)
.build();
static final PropertyDescriptor CLIENT_AUTH = new PropertyDescriptor.Builder()
.name("Client Auth")
.description("Client authentication policy when connecting to secure (TLS/SSL) cluster. "
+ "Possible values are REQUIRED, WANT, NONE. This property is only used when an SSL Context "
+ "has been defined and enabled.")
.required(false)
.allowableValues(ClientAuth.values())
.defaultValue("REQUIRED")
.build();
static final PropertyDescriptor USERNAME = new PropertyDescriptor.Builder()
.name("Username")
.description("Username to access the Cassandra cluster")
.required(false)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
.build();
static final PropertyDescriptor PASSWORD = new PropertyDescriptor.Builder()
.name("Password")
.description("Password to access the Cassandra cluster")
.required(false)
.sensitive(true)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
.build();
static final PropertyDescriptor CONSISTENCY_LEVEL = new PropertyDescriptor.Builder()
.name("Consistency Level")
.description("The strategy for how many replicas must respond before results are returned.")
.required(false)
.allowableValues(ConsistencyLevel.values())
.defaultValue("ONE")
.build();
static final PropertyDescriptor COMPRESSION_TYPE = new PropertyDescriptor.Builder()
.name("Compression Type")
.description("Enable compression at transport-level requests and responses")
.required(false)
.allowableValues(ProtocolOptions.Compression.values())
.addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
.defaultValue("NONE")
.build();
static final PropertyDescriptor CHARSET = new PropertyDescriptor.Builder()
.name("Character Set")
.description("Specifies the character set of the record data.")
.required(true)
.expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
.defaultValue("UTF-8")
.addValidator(StandardValidators.CHARACTER_SET_VALIDATOR)
.build();
static final Relationship REL_SUCCESS = new Relationship.Builder()
.name("success")
.description("A FlowFile is transferred to this relationship if the operation completed successfully.")
.build();
static final Relationship REL_FAILURE = new Relationship.Builder()
.name("failure")
.description("A FlowFile is transferred to this relationship if the operation failed.")
.build();
static final Relationship REL_RETRY = new Relationship.Builder().name("retry")
.description("A FlowFile is transferred to this relationship if the operation cannot be completed but attempting "
+ "it again may succeed.")
.build();
protected static final List<PropertyDescriptor> descriptors = new ArrayList<>();
static {
descriptors.add(CONNECTION_PROVIDER_SERVICE);
descriptors.add(CONTACT_POINTS);
descriptors.add(KEYSPACE);
descriptors.add(PROP_SSL_CONTEXT_SERVICE);
descriptors.add(CLIENT_AUTH);
descriptors.add(USERNAME);
descriptors.add(PASSWORD);
descriptors.add(CONSISTENCY_LEVEL);
descriptors.add(COMPRESSION_TYPE);
descriptors.add(CHARSET);
}
protected final AtomicReference<Cluster> cluster = new AtomicReference<>(null);
protected final AtomicReference<Session> cassandraSession = new AtomicReference<>(null);
protected static final CodecRegistry codecRegistry = new CodecRegistry();
@Override
protected Collection<ValidationResult> customValidate(ValidationContext validationContext) {
Set<ValidationResult> results = new HashSet<>();
// Ensure that if username or password is set, then the other is too
String userName = validationContext.getProperty(USERNAME).evaluateAttributeExpressions().getValue();
String password = validationContext.getProperty(PASSWORD).evaluateAttributeExpressions().getValue();
if (StringUtils.isEmpty(userName) != StringUtils.isEmpty(password)) {
results.add(new ValidationResult.Builder().subject("Username / Password configuration").valid(false).explanation(
"If username or password is specified, then the other must be specified as well").build());
}
// Ensure that the Connection Provider service and the processor-level configuration are not both provided
boolean connectionProviderIsSet = validationContext.getProperty(CONNECTION_PROVIDER_SERVICE).isSet();
boolean contactPointsIsSet = validationContext.getProperty(CONTACT_POINTS).isSet();
if (connectionProviderIsSet && contactPointsIsSet) {
results.add(new ValidationResult.Builder().subject("Cassandra configuration").valid(false).explanation(CONNECTION_PROVIDER_SERVICE.getDisplayName() +
" and processor-level Cassandra configuration cannot both be provided at the same time.").build());
}
if (!connectionProviderIsSet && !contactPointsIsSet) {
results.add(new ValidationResult.Builder().subject("Cassandra configuration").valid(false).explanation("either " + CONNECTION_PROVIDER_SERVICE.getDisplayName() +
" or processor-level Cassandra configuration must be provided.").build());
}
return results;
}
@OnScheduled
public void onScheduled(ProcessContext context) {
final boolean connectionProviderIsSet = context.getProperty(CONNECTION_PROVIDER_SERVICE).isSet();
registerAdditionalCodecs();
if (connectionProviderIsSet) {
CassandraSessionProviderService sessionProvider = context.getProperty(CONNECTION_PROVIDER_SERVICE).asControllerService(CassandraSessionProviderService.class);
cluster.set(sessionProvider.getCluster());
cassandraSession.set(sessionProvider.getCassandraSession());
return;
}
try {
connectToCassandra(context);
} catch (NoHostAvailableException nhae) {
getLogger().error("No host in the Cassandra cluster can be contacted successfully to execute this statement", nhae);
getLogger().error(nhae.getCustomMessage(10, true, false));
throw new ProcessException(nhae);
} catch (AuthenticationException ae) {
getLogger().error("Invalid username/password combination", ae);
throw new ProcessException(ae);
}
}
void connectToCassandra(ProcessContext context) {
if (cluster.get() == null) {
ComponentLog log = getLogger();
final String contactPointList = context.getProperty(CONTACT_POINTS).evaluateAttributeExpressions().getValue();
final String consistencyLevel = context.getProperty(CONSISTENCY_LEVEL).getValue();
final String compressionType = context.getProperty(COMPRESSION_TYPE).getValue();
List<InetSocketAddress> contactPoints = getContactPoints(contactPointList);
// Set up the client for secure (SSL/TLS) communications if configured to do so
final SSLContextService sslService = context.getProperty(PROP_SSL_CONTEXT_SERVICE).asControllerService(SSLContextService.class);
final SSLContext sslContext;
if (sslService != null) {
sslContext = sslService.createContext();
} else {
sslContext = null;
}
final String username, password;
PropertyValue usernameProperty = context.getProperty(USERNAME).evaluateAttributeExpressions();
PropertyValue passwordProperty = context.getProperty(PASSWORD).evaluateAttributeExpressions();
if (usernameProperty != null && passwordProperty != null) {
username = usernameProperty.getValue();
password = passwordProperty.getValue();
} else {
username = null;
password = null;
}
// Create the cluster and connect to it
Cluster newCluster = createCluster(contactPoints, sslContext, username, password, compressionType);
PropertyValue keyspaceProperty = context.getProperty(KEYSPACE).evaluateAttributeExpressions();
final Session newSession;
// For Java 11, the getValue() call was added so the test could pass
if (keyspaceProperty != null && keyspaceProperty.getValue() != null) {
newSession = newCluster.connect(keyspaceProperty.getValue());
} else {
newSession = newCluster.connect();
}
newCluster.getConfiguration().getQueryOptions().setConsistencyLevel(ConsistencyLevel.valueOf(consistencyLevel));
Metadata metadata = newCluster.getMetadata();
log.info("Connected to Cassandra cluster: {}", new Object[]{metadata.getClusterName()});
cluster.set(newCluster);
cassandraSession.set(newSession);
}
}
protected void registerAdditionalCodecs() {
// Conversion between a String[] and a list of varchar
CodecRegistry.DEFAULT_INSTANCE.register(new ObjectArrayCodec<>(
DataType.list(DataType.varchar()),
String[].class,
TypeCodec.varchar()));
}
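// Illustrative sketch (hypothetical keyspace, table and values): with the codec above registered, a
// String[] value can be bound directly to a CQL list<varchar> column, e.g.:
//
//   BoundStatement bound = session.prepare("INSERT INTO ks.tbl (id, tags) VALUES (?, ?)").bind();
//   bound.setInt(0, 1);
//   bound.set(1, new String[]{"a", "b"}, String[].class); // resolved via the ObjectArrayCodec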
/**
* Uses a Cluster.Builder to create a Cassandra cluster reference using the given parameters
*
* @param contactPoints The contact points (hostname:port list of Cassandra nodes)
* @param sslContext The SSL context (used for secure connections)
* @param username The username for connection authentication
* @param password The password for connection authentication
* @param compressionType Enable compression at transport-level requests and responses.
* @return A reference to the Cluster object associated with the given Cassandra configuration
*/
protected Cluster createCluster(List<InetSocketAddress> contactPoints, SSLContext sslContext,
String username, String password, String compressionType) {
Cluster.Builder builder = Cluster.builder().addContactPointsWithPorts(contactPoints);
if (sslContext != null) {
final SSLOptions sslOptions = RemoteEndpointAwareJdkSSLOptions.builder()
.withSSLContext(sslContext)
.build();
builder = builder.withSSL(sslOptions);
}
// Compression applies independently of the SSL configuration
if (ProtocolOptions.Compression.SNAPPY.name().equals(compressionType)) {
builder = builder.withCompression(ProtocolOptions.Compression.SNAPPY);
} else if (ProtocolOptions.Compression.LZ4.name().equals(compressionType)) {
builder = builder.withCompression(ProtocolOptions.Compression.LZ4);
}
if (username != null && password != null) {
builder = builder.withCredentials(username, password);
}
return builder.build();
}
public void stop(ProcessContext context) {
// We don't want to close the connection when using 'Cassandra Connection Provider'
// because each time @OnUnscheduled/@OnShutdown annotated method is triggered on a
// processor, the connection would be closed which is not ideal for a centralized
// connection provider controller service
if (!context.getProperty(CONNECTION_PROVIDER_SERVICE).isSet()) {
if (cassandraSession.get() != null) {
cassandraSession.get().close();
cassandraSession.set(null);
}
if (cluster.get() != null) {
cluster.get().close();
cluster.set(null);
}
}
}
protected static Object getCassandraObject(Row row, int i, DataType dataType) {
if (dataType.equals(DataType.blob())) {
return row.getBytes(i);
} else if (dataType.equals(DataType.varint()) || dataType.equals(DataType.decimal())) {
// Avro can't handle BigDecimal and BigInteger as numbers - it will throw an
// AvroRuntimeException such as: "Unknown datum type: java.math.BigDecimal: 38"
return row.getObject(i).toString();
} else if (dataType.equals(DataType.cboolean())) {
return row.getBool(i);
} else if (dataType.equals(DataType.cint())) {
return row.getInt(i);
} else if (dataType.equals(DataType.bigint())
|| dataType.equals(DataType.counter())) {
return row.getLong(i);
} else if (dataType.equals(DataType.ascii())
|| dataType.equals(DataType.text())
|| dataType.equals(DataType.varchar())) {
return row.getString(i);
} else if (dataType.equals(DataType.cfloat())) {
return row.getFloat(i);
} else if (dataType.equals(DataType.cdouble())) {
return row.getDouble(i);
} else if (dataType.equals(DataType.timestamp())) {
return row.getTimestamp(i);
} else if (dataType.equals(DataType.date())) {
return row.getDate(i);
} else if (dataType.equals(DataType.time())) {
return row.getTime(i);
} else if (dataType.isCollection()) {
List<DataType> typeArguments = dataType.getTypeArguments();
if (typeArguments == null || typeArguments.isEmpty()) {
throw new IllegalArgumentException("Column[" + i + "] " + dataType.getName()
+ " is a collection but no type arguments were specified!");
}
// Get the first type argument, to be used for lists and sets (and the first in a map)
DataType firstArg = typeArguments.get(0);
TypeCodec firstCodec = codecRegistry.codecFor(firstArg);
if (dataType.equals(DataType.set(firstArg))) {
return row.getSet(i, firstCodec.getJavaType());
} else if (dataType.equals(DataType.list(firstArg))) {
return row.getList(i, firstCodec.getJavaType());
} else {
// Must be an n-arg collection like map
DataType secondArg = typeArguments.get(1);
TypeCodec secondCodec = codecRegistry.codecFor(secondArg);
if (dataType.equals(DataType.map(firstArg, secondArg))) {
return row.getMap(i, firstCodec.getJavaType(), secondCodec.getJavaType());
}
}
} else {
// The different types that we support are numbers (int, long, double, float),
// as well as boolean values and Strings. Since Avro doesn't provide
// timestamp types, we want to convert those to Strings. So we will cast anything other
// than numbers or booleans to strings by using the toString() method.
return row.getObject(i).toString();
}
return null;
}
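// Illustrative examples: a varint column is returned as a String (e.g. "12345") for Avro compatibility,
// while a list<text> column is returned as a java.util.List of String values via the codec registry.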
/**
* This method will create a schema for a union field consisting of null and the specified type.
*
* @param dataType The data type of the field
* @return an Avro union schema of null and the given primitive type
*/
protected static Schema getUnionFieldType(String dataType) {
return SchemaBuilder.builder().unionOf().nullBuilder().endNull().and().type(getSchemaForType(dataType)).endUnion();
}
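// For example, getUnionFieldType("string") yields the Avro union schema ["null","string"], which allows
// the field to carry null values returned by Cassandra.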
/**
* This method will create an Avro schema for the specified type.
*
* @param dataType The data type of the field
*/
protected static Schema getSchemaForType(final String dataType) {
final SchemaBuilder.TypeBuilder<Schema> typeBuilder = SchemaBuilder.builder();
return switch (dataType) {
case "string" -> typeBuilder.stringType();
case "boolean" -> typeBuilder.booleanType();
case "int" -> typeBuilder.intType();
case "long" -> typeBuilder.longType();
case "float" -> typeBuilder.floatType();
case "double" -> typeBuilder.doubleType();
case "bytes" -> typeBuilder.bytesType();
default -> throw new IllegalArgumentException("Unknown Avro primitive type: " + dataType);
};
}
protected static String getPrimitiveAvroTypeFromCassandraType(final DataType dataType) {
// Map types from Cassandra to Avro where possible
if (dataType.equals(DataType.ascii())
|| dataType.equals(DataType.text())
|| dataType.equals(DataType.varchar())
// Nonstandard types represented by this processor as a string
|| dataType.equals(DataType.timestamp())
|| dataType.equals(DataType.timeuuid())
|| dataType.equals(DataType.uuid())
|| dataType.equals(DataType.inet())
|| dataType.equals(DataType.varint())) {
return "string";
} else if (dataType.equals(DataType.cboolean())) {
return "boolean";
} else if (dataType.equals(DataType.cint())) {
return "int";
} else if (dataType.equals(DataType.bigint())
|| dataType.equals(DataType.counter())) {
return "long";
} else if (dataType.equals(DataType.cfloat())) {
return "float";
} else if (dataType.equals(DataType.cdouble())) {
return "double";
} else if (dataType.equals(DataType.blob())) {
return "bytes";
} else {
throw new IllegalArgumentException("createSchema: Unknown Cassandra data type " + dataType.getName()
+ " cannot be converted to Avro type");
}
}
protected static DataType getPrimitiveDataTypeFromString(String dataTypeName) {
Set<DataType> primitiveTypes = DataType.allPrimitiveTypes();
for (DataType primitiveType : primitiveTypes) {
if (primitiveType.toString().equals(dataTypeName)) {
return primitiveType;
}
}
return null;
}
/**
* Gets a list of InetSocketAddress objects that correspond to host:port entries for Cassandra contact points
*
* @param contactPointList A comma-separated list of Cassandra contact points (host:port,host2:port2, etc.)
* @return List of InetSocketAddresses for the Cassandra contact points
*/
protected List<InetSocketAddress> getContactPoints(String contactPointList) {
if (contactPointList == null) {
return null;
}
final String[] contactPointStringList = contactPointList.split(",");
List<InetSocketAddress> contactPoints = new ArrayList<>();
for (String contactPointEntry : contactPointStringList) {
String[] addresses = contactPointEntry.split(":");
final String hostName = addresses[0].trim();
final int port = (addresses.length > 1) ? Integer.parseInt(addresses[1].trim()) : DEFAULT_CASSANDRA_PORT;
contactPoints.add(new InetSocketAddress(hostName, port));
}
return contactPoints;
}
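// Illustrative example (hypothetical hosts): getContactPoints("node1:9042, node2:9043") yields addresses
// for node1:9042 and node2:9043; an entry without a port, such as "node3", falls back to
// DEFAULT_CASSANDRA_PORT (9042).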
}


@ -1,406 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.cassandra;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TypeCodec;
import com.datastax.driver.core.exceptions.InvalidTypeException;
import com.datastax.driver.core.exceptions.NoHostAvailableException;
import com.datastax.driver.core.exceptions.QueryExecutionException;
import com.datastax.driver.core.exceptions.QueryValidationException;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.CacheBuilder;
import org.apache.nifi.annotation.behavior.SystemResourceConsideration;
import org.apache.nifi.annotation.behavior.InputRequirement;
import org.apache.nifi.annotation.behavior.ReadsAttribute;
import org.apache.nifi.annotation.behavior.ReadsAttributes;
import org.apache.nifi.annotation.behavior.SupportsBatching;
import org.apache.nifi.annotation.behavior.SystemResource;
import org.apache.nifi.annotation.documentation.CapabilityDescription;
import org.apache.nifi.annotation.documentation.Tags;
import org.apache.nifi.annotation.lifecycle.OnScheduled;
import org.apache.nifi.annotation.lifecycle.OnStopped;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.expression.ExpressionLanguageScope;
import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;
import org.apache.nifi.processor.io.InputStreamCallback;
import org.apache.nifi.processor.util.StandardValidators;
import org.apache.nifi.stream.io.StreamUtils;
import org.apache.nifi.util.StringUtils;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@SupportsBatching
@Tags({"cassandra", "cql", "put", "insert", "update", "set"})
@InputRequirement(InputRequirement.Requirement.INPUT_REQUIRED)
@CapabilityDescription("Execute provided Cassandra Query Language (CQL) statement on a Cassandra 1.x, 2.x, or 3.0.x cluster. "
+ "The content of an incoming FlowFile is expected to be the CQL command to execute. The CQL command may use "
+ "the ? to escape parameters. In this case, the parameters to use must exist as FlowFile attributes with the "
+ "naming convention cql.args.N.type and cql.args.N.value, where N is a positive integer. The cql.args.N.type "
+ "is expected to be a lowercase string indicating the Cassandra type.")
@ReadsAttributes({
@ReadsAttribute(attribute = "cql.args.N.type",
description = "Incoming FlowFiles are expected to be parameterized CQL statements. The type of each "
+ "parameter is specified as a lowercase string corresponding to the Cassandra data type (text, "
+ "int, boolean, e.g.). In the case of collections, the primitive type(s) of the elements in the "
+ "collection should be comma-delimited, follow the collection type, and be enclosed in angle brackets "
+ "(< and >), for example set<text> or map<timestamp, int>."),
@ReadsAttribute(attribute = "cql.args.N.value",
description = "Incoming FlowFiles are expected to be parameterized CQL statements. The value of the "
+ "parameters are specified as cql.args.1.value, cql.args.2.value, cql.args.3.value, and so on. The "
+ " type of the cql.args.1.value parameter is specified by the cql.args.1.type attribute.")
})
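// Illustrative example (hypothetical keyspace, table and values): a FlowFile whose content is
//   INSERT INTO mykeyspace.users (id, name) VALUES (?, ?)
// would carry the attributes
//   cql.args.1.type = int    cql.args.1.value = 42
//   cql.args.2.type = text   cql.args.2.value = Alice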
@SystemResourceConsideration(resource = SystemResource.MEMORY)
public class PutCassandraQL extends AbstractCassandraProcessor {
public static final PropertyDescriptor STATEMENT_TIMEOUT = new PropertyDescriptor.Builder()
.name("Max Wait Time")
.displayName("Max Wait Time")
.description("The maximum amount of time allowed for a running CQL select query. Must be of format "
+ "<duration> <TimeUnit> where <duration> is a non-negative integer and TimeUnit is a supported "
+ "Time Unit, such as: nanos, millis, secs, mins, hrs, days. A value of zero means there is no limit. ")
.defaultValue("0 seconds")
.required(true)
.expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
.addValidator(StandardValidators.TIME_PERIOD_VALIDATOR)
.build();
public static final PropertyDescriptor STATEMENT_CACHE_SIZE = new PropertyDescriptor.Builder()
.name("putcql-stmt-cache-size")
.displayName("Statement Cache Size")
.description("The maximum number of CQL Prepared Statements to cache. This can improve performance if many incoming flow files have the same CQL statement "
+ "with different values for the parameters. If this property is set to zero, the cache is effectively disabled.")
.defaultValue("0")
.required(true)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR)
.build();
private final static List<PropertyDescriptor> propertyDescriptors;
private final static Set<Relationship> relationships;
private static final Pattern CQL_TYPE_ATTRIBUTE_PATTERN = Pattern.compile("cql\\.args\\.(\\d+)\\.type");
// Matches on top-level type (primitive types like text,int) and also for collections (like list<boolean> and map<float,double>)
private static final Pattern CQL_TYPE_PATTERN = Pattern.compile("([^<]+)(<([^,>]+)(,([^,>]+))*>)?");
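// Illustrative example: for the input "map<text,int>", group(1) is "map", group(3) is "text" and
// group(5) is "int"; for a primitive type such as "boolean", only group(1) is populated.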
/**
* Cache of prepared statements, keyed by the CQL statement text. The maximum size of the cache is
* determined by the value of the Statement Cache Size property.
*/
@VisibleForTesting
ConcurrentMap<String, PreparedStatement> statementCache;
/*
* Ensures that the list of property descriptors is built only once.
* Also creates the Set of relationships.
*/
static {
List<PropertyDescriptor> _propertyDescriptors = new ArrayList<>();
_propertyDescriptors.addAll(descriptors);
_propertyDescriptors.add(STATEMENT_TIMEOUT);
_propertyDescriptors.add(STATEMENT_CACHE_SIZE);
propertyDescriptors = Collections.unmodifiableList(_propertyDescriptors);
Set<Relationship> _relationships = new HashSet<>();
_relationships.add(REL_SUCCESS);
_relationships.add(REL_FAILURE);
_relationships.add(REL_RETRY);
relationships = Collections.unmodifiableSet(_relationships);
}
@Override
protected List<PropertyDescriptor> getSupportedPropertyDescriptors() {
return propertyDescriptors;
}
@Override
public Set<Relationship> getRelationships() {
return relationships;
}
@OnScheduled
public void onScheduled(final ProcessContext context) {
super.onScheduled(context);
// Initialize the prepared statement cache
int statementCacheSize = context.getProperty(STATEMENT_CACHE_SIZE).evaluateAttributeExpressions().asInteger();
statementCache = CacheBuilder.newBuilder()
.maximumSize(statementCacheSize)
.<String, PreparedStatement>build()
.asMap();
}
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
ComponentLog logger = getLogger();
FlowFile flowFile = session.get();
if (flowFile == null) {
return;
}
final long startNanos = System.nanoTime();
final long statementTimeout = context.getProperty(STATEMENT_TIMEOUT).evaluateAttributeExpressions(flowFile).asTimePeriod(TimeUnit.MILLISECONDS);
final Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(flowFile).getValue());
// The documentation for the driver recommends the session remain open the entire time the processor is running
// and states that it is thread-safe. This is why connectionSession is not in a try-with-resources.
final Session connectionSession = cassandraSession.get();
String cql = getCQL(session, flowFile, charset);
try {
// Atomically prepare and cache the statement if it is not already cached
PreparedStatement statement = statementCache.computeIfAbsent(cql, connectionSession::prepare);
BoundStatement boundStatement = statement.bind();
Map<String, String> attributes = flowFile.getAttributes();
for (final Map.Entry<String, String> entry : attributes.entrySet()) {
final String key = entry.getKey();
final Matcher matcher = CQL_TYPE_ATTRIBUTE_PATTERN.matcher(key);
if (matcher.matches()) {
final int parameterIndex = Integer.parseInt(matcher.group(1));
String paramType = entry.getValue();
if (StringUtils.isEmpty(paramType)) {
throw new ProcessException("Value of the " + key + " attribute is null or empty, it must contain a valid value");
}
paramType = paramType.trim();
final String valueAttrName = "cql.args." + parameterIndex + ".value";
final String parameterValue = attributes.get(valueAttrName);
try {
setStatementObject(boundStatement, parameterIndex - 1, valueAttrName, parameterValue, paramType);
} catch (final InvalidTypeException | IllegalArgumentException e) {
throw new ProcessException("The value of the " + valueAttrName + " is '" + parameterValue
+ "', which cannot be converted into the necessary data type: " + paramType, e);
}
}
}
try {
ResultSetFuture future = connectionSession.executeAsync(boundStatement);
if (statementTimeout > 0) {
future.getUninterruptibly(statementTimeout, TimeUnit.MILLISECONDS);
} else {
future.getUninterruptibly();
}
// Emit a Provenance SEND event
final long transmissionMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
// This isn't a real URI but since Cassandra is distributed we just use the cluster name
String transitUri = "cassandra://" + connectionSession.getCluster().getMetadata().getClusterName();
session.getProvenanceReporter().send(flowFile, transitUri, transmissionMillis, true);
session.transfer(flowFile, REL_SUCCESS);
} catch (final TimeoutException e) {
throw new ProcessException(e);
}
} catch (final NoHostAvailableException nhae) {
getLogger().error("No host in the Cassandra cluster can be contacted successfully to execute this statement", nhae);
// Log up to 10 error messages. Otherwise if a 1000-node cluster was specified but there was no connectivity,
// a thousand error messages would be logged. However we would like information from Cassandra itself, so
// cap the error limit at 10, format the messages, and don't include the stack trace (it is displayed by the
// logger message above).
getLogger().error(nhae.getCustomMessage(10, true, false));
flowFile = session.penalize(flowFile);
session.transfer(flowFile, REL_RETRY);
} catch (final QueryExecutionException qee) {
logger.error("Cannot execute the statement with the requested consistency level successfully", qee);
flowFile = session.penalize(flowFile);
session.transfer(flowFile, REL_RETRY);
} catch (final QueryValidationException qve) {
logger.error("The CQL statement {} is invalid due to syntax error, authorization issue, or another "
+ "validation problem; routing {} to failure", cql, flowFile, qve);
flowFile = session.penalize(flowFile);
session.transfer(flowFile, REL_FAILURE);
} catch (final ProcessException e) {
logger.error("Unable to execute CQL select statement {} for {} due to {}; routing to failure",
new Object[]{cql, flowFile, e});
flowFile = session.penalize(flowFile);
session.transfer(flowFile, REL_FAILURE);
}
}
/**
* Determines the CQL statement that should be executed for the given FlowFile
*
* @param session the session that can be used to access the given FlowFile
* @param flowFile the FlowFile whose CQL statement should be executed
* @param charset the character set used to decode the FlowFile content
* @return the CQL that is associated with the given FlowFile
*/
private String getCQL(final ProcessSession session, final FlowFile flowFile, final Charset charset) {
// Read the CQL from the FlowFile's content
final byte[] buffer = new byte[(int) flowFile.getSize()];
session.read(flowFile, new InputStreamCallback() {
@Override
public void process(final InputStream in) throws IOException {
StreamUtils.fillBuffer(in, buffer);
}
});
// Decode the CQL statement string for this FlowFile.
return new String(buffer, charset);
}
/**
* Maps the given value to the appropriate Cassandra data type and sets the converted value on the
* given Prepared/BoundStatement at the supplied parameter index.
*
* @param statement the BoundStatement for setting objects on
* @param paramIndex the index of the parameter at which to set the object
* @param attrName the name of the attribute that the parameter is coming from - for logging purposes
* @param paramValue the value of the CQL parameter to set
* @param paramType the Cassandra data type of the CQL parameter to set
* @throws IllegalArgumentException if the PreparedStatement throws a CQLException when calling the appropriate setter
*/
protected void setStatementObject(final BoundStatement statement, final int paramIndex, final String attrName,
final String paramValue, final String paramType) throws IllegalArgumentException {
if (paramValue == null) {
statement.setToNull(paramIndex);
return;
} else if (paramType == null) {
throw new IllegalArgumentException("Parameter type for " + attrName + " cannot be null");
} else {
// Parse the top-level type and any parameterized types (for collections)
final Matcher matcher = CQL_TYPE_PATTERN.matcher(paramType);
// If the matcher doesn't match, this should fall through to the exception at the bottom
// Note: groupCount() reflects the pattern, not the match, so the captured groups are checked below
if (matcher.find()) {
String mainTypeString = matcher.group(1).toLowerCase();
DataType mainType = getPrimitiveDataTypeFromString(mainTypeString);
if (mainType != null) {
TypeCodec typeCodec = codecRegistry.codecFor(mainType);
// Need the right statement.setXYZ() method
if (mainType.equals(DataType.ascii())
|| mainType.equals(DataType.text())
|| mainType.equals(DataType.varchar())
|| mainType.equals(DataType.inet())
|| mainType.equals(DataType.varint())) {
// These are strings, so just use the paramValue
statement.setString(paramIndex, paramValue);
} else if (mainType.equals(DataType.cboolean())) {
statement.setBool(paramIndex, (boolean) typeCodec.parse(paramValue));
} else if (mainType.equals(DataType.cint())) {
statement.setInt(paramIndex, (int) typeCodec.parse(paramValue));
} else if (mainType.equals(DataType.bigint())
|| mainType.equals(DataType.counter())) {
statement.setLong(paramIndex, (long) typeCodec.parse(paramValue));
} else if (mainType.equals(DataType.cfloat())) {
statement.setFloat(paramIndex, (float) typeCodec.parse(paramValue));
} else if (mainType.equals(DataType.cdouble())) {
statement.setDouble(paramIndex, (double) typeCodec.parse(paramValue));
} else if (mainType.equals(DataType.blob())) {
statement.setBytes(paramIndex, (ByteBuffer) typeCodec.parse(paramValue));
} else if (mainType.equals(DataType.timestamp())) {
statement.setTimestamp(paramIndex, (Date) typeCodec.parse(paramValue));
} else if (mainType.equals(DataType.timeuuid())
|| mainType.equals(DataType.uuid())) {
statement.setUUID(paramIndex, (UUID) typeCodec.parse(paramValue));
}
return;
} else {
// Get the first parameterized type
if (matcher.group(2) != null) {
String firstParamTypeName = matcher.group(3);
DataType firstParamType = getPrimitiveDataTypeFromString(firstParamTypeName);
if (firstParamType == null) {
throw new IllegalArgumentException("Nested collections are not supported");
}
// Check for map type
if (DataType.Name.MAP.toString().equalsIgnoreCase(mainTypeString)) {
if (matcher.group(5) != null) {
String secondParamTypeName = matcher.group(5);
DataType secondParamType = getPrimitiveDataTypeFromString(secondParamTypeName);
DataType mapType = DataType.map(firstParamType, secondParamType);
statement.setMap(paramIndex, (Map) codecRegistry.codecFor(mapType).parse(paramValue));
return;
}
} else {
// Must be set or list
if (DataType.Name.SET.toString().equalsIgnoreCase(mainTypeString)) {
DataType setType = DataType.set(firstParamType);
statement.setSet(paramIndex, (Set) codecRegistry.codecFor(setType).parse(paramValue));
return;
} else if (DataType.Name.LIST.toString().equalsIgnoreCase(mainTypeString)) {
DataType listType = DataType.list(firstParamType);
statement.setList(paramIndex, (List) codecRegistry.codecFor(listType).parse(paramValue));
return;
}
}
} else {
throw new IllegalArgumentException(
"Collection type " + mainTypeString + " needs parameterized type(s), such as set<text>");
}
}
}
}
throw new IllegalArgumentException("Cannot create object of type " + paramType + " using input " + paramValue);
}
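// Illustrative examples (hypothetical values), assuming the driver's TypeCodec.parse() accepts CQL literal
// syntax: a "list<int>" parameter could be supplied as "[1, 2, 3]", a "set<text>" parameter as
// "{'a', 'b'}" and a "map<text,int>" parameter as "{'a': 1, 'b': 2}".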
@OnStopped
public void stop(ProcessContext context) {
super.stop(context);
statementCache.clear();
}
}


@ -1,471 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.cassandra;
import com.datastax.driver.core.BatchStatement;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.Assignment;
import com.datastax.driver.core.querybuilder.Insert;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Update;
import org.apache.commons.lang3.StringUtils;
import org.apache.nifi.annotation.behavior.InputRequirement;
import org.apache.nifi.annotation.behavior.ReadsAttribute;
import org.apache.nifi.annotation.behavior.ReadsAttributes;
import org.apache.nifi.annotation.documentation.CapabilityDescription;
import org.apache.nifi.annotation.documentation.Tags;
import org.apache.nifi.annotation.lifecycle.OnShutdown;
import org.apache.nifi.annotation.lifecycle.OnUnscheduled;
import org.apache.nifi.components.AllowableValue;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.ValidationContext;
import org.apache.nifi.components.ValidationResult;
import org.apache.nifi.expression.ExpressionLanguageScope;
import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;
import org.apache.nifi.processor.util.StandardValidators;
import org.apache.nifi.serialization.RecordReader;
import org.apache.nifi.serialization.RecordReaderFactory;
import org.apache.nifi.serialization.record.DataType;
import org.apache.nifi.serialization.record.Record;
import org.apache.nifi.serialization.record.RecordFieldType;
import org.apache.nifi.serialization.record.RecordSchema;
import org.apache.nifi.serialization.record.type.ArrayDataType;
import org.apache.nifi.serialization.record.util.DataTypeUtils;
import org.apache.nifi.util.StopWatch;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import static java.lang.String.format;
@Tags({"cassandra", "cql", "put", "insert", "update", "set", "record"})
@InputRequirement(InputRequirement.Requirement.INPUT_REQUIRED)
@CapabilityDescription("This is a record aware processor that reads the content of the incoming FlowFile as individual records using the " +
"configured 'Record Reader' and writes them to Apache Cassandra using native protocol version 3 or higher.")
@ReadsAttributes({
@ReadsAttribute(attribute = "cql.statement.type", description = "If 'Use cql.statement.type Attribute' is selected for the Statement " +
"Type property, the value of the cql.statement.type Attribute will be used to determine which type of statement (UPDATE, INSERT) " +
"will be generated and executed"),
@ReadsAttribute(attribute = "cql.update.method", description = "If 'Use cql.update.method Attribute' is selected for the Update " +
"Method property, the value of the cql.update.method Attribute will be used to determine which operation (Set, Increment, Decrement) " +
"will be used to generate and execute the Update statement. Ignored if the Statement Type property is not set to UPDATE"),
@ReadsAttribute(attribute = "cql.batch.statement.type", description = "If 'Use cql.batch.statement.type Attribute' is selected for the Batch " +
"Statement Type property, the value of the cql.batch.statement.type Attribute will be used to determine which type of batch statement " +
"(LOGGED, UNLOGGED, COUNTER) will be generated and executed")
})
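// Illustrative example (hypothetical attribute values): an incoming FlowFile with attributes
// cql.statement.type=UPDATE, cql.update.method=Increment and cql.batch.statement.type=COUNTER would
// produce counter-increment UPDATE statements executed in a COUNTER batch.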
public class PutCassandraRecord extends AbstractCassandraProcessor {
static final AllowableValue UPDATE_TYPE = new AllowableValue("UPDATE", "UPDATE",
"Use an UPDATE statement.");
static final AllowableValue INSERT_TYPE = new AllowableValue("INSERT", "INSERT",
"Use an INSERT statement.");
static final AllowableValue STATEMENT_TYPE_USE_ATTR_TYPE = new AllowableValue("USE_ATTR", "Use cql.statement.type Attribute",
"The value of the cql.statement.type Attribute will be used to determine which type of statement (UPDATE, INSERT) " +
"will be generated and executed");
static final String STATEMENT_TYPE_ATTRIBUTE = "cql.statement.type";
static final AllowableValue INCR_TYPE = new AllowableValue("INCREMENT", "Increment",
"Use an increment operation (+=) for the Update statement.");
static final AllowableValue SET_TYPE = new AllowableValue("SET", "Set",
"Use a set operation (=) for the Update statement.");
static final AllowableValue DECR_TYPE = new AllowableValue("DECREMENT", "Decrement",
"Use a decrement operation (-=) for the Update statement.");
static final AllowableValue UPDATE_METHOD_USE_ATTR_TYPE = new AllowableValue("USE_ATTR", "Use cql.update.method Attribute",
"The value of the cql.update.method Attribute will be used to determine which operation (Set, Increment, Decrement) " +
"will be used to generate and execute the Update statement.");
static final String UPDATE_METHOD_ATTRIBUTE = "cql.update.method";
static final AllowableValue LOGGED_TYPE = new AllowableValue("LOGGED", "LOGGED",
"Use a LOGGED batch statement");
static final AllowableValue UNLOGGED_TYPE = new AllowableValue("UNLOGGED", "UNLOGGED",
"Use an UNLOGGED batch statement");
static final AllowableValue COUNTER_TYPE = new AllowableValue("COUNTER", "COUNTER",
"Use a COUNTER batch statement");
static final AllowableValue BATCH_STATEMENT_TYPE_USE_ATTR_TYPE = new AllowableValue("USE_ATTR", "Use cql.batch.statement.type Attribute",
"The value of the cql.batch.statement.type Attribute will be used to determine which type of batch statement (LOGGED, UNLOGGED or COUNTER) " +
"will be used to generate and execute the Update statement.");
static final String BATCH_STATEMENT_TYPE_ATTRIBUTE = "cql.batch.statement.type";
static final PropertyDescriptor RECORD_READER_FACTORY = new PropertyDescriptor.Builder()
.name("put-cassandra-record-reader")
.displayName("Record Reader")
.description("Specifies the type of Record Reader controller service to use for parsing the incoming data " +
"and determining the schema")
.identifiesControllerService(RecordReaderFactory.class)
.required(true)
.build();
static final PropertyDescriptor STATEMENT_TYPE = new PropertyDescriptor.Builder()
.name("put-cassandra-record-statement-type")
.displayName("Statement Type")
.description("Specifies the type of CQL Statement to generate.")
.required(true)
.defaultValue(INSERT_TYPE.getValue())
.allowableValues(UPDATE_TYPE, INSERT_TYPE, STATEMENT_TYPE_USE_ATTR_TYPE)
.build();
static final PropertyDescriptor UPDATE_METHOD = new PropertyDescriptor.Builder()
.name("put-cassandra-record-update-method")
.displayName("Update Method")
.description("Specifies the method to use to SET the values. This property is used if the Statement Type is " +
"UPDATE and ignored otherwise.")
.required(false)
.defaultValue(SET_TYPE.getValue())
.allowableValues(INCR_TYPE, DECR_TYPE, SET_TYPE, UPDATE_METHOD_USE_ATTR_TYPE)
.build();
static final PropertyDescriptor UPDATE_KEYS = new PropertyDescriptor.Builder()
.name("put-cassandra-record-update-keys")
.displayName("Update Keys")
.description("A comma-separated list of column names that uniquely identifies a row in the database for UPDATE statements. "
+ "If the Statement Type is UPDATE and this property is not set, the conversion to CQL will fail. "
+ "This property is ignored if the Statement Type is not UPDATE.")
.addValidator(StandardValidators.createListValidator(true, false, StandardValidators.NON_EMPTY_VALIDATOR))
.required(false)
.expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
.build();
static final PropertyDescriptor TABLE = new PropertyDescriptor.Builder()
.name("put-cassandra-record-table")
.displayName("Table name")
.description("The name of the Cassandra table to which the records have to be written.")
.required(true)
.addValidator(StandardValidators.NON_EMPTY_EL_VALIDATOR)
.expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
.build();
static final PropertyDescriptor BATCH_SIZE = new PropertyDescriptor.Builder()
.name("put-cassandra-record-batch-size")
.displayName("Batch size")
.description("Specifies the number of 'Insert statements' to be grouped together to execute as a batch (BatchStatement)")
.defaultValue("100")
.addValidator(StandardValidators.POSITIVE_INTEGER_VALIDATOR)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.required(true)
.build();
static final PropertyDescriptor BATCH_STATEMENT_TYPE = new PropertyDescriptor.Builder()
.name("put-cassandra-record-batch-statement-type")
.displayName("Batch Statement Type")
.description("Specifies the type of 'Batch Statement' to be used.")
.allowableValues(LOGGED_TYPE, UNLOGGED_TYPE, COUNTER_TYPE, BATCH_STATEMENT_TYPE_USE_ATTR_TYPE)
.defaultValue(LOGGED_TYPE.getValue())
.required(false)
.build();
static final PropertyDescriptor CONSISTENCY_LEVEL = new PropertyDescriptor.Builder()
.fromPropertyDescriptor(AbstractCassandraProcessor.CONSISTENCY_LEVEL)
.allowableValues(ConsistencyLevel.SERIAL.name(), ConsistencyLevel.LOCAL_SERIAL.name())
.defaultValue(ConsistencyLevel.SERIAL.name())
.build();
private final static List<PropertyDescriptor> propertyDescriptors = Collections.unmodifiableList(Arrays.asList(
CONNECTION_PROVIDER_SERVICE, CONTACT_POINTS, KEYSPACE, TABLE, STATEMENT_TYPE, UPDATE_KEYS, UPDATE_METHOD, CLIENT_AUTH, USERNAME, PASSWORD,
RECORD_READER_FACTORY, BATCH_SIZE, CONSISTENCY_LEVEL, BATCH_STATEMENT_TYPE, PROP_SSL_CONTEXT_SERVICE));
private final static Set<Relationship> relationships = Collections.unmodifiableSet(
new HashSet<>(Arrays.asList(REL_SUCCESS, REL_FAILURE)));
@Override
protected List<PropertyDescriptor> getSupportedPropertyDescriptors() {
return propertyDescriptors;
}
@Override
public Set<Relationship> getRelationships() {
return relationships;
}
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
FlowFile inputFlowFile = session.get();
if (inputFlowFile == null) {
return;
}
final String cassandraTable = context.getProperty(TABLE).evaluateAttributeExpressions(inputFlowFile).getValue();
final RecordReaderFactory recordParserFactory = context.getProperty(RECORD_READER_FACTORY).asControllerService(RecordReaderFactory.class);
final int batchSize = context.getProperty(BATCH_SIZE).evaluateAttributeExpressions().asInteger();
final String serialConsistencyLevel = context.getProperty(CONSISTENCY_LEVEL).getValue();
final String updateKeys = context.getProperty(UPDATE_KEYS).evaluateAttributeExpressions(inputFlowFile).getValue();
// Get the statement type from the attribute if necessary
final String statementTypeProperty = context.getProperty(STATEMENT_TYPE).getValue();
String statementType = statementTypeProperty;
if (STATEMENT_TYPE_USE_ATTR_TYPE.getValue().equals(statementTypeProperty)) {
statementType = inputFlowFile.getAttribute(STATEMENT_TYPE_ATTRIBUTE);
}
// Get the update method from the attribute if necessary
final String updateMethodProperty = context.getProperty(UPDATE_METHOD).getValue();
String updateMethod = updateMethodProperty;
if (UPDATE_METHOD_USE_ATTR_TYPE.getValue().equals(updateMethodProperty)) {
updateMethod = inputFlowFile.getAttribute(UPDATE_METHOD_ATTRIBUTE);
}
// Get the batch statement type from the attribute if necessary
final String batchStatementTypeProperty = context.getProperty(BATCH_STATEMENT_TYPE).getValue();
String batchStatementType = batchStatementTypeProperty;
if (BATCH_STATEMENT_TYPE_USE_ATTR_TYPE.getValue().equals(batchStatementTypeProperty)) {
// Null-safe: a missing attribute is reported by the empty check below rather than causing an NPE
batchStatementType = StringUtils.upperCase(inputFlowFile.getAttribute(BATCH_STATEMENT_TYPE_ATTRIBUTE));
}
if (StringUtils.isEmpty(batchStatementType)) {
throw new IllegalArgumentException(format("Batch Statement Type is not specified, FlowFile %s", inputFlowFile));
}
final BatchStatement batchStatement;
final Session connectionSession = cassandraSession.get();
final AtomicInteger recordsAdded = new AtomicInteger(0);
final StopWatch stopWatch = new StopWatch(true);
boolean error = false;
try (final InputStream inputStream = session.read(inputFlowFile);
final RecordReader reader = recordParserFactory.createRecordReader(inputFlowFile, inputStream, getLogger())){
// throw an exception if statement type is not set
if (StringUtils.isEmpty(statementType)) {
throw new IllegalArgumentException(format("Statement Type is not specified, FlowFile %s", inputFlowFile));
}
// throw an exception if the statement type is set to update and updateKeys is empty
if (UPDATE_TYPE.getValue().equalsIgnoreCase(statementType) && StringUtils.isEmpty(updateKeys)) {
throw new IllegalArgumentException(format("Update Keys are not specified, FlowFile %s", inputFlowFile));
}
// throw an exception if the Update Method is Increment or Decrement and the batch statement type is not UNLOGGED or COUNTER
if (INCR_TYPE.getValue().equalsIgnoreCase(updateMethod) || DECR_TYPE.getValue().equalsIgnoreCase(updateMethod)) {
if (!(UNLOGGED_TYPE.getValue().equalsIgnoreCase(batchStatementType) || COUNTER_TYPE.getValue().equalsIgnoreCase(batchStatementType))) {
throw new IllegalArgumentException(format("Increment/Decrement Update Method can only be used with COUNTER " +
"or UNLOGGED Batch Statement Type, FlowFile %s", inputFlowFile));
}
}
final RecordSchema schema = reader.getSchema();
Record record;
batchStatement = new BatchStatement(BatchStatement.Type.valueOf(batchStatementType));
batchStatement.setSerialConsistencyLevel(ConsistencyLevel.valueOf(serialConsistencyLevel));
while ((record = reader.nextRecord()) != null) {
Map<String, Object> recordContentMap = (Map<String, Object>) DataTypeUtils
.convertRecordFieldtoObject(record, RecordFieldType.RECORD.getRecordDataType(record.getSchema()));
Statement query;
if (INSERT_TYPE.getValue().equalsIgnoreCase(statementType)) {
query = generateInsert(cassandraTable, schema, recordContentMap);
} else if (UPDATE_TYPE.getValue().equalsIgnoreCase(statementType)) {
query = generateUpdate(cassandraTable, schema, updateKeys, updateMethod, recordContentMap);
} else {
throw new IllegalArgumentException(format("Statement Type %s is not valid, FlowFile %s", statementType, inputFlowFile));
}
if (getLogger().isDebugEnabled()) {
getLogger().debug("Query: {}", query.toString());
}
batchStatement.add(query);
if (recordsAdded.incrementAndGet() == batchSize) {
connectionSession.execute(batchStatement);
batchStatement.clear();
recordsAdded.set(0);
}
}
if (batchStatement.size() != 0) {
connectionSession.execute(batchStatement);
batchStatement.clear();
}
} catch (Exception e) {
error = true;
getLogger().error("Unable to write the records into Cassandra table due to {}", new Object[] {e});
session.transfer(inputFlowFile, REL_FAILURE);
} finally {
if (!error) {
stopWatch.stop();
long duration = stopWatch.getDuration(TimeUnit.MILLISECONDS);
String transitUri = "cassandra://" + connectionSession.getCluster().getMetadata().getClusterName() + "." + cassandraTable;
session.getProvenanceReporter().send(inputFlowFile, transitUri, "Inserted " + recordsAdded.get() + " records", duration);
session.transfer(inputFlowFile, REL_SUCCESS);
}
}
}
protected Statement generateUpdate(String cassandraTable, RecordSchema schema, String updateKeys, String updateMethod, Map<String, Object> recordContentMap) {
Update updateQuery;
// Split the comma-separated update key names; the resulting set must not be empty
final Set<String> updateKeyNames = Arrays.stream(updateKeys.split(","))
.map(String::trim)
.filter(StringUtils::isNotEmpty)
.collect(Collectors.toSet());
if (updateKeyNames.isEmpty()) {
throw new IllegalArgumentException("No Update Keys were specified");
}
// Verify that all update keys are present in the record schema
for (String updateKey : updateKeyNames) {
if (!schema.getFieldNames().contains(updateKey)) {
throw new IllegalArgumentException("Update key '" + updateKey + "' is not present in the record schema");
}
}
// Prepare keyspace/table names
if (cassandraTable.contains(".")) {
String[] keyspaceAndTable = cassandraTable.split("\\.");
updateQuery = QueryBuilder.update(keyspaceAndTable[0], keyspaceAndTable[1]);
} else {
updateQuery = QueryBuilder.update(cassandraTable);
}
// Loop through the field names, setting those that are not in the update key set, and using those
// in the update key set as conditions.
for (String fieldName : schema.getFieldNames()) {
Object fieldValue = recordContentMap.get(fieldName);
if (updateKeyNames.contains(fieldName)) {
updateQuery.where(QueryBuilder.eq(fieldName, fieldValue));
} else {
Assignment assignment;
if (SET_TYPE.getValue().equalsIgnoreCase(updateMethod)) {
assignment = QueryBuilder.set(fieldName, fieldValue);
} else if (INCR_TYPE.getValue().equalsIgnoreCase(updateMethod)) {
assignment = QueryBuilder.incr(fieldName, convertFieldObjectToLong(fieldName, fieldValue));
} else if (DECR_TYPE.getValue().equalsIgnoreCase(updateMethod)) {
assignment = QueryBuilder.decr(fieldName, convertFieldObjectToLong(fieldName, fieldValue));
} else {
throw new IllegalArgumentException("Update Method '" + updateMethod + "' is not valid.");
}
updateQuery.with(assignment);
}
}
return updateQuery;
}
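// Illustrative example (hypothetical table and record): for a record {id: 1, count: 5} with Update Keys
// "id" and Update Method "Increment", the generated statement is equivalent to:
//   UPDATE mytable SET count = count + 5 WHERE id = 1;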
private Long convertFieldObjectToLong(String name, Object value) {
if (!(value instanceof Number)) {
throw new IllegalArgumentException("Field '" + name + "' is not of type Number");
}
return ((Number) value).longValue();
}
@Override
protected Collection<ValidationResult> customValidate(ValidationContext validationContext) {
Collection<ValidationResult> results = super.customValidate(validationContext);
String statementType = validationContext.getProperty(STATEMENT_TYPE).getValue();
if (UPDATE_TYPE.getValue().equalsIgnoreCase(statementType)) {
// Check that update keys are set
String updateKeys = validationContext.getProperty(UPDATE_KEYS).getValue();
if (StringUtils.isEmpty(updateKeys)) {
results.add(new ValidationResult.Builder().subject("Update statement configuration").valid(false).explanation(
"if the Statement Type is set to Update, then the Update Keys must be specified as well").build());
}
// Check that if the update method is set to increment or decrement that the batch statement type is set to
// unlogged or counter (or USE_ATTR_TYPE, which we cannot check at this point).
String updateMethod = validationContext.getProperty(UPDATE_METHOD).getValue();
String batchStatementType = validationContext.getProperty(BATCH_STATEMENT_TYPE).getValue();
if (INCR_TYPE.getValue().equalsIgnoreCase(updateMethod)
|| DECR_TYPE.getValue().equalsIgnoreCase(updateMethod)) {
if (!(COUNTER_TYPE.getValue().equalsIgnoreCase(batchStatementType)
|| UNLOGGED_TYPE.getValue().equalsIgnoreCase(batchStatementType)
|| BATCH_STATEMENT_TYPE_USE_ATTR_TYPE.getValue().equalsIgnoreCase(batchStatementType))) {
results.add(new ValidationResult.Builder().subject("Update method configuration").valid(false).explanation(
"if the Update Method is set to Increment or Decrement, then the Batch Statement Type must be set " +
"to either COUNTER or UNLOGGED").build());
}
}
}
return results;
}
protected Statement generateInsert(String cassandraTable, RecordSchema schema, Map<String, Object> recordContentMap) {
Insert insertQuery;
if (cassandraTable.contains(".")) {
String[] keyspaceAndTable = cassandraTable.split("\\.");
insertQuery = QueryBuilder.insertInto(keyspaceAndTable[0], keyspaceAndTable[1]);
} else {
insertQuery = QueryBuilder.insertInto(cassandraTable);
}
for (String fieldName : schema.getFieldNames()) {
Object value = recordContentMap.get(fieldName);
if (value != null && value.getClass().isArray()) {
Object[] array = (Object[]) value;
if (array.length > 0 && array[0] instanceof Byte) {
// Unbox a Byte[] into a byte[] and wrap it as a ByteBuffer for the CQL blob type
byte[] newArray = new byte[array.length];
for (int x = 0; x < array.length; x++) {
newArray[x] = (Byte) array[x];
}
value = ByteBuffer.wrap(newArray);
}
}
if (schema.getDataType(fieldName).isPresent()) {
DataType fieldDataType = schema.getDataType(fieldName).get();
if (fieldDataType.getFieldType() == RecordFieldType.ARRAY) {
if (((ArrayDataType) fieldDataType).getElementType().getFieldType() == RecordFieldType.STRING) {
// Convert Object[] to String[] so the value can be written as a CQL list of varchar
value = Arrays.stream((Object[]) value).toArray(String[]::new);
}
}
}
insertQuery.value(fieldName, value);
}
return insertQuery;
}
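// Illustrative example (hypothetical keyspace, table and record): for a record {id: 1, name: "a"}, the
// generated statement is equivalent to:
//   INSERT INTO mykeyspace.mytable (id, name) VALUES (1, 'a');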
@OnUnscheduled
public void stop(ProcessContext context) {
super.stop(context);
}
@OnShutdown
public void shutdown(ProcessContext context) {
super.stop(context);
}
}


@ -1,736 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.cassandra;
import com.datastax.driver.core.ColumnDefinitions;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.NoHostAvailableException;
import com.datastax.driver.core.exceptions.QueryExecutionException;
import com.datastax.driver.core.exceptions.QueryValidationException;
import com.google.common.annotations.VisibleForTesting;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumWriter;
import org.apache.commons.text.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.nifi.annotation.behavior.InputRequirement;
import org.apache.nifi.annotation.lifecycle.OnShutdown;
import org.apache.nifi.annotation.lifecycle.OnUnscheduled;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.ValidationResult;
import org.apache.nifi.expression.ExpressionLanguageScope;
import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.flowfile.attributes.CoreAttributes;
import org.apache.nifi.annotation.behavior.WritesAttribute;
import org.apache.nifi.annotation.behavior.WritesAttributes;
import org.apache.nifi.annotation.lifecycle.OnScheduled;
import org.apache.nifi.annotation.documentation.CapabilityDescription;
import org.apache.nifi.annotation.documentation.Tags;
import org.apache.nifi.flowfile.attributes.FragmentAttributes;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;
import org.apache.nifi.processor.util.StandardValidators;
import org.apache.nifi.util.StopWatch;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.Charset;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicLong;
@Tags({"cassandra", "cql", "select"})
@InputRequirement(InputRequirement.Requirement.INPUT_ALLOWED)
@CapabilityDescription("Execute provided Cassandra Query Language (CQL) select query on a Cassandra 1.x, 2.x, or 3.0.x cluster. Query result "
+ "may be converted to Avro or JSON format. Streaming is used so arbitrarily large result sets are supported. This processor can be "
+ "scheduled to run on a timer, or cron expression, using the standard scheduling methods, or it can be triggered by an incoming FlowFile. "
+ "If it is triggered by an incoming FlowFile, then attributes of that FlowFile will be available when evaluating the "
+ "select query. FlowFile attribute 'executecql.row.count' indicates how many rows were selected.")
@WritesAttributes({
@WritesAttribute(attribute = "executecql.row.count", description = "The number of rows returned by the CQL query"),
@WritesAttribute(attribute = "fragment.identifier", description = "If 'Max Rows Per Flow File' is set then all FlowFiles from the same query result set "
+ "will have the same value for the fragment.identifier attribute. This can then be used to correlate the results."),
@WritesAttribute(attribute = "fragment.count", description = "If 'Max Rows Per Flow File' is set then this is the total number of "
+ "FlowFiles produced by a single ResultSet. This can be used in conjunction with the "
+ "fragment.identifier attribute in order to know how many FlowFiles belonged to the same incoming ResultSet. If Output Batch Size is set, then this "
+ "attribute will not be populated."),
@WritesAttribute(attribute = "fragment.index", description = "If 'Max Rows Per Flow File' is set then the position of this FlowFile in the list of "
+ "outgoing FlowFiles that were all derived from the same result set FlowFile. This can be "
+ "used in conjunction with the fragment.identifier attribute to know which FlowFiles originated from the same query result set and in what order "
+ "FlowFiles were produced")
})
public class QueryCassandra extends AbstractCassandraProcessor {
public static final String AVRO_FORMAT = "Avro";
public static final String JSON_FORMAT = "JSON";
public static final String RESULT_ROW_COUNT = "executecql.row.count";
public static final String FRAGMENT_ID = FragmentAttributes.FRAGMENT_ID.key();
public static final String FRAGMENT_INDEX = FragmentAttributes.FRAGMENT_INDEX.key();
public static final String FRAGMENT_COUNT = FragmentAttributes.FRAGMENT_COUNT.key();
public static final PropertyDescriptor CQL_SELECT_QUERY = new PropertyDescriptor.Builder()
.name("CQL select query")
.description("CQL select query")
.required(true)
.addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
.expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
.build();
public static final PropertyDescriptor QUERY_TIMEOUT = new PropertyDescriptor.Builder()
.name("Max Wait Time")
.description("The maximum amount of time allowed for a running CQL select query. Must be of format "
+ "<duration> <TimeUnit> where <duration> is a non-negative integer and TimeUnit is a supported "
+ "Time Unit, such as: nanos, millis, secs, mins, hrs, days. A value of zero means there is no limit. ")
.defaultValue("0 seconds")
.required(true)
.expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
.addValidator(StandardValidators.TIME_PERIOD_VALIDATOR)
.build();
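// Example values accepted by the validator above (illustrative): "0 seconds"
// (no limit), "30 secs", "5 mins". The value is converted with
// asTimePeriod(TimeUnit.MILLISECONDS) when the processor is triggered.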
public static final PropertyDescriptor FETCH_SIZE = new PropertyDescriptor.Builder()
.name("Fetch size")
.description("The number of result rows to be fetched from the result set at a time. Zero is the default "
+ "and means there is no limit.")
.defaultValue("0")
.required(true)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.addValidator(StandardValidators.INTEGER_VALIDATOR)
.build();
public static final PropertyDescriptor MAX_ROWS_PER_FLOW_FILE = new PropertyDescriptor.Builder()
.name("Max Rows Per Flow File")
.description("The maximum number of result rows that will be included in a single FlowFile. This will allow you to break up very large "
+ "result sets into multiple FlowFiles. If the value specified is zero, then all rows are returned in a single FlowFile.")
.defaultValue("0")
.required(true)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.addValidator(StandardValidators.INTEGER_VALIDATOR)
.build();
public static final PropertyDescriptor OUTPUT_BATCH_SIZE = new PropertyDescriptor.Builder()
.name("qdbt-output-batch-size")
.displayName("Output Batch Size")
.description("The number of output FlowFiles to queue before committing the process session. When set to zero, the session will be committed when all result set rows "
+ "have been processed and the output FlowFiles are ready for transfer to the downstream relationship. For large result sets, this can cause a large burst of FlowFiles "
+ "to be transferred at the end of processor execution. If this property is set, then when the specified number of FlowFiles are ready for transfer, then the session will "
+ "be committed, thus releasing the FlowFiles to the downstream relationship. NOTE: The maxvalue.* and fragment.count attributes will not be set on FlowFiles when this "
+ "property is set.")
.defaultValue("0")
.required(true)
.addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.build();
public static final PropertyDescriptor OUTPUT_FORMAT = new PropertyDescriptor.Builder()
.name("Output Format")
.description("The format to which the result rows will be converted. If JSON is selected, the output will "
+ "contain an object with field 'results' containing an array of result rows. Each row in the array is a "
+ "map of the named column to its value. For example: { \"results\": [{\"userid\":1, \"name\":\"Joe Smith\"}]}")
.required(true)
.allowableValues(AVRO_FORMAT, JSON_FORMAT)
.defaultValue(AVRO_FORMAT)
.build();
public static final PropertyDescriptor TIMESTAMP_FORMAT_PATTERN = new PropertyDescriptor.Builder()
.name("timestamp-format-pattern")
.displayName("Timestamp Format Pattern for JSON output")
.description("Pattern to use when converting timestamp fields to JSON. Note: the formatted timestamp will be in UTC timezone.")
.required(true)
.defaultValue("yyyy-MM-dd HH:mm:ssZ")
.addValidator((subject, input, context) -> {
final ValidationResult.Builder vrb = new ValidationResult.Builder().subject(subject).input(input);
try {
DateTimeFormatter.ofPattern(input);
vrb.valid(true).explanation("Valid date format pattern");
} catch (Exception ex) {
vrb.valid(false).explanation("the pattern is invalid: " + ex.getMessage());
}
return vrb.build();
})
.build();
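// Illustrative behavior of the validator above: the default pattern
// "yyyy-MM-dd HH:mm:ssZ" renders a timestamp such as "2016-02-03 05:00:00+0000"
// (always in UTC), while a malformed pattern makes DateTimeFormatter.ofPattern
// throw, so the property is reported as invalid.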
private static final List<PropertyDescriptor> propertyDescriptors;
private static final Set<Relationship> relationships;
/*
* Ensures that the list of property descriptors is built only once.
* Also creates the Set of relationships.
*/
static {
List<PropertyDescriptor> _propertyDescriptors = new ArrayList<>();
_propertyDescriptors.addAll(descriptors);
_propertyDescriptors.add(CQL_SELECT_QUERY);
_propertyDescriptors.add(QUERY_TIMEOUT);
_propertyDescriptors.add(FETCH_SIZE);
_propertyDescriptors.add(MAX_ROWS_PER_FLOW_FILE);
_propertyDescriptors.add(OUTPUT_BATCH_SIZE);
_propertyDescriptors.add(OUTPUT_FORMAT);
_propertyDescriptors.add(TIMESTAMP_FORMAT_PATTERN);
propertyDescriptors = Collections.unmodifiableList(_propertyDescriptors);
Set<Relationship> _relationships = new HashSet<>();
_relationships.add(REL_SUCCESS);
_relationships.add(REL_FAILURE);
_relationships.add(REL_RETRY);
relationships = Collections.unmodifiableSet(_relationships);
}
@Override
public Set<Relationship> getRelationships() {
return relationships;
}
@Override
public final List<PropertyDescriptor> getSupportedPropertyDescriptors() {
return propertyDescriptors;
}
@OnScheduled
public void onScheduled(final ProcessContext context) {
super.onScheduled(context);
final int fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions().asInteger();
if (fetchSize > 0) {
synchronized (cluster.get()) {
cluster.get().getConfiguration().getQueryOptions().setFetchSize(fetchSize);
}
}
}
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
FlowFile fileToProcess = null;
if (context.hasIncomingConnection()) {
fileToProcess = session.get();
// If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
// However, if we have no FlowFile and we have connections coming from other Processors, then
// we know that we should run only if we have a FlowFile.
if (fileToProcess == null && context.hasNonLoopConnection()) {
return;
}
}
final ComponentLog logger = getLogger();
final String selectQuery = context.getProperty(CQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess).getValue();
final long queryTimeout = context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions(fileToProcess).asTimePeriod(TimeUnit.MILLISECONDS);
final String outputFormat = context.getProperty(OUTPUT_FORMAT).getValue();
final long maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE).evaluateAttributeExpressions().asInteger();
final long outputBatchSize = context.getProperty(OUTPUT_BATCH_SIZE).evaluateAttributeExpressions().asInteger();
final Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(fileToProcess).getValue());
final StopWatch stopWatch = new StopWatch(true);
final List<FlowFile> resultSetFlowFiles = new LinkedList<>();
try {
// The documentation for the driver recommends the session remain open the entire time the processor is running
// and states that it is thread-safe. This is why connectionSession is not in a try-with-resources.
final Session connectionSession = cassandraSession.get();
final ResultSet resultSet;
if (queryTimeout > 0) {
resultSet = connectionSession.execute(selectQuery, queryTimeout, TimeUnit.MILLISECONDS);
} else {
resultSet = connectionSession.execute(selectQuery);
}
final AtomicLong nrOfRows = new AtomicLong(0L);
if (fileToProcess == null) {
fileToProcess = session.create();
}
int fragmentIndex = 0;
final String fragmentId = UUID.randomUUID().toString();
while (true) {
fileToProcess = session.write(fileToProcess, out -> {
try {
logger.debug("Executing CQL query {}", selectQuery);
if (queryTimeout > 0) {
if (AVRO_FORMAT.equals(outputFormat)) {
nrOfRows.set(convertToAvroStream(resultSet, maxRowsPerFlowFile,
out, queryTimeout, TimeUnit.MILLISECONDS));
} else if (JSON_FORMAT.equals(outputFormat)) {
nrOfRows.set(convertToJsonStream(resultSet, maxRowsPerFlowFile,
out, charset, queryTimeout, TimeUnit.MILLISECONDS));
}
} else {
if (AVRO_FORMAT.equals(outputFormat)) {
nrOfRows.set(convertToAvroStream(resultSet, maxRowsPerFlowFile,
out, 0, null));
} else if (JSON_FORMAT.equals(outputFormat)) {
nrOfRows.set(convertToJsonStream(resultSet, maxRowsPerFlowFile,
out, charset, 0, null));
}
}
} catch (final TimeoutException | InterruptedException | ExecutionException e) {
throw new ProcessException(e);
}
});
// set an attribute indicating how many rows were selected
fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));
// set mime.type based on output format
fileToProcess = session.putAttribute(fileToProcess, CoreAttributes.MIME_TYPE.key(),
JSON_FORMAT.equals(outputFormat) ? "application/json" : "application/avro-binary");
if (logger.isDebugEnabled()) {
logger.debug("{} contains {} records; transferring to 'success'",
fileToProcess, nrOfRows.get());
}
if (maxRowsPerFlowFile > 0) {
fileToProcess = session.putAttribute(fileToProcess, FRAGMENT_ID, fragmentId);
fileToProcess = session.putAttribute(fileToProcess, FRAGMENT_INDEX, String.valueOf(fragmentIndex));
}
session.getProvenanceReporter().modifyContent(fileToProcess, "Retrieved " + nrOfRows.get() + " rows",
stopWatch.getElapsed(TimeUnit.MILLISECONDS));
resultSetFlowFiles.add(fileToProcess);
if (outputBatchSize > 0) {
if (resultSetFlowFiles.size() == outputBatchSize) {
session.transfer(resultSetFlowFiles, REL_SUCCESS);
session.commitAsync();
resultSetFlowFiles.clear();
}
}
fragmentIndex++;
resultSet.fetchMoreResults().get();
if (resultSet.isExhausted()) {
// If we are splitting results but not outputting batches, set count on all FlowFiles
if (outputBatchSize == 0 && maxRowsPerFlowFile > 0) {
for (int i = 0; i < resultSetFlowFiles.size(); i++) {
resultSetFlowFiles.set(i,
session.putAttribute(resultSetFlowFiles.get(i), FRAGMENT_COUNT, Integer.toString(fragmentIndex)));
}
}
session.transfer(resultSetFlowFiles, REL_SUCCESS);
session.commitAsync();
resultSetFlowFiles.clear();
break;
}
fileToProcess = session.create();
}
} catch (final NoHostAvailableException nhae) {
getLogger().error("No host in the Cassandra cluster can be contacted successfully to execute this query", nhae);
// Log up to 10 error messages. Otherwise, if a 1000-node cluster was specified but there was no connectivity,
// a thousand error messages would be logged. However, we would like information from Cassandra itself, so
// cap the error limit at 10, format the messages, and don't include the stack trace (it is displayed by the
// logger message above).
getLogger().error(nhae.getCustomMessage(10, true, false));
if (fileToProcess == null) {
fileToProcess = session.create();
}
fileToProcess = session.penalize(fileToProcess);
session.transfer(fileToProcess, REL_RETRY);
} catch (final QueryExecutionException qee) {
logger.error("Cannot execute the query with the requested consistency level successfully", qee);
if (fileToProcess == null) {
fileToProcess = session.create();
}
fileToProcess = session.penalize(fileToProcess);
session.transfer(fileToProcess, REL_RETRY);
} catch (final QueryValidationException qve) {
if (context.hasIncomingConnection()) {
logger.error("The CQL query {} is invalid due to syntax error, authorization issue, or another "
+ "validation problem; routing {} to failure",
selectQuery, fileToProcess, qve);
if (fileToProcess == null) {
fileToProcess = session.create();
}
fileToProcess = session.penalize(fileToProcess);
session.transfer(fileToProcess, REL_FAILURE);
} else {
// This can happen if any exceptions occur while setting up the connection, statement, etc.
logger.error("The CQL query {} is invalid due to syntax error, authorization issue, or another "
+ "validation problem", selectQuery, qve);
if (fileToProcess != null) {
session.remove(fileToProcess);
}
context.yield();
}
} catch (InterruptedException | ExecutionException ex) {
if (context.hasIncomingConnection()) {
logger.error("The CQL query {} has yielded an unknown error, routing {} to failure",
selectQuery, fileToProcess, ex);
if (fileToProcess == null) {
fileToProcess = session.create();
}
fileToProcess = session.penalize(fileToProcess);
session.transfer(fileToProcess, REL_FAILURE);
} else {
// This can happen if any exceptions occur while setting up the connection, statement, etc.
logger.error("The CQL query {} has run into an unknown error.", selectQuery, ex);
if (fileToProcess != null) {
session.remove(fileToProcess);
}
context.yield();
}
} catch (final ProcessException e) {
if (context.hasIncomingConnection()) {
logger.error("Unable to execute CQL select query {} for {} due to {}; routing to failure",
selectQuery, fileToProcess, e);
if (fileToProcess == null) {
fileToProcess = session.create();
}
fileToProcess = session.penalize(fileToProcess);
session.transfer(fileToProcess, REL_FAILURE);
} else {
logger.error("Unable to execute CQL select query {} due to {}",
selectQuery, e);
if (fileToProcess != null) {
session.remove(fileToProcess);
}
context.yield();
}
}
session.commitAsync();
}
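// Fragment attribute sketch (hypothetical run): with 'Max Rows Per Flow File' = 1
// and a two-row result set, onTrigger emits two FlowFiles sharing one random
// fragment.identifier, carrying fragment.index 0 and 1; fragment.count = 2 is
// added to both only when 'Output Batch Size' is 0.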
@OnUnscheduled
public void stop(ProcessContext context) {
super.stop(context);
}
@OnShutdown
public void shutdown(ProcessContext context) {
super.stop(context);
}
/**
* Converts a result set into Avro records and writes them to the given stream.
*
* @param rs The result set to convert
* @param maxRowsPerFlowFile The maximum number of rows to write; zero means no limit
* @param outStream The stream to which the Avro records will be written
* @param timeout The max number of timeUnits to wait for a result set fetch to complete
* @param timeUnit The unit of time (SECONDS, e.g.) associated with the timeout amount
* @return The number of rows from the result set written to the stream
* @throws IOException If the Avro record cannot be written
* @throws InterruptedException If a result set fetch is interrupted
* @throws TimeoutException If a result set fetch has taken longer than the specified timeout
* @throws ExecutionException If any error occurs during the result set fetch
*/
public static long convertToAvroStream(final ResultSet rs, long maxRowsPerFlowFile,
final OutputStream outStream,
long timeout, TimeUnit timeUnit)
throws IOException, InterruptedException, TimeoutException, ExecutionException {
final Schema schema = createSchema(rs);
final GenericRecord rec = new GenericData.Record(schema);
final DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
try (final DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<>(datumWriter)) {
dataFileWriter.create(schema, outStream);
ColumnDefinitions columnDefinitions = rs.getColumnDefinitions();
long nrOfRows = 0;
long rowsAvailableWithoutFetching = rs.getAvailableWithoutFetching();
if (columnDefinitions != null) {
// Grab the ones we have
if (rowsAvailableWithoutFetching == 0
|| rowsAvailableWithoutFetching < maxRowsPerFlowFile) {
// Get more
if (timeout <= 0 || timeUnit == null) {
rs.fetchMoreResults().get();
} else {
rs.fetchMoreResults().get(timeout, timeUnit);
}
}
Row row;
while ((maxRowsPerFlowFile == 0) || nrOfRows < maxRowsPerFlowFile) {
try {
row = rs.iterator().next();
} catch (NoSuchElementException nsee) {
break;
}
// iterator().next() is like iterator().one() => returns null at the end
// https://docs.datastax.com/en/drivers/java/2.0/com/datastax/driver/core/ResultSet.html#one--
if (row == null) {
break;
}
for (int i = 0; i < columnDefinitions.size(); i++) {
final DataType dataType = columnDefinitions.getType(i);
if (row.isNull(i)) {
rec.put(i, null);
} else {
rec.put(i, getCassandraObject(row, i, dataType));
}
}
dataFileWriter.append(rec);
nrOfRows += 1;
}
}
return nrOfRows;
}
}
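// Usage sketch (assumes an open driver Session named "session" and an
// OutputStream named "out"; both names are illustrative):
//   ResultSet rs = session.execute("SELECT * FROM users");
//   long rows = convertToAvroStream(rs, 0, out, 0, null);
// Passing 0 for maxRowsPerFlowFile writes every row, and a zero timeout with a
// null TimeUnit makes the fetch block without a deadline.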
private static String getFormattedDate(final Optional<ProcessContext> context, Date value) {
final String dateFormatPattern = context
.map(_context -> _context.getProperty(TIMESTAMP_FORMAT_PATTERN).getValue())
.orElse(TIMESTAMP_FORMAT_PATTERN.getDefaultValue());
final DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern(dateFormatPattern);
final OffsetDateTime offsetDateTime = value.toInstant().atOffset(ZoneOffset.UTC);
return dateTimeFormatter.format(offsetDateTime);
}
public static long convertToJsonStream(final ResultSet rs, long maxRowsPerFlowFile,
final OutputStream outStream,
Charset charset, long timeout, TimeUnit timeUnit)
throws IOException, InterruptedException, TimeoutException, ExecutionException {
return convertToJsonStream(Optional.empty(), rs, maxRowsPerFlowFile, outStream, charset, timeout, timeUnit);
}
/**
* Converts a result set into a JSON object and writes it to the given stream using the specified character set.
*
* @param context The optional process context, used to resolve the timestamp format pattern
* @param rs The result set to convert
* @param maxRowsPerFlowFile The maximum number of rows to write; zero means all available rows
* @param outStream The stream to which the JSON object will be written
* @param charset The character set used when writing to the stream
* @param timeout The max number of timeUnits to wait for a result set fetch to complete
* @param timeUnit The unit of time (SECONDS, e.g.) associated with the timeout amount
* @return The number of rows from the result set written to the stream
* @throws IOException If the JSON object cannot be written
* @throws InterruptedException If a result set fetch is interrupted
* @throws TimeoutException If a result set fetch has taken longer than the specified timeout
* @throws ExecutionException If any error occurs during the result set fetch
*/
@VisibleForTesting
public static long convertToJsonStream(final Optional<ProcessContext> context,
final ResultSet rs, long maxRowsPerFlowFile,
final OutputStream outStream,
Charset charset, long timeout, TimeUnit timeUnit)
throws IOException, InterruptedException, TimeoutException, ExecutionException {
try {
// Write the initial object brace
outStream.write("{\"results\":[".getBytes(charset));
ColumnDefinitions columnDefinitions = rs.getColumnDefinitions();
long nrOfRows = 0;
long rowsAvailableWithoutFetching = rs.getAvailableWithoutFetching();
if (columnDefinitions != null) {
// Grab the ones we have
if (rowsAvailableWithoutFetching == 0) {
// Get more
if (timeout <= 0 || timeUnit == null) {
rs.fetchMoreResults().get();
} else {
rs.fetchMoreResults().get(timeout, timeUnit);
}
rowsAvailableWithoutFetching = rs.getAvailableWithoutFetching();
}
if (maxRowsPerFlowFile == 0) {
maxRowsPerFlowFile = rowsAvailableWithoutFetching;
}
Row row;
while (nrOfRows < maxRowsPerFlowFile) {
try {
row = rs.iterator().next();
} catch (NoSuchElementException nsee) {
break;
}
// iterator().next() is like iterator().one() => returns null at the end
// https://docs.datastax.com/en/drivers/java/2.0/com/datastax/driver/core/ResultSet.html#one--
if (row == null) {
break;
}
if (nrOfRows != 0) {
outStream.write(",".getBytes(charset));
}
outStream.write("{".getBytes(charset));
for (int i = 0; i < columnDefinitions.size(); i++) {
final DataType dataType = columnDefinitions.getType(i);
final String colName = columnDefinitions.getName(i);
if (i != 0) {
outStream.write(",".getBytes(charset));
}
if (row.isNull(i)) {
outStream.write(("\"" + colName + "\"" + ":null").getBytes(charset));
} else {
Object value = getCassandraObject(row, i, dataType);
String valueString;
if (value instanceof List || value instanceof Set) {
boolean first = true;
StringBuilder sb = new StringBuilder("[");
for (Object element : ((Collection) value)) {
if (!first) {
sb.append(",");
}
sb.append(getJsonElement(context, element));
first = false;
}
sb.append("]");
valueString = sb.toString();
} else if (value instanceof Map) {
boolean first = true;
StringBuilder sb = new StringBuilder("{");
for (Object element : ((Map) value).entrySet()) {
Map.Entry entry = (Map.Entry) element;
Object mapKey = entry.getKey();
Object mapValue = entry.getValue();
if (!first) {
sb.append(",");
}
sb.append(getJsonElement(context, mapKey));
sb.append(":");
sb.append(getJsonElement(context, mapValue));
first = false;
}
sb.append("}");
valueString = sb.toString();
} else {
valueString = getJsonElement(context, value);
}
outStream.write(("\"" + colName + "\":"
+ valueString + "").getBytes(charset));
}
}
nrOfRows += 1;
outStream.write("}".getBytes(charset));
}
}
return nrOfRows;
} finally {
outStream.write("]}".getBytes());
}
}
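// Output sketch for a hypothetical two-column result row:
//   {"results":[{"user_id":"user1","scale":1.0}]}
// Collection columns are rendered as JSON arrays and map columns as nested
// JSON objects, with each element passed through getJsonElement.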
protected static String getJsonElement(Object value) {
return getJsonElement(Optional.empty(), value);
}
protected static String getJsonElement(final Optional<ProcessContext> context, Object value) {
if (value instanceof Number) {
return value.toString();
} else if (value instanceof Date) {
return "\"" + getFormattedDate(context, (Date) value) + "\"";
} else if (value instanceof String) {
return "\"" + StringEscapeUtils.escapeJson((String) value) + "\"";
} else {
return "\"" + value.toString() + "\"";
}
}
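// Mapping sketch: a Number renders unquoted (42 -> 42), a Date renders as a
// quoted UTC timestamp using the configured pattern, a String is JSON-escaped
// and quoted, and any other type falls back to its quoted toString() value.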
/**
* Creates an Avro schema from the given result set. The metadata (column definitions, data types, etc.) is used
* to determine a schema for Avro.
*
* @param rs The result set from which an Avro schema will be created
* @return An Avro schema corresponding to the given result set's metadata
*/
public static Schema createSchema(final ResultSet rs) {
final ColumnDefinitions columnDefinitions = rs.getColumnDefinitions();
final int nrOfColumns = (columnDefinitions == null ? 0 : columnDefinitions.size());
String tableName = "NiFi_Cassandra_Query_Record";
if (nrOfColumns > 0) {
String tableNameFromMeta = columnDefinitions.getTable(0);
if (!StringUtils.isBlank(tableNameFromMeta)) {
tableName = tableNameFromMeta;
}
}
final SchemaBuilder.FieldAssembler<Schema> builder = SchemaBuilder.record(tableName).namespace("any.data").fields();
if (columnDefinitions != null) {
for (int i = 0; i < nrOfColumns; i++) {
DataType dataType = columnDefinitions.getType(i);
if (dataType == null) {
throw new IllegalArgumentException("No data type for column[" + i + "] with name " + columnDefinitions.getName(i));
}
// Map types from Cassandra to Avro where possible
if (dataType.isCollection()) {
List<DataType> typeArguments = dataType.getTypeArguments();
if (typeArguments == null || typeArguments.isEmpty()) {
throw new IllegalArgumentException("Column[" + i + "] " + dataType.getName()
+ " is a collection but no type arguments were specified!");
}
// Get the first type argument, to be used for lists and sets
DataType firstArg = typeArguments.get(0);
if (dataType.equals(DataType.set(firstArg))
|| dataType.equals(DataType.list(firstArg))) {
builder.name(columnDefinitions.getName(i)).type().unionOf().nullBuilder().endNull().and().array()
.items(getUnionFieldType(getPrimitiveAvroTypeFromCassandraType(firstArg))).endUnion().noDefault();
} else {
// Must be an n-arg collection like map
DataType secondArg = typeArguments.get(1);
if (dataType.equals(DataType.map(firstArg, secondArg))) {
builder.name(columnDefinitions.getName(i)).type().unionOf().nullBuilder().endNull().and().map().values(
getUnionFieldType(getPrimitiveAvroTypeFromCassandraType(secondArg))).endUnion().noDefault();
}
}
} else {
builder.name(columnDefinitions.getName(i))
.type(getUnionFieldType(getPrimitiveAvroTypeFromCassandraType(dataType))).noDefault();
}
}
}
return builder.endRecord();
}
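// Schema sketch for a hypothetical table "users" with columns
// (user_id text, emails set<text>): a record named "users" in namespace
// "any.data" whose field "user_id" is typed ["null","string"] and whose field
// "emails" is typed ["null",{"type":"array","items":["null","string"]}].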
}

View File

@ -1,17 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
org.apache.nifi.processors.cassandra.QueryCassandra
org.apache.nifi.processors.cassandra.PutCassandraQL
org.apache.nifi.processors.cassandra.PutCassandraRecord
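# Each entry above registers a Processor implementation so that NiFi can
# discover it through java.util.ServiceLoader when the NAR is loaded.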

View File

@ -1,320 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.cassandra;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Configuration;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.Row;
import com.google.common.collect.Sets;
import org.apache.nifi.annotation.lifecycle.OnEnabled;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.controller.ConfigurationContext;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.exception.ProcessException;
import org.apache.nifi.reporting.InitializationException;
import org.apache.nifi.service.CassandraSessionProvider;
import org.apache.nifi.ssl.SSLContextService;
import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import javax.net.ssl.SSLContext;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Unit tests for the AbstractCassandraProcessor class
*/
public class AbstractCassandraProcessorTest {
MockAbstractCassandraProcessor processor;
private TestRunner testRunner;
@BeforeEach
public void setUp() throws Exception {
processor = new MockAbstractCassandraProcessor();
testRunner = TestRunners.newTestRunner(processor);
}
@Test
public void testCustomValidate() throws Exception {
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "");
testRunner.assertNotValid();
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "localhost");
testRunner.assertNotValid();
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "localhost:9042");
testRunner.assertValid();
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "localhost:9042, node2: 4399");
testRunner.assertValid();
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, " localhost : 9042, node2: 4399");
testRunner.assertValid();
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "localhost:9042, node2");
testRunner.assertNotValid();
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "localhost:65536");
testRunner.assertNotValid();
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "localhost:9042");
testRunner.setProperty(AbstractCassandraProcessor.USERNAME, "user");
testRunner.assertNotValid(); // Needs a password set if user is set
testRunner.setProperty(AbstractCassandraProcessor.PASSWORD, "password");
testRunner.assertValid();
}
@Test
public void testCustomValidateEL() throws Exception {
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "${host}");
testRunner.setProperty(AbstractCassandraProcessor.KEYSPACE, "${keyspace}");
testRunner.setProperty(AbstractCassandraProcessor.USERNAME, "${user}");
testRunner.setProperty(AbstractCassandraProcessor.PASSWORD, "${password}");
testRunner.setProperty(AbstractCassandraProcessor.CHARSET, "${charset}");
testRunner.assertValid();
}
@SuppressWarnings("unchecked")
@Test
public void testGetCassandraObject() throws Exception {
Row row = CassandraQueryTestUtil.createRow("user1", "Joe", "Smith",
Sets.newHashSet("jsmith@notareal.com", "joes@fakedomain.com"), Arrays.asList("New York, NY", "Santa Clara, CA"),
new HashMap<Date, String>() {{
put(Calendar.getInstance().getTime(), "Set my alarm for a month from now");
}}, true, 1.0f, 2.0);
assertEquals("user1", AbstractCassandraProcessor.getCassandraObject(row, 0, DataType.text()));
assertEquals("Joe", AbstractCassandraProcessor.getCassandraObject(row, 1, DataType.text()));
assertEquals("Smith", AbstractCassandraProcessor.getCassandraObject(row, 2, DataType.text()));
Set<String> emails = (Set<String>) AbstractCassandraProcessor.getCassandraObject(row, 3, DataType.set(DataType.text()));
assertNotNull(emails);
assertEquals(2, emails.size());
List<String> topPlaces = (List<String>) AbstractCassandraProcessor.getCassandraObject(row, 4, DataType.list(DataType.text()));
assertNotNull(topPlaces);
Map<Date, String> todoMap = (Map<Date, String>) AbstractCassandraProcessor.getCassandraObject(
row, 5, DataType.map(DataType.timestamp(), DataType.text()));
assertNotNull(todoMap);
assertEquals(1, todoMap.values().size());
Boolean registered = (Boolean) AbstractCassandraProcessor.getCassandraObject(row, 6, DataType.cboolean());
assertNotNull(registered);
assertTrue(registered);
}
@Test
public void testGetSchemaForType() throws Exception {
assertEquals(AbstractCassandraProcessor.getSchemaForType("string").getType().getName(), "string");
assertEquals(AbstractCassandraProcessor.getSchemaForType("boolean").getType().getName(), "boolean");
assertEquals(AbstractCassandraProcessor.getSchemaForType("int").getType().getName(), "int");
assertEquals(AbstractCassandraProcessor.getSchemaForType("long").getType().getName(), "long");
assertEquals(AbstractCassandraProcessor.getSchemaForType("float").getType().getName(), "float");
assertEquals(AbstractCassandraProcessor.getSchemaForType("double").getType().getName(), "double");
assertEquals(AbstractCassandraProcessor.getSchemaForType("bytes").getType().getName(), "bytes");
}
@Test
public void testGetSchemaForTypeBadType() throws Exception {
assertThrows(IllegalArgumentException.class, () -> AbstractCassandraProcessor.getSchemaForType("nothing"));
}
@Test
public void testGetPrimitiveAvroTypeFromCassandraType() throws Exception {
assertEquals("string", AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(DataType.ascii()));
assertEquals("string", AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(DataType.text()));
assertEquals("string", AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(DataType.varchar()));
assertEquals("string", AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(DataType.timestamp()));
assertEquals("string", AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(DataType.timeuuid()));
assertEquals("string", AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(DataType.uuid()));
assertEquals("string", AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(DataType.inet()));
assertEquals("string", AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(DataType.varint()));
assertEquals("boolean", AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(DataType.cboolean()));
assertEquals("int", AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(DataType.cint()));
assertEquals("long", AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(DataType.bigint()));
assertEquals("long", AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(DataType.counter()));
assertEquals("float", AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(DataType.cfloat()));
assertEquals("double", AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(DataType.cdouble()));
assertEquals("bytes", AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(DataType.blob()));
}
@Test
public void testGetPrimitiveAvroTypeFromCassandraTypeBadType() throws Exception {
DataType mockDataType = mock(DataType.class);
assertThrows(IllegalArgumentException.class, () -> AbstractCassandraProcessor.getPrimitiveAvroTypeFromCassandraType(mockDataType));
}
@Test
public void testGetPrimitiveDataTypeFromString() {
assertEquals(DataType.ascii(), AbstractCassandraProcessor.getPrimitiveDataTypeFromString("ascii"));
}
@Test
public void testGetContactPoints() throws Exception {
List<InetSocketAddress> contactPoints = processor.getContactPoints("");
assertNotNull(contactPoints);
assertEquals(1, contactPoints.size());
assertEquals("localhost", contactPoints.get(0).getHostName());
assertEquals(AbstractCassandraProcessor.DEFAULT_CASSANDRA_PORT, contactPoints.get(0).getPort());
contactPoints = processor.getContactPoints("192.168.99.100:9042");
assertNotNull(contactPoints);
assertEquals(1, contactPoints.size());
assertEquals("192.168.99.100", contactPoints.get(0).getAddress().getHostAddress());
assertEquals(9042, contactPoints.get(0).getPort());
contactPoints = processor.getContactPoints("192.168.99.100:9042, mydomain.com : 4000");
assertNotNull(contactPoints);
assertEquals(2, contactPoints.size());
assertEquals("192.168.99.100", contactPoints.get(0).getAddress().getHostAddress());
assertEquals(9042, contactPoints.get(0).getPort());
assertEquals("mydomain.com", contactPoints.get(1).getHostName());
assertEquals(4000, contactPoints.get(1).getPort());
}
@Test
public void testConnectToCassandra() throws Exception {
// Follow the non-null path
Cluster cluster = mock(Cluster.class);
processor.setCluster(cluster);
testRunner.setProperty(AbstractCassandraProcessor.CONSISTENCY_LEVEL, "ONE");
processor.connectToCassandra(testRunner.getProcessContext());
processor.stop(testRunner.getProcessContext());
assertNull(processor.getCluster());
// Now do a connect where a cluster is "built"
processor.connectToCassandra(testRunner.getProcessContext());
assertEquals("cluster1", processor.getCluster().getMetadata().getClusterName());
}
@Test
public void testConnectToCassandraWithSSL() throws Exception {
SSLContextService sslService = mock(SSLContextService.class);
when(sslService.getIdentifier()).thenReturn("ssl-context");
testRunner.addControllerService("ssl-context", sslService);
testRunner.enableControllerService(sslService);
testRunner.setProperty(AbstractCassandraProcessor.PROP_SSL_CONTEXT_SERVICE, "ssl-context");
testRunner.setProperty(AbstractCassandraProcessor.CONSISTENCY_LEVEL, "ONE");
testRunner.assertValid(sslService);
processor.connectToCassandra(testRunner.getProcessContext());
assertNotNull(processor.getCluster());
processor.setCluster(null);
// Try with a ClientAuth value
testRunner.setProperty(AbstractCassandraProcessor.CLIENT_AUTH, "WANT");
processor.connectToCassandra(testRunner.getProcessContext());
assertNotNull(processor.getCluster());
}
@Test
public void testConnectToCassandraUsernamePassword() throws Exception {
testRunner.setProperty(AbstractCassandraProcessor.USERNAME, "user");
testRunner.setProperty(AbstractCassandraProcessor.PASSWORD, "password");
testRunner.setProperty(AbstractCassandraProcessor.CONSISTENCY_LEVEL, "ONE");
// Now do a connect where a cluster is "built"
processor.connectToCassandra(testRunner.getProcessContext());
assertNotNull(processor.getCluster());
}
@Test
public void testCustomValidateCassandraConnectionConfiguration() throws InitializationException {
MockCassandraSessionProvider sessionProviderService = new MockCassandraSessionProvider();
testRunner.addControllerService("cassandra-connection-provider", sessionProviderService);
testRunner.setProperty(sessionProviderService, CassandraSessionProvider.CONTACT_POINTS, "localhost:9042");
testRunner.setProperty(sessionProviderService, CassandraSessionProvider.KEYSPACE, "somekeyspace");
testRunner.setProperty(AbstractCassandraProcessor.CONNECTION_PROVIDER_SERVICE, "cassandra-connection-provider");
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "localhost:9042");
testRunner.setProperty(AbstractCassandraProcessor.KEYSPACE, "some-keyspace");
testRunner.setProperty(AbstractCassandraProcessor.CONSISTENCY_LEVEL, "ONE");
testRunner.setProperty(AbstractCassandraProcessor.USERNAME, "user");
testRunner.setProperty(AbstractCassandraProcessor.PASSWORD, "password");
testRunner.enableControllerService(sessionProviderService);
testRunner.assertNotValid();
testRunner.removeProperty(AbstractCassandraProcessor.CONTACT_POINTS);
testRunner.removeProperty(AbstractCassandraProcessor.KEYSPACE);
testRunner.removeProperty(AbstractCassandraProcessor.CONSISTENCY_LEVEL);
testRunner.removeProperty(AbstractCassandraProcessor.USERNAME);
testRunner.removeProperty(AbstractCassandraProcessor.PASSWORD);
testRunner.assertValid();
}
/**
* Provides a stubbed processor instance for testing
*/
public static class MockAbstractCassandraProcessor extends AbstractCassandraProcessor {
@Override
protected List<PropertyDescriptor> getSupportedPropertyDescriptors() {
return Arrays.asList(CONNECTION_PROVIDER_SERVICE, CONTACT_POINTS, KEYSPACE, USERNAME, PASSWORD, CONSISTENCY_LEVEL, CHARSET);
}
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
}
@Override
protected Cluster createCluster(List<InetSocketAddress> contactPoints, SSLContext sslContext,
String username, String password, String compressionType) {
Cluster mockCluster = mock(Cluster.class);
Metadata mockMetadata = mock(Metadata.class);
when(mockMetadata.getClusterName()).thenReturn("cluster1");
when(mockCluster.getMetadata()).thenReturn(mockMetadata);
Configuration config = Configuration.builder().build();
when(mockCluster.getConfiguration()).thenReturn(config);
return mockCluster;
}
public Cluster getCluster() {
return cluster.get();
}
public void setCluster(Cluster newCluster) {
this.cluster.set(newCluster);
}
}
/**
* Mock CassandraSessionProvider implementation for testing purposes
*/
private class MockCassandraSessionProvider extends CassandraSessionProvider {
@OnEnabled
public void onEnabled(final ConfigurationContext context) {
}
}
}

View File

@ -1,219 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.cassandra;
import com.datastax.driver.core.ColumnDefinitions;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.google.common.collect.Sets;
import com.google.common.reflect.TypeToken;
import com.google.common.util.concurrent.ListenableFuture;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.time.OffsetDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Set;
import java.util.Map;
import java.util.List;
import java.util.HashMap;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.TimeZone;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Utility methods for Cassandra processors' unit tests
*/
public class CassandraQueryTestUtil {
static final Date TEST_DATE;
static {
Calendar c = GregorianCalendar.getInstance(TimeZone.getTimeZone("PST"));
c.set(2020, Calendar.JANUARY, 1, 10, 10, 10);
c.set(Calendar.MILLISECOND, 10);
TEST_DATE = c.getTime();
}
public static ResultSet createMockResultSet(boolean falseThenTrue) throws Exception {
ResultSet resultSet = mock(ResultSet.class);
ColumnDefinitions columnDefinitions = mock(ColumnDefinitions.class);
when(columnDefinitions.size()).thenReturn(9);
when(columnDefinitions.getName(anyInt())).thenAnswer(new Answer<String>() {
List<String> colNames = Arrays.asList(
"user_id", "first_name", "last_name", "emails", "top_places", "todo", "registered", "scale", "metric");
@Override
public String answer(InvocationOnMock invocationOnMock) {
return colNames.get((Integer) invocationOnMock.getArguments()[0]);
}
});
when(columnDefinitions.getTable(0)).thenReturn("users");
when(columnDefinitions.getType(anyInt())).thenAnswer(new Answer<DataType>() {
List<DataType> dataTypes = Arrays.asList(
DataType.text(), DataType.text(), DataType.text(), DataType.set(DataType.text()),
DataType.list(DataType.text()), DataType.map(DataType.timestamp(), DataType.text()), DataType.cboolean(),
DataType.cfloat(), DataType.cdouble()
);
@Override
public DataType answer(InvocationOnMock invocationOnMock) throws Throwable {
return dataTypes.get((Integer) invocationOnMock.getArguments()[0]);
}
});
final DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ssZ");
final Date aMonthPrior = Date.from(OffsetDateTime.parse("2016-01-03 05:00:00+0000", dateTimeFormatter).toInstant());
final Date testDate = Date.from(OffsetDateTime.parse("2016-02-03 05:00:00+0000", dateTimeFormatter).toInstant());
List<Row> rows = Arrays.asList(
createRow("user1", "Joe", "Smith", Sets.newHashSet("jsmith@notareal.com"),
Arrays.asList("New York, NY", "Santa Clara, CA"),
new HashMap<Date, String>() {{
put(aMonthPrior, "Set my alarm \"for\" a month from now");
}}, false, 1.0f, 2.0),
createRow("user2", "Mary", "Jones", Sets.newHashSet("mjones@notareal.com"),
Collections.singletonList("Orlando, FL"),
new HashMap<Date, String>() {{
put(testDate, "Get milk and bread");
}}, true, 3.0f, 4.0)
);
ListenableFuture future = mock(ListenableFuture.class);
when(future.get()).thenReturn(rows);
when(resultSet.fetchMoreResults()).thenReturn(future);
when(resultSet.iterator()).thenReturn(rows.iterator());
when(resultSet.all()).thenReturn(rows);
when(resultSet.getAvailableWithoutFetching()).thenReturn(rows.size());
when(resultSet.isFullyFetched()).thenReturn(false).thenReturn(true);
if (falseThenTrue) {
when(resultSet.isExhausted()).thenReturn(false, true);
} else {
when(resultSet.isExhausted()).thenReturn(true);
}
when(resultSet.getColumnDefinitions()).thenReturn(columnDefinitions);
return resultSet;
}
public static ResultSet createMockResultSet() throws Exception {
return createMockResultSet(true);
}
public static ResultSet createMockResultSetOneColumn() throws Exception {
ResultSet resultSet = mock(ResultSet.class);
ColumnDefinitions columnDefinitions = mock(ColumnDefinitions.class);
when(columnDefinitions.size()).thenReturn(1);
when(columnDefinitions.getName(anyInt())).thenAnswer(new Answer<String>() {
List<String> colNames = Arrays.asList("user_id");
@Override
public String answer(InvocationOnMock invocationOnMock) throws Throwable {
return colNames.get((Integer) invocationOnMock.getArguments()[0]);
}
});
when(columnDefinitions.getTable(0)).thenReturn("users");
when(columnDefinitions.getType(anyInt())).thenAnswer(new Answer<DataType>() {
List<DataType> dataTypes = Arrays.asList(DataType.text());
@Override
public DataType answer(InvocationOnMock invocationOnMock) throws Throwable {
return dataTypes.get((Integer) invocationOnMock.getArguments()[0]);
}
});
List<Row> rows = Arrays.asList(
createRow("user1"),
createRow("user2")
);
ListenableFuture future = mock(ListenableFuture.class);
when(future.get()).thenReturn(rows);
when(resultSet.fetchMoreResults()).thenReturn(future);
when(resultSet.iterator()).thenReturn(rows.iterator());
when(resultSet.all()).thenReturn(rows);
when(resultSet.getAvailableWithoutFetching()).thenReturn(rows.size());
when(resultSet.isFullyFetched()).thenReturn(false).thenReturn(true);
when(resultSet.isExhausted()).thenReturn(false).thenReturn(true);
when(resultSet.getColumnDefinitions()).thenReturn(columnDefinitions);
return resultSet;
}
public static ResultSet createMockDateResultSet() throws Exception {
ResultSet resultSet = mock(ResultSet.class);
ColumnDefinitions columnDefinitions = mock(ColumnDefinitions.class);
when(columnDefinitions.size()).thenReturn(1);
when(columnDefinitions.getName(anyInt())).thenReturn("date");
when(columnDefinitions.getTable(0)).thenReturn("users");
when(columnDefinitions.getType(anyInt())).thenReturn(DataType.timestamp());
Row row = mock(Row.class);
when(row.getTimestamp(0)).thenReturn(TEST_DATE);
List<Row> rows = Collections.singletonList(row);
when(resultSet.iterator()).thenReturn(rows.iterator());
when(resultSet.all()).thenReturn(rows);
when(resultSet.getAvailableWithoutFetching()).thenReturn(rows.size());
when(resultSet.isFullyFetched()).thenReturn(false).thenReturn(true);
when(resultSet.getColumnDefinitions()).thenReturn(columnDefinitions);
return resultSet;
}
public static Row createRow(String user_id, String first_name, String last_name, Set<String> emails,
List<String> top_places, Map<Date, String> todo, boolean registered,
float scale, double metric) {
Row row = mock(Row.class);
when(row.getString(0)).thenReturn(user_id);
when(row.getString(1)).thenReturn(first_name);
when(row.getString(2)).thenReturn(last_name);
when(row.getSet(eq(3), any(TypeToken.class))).thenReturn(emails);
when(row.getList(eq(4), any(TypeToken.class))).thenReturn(top_places);
when(row.getMap(eq(5), any(TypeToken.class), any(TypeToken.class))).thenReturn(todo);
when(row.getBool(6)).thenReturn(registered);
when(row.getFloat(7)).thenReturn(scale);
when(row.getDouble(8)).thenReturn(metric);
return row;
}
public static Row createRow(String user_id) {
Row row = mock(Row.class);
when(row.getString(0)).thenReturn(user_id);
return row;
}
}
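// Usage sketch in a unit test (relies only on the mocks above):
//   ResultSet rs = CassandraQueryTestUtil.createMockResultSet();
//   long rows = QueryCassandra.convertToAvroStream(rs, 0, out, 0, null);
// where "out" is any OutputStream supplied by the test (name is illustrative).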

View File

@ -1,434 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.cassandra;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Configuration;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SniEndPoint;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.exceptions.InvalidQueryException;
import com.datastax.driver.core.exceptions.NoHostAvailableException;
import com.datastax.driver.core.exceptions.UnavailableException;
import org.apache.nifi.processor.exception.ProcessException;
import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import javax.net.ssl.SSLContext;
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Unit tests for the PutCassandraQL processor
*/
public class PutCassandraQLTest {
private TestRunner testRunner;
private MockPutCassandraQL processor;
@BeforeEach
public void setUp() {
processor = new MockPutCassandraQL();
testRunner = TestRunners.newTestRunner(processor);
}
@Test
public void testProcessorConfigValidity() {
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "localhost:9042");
testRunner.assertValid();
testRunner.setProperty(AbstractCassandraProcessor.PASSWORD, "password");
testRunner.assertNotValid();
testRunner.setProperty(AbstractCassandraProcessor.USERNAME, "username");
testRunner.setProperty(AbstractCassandraProcessor.CONSISTENCY_LEVEL, "ONE");
testRunner.assertValid();
}
@Test
public void testProcessorELConfigValidity() {
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "${hosts}");
testRunner.setProperty(AbstractCassandraProcessor.PASSWORD, "${pass}");
testRunner.setProperty(AbstractCassandraProcessor.USERNAME, "${user}");
testRunner.setProperty(AbstractCassandraProcessor.CHARSET, "${charset}");
testRunner.setProperty(PutCassandraQL.STATEMENT_TIMEOUT, "${timeout}");
testRunner.assertValid();
}
@Test
public void testProcessorHappyPath() {
setUpStandardTestConfig();
testRunner.enqueue("INSERT INTO users (user_id, first_name, last_name, properties, bits, scaleset, largenum, scale, byteobject, ts) VALUES ?, ?, ?, ?, ?, ?, ?, ?, ?, ?",
new HashMap<String, String>() {
{
put("cql.args.1.type", "int");
put("cql.args.1.value", "1");
put("cql.args.2.type", "text");
put("cql.args.2.value", "Joe");
put("cql.args.3.type", "text");
// No value for arg 3 to test setNull
put("cql.args.4.type", "map<text,text>");
put("cql.args.4.value", "{'a':'Hello', 'b':'World'}");
put("cql.args.5.type", "list<boolean>");
put("cql.args.5.value", "[true,false,true]");
put("cql.args.6.type", "set<double>");
put("cql.args.6.value", "{1.0, 2.0}");
put("cql.args.7.type", "bigint");
put("cql.args.7.value", "20000000");
put("cql.args.8.type", "float");
put("cql.args.8.value", "1.0");
put("cql.args.9.type", "blob");
put("cql.args.9.value", "0xDEADBEEF");
put("cql.args.10.type", "timestamp");
put("cql.args.10.value", "2016-07-01T15:21:05Z");
}
});
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(PutCassandraQL.REL_SUCCESS, 1);
testRunner.clearTransferState();
}
@Test
public void testProcessorHappyPathELConfig() {
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "${hosts}");
testRunner.setProperty(AbstractCassandraProcessor.PASSWORD, "${pass}");
testRunner.setProperty(AbstractCassandraProcessor.USERNAME, "${user}");
testRunner.setProperty(AbstractCassandraProcessor.CONSISTENCY_LEVEL, "ONE");
testRunner.setProperty(AbstractCassandraProcessor.CHARSET, "${charset}");
testRunner.setProperty(PutCassandraQL.STATEMENT_TIMEOUT, "${timeout}");
testRunner.assertValid();
testRunner.setEnvironmentVariableValue("hosts", "localhost:9042");
testRunner.setEnvironmentVariableValue("user", "username");
testRunner.setEnvironmentVariableValue("pass", "password");
testRunner.setEnvironmentVariableValue("charset", "UTF-8");
testRunner.setEnvironmentVariableValue("timeout", "30 sec");
testRunner.enqueue("INSERT INTO users (user_id, first_name, last_name, properties, bits, scaleset, largenum, scale, byteobject, ts) VALUES ?, ?, ?, ?, ?, ?, ?, ?, ?, ?",
new HashMap<String, String>() {
{
put("cql.args.1.type", "int");
put("cql.args.1.value", "1");
put("cql.args.2.type", "text");
put("cql.args.2.value", "Joe");
put("cql.args.3.type", "text");
// No value for arg 3 to test setNull
put("cql.args.4.type", "map<text,text>");
put("cql.args.4.value", "{'a':'Hello', 'b':'World'}");
put("cql.args.5.type", "list<boolean>");
put("cql.args.5.value", "[true,false,true]");
put("cql.args.6.type", "set<double>");
put("cql.args.6.value", "{1.0, 2.0}");
put("cql.args.7.type", "bigint");
put("cql.args.7.value", "20000000");
put("cql.args.8.type", "float");
put("cql.args.8.value", "1.0");
put("cql.args.9.type", "blob");
put("cql.args.9.value", "0xDEADBEEF");
put("cql.args.10.type", "timestamp");
put("cql.args.10.value", "2016-07-01T15:21:05Z");
}
});
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(PutCassandraQL.REL_SUCCESS, 1);
testRunner.clearTransferState();
}
@Test
public void testMultipleQuery() {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraQL.STATEMENT_CACHE_SIZE, "1");
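// A cache size of 1 forces eviction when the second distinct statement below is prepared, exercising the statement-cache path.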
HashMap<String, String> testData = new HashMap<>();
testData.put("cql.args.1.type", "int");
testData.put("cql.args.1.value", "1");
testData.put("cql.args.2.type", "text");
testData.put("cql.args.2.value", "Joe");
testData.put("cql.args.3.type", "text");
// No value for arg 3 to test setNull
testData.put("cql.args.4.type", "map<text,text>");
testData.put("cql.args.4.value", "{'a':'Hello', 'b':'World'}");
testData.put("cql.args.5.type", "list<boolean>");
testData.put("cql.args.5.value", "[true,false,true]");
testData.put("cql.args.6.type", "set<double>");
testData.put("cql.args.6.value", "{1.0, 2.0}");
testData.put("cql.args.7.type", "bigint");
testData.put("cql.args.7.value", "20000000");
testData.put("cql.args.8.type", "float");
testData.put("cql.args.8.value", "1.0");
testData.put("cql.args.9.type", "blob");
testData.put("cql.args.9.value", "0xDEADBEEF");
testData.put("cql.args.10.type", "timestamp");
testData.put("cql.args.10.value", "2016-07-01T15:21:05Z");
testRunner.enqueue("INSERT INTO users (user_id, first_name, last_name, properties, bits, scaleset, largenum, scale, byteobject, ts) VALUES ?, ?, ?, ?, ?, ?, ?, ?, ?, ?",
testData);
testRunner.enqueue("INSERT INTO newusers (user_id, first_name, last_name, properties, bits, scaleset, largenum, scale, byteobject, ts) VALUES ?, ?, ?, ?, ?, ?, ?, ?, ?, ?",
testData);
// Change it up a bit: the same statement is executed with different data
testData.put("cql.args.1.value", "2");
testRunner.enqueue("INSERT INTO users (user_id, first_name, last_name, properties, bits, scaleset, largenum, scale, byteobject, ts) VALUES ?, ?, ?, ?, ?, ?, ?, ?, ?, ?",
testData);
testRunner.enqueue("INSERT INTO users (user_id) VALUES ('user_id data');");
testRunner.run(4, true, true);
testRunner.assertAllFlowFilesTransferred(PutCassandraQL.REL_SUCCESS, 4);
}
@Test
public void testProcessorBadTimestamp() {
setUpStandardTestConfig();
processor.setExceptionToThrow(
new InvalidQueryException(new SniEndPoint(new InetSocketAddress("localhost", 9042), ""), "invalid timestamp"));
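// The stubbed session throws InvalidQueryException, so the FlowFile with the malformed timestamp should route to failure.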
testRunner.enqueue("INSERT INTO users (user_id, first_name, last_name, properties, bits, scaleset, largenum, scale, byteobject, ts) VALUES ?, ?, ?, ?, ?, ?, ?, ?, ?, ?",
new HashMap<String, String>() {
{
put("cql.args.1.type", "int");
put("cql.args.1.value", "1");
put("cql.args.2.type", "text");
put("cql.args.2.value", "Joe");
put("cql.args.3.type", "text");
// No value for arg 3 to test setNull
put("cql.args.4.type", "map<text,text>");
put("cql.args.4.value", "{'a':'Hello', 'b':'World'}");
put("cql.args.5.type", "list<boolean>");
put("cql.args.5.value", "[true,false,true]");
put("cql.args.6.type", "set<double>");
put("cql.args.6.value", "{1.0, 2.0}");
put("cql.args.7.type", "bigint");
put("cql.args.7.value", "20000000");
put("cql.args.8.type", "float");
put("cql.args.8.value", "1.0");
put("cql.args.9.type", "blob");
put("cql.args.9.value", "0xDEADBEEF");
put("cql.args.10.type", "timestamp");
put("cql.args.10.value", "not a timestamp");
}
});
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(PutCassandraQL.REL_FAILURE, 1);
testRunner.clearTransferState();
}
@Test
public void testProcessorUuid() {
setUpStandardTestConfig();
testRunner.enqueue("INSERT INTO users (user_id, first_name, last_name, properties, bits, scaleset, largenum, scale, byteobject, ts) VALUES ?, ?, ?, ?, ?, ?, ?, ?, ?, ?",
new HashMap<String, String>() {
{
put("cql.args.1.type", "int");
put("cql.args.1.value", "1");
put("cql.args.2.type", "text");
put("cql.args.2.value", "Joe");
put("cql.args.3.type", "text");
// No value for arg 3 to test setNull
put("cql.args.4.type", "map<text,text>");
put("cql.args.4.value", "{'a':'Hello', 'b':'World'}");
put("cql.args.5.type", "list<boolean>");
put("cql.args.5.value", "[true,false,true]");
put("cql.args.6.type", "set<double>");
put("cql.args.6.value", "{1.0, 2.0}");
put("cql.args.7.type", "bigint");
put("cql.args.7.value", "20000000");
put("cql.args.8.type", "float");
put("cql.args.8.value", "1.0");
put("cql.args.9.type", "blob");
put("cql.args.9.value", "0xDEADBEEF");
put("cql.args.10.type", "uuid");
put("cql.args.10.value", "5442b1f6-4c16-11ea-87f5-45a32dbc5199");
}
});
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(PutCassandraQL.REL_SUCCESS, 1);
testRunner.clearTransferState();
}
@Test
public void testProcessorBadUuid() {
setUpStandardTestConfig();
testRunner.enqueue("INSERT INTO users (user_id, first_name, last_name, properties, bits, scaleset, largenum, scale, byteobject, ts) VALUES ?, ?, ?, ?, ?, ?, ?, ?, ?, ?",
new HashMap<String, String>() {
{
put("cql.args.1.type", "int");
put("cql.args.1.value", "1");
put("cql.args.2.type", "text");
put("cql.args.2.value", "Joe");
put("cql.args.3.type", "text");
// No value for arg 3 to test setNull
put("cql.args.4.type", "map<text,text>");
put("cql.args.4.value", "{'a':'Hello', 'b':'World'}");
put("cql.args.5.type", "list<boolean>");
put("cql.args.5.value", "[true,false,true]");
put("cql.args.6.type", "set<double>");
put("cql.args.6.value", "{1.0, 2.0}");
put("cql.args.7.type", "bigint");
put("cql.args.7.value", "20000000");
put("cql.args.8.type", "float");
put("cql.args.8.value", "1.0");
put("cql.args.9.type", "blob");
put("cql.args.9.value", "0xDEADBEEF");
put("cql.args.10.type", "uuid");
put("cql.args.10.value", "bad-uuid");
}
});
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(PutCassandraQL.REL_FAILURE, 1);
testRunner.clearTransferState();
}
@Test
public void testProcessorInvalidQueryException() {
setUpStandardTestConfig();
// Simulate the driver rejecting the statement with an InvalidQueryException
processor.setExceptionToThrow(
new InvalidQueryException(new SniEndPoint(new InetSocketAddress("localhost", 9042), ""), "invalid query"));
testRunner.enqueue("UPDATE users SET cities = [ 'New York', 'Los Angeles' ] WHERE user_id = 'coast2coast';");
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(PutCassandraQL.REL_FAILURE, 1);
testRunner.clearTransferState();
}
@Test
public void testProcessorUnavailableException() {
setUpStandardTestConfig();
processor.setExceptionToThrow(
new UnavailableException(new SniEndPoint(new InetSocketAddress("localhost", 9042), ""), ConsistencyLevel.ALL, 5, 2));
testRunner.enqueue("UPDATE users SET cities = [ 'New York', 'Los Angeles' ] WHERE user_id = 'coast2coast';");
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(PutCassandraQL.REL_RETRY, 1);
}
@Test
public void testProcessorNoHostAvailableException() {
setUpStandardTestConfig();
processor.setExceptionToThrow(new NoHostAvailableException(new HashMap<>()));
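// No reachable host is a transient condition, so the FlowFile should route to retry rather than failure.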
testRunner.enqueue("UPDATE users SET cities = [ 'New York', 'Los Angeles' ] WHERE user_id = 'coast2coast';");
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(PutCassandraQL.REL_RETRY, 1);
}
@Test
public void testProcessorProcessException() {
setUpStandardTestConfig();
processor.setExceptionToThrow(new ProcessException());
testRunner.enqueue("UPDATE users SET cities = [ 'New York', 'Los Angeles' ] WHERE user_id = 'coast2coast';");
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(PutCassandraQL.REL_FAILURE, 1);
}
private void setUpStandardTestConfig() {
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "localhost:9042");
testRunner.setProperty(AbstractCassandraProcessor.PASSWORD, "password");
testRunner.setProperty(AbstractCassandraProcessor.USERNAME, "username");
testRunner.setProperty(AbstractCassandraProcessor.CONSISTENCY_LEVEL, "ONE");
testRunner.assertValid();
}
/**
* Provides a stubbed processor instance for testing
*/
private static class MockPutCassandraQL extends PutCassandraQL {
private Exception exceptionToThrow = null;
private Session mockSession = mock(Session.class);
@Override
protected Cluster createCluster(List<InetSocketAddress> contactPoints, SSLContext sslContext,
String username, String password, String compressionType) {
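// Stub the full driver object graph (Cluster, Metadata, Session, PreparedStatement, ResultSetFuture) so statements appear to execute without a live Cassandra node.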
Cluster mockCluster = mock(Cluster.class);
try {
Metadata mockMetadata = mock(Metadata.class);
when(mockMetadata.getClusterName()).thenReturn("cluster1");
when(mockCluster.getMetadata()).thenReturn(mockMetadata);
when(mockCluster.connect()).thenReturn(mockSession);
when(mockCluster.connect(anyString())).thenReturn(mockSession);
Configuration config = Configuration.builder().build();
when(mockCluster.getConfiguration()).thenReturn(config);
ResultSetFuture future = mock(ResultSetFuture.class);
ResultSet rs = CassandraQueryTestUtil.createMockResultSet();
PreparedStatement ps = mock(PreparedStatement.class);
when(mockSession.prepare(anyString())).thenReturn(ps);
BoundStatement bs = mock(BoundStatement.class);
when(ps.bind()).thenReturn(bs);
when(future.getUninterruptibly()).thenReturn(rs);
try {
doReturn(rs).when(future).getUninterruptibly(anyLong(), any(TimeUnit.class));
} catch (TimeoutException te) {
throw new IllegalArgumentException("Mocked cluster doesn't time out");
}
if (exceptionToThrow != null) {
doThrow(exceptionToThrow).when(mockSession).executeAsync(anyString());
doThrow(exceptionToThrow).when(mockSession).executeAsync(any(Statement.class));
} else {
when(mockSession.executeAsync(anyString())).thenReturn(future);
when(mockSession.executeAsync(any(Statement.class))).thenReturn(future);
}
when(mockSession.getCluster()).thenReturn(mockCluster);
} catch (Exception e) {
fail(e.getMessage());
}
return mockCluster;
}
void setExceptionToThrow(Exception e) {
exceptionToThrow = e;
doThrow(exceptionToThrow).when(mockSession).executeAsync(anyString());
doThrow(exceptionToThrow).when(mockSession).executeAsync(any(Statement.class));
}
}
}

View File

@ -1,132 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.cassandra;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Select;
import com.datastax.driver.core.querybuilder.Truncate;
import org.apache.nifi.reporting.InitializationException;
import org.apache.nifi.serialization.record.MockRecordParser;
import org.apache.nifi.serialization.record.RecordFieldType;
import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.testcontainers.containers.CassandraContainer;
import org.testcontainers.junit.jupiter.Container;
import org.testcontainers.junit.jupiter.Testcontainers;
import org.testcontainers.utility.DockerImageName;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.stream.Collectors;
import static org.junit.jupiter.api.Assertions.assertEquals;
@Testcontainers
public class PutCassandraRecordIT {
@Container
private static final CassandraContainer CASSANDRA_CONTAINER = new CassandraContainer(DockerImageName.parse("cassandra:4.1"));
private static TestRunner testRunner;
private static MockRecordParser recordReader;
private static Cluster cluster;
private static Session session;
private static final String KEYSPACE = "sample_keyspace";
private static final String TABLE = "sample_table";
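// Integration test backed by Testcontainers: a disposable Cassandra 4.1 instance hosts the keyspace and table created below.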
@BeforeAll
public static void setup() throws InitializationException {
recordReader = new MockRecordParser();
testRunner = TestRunners.newTestRunner(PutCassandraRecord.class);
InetSocketAddress contactPoint = CASSANDRA_CONTAINER.getContactPoint();
testRunner.setProperty(PutCassandraRecord.RECORD_READER_FACTORY, "reader");
testRunner.setProperty(PutCassandraRecord.CONTACT_POINTS, contactPoint.getHostString() + ":" + contactPoint.getPort());
testRunner.setProperty(PutCassandraRecord.KEYSPACE, KEYSPACE);
testRunner.setProperty(PutCassandraRecord.TABLE, TABLE);
testRunner.setProperty(PutCassandraRecord.CONSISTENCY_LEVEL, "SERIAL");
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, "LOGGED");
testRunner.addControllerService("reader", recordReader);
testRunner.enableControllerService(recordReader);
cluster = Cluster.builder().addContactPoint(contactPoint.getHostName())
.withPort(contactPoint.getPort()).build();
session = cluster.connect();
String createKeyspace = "CREATE KEYSPACE IF NOT EXISTS " + KEYSPACE + " WITH replication = {'class':'SimpleStrategy','replication_factor':1};";
String createTable = "CREATE TABLE IF NOT EXISTS " + KEYSPACE + "." + TABLE + "(id int PRIMARY KEY, name text, age int);";
session.execute(createKeyspace);
session.execute(createTable);
}
@Test
public void testSimplePut() {
recordReader.addSchemaField("id", RecordFieldType.INT);
recordReader.addSchemaField("name", RecordFieldType.STRING);
recordReader.addSchemaField("age", RecordFieldType.INT);
recordReader.addRecord(1, "Ram", 42);
recordReader.addRecord(2, "Jeane", 47);
recordReader.addRecord(3, "Ilamaran", 27);
recordReader.addRecord(4, "Jian", 14);
recordReader.addRecord(5, "Sakura", 24);
testRunner.enqueue("");
testRunner.run();
testRunner.assertAllFlowFilesTransferred(PutCassandraRecord.REL_SUCCESS, 1);
assertEquals(5, getRecordsCount());
}
private int getRecordsCount() {
Select selectQuery = QueryBuilder.select().all().from(KEYSPACE, TABLE);
ResultSet result = session.execute(selectQuery);
List<Integer> resultsList = result.all()
.stream()
.map(r -> r.getInt(0))
.collect(Collectors.toList());
dropRecords();
return resultsList.size();
}
private void dropRecords() {
Truncate query = QueryBuilder.truncate(KEYSPACE, TABLE);
session.execute(query);
}
@AfterAll
public static void shutdown() {
String dropKeyspace = "DROP KEYSPACE " + KEYSPACE;
String dropTable = "DROP TABLE IF EXISTS " + KEYSPACE + "." + TABLE;
session.execute(dropTable);
session.execute(dropKeyspace);
session.close();
cluster.close();
}
}

View File

@ -1,116 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.cassandra;
import com.datastax.driver.core.querybuilder.Insert;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import org.apache.nifi.serialization.record.RecordFieldType;
import org.apache.nifi.serialization.record.RecordSchema;
import org.apache.nifi.util.Tuple;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.when;
public class PutCassandraRecordInsertTest {
private PutCassandraRecord testSubject;
@Mock
private RecordSchema schema;
private AutoCloseable mockCloseable;
@BeforeEach
public void setUp() {
mockCloseable = MockitoAnnotations.openMocks(this);
testSubject = new PutCassandraRecord();
}
@AfterEach
public void closeMock() throws Exception {
if (mockCloseable != null) {
mockCloseable.close();
}
}
@Test
public void testGenerateInsert() {
testGenerateInsert(
"keyspace.table",
Arrays.asList(
new Tuple<>("keyField", 1),
new Tuple<>("integerField", 15),
new Tuple<>("longField", 67L),
new Tuple<>("stringField", "abcd")
),
Arrays.asList(
new Tuple<>("keyField", RecordFieldType.INT.getDataType()),
new Tuple<>("integerField", RecordFieldType.INT.getDataType()),
new Tuple<>("longField", RecordFieldType.LONG.getDataType()),
new Tuple<>("stringField", RecordFieldType.STRING.getDataType())
),
"INSERT INTO keyspace.table (keyField,integerField,longField,stringField) VALUES (1,15,67,'abcd');"
);
}
@Test
public void testGenerateInsertStringArray() {
testGenerateInsert(
"keyspace.table",
Arrays.asList(
new Tuple<>("keyField", 1),
new Tuple<>("integerField", 15),
new Tuple<>("arrayField", new Object[]{"test1", "test2"})
),
Arrays.asList(
new Tuple<>("keyField", RecordFieldType.INT.getDataType()),
new Tuple<>("integerField", RecordFieldType.INT.getDataType()),
new Tuple<>("arrayField", RecordFieldType.ARRAY.getArrayDataType(RecordFieldType.STRING.getDataType()))
),
"INSERT INTO keyspace.table (keyField,integerField,arrayField) VALUES (1,15,['test1','test2']);"
);
}
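// Helper: mocks the RecordSchema from the given field/type tuples, generates the INSERT, and compares its rendered CQL against the expected string.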
private void testGenerateInsert(String table, List<Tuple<String, Object>> records, List<Tuple<String, org.apache.nifi.serialization.record.DataType>> recordSchema, String expected) {
Map<String, Object> recordContentMap = records.stream()
.collect(Collectors.toMap(Tuple::getKey, Tuple::getValue));
Map<String, Object> recordSchemaMap = recordSchema.stream()
.collect(Collectors.toMap(Tuple::getKey, Tuple::getValue));
List<String> fieldNames = records.stream().map(Tuple::getKey).collect(Collectors.toList());
when(schema.getFieldNames()).thenReturn(fieldNames);
when(schema.getDataType(anyString())).thenAnswer(i -> Optional.of(recordSchemaMap.get(i.getArgument(0))));
Insert actual = (Insert) testSubject.generateInsert(table, schema, recordContentMap);
actual.setForceNoValues(true);
// Codecs are normally registered in the onScheduled method
testSubject.registerAdditionalCodecs();
assertEquals(expected, actual.getQueryString());
}
}

View File

@ -1,552 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.cassandra;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Configuration;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import org.apache.nifi.reporting.InitializationException;
import org.apache.nifi.serialization.record.MockRecordParser;
import org.apache.nifi.serialization.record.RecordField;
import org.apache.nifi.serialization.record.RecordFieldType;
import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import javax.net.ssl.SSLContext;
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class PutCassandraRecordTest {
private TestRunner testRunner;
private MockRecordParser recordReader;
@BeforeEach
public void setUp() throws Exception {
MockPutCassandraRecord processor = new MockPutCassandraRecord();
recordReader = new MockRecordParser();
testRunner = TestRunners.newTestRunner(processor);
testRunner.setProperty(PutCassandraRecord.RECORD_READER_FACTORY, "reader");
}
@Test
public void testProcessorConfigValidity() throws InitializationException {
testRunner.setProperty(PutCassandraRecord.CONTACT_POINTS, "localhost:9042");
testRunner.assertNotValid();
testRunner.setProperty(PutCassandraRecord.PASSWORD, "password");
testRunner.assertNotValid();
testRunner.setProperty(PutCassandraRecord.USERNAME, "username");
testRunner.assertNotValid();
testRunner.setProperty(PutCassandraRecord.CONSISTENCY_LEVEL, "SERIAL");
testRunner.assertNotValid();
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, "LOGGED");
testRunner.assertNotValid();
testRunner.setProperty(PutCassandraRecord.KEYSPACE, "sampleks");
testRunner.assertNotValid();
testRunner.setProperty(PutCassandraRecord.TABLE, "sampletbl");
testRunner.assertNotValid();
testRunner.addControllerService("reader", recordReader);
testRunner.enableControllerService(recordReader);
testRunner.assertValid();
}
private void setUpStandardTestConfig() throws InitializationException {
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "localhost:9042");
testRunner.setProperty(AbstractCassandraProcessor.PASSWORD, "password");
testRunner.setProperty(AbstractCassandraProcessor.USERNAME, "username");
testRunner.setProperty(PutCassandraRecord.CONSISTENCY_LEVEL, "SERIAL");
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, "LOGGED");
testRunner.setProperty(PutCassandraRecord.TABLE, "sampleks.sampletbl");
testRunner.addControllerService("reader", recordReader);
testRunner.enableControllerService(recordReader);
}
@Test
public void testSimplePut() throws InitializationException {
setUpStandardTestConfig();
recordReader.addSchemaField("name", RecordFieldType.STRING);
recordReader.addSchemaField("age", RecordFieldType.INT);
recordReader.addSchemaField("sport", RecordFieldType.STRING);
recordReader.addRecord("John Doe", 48, "Soccer");
recordReader.addRecord("Jane Doe", 47, "Tennis");
recordReader.addRecord("Sally Doe", 47, "Curling");
recordReader.addRecord("Jimmy Doe", 14, null);
recordReader.addRecord("Pizza Doe", 14, null);
testRunner.enqueue("");
testRunner.run();
testRunner.assertAllFlowFilesTransferred(PutCassandraRecord.REL_SUCCESS, 1);
}
@Test
public void testStringArrayPut() throws InitializationException {
setUpStandardTestConfig();
recordReader.addSchemaField(new RecordField("names", RecordFieldType.ARRAY.getArrayDataType(RecordFieldType.STRING.getDataType())));
recordReader.addSchemaField("age", RecordFieldType.INT);
recordReader.addRecord(new Object[]{"John", "Doe"}, 1);
recordReader.addRecord(new Object[]{"John", "Doe"}, 2);
recordReader.addRecord(new Object[]{"John", "Doe"}, 3);
testRunner.enqueue("");
testRunner.run();
testRunner.assertAllFlowFilesTransferred(PutCassandraRecord.REL_SUCCESS, 1);
}
@Test
public void testSimpleUpdate() throws InitializationException {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraRecord.STATEMENT_TYPE, PutCassandraRecord.UPDATE_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_METHOD, PutCassandraRecord.SET_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_KEYS, "name,age");
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, PutCassandraRecord.COUNTER_TYPE);
recordReader.addSchemaField("name", RecordFieldType.STRING);
recordReader.addSchemaField("age", RecordFieldType.INT);
recordReader.addSchemaField("goals", RecordFieldType.INT);
recordReader.addRecord("John Doe", 48, 1L);
recordReader.addRecord("Jane Doe", 47, 2L);
recordReader.addRecord("Sally Doe", 47, 0);
testRunner.enqueue("");
testRunner.run();
testRunner.assertAllFlowFilesTransferred(PutCassandraRecord.REL_SUCCESS, 1);
}
@Test
public void testUpdateInvalidFieldType() throws InitializationException {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraRecord.STATEMENT_TYPE, PutCassandraRecord.UPDATE_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_METHOD, PutCassandraRecord.INCR_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_KEYS, "name,age");
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, PutCassandraRecord.COUNTER_TYPE);
recordReader.addSchemaField("name", RecordFieldType.STRING);
recordReader.addSchemaField("age", RecordFieldType.INT);
recordReader.addSchemaField("goals", RecordFieldType.STRING);
recordReader.addRecord("John Doe", 48,"1");
recordReader.addRecord("Jane Doe", 47, "1");
recordReader.addRecord("Sally Doe", 47, "1");
testRunner.enqueue("");
testRunner.run();
testRunner.assertTransferCount(PutCassandraRecord.REL_FAILURE, 1);
testRunner.assertTransferCount(PutCassandraRecord.REL_SUCCESS, 0);
testRunner.assertTransferCount(PutCassandraRecord.REL_RETRY, 0);
}
@Test
public void testUpdateEmptyUpdateKeys() throws InitializationException {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraRecord.STATEMENT_TYPE, PutCassandraRecord.UPDATE_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_METHOD, PutCassandraRecord.INCR_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_KEYS, "");
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, PutCassandraRecord.COUNTER_TYPE);
testRunner.assertNotValid();
}
@Test
public void testUpdateNullUpdateKeys() throws InitializationException {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraRecord.STATEMENT_TYPE, PutCassandraRecord.UPDATE_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_METHOD, PutCassandraRecord.SET_TYPE);
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, PutCassandraRecord.COUNTER_TYPE);
testRunner.assertNotValid();
}
@Test
public void testUpdateSetLoggedBatch() throws InitializationException {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraRecord.STATEMENT_TYPE, PutCassandraRecord.UPDATE_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_METHOD, PutCassandraRecord.SET_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_KEYS, "name,age");
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, PutCassandraRecord.LOGGED_TYPE);
testRunner.assertValid();
}
@Test
public void testUpdateCounterWrongBatchStatementType() throws InitializationException {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraRecord.STATEMENT_TYPE, PutCassandraRecord.UPDATE_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_METHOD, PutCassandraRecord.INCR_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_KEYS, "name,age");
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, PutCassandraRecord.LOGGED_TYPE);
testRunner.assertNotValid();
}
@Test
public void testUpdateWithUpdateMethodAndKeyAttributes() throws InitializationException {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraRecord.STATEMENT_TYPE, PutCassandraRecord.UPDATE_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_METHOD, PutCassandraRecord.UPDATE_METHOD_USE_ATTR_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_KEYS, "${cql.update.keys}");
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, PutCassandraRecord.COUNTER_TYPE);
testRunner.assertValid();
recordReader.addSchemaField("name", RecordFieldType.STRING);
recordReader.addSchemaField("age", RecordFieldType.INT);
recordReader.addSchemaField("goals", RecordFieldType.LONG);
recordReader.addRecord("John Doe", 48, 1L);
recordReader.addRecord("Jane Doe", 47, 1L);
recordReader.addRecord("Sally Doe", 47, 1L);
Map<String, String> attributes = new HashMap<>();
attributes.put("cql.update.method", "Increment");
attributes.put("cql.update.keys", "name,age");
testRunner.enqueue("", attributes);
testRunner.run();
testRunner.assertTransferCount(PutCassandraRecord.REL_FAILURE, 0);
testRunner.assertTransferCount(PutCassandraRecord.REL_SUCCESS, 1);
testRunner.assertTransferCount(PutCassandraRecord.REL_RETRY, 0);
}
@Test
public void testInsertWithStatementAttribute() throws InitializationException {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraRecord.STATEMENT_TYPE, PutCassandraRecord.STATEMENT_TYPE_USE_ATTR_TYPE);
testRunner.assertValid();
recordReader.addSchemaField("name", RecordFieldType.STRING);
recordReader.addSchemaField("age", RecordFieldType.INT);
recordReader.addSchemaField("goals", RecordFieldType.LONG);
recordReader.addRecord("John Doe", 48, 1L);
recordReader.addRecord("Jane Doe", 47, 1L);
recordReader.addRecord("Sally Doe", 47, 1L);
Map<String, String> attributes = new HashMap<>();
attributes.put("cql.statement.type", "Insert");
testRunner.enqueue("", attributes);
testRunner.run();
testRunner.assertTransferCount(PutCassandraRecord.REL_FAILURE, 0);
testRunner.assertTransferCount(PutCassandraRecord.REL_SUCCESS, 1);
testRunner.assertTransferCount(PutCassandraRecord.REL_RETRY, 0);
}
@Test
public void testInsertWithStatementAttributeInvalid() throws InitializationException {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraRecord.STATEMENT_TYPE, PutCassandraRecord.STATEMENT_TYPE_USE_ATTR_TYPE);
testRunner.assertValid();
recordReader.addSchemaField("name", RecordFieldType.STRING);
recordReader.addSchemaField("age", RecordFieldType.INT);
recordReader.addSchemaField("goals", RecordFieldType.LONG);
recordReader.addRecord("John Doe", 48, 1L);
recordReader.addRecord("Jane Doe", 47, 1L);
recordReader.addRecord("Sally Doe", 47, 1L);
Map<String, String> attributes = new HashMap<>();
attributes.put("cql.statement.type", "invalid-type");
testRunner.enqueue("", attributes);
testRunner.run();
testRunner.assertTransferCount(PutCassandraRecord.REL_FAILURE, 1);
testRunner.assertTransferCount(PutCassandraRecord.REL_SUCCESS, 0);
testRunner.assertTransferCount(PutCassandraRecord.REL_RETRY, 0);
}
@Test
public void testInsertWithBatchStatementAttribute() throws InitializationException {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraRecord.STATEMENT_TYPE, PutCassandraRecord.INSERT_TYPE);
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, PutCassandraRecord.BATCH_STATEMENT_TYPE_USE_ATTR_TYPE);
testRunner.assertValid();
recordReader.addSchemaField("name", RecordFieldType.STRING);
recordReader.addSchemaField("age", RecordFieldType.INT);
recordReader.addSchemaField("goals", RecordFieldType.LONG);
recordReader.addRecord("John Doe", 48, 1L);
recordReader.addRecord("Jane Doe", 47, 1L);
recordReader.addRecord("Sally Doe", 47, 1L);
Map<String, String> attributes = new HashMap<>();
attributes.put("cql.batch.statement.type", "counter");
testRunner.enqueue("", attributes);
testRunner.run();
testRunner.assertTransferCount(PutCassandraRecord.REL_FAILURE, 0);
testRunner.assertTransferCount(PutCassandraRecord.REL_SUCCESS, 1);
testRunner.assertTransferCount(PutCassandraRecord.REL_RETRY, 0);
}
@Test
public void testInsertWithBatchStatementAttributeInvalid() throws InitializationException {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraRecord.STATEMENT_TYPE, PutCassandraRecord.INSERT_TYPE);
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, PutCassandraRecord.BATCH_STATEMENT_TYPE_USE_ATTR_TYPE);
testRunner.assertValid();
recordReader.addSchemaField("name", RecordFieldType.STRING);
recordReader.addSchemaField("age", RecordFieldType.INT);
recordReader.addSchemaField("goals", RecordFieldType.LONG);
recordReader.addRecord("John Doe", 48, 1L);
recordReader.addRecord("Jane Doe", 47, 1L);
recordReader.addRecord("Sally Doe", 47, 1L);
Map<String, String> attributes = new HashMap<>();
attributes.put("cql.batch.statement.type", "invalid-type");
testRunner.enqueue("", attributes);
testRunner.run();
testRunner.assertTransferCount(PutCassandraRecord.REL_FAILURE, 1);
testRunner.assertTransferCount(PutCassandraRecord.REL_SUCCESS, 0);
testRunner.assertTransferCount(PutCassandraRecord.REL_RETRY, 0);
}
@Test
public void testUpdateWithAttributesInvalidUpdateMethod() throws InitializationException {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraRecord.STATEMENT_TYPE, PutCassandraRecord.UPDATE_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_METHOD, PutCassandraRecord.UPDATE_METHOD_USE_ATTR_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_KEYS, "${cql.update.keys}");
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, PutCassandraRecord.COUNTER_TYPE);
testRunner.assertValid();
recordReader.addSchemaField("name", RecordFieldType.STRING);
recordReader.addSchemaField("age", RecordFieldType.INT);
recordReader.addSchemaField("goals", RecordFieldType.INT);
recordReader.addRecord("John Doe", 48, 1L);
recordReader.addRecord("Jane Doe", 47, 1L);
recordReader.addRecord("Sally Doe", 47, 1L);
Map<String, String> attributes = new HashMap<>();
attributes.put("cql.update.method", "invalid-method");
attributes.put("cql.update.keys", "name,age");
testRunner.enqueue("", attributes);
testRunner.run();
testRunner.assertTransferCount(PutCassandraRecord.REL_FAILURE, 1);
testRunner.assertTransferCount(PutCassandraRecord.REL_SUCCESS, 0);
testRunner.assertTransferCount(PutCassandraRecord.REL_RETRY, 0);
}
@Test
public void testUpdateWithAttributesIncompatibleBatchStatementType() throws InitializationException {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraRecord.STATEMENT_TYPE, PutCassandraRecord.UPDATE_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_METHOD, PutCassandraRecord.INCR_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_KEYS, "name,age");
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, PutCassandraRecord.BATCH_STATEMENT_TYPE_USE_ATTR_TYPE);
testRunner.assertValid();
recordReader.addSchemaField("name", RecordFieldType.STRING);
recordReader.addSchemaField("age", RecordFieldType.INT);
recordReader.addSchemaField("goals", RecordFieldType.INT);
recordReader.addRecord("John Doe", 48, 1L);
recordReader.addRecord("Jane Doe", 47, 1L);
recordReader.addRecord("Sally Doe", 47, 1L);
Map<String, String> attributes = new HashMap<>();
attributes.put("cql.batch.statement.type", "LOGGED");
testRunner.enqueue("", attributes);
testRunner.run();
testRunner.assertTransferCount(PutCassandraRecord.REL_FAILURE, 1);
testRunner.assertTransferCount(PutCassandraRecord.REL_SUCCESS, 0);
testRunner.assertTransferCount(PutCassandraRecord.REL_RETRY, 0);
}
@Test
public void testUpdateWithAttributesEmptyUpdateKeysAttribute() throws InitializationException {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraRecord.STATEMENT_TYPE, PutCassandraRecord.UPDATE_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_METHOD, PutCassandraRecord.UPDATE_METHOD_USE_ATTR_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_KEYS, "${cql.update.keys}");
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, PutCassandraRecord.COUNTER_TYPE);
testRunner.assertValid();
recordReader.addSchemaField("name", RecordFieldType.STRING);
recordReader.addSchemaField("age", RecordFieldType.INT);
recordReader.addSchemaField("goals", RecordFieldType.LONG);
recordReader.addRecord("John Doe", 48, 1L);
recordReader.addRecord("Jane Doe", 47, 1L);
recordReader.addRecord("Sally Doe", 47, 1L);
HashMap<String, String> attributes = new HashMap<>();
attributes.put("cql.update.method", "Increment");
attributes.put("cql.update.keys", "");
testRunner.enqueue("", attributes);
testRunner.run();
testRunner.assertTransferCount(PutCassandraRecord.REL_FAILURE, 1);
testRunner.assertTransferCount(PutCassandraRecord.REL_SUCCESS, 0);
testRunner.assertTransferCount(PutCassandraRecord.REL_RETRY, 0);
}
@Test
public void testUpdateWithAttributesEmptyUpdateMethodAttribute() throws InitializationException {
setUpStandardTestConfig();
testRunner.setProperty(PutCassandraRecord.STATEMENT_TYPE, PutCassandraRecord.UPDATE_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_METHOD, PutCassandraRecord.UPDATE_METHOD_USE_ATTR_TYPE);
testRunner.setProperty(PutCassandraRecord.UPDATE_KEYS, "name,age");
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, PutCassandraRecord.COUNTER_TYPE);
testRunner.assertValid();
recordReader.addSchemaField("name", RecordFieldType.STRING);
recordReader.addSchemaField("age", RecordFieldType.INT);
recordReader.addSchemaField("goals", RecordFieldType.LONG);
recordReader.addRecord("John Doe", 48, 1L);
recordReader.addRecord("Jane Doe", 47, 1L);
recordReader.addRecord("Sally Doe", 47, 1L);
HashMap<String, String> attributes = new HashMap<>();
attributes.put("cql.update.method", "");
testRunner.enqueue("", attributes);
testRunner.run();
testRunner.assertTransferCount(PutCassandraRecord.REL_FAILURE, 1);
testRunner.assertTransferCount(PutCassandraRecord.REL_SUCCESS, 0);
testRunner.assertTransferCount(PutCassandraRecord.REL_RETRY, 0);
}
@Test
public void testEL() throws InitializationException {
testRunner.setProperty(PutCassandraRecord.CONTACT_POINTS, "${contact.points}");
testRunner.setProperty(PutCassandraRecord.PASSWORD, "${pass}");
testRunner.setProperty(PutCassandraRecord.USERNAME, "${user}");
testRunner.setProperty(PutCassandraRecord.CONSISTENCY_LEVEL, "SERIAL");
testRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, "LOGGED");
testRunner.setProperty(PutCassandraRecord.TABLE, "sampleks.sampletbl");
testRunner.addControllerService("reader", recordReader);
testRunner.enableControllerService(recordReader);
testRunner.assertValid();
testRunner.setEnvironmentVariableValue("contact.points", "localhost:9042");
testRunner.setEnvironmentVariableValue("user", "username");
testRunner.setEnvironmentVariableValue("pass", "password");
recordReader.addSchemaField("name", RecordFieldType.STRING);
recordReader.addSchemaField("age", RecordFieldType.INT);
recordReader.addSchemaField("sport", RecordFieldType.STRING);
recordReader.addRecord("John Doe", 48, "Soccer");
recordReader.addRecord("Jane Doe", 47, "Tennis");
recordReader.addRecord("Sally Doe", 47, "Curling");
recordReader.addRecord("Jimmy Doe", 14, null);
recordReader.addRecord("Pizza Doe", 14, null);
testRunner.enqueue("");
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(PutCassandraRecord.REL_SUCCESS, 1);
}
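/**
 * Provides a stubbed processor instance for testing
 */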
private static class MockPutCassandraRecord extends PutCassandraRecord {
private Exception exceptionToThrow = null;
private Session mockSession = mock(Session.class);
@Override
protected Cluster createCluster(List<InetSocketAddress> contactPoints, SSLContext sslContext,
String username, String password, String compressionType) {
Cluster mockCluster = mock(Cluster.class);
try {
Metadata mockMetadata = mock(Metadata.class);
when(mockMetadata.getClusterName()).thenReturn("cluster1");
when(mockCluster.getMetadata()).thenReturn(mockMetadata);
when(mockCluster.connect()).thenReturn(mockSession);
when(mockCluster.connect(anyString())).thenReturn(mockSession);
Configuration config = Configuration.builder().build();
when(mockCluster.getConfiguration()).thenReturn(config);
ResultSetFuture future = mock(ResultSetFuture.class);
ResultSet rs = CassandraQueryTestUtil.createMockResultSet();
PreparedStatement ps = mock(PreparedStatement.class);
when(mockSession.prepare(anyString())).thenReturn(ps);
BoundStatement bs = mock(BoundStatement.class);
when(ps.bind()).thenReturn(bs);
when(future.getUninterruptibly()).thenReturn(rs);
try {
doReturn(rs).when(future).getUninterruptibly(anyLong(), any(TimeUnit.class));
} catch (TimeoutException te) {
throw new IllegalArgumentException("Mocked cluster doesn't time out");
}
if (exceptionToThrow != null) {
doThrow(exceptionToThrow).when(mockSession).executeAsync(anyString());
doThrow(exceptionToThrow).when(mockSession).executeAsync(any(Statement.class));
} else {
when(mockSession.executeAsync(anyString())).thenReturn(future);
when(mockSession.executeAsync(any(Statement.class))).thenReturn(future);
}
when(mockSession.getCluster()).thenReturn(mockCluster);
} catch (Exception e) {
fail(e.getMessage());
}
return mockCluster;
}
}
}

View File

@ -1,289 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.cassandra;
import com.datastax.driver.core.Statement;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.nifi.serialization.record.RecordSchema;
import org.apache.nifi.util.Tuple;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class PutCassandraRecordUpdateTest {
private PutCassandraRecord testSubject;
private final RecordSchema schema = mock(RecordSchema.class);
@BeforeEach
public void setUp() {
testSubject = new PutCassandraRecord();
}
@Test
public void testGenerateUpdateWithEmptyKeyList() {
Stream.of("", ",", ",,,").forEach(updateKeys -> testGenerateUpdate(
"keyspace.table",
updateKeys,
PutCassandraRecord.SET_TYPE.getValue(),
Arrays.asList(
new Tuple<>("keyField", 1),
new Tuple<>("stringField", "newStringValue")
),
new IllegalArgumentException("No Update Keys were specified")
));
}
@Test
public void testGenerateUpdateWithMissingKey() {
testGenerateUpdate(
"keyspace.table",
"keyField,missingKeyField",
PutCassandraRecord.SET_TYPE.getValue(),
Arrays.asList(
new Tuple<>("keyField", 1),
new Tuple<>("stringField", "newStringValue")
),
new IllegalArgumentException("Update key 'missingKeyField' is not present in the record schema")
);
}
@Test
public void testGenerateUpdateWithInvalidUpdateMethod() {
testGenerateUpdate(
"keyspace.table",
"keyField",
"invalidUpdateMethod",
Arrays.asList(
new Tuple<>("keyField", 1),
new Tuple<>("longField", 15L)
),
new IllegalArgumentException("Update Method 'invalidUpdateMethod' is not valid.")
);
}
@Test
public void testGenerateUpdateIncrementString() {
testGenerateUpdate(
"keyspace.table",
"keyField",
PutCassandraRecord.INCR_TYPE.getValue(),
Arrays.asList(
new Tuple<>("keyField", 1),
new Tuple<>("stringField", "15")
),
new IllegalArgumentException("Field 'stringField' is not of type Number")
);
}
@Test
public void testGenerateUpdateSimpleTableName() {
testGenerateUpdate(
"table",
"keyField1",
PutCassandraRecord.SET_TYPE.getValue(),
Arrays.asList(
new Tuple<>("keyField1", 1),
new Tuple<>("stringField", "newStringValue")
),
"UPDATE table SET stringField='newStringValue' WHERE keyField1=1;"
);
}
@Test
public void testGenerateUpdateKeyspacedTableName() {
testGenerateUpdate(
"keyspace.table",
"keyField1",
PutCassandraRecord.SET_TYPE.getValue(),
Arrays.asList(
new Tuple<>("keyField1", 1),
new Tuple<>("stringField", "newStringValue")
),
"UPDATE keyspace.table SET stringField='newStringValue' WHERE keyField1=1;"
);
}
@Test
public void testGenerateUpdateMultipleKeys() {
testGenerateUpdate(
"keyspace.table",
"keyField1,keyField2,keyField3",
PutCassandraRecord.SET_TYPE.getValue(),
Arrays.asList(
new Tuple<>("keyField1", 1),
new Tuple<>("keyField2", "key2"),
new Tuple<>("keyField3", 123L),
new Tuple<>("stringField", "newStringValue")
),
"UPDATE keyspace.table SET stringField='newStringValue' WHERE keyField1=1 AND keyField2='key2' AND keyField3=123;"
);
}
@Test
public void testGenerateUpdateIncrementLong() {
testGenerateUpdate(
"keyspace.table",
"keyField",
PutCassandraRecord.INCR_TYPE.getValue(),
Arrays.asList(
new Tuple<>("keyField", 1),
new Tuple<>("longField", 15L)
),
"UPDATE keyspace.table SET longField=longField+15 WHERE keyField=1;"
);
}
@Test
public void testGenerateUpdateDecrementLong() {
testGenerateUpdate(
"keyspace.table",
"keyField",
PutCassandraRecord.DECR_TYPE.getValue(),
Arrays.asList(
new Tuple<>("keyField", 1),
new Tuple<>("longField", 15L)
),
"UPDATE keyspace.table SET longField=longField-15 WHERE keyField=1;"
);
}
@Test
public void testGenerateUpdateIncrementInteger() {
testGenerateUpdate(
"keyspace.table",
"keyField",
PutCassandraRecord.INCR_TYPE.getValue(),
Arrays.asList(
new Tuple<>("keyField", 1),
new Tuple<>("integerField", 15)
),
"UPDATE keyspace.table SET integerField=integerField+15 WHERE keyField=1;"
);
}
@Test
public void testGenerateUpdateIncrementFloat() {
testGenerateUpdate(
"keyspace.table",
"keyField",
PutCassandraRecord.INCR_TYPE.getValue(),
Arrays.asList(
new Tuple<>("keyField", 1),
new Tuple<>("floatField", 15.05F)
),
"UPDATE keyspace.table SET floatField=floatField+15 WHERE keyField=1;"
);
}
@Test
public void testGenerateUpdateIncrementDouble() {
testGenerateUpdate(
"keyspace.table",
"keyField",
PutCassandraRecord.INCR_TYPE.getValue(),
Arrays.asList(
new Tuple<>("keyField", 1),
new Tuple<>("doubleField", 15.05D)
),
"UPDATE keyspace.table SET doubleField=doubleField+15 WHERE keyField=1;"
);
}
@Test
public void testGenerateUpdateSetMultipleValues() {
testGenerateUpdate(
"keyspace.table",
"keyField",
PutCassandraRecord.SET_TYPE.getValue(),
Arrays.asList(
new Tuple<>("keyField", 1),
new Tuple<>("stringField", "newStringValue"),
new Tuple<>("integerField", 15),
new Tuple<>("longField", 67L)
),
"UPDATE keyspace.table SET stringField='newStringValue',integerField=15,longField=67 WHERE keyField=1;"
);
}
@Test
public void testGenerateUpdateIncrementMultipleValues() {
testGenerateUpdate(
"keyspace.table",
"keyField",
PutCassandraRecord.INCR_TYPE.getValue(),
Arrays.asList(
new Tuple<>("keyField", 1),
new Tuple<>("integerField", 15),
new Tuple<>("longField", 67L)
),
"UPDATE keyspace.table SET integerField=integerField+15,longField=longField+67 WHERE keyField=1;"
);
}
@Test
public void testGenerateUpdateDecrementMultipleValues() {
testGenerateUpdate(
"keyspace.table",
"keyField",
PutCassandraRecord.DECR_TYPE.getValue(),
Arrays.asList(
new Tuple<>("keyField", 1),
new Tuple<>("integerField", 15),
new Tuple<>("longField", 67L)
),
"UPDATE keyspace.table SET integerField=integerField-15,longField=longField-67 WHERE keyField=1;"
);
}
private void testGenerateUpdate(String table, String updateKeys, String updateMethod, List<Tuple<String, Object>> records, String expected) {
Map<String, Object> recordContentMap = records.stream()
.collect(Collectors.toMap(Tuple::getKey, Tuple::getValue));
List<String> fieldNames = records.stream().map(Tuple::getKey).collect(Collectors.toList());
when(schema.getFieldNames()).thenReturn(fieldNames);
Statement actual = testSubject.generateUpdate(table, schema, updateKeys, updateMethod, recordContentMap);
assertEquals(expected, actual.toString());
}
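// Overload for negative cases: generateUpdate is expected to throw, and both the exception class and message are asserted.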
private <E extends Exception> void testGenerateUpdate(String table, String updateKeys, String updateMethod, List<Tuple<String, Object>> records, E expected) {
Map<String, Object> recordContentMap = records.stream()
.collect(Collectors.toMap(Tuple::getKey, Tuple::getValue));
List<String> fieldNames = records.stream().map(Tuple::getKey).collect(Collectors.toList());
when(schema.getFieldNames()).thenReturn(fieldNames);
try {
testSubject.generateUpdate("keyspace.table", schema, updateKeys, updateMethod, recordContentMap);
fail();
} catch (Exception e) {
assertEquals(expected.getClass(), e.getClass());
assertEquals(expected.getMessage(), e.getMessage());
}
}
}

View File

@ -1,183 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.cassandra;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Select;
import com.datastax.driver.core.querybuilder.Truncate;
import org.apache.nifi.reporting.InitializationException;
import org.apache.nifi.serialization.record.MockRecordParser;
import org.apache.nifi.serialization.record.RecordFieldType;
import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.testcontainers.containers.CassandraContainer;
import org.testcontainers.junit.jupiter.Container;
import org.testcontainers.junit.jupiter.Testcontainers;
import org.testcontainers.utility.DockerImageName;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors;
import static org.junit.jupiter.api.Assertions.assertEquals;
@Testcontainers
public class QueryCassandraIT {
@Container
private static final CassandraContainer CASSANDRA_CONTAINER = new CassandraContainer(DockerImageName.parse("cassandra:4.1"));
private static TestRunner putCassandraTestRunner;
private static TestRunner queryCassandraTestRunner;
private static MockRecordParser recordReader;
private static Cluster cluster;
private static Session session;
private static final int LOAD_FLOW_FILE_SIZE = 100;
private static final int LOAD_FLOW_FILE_BATCH_SIZE = 10;
private static final String KEYSPACE = "sample_keyspace";
private static final String TABLE = "sample_table";
@BeforeAll
public static void setup() throws InitializationException {
recordReader = new MockRecordParser();
putCassandraTestRunner = TestRunners.newTestRunner(PutCassandraRecord.class);
queryCassandraTestRunner = TestRunners.newTestRunner(QueryCassandra.class);
InetSocketAddress contactPoint = CASSANDRA_CONTAINER.getContactPoint();
putCassandraTestRunner.setProperty(PutCassandraRecord.RECORD_READER_FACTORY, "reader");
putCassandraTestRunner.setProperty(PutCassandraRecord.CONTACT_POINTS, contactPoint.getHostString() + ":" + contactPoint.getPort());
putCassandraTestRunner.setProperty(PutCassandraRecord.KEYSPACE, KEYSPACE);
putCassandraTestRunner.setProperty(PutCassandraRecord.TABLE, TABLE);
putCassandraTestRunner.setProperty(PutCassandraRecord.CONSISTENCY_LEVEL, "SERIAL");
putCassandraTestRunner.setProperty(PutCassandraRecord.BATCH_STATEMENT_TYPE, "LOGGED");
putCassandraTestRunner.addControllerService("reader", recordReader);
putCassandraTestRunner.enableControllerService(recordReader);
queryCassandraTestRunner.setProperty(QueryCassandra.CONTACT_POINTS, contactPoint.getHostName() + ":" + contactPoint.getPort());
queryCassandraTestRunner.setProperty(QueryCassandra.FETCH_SIZE, "10");
queryCassandraTestRunner.setProperty(QueryCassandra.OUTPUT_BATCH_SIZE, "10");
queryCassandraTestRunner.setProperty(QueryCassandra.KEYSPACE, KEYSPACE);
queryCassandraTestRunner.setProperty(QueryCassandra.CQL_SELECT_QUERY, "select * from " + TABLE + ";");
cluster = Cluster.builder().addContactPoint(contactPoint.getHostName())
.withPort(contactPoint.getPort()).build();
session = cluster.connect();
String createKeyspace = "CREATE KEYSPACE IF NOT EXISTS " + KEYSPACE + " WITH replication = {'class':'SimpleStrategy','replication_factor':1};";
String createTable = "CREATE TABLE IF NOT EXISTS " + KEYSPACE + "." + TABLE + "(id int PRIMARY KEY, uuid text, age int);";
session.execute(createKeyspace);
session.execute(createTable);
loadData();
}
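// Seeds LOAD_FLOW_FILE_SIZE FlowFiles of LOAD_FLOW_FILE_BATCH_SIZE records each (1,000 rows in total) before the query tests run.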
private static void loadData() {
recordReader.addSchemaField("id", RecordFieldType.INT);
recordReader.addSchemaField("uuid", RecordFieldType.STRING);
recordReader.addSchemaField("age", RecordFieldType.INT);
int recordCount = 0;
for (int i = 0; i<LOAD_FLOW_FILE_SIZE; i++) {
for (int j = 0; j<LOAD_FLOW_FILE_BATCH_SIZE; j++) {
recordCount++;
recordReader.addRecord(recordCount, UUID.randomUUID().toString(),
ThreadLocalRandom.current().nextInt(0, 101));
}
putCassandraTestRunner.enqueue("");
putCassandraTestRunner.run();
}
putCassandraTestRunner.assertAllFlowFilesTransferred(PutCassandraRecord.REL_SUCCESS, LOAD_FLOW_FILE_SIZE);
assertEquals(LOAD_FLOW_FILE_SIZE * LOAD_FLOW_FILE_BATCH_SIZE, getRecordsCount());
}
@Test
public void testSimpleQuery() {
queryCassandraTestRunner.enqueue("");
queryCassandraTestRunner.run();
Assertions.assertEquals(1, queryCassandraTestRunner.getFlowFilesForRelationship(QueryCassandra.REL_SUCCESS).size());
queryCassandraTestRunner.clearTransferState();
}
@Test
public void testWithoutBatchSize() {
queryCassandraTestRunner.removeProperty(QueryCassandra.OUTPUT_BATCH_SIZE);
queryCassandraTestRunner.enqueue("");
queryCassandraTestRunner.run();
Assertions.assertEquals(1, queryCassandraTestRunner.getFlowFilesForRelationship(QueryCassandra.REL_SUCCESS).size());
queryCassandraTestRunner.clearTransferState();
}
@Test
public void testWithMaxRowsPerFlowFile() {
queryCassandraTestRunner.setProperty(QueryCassandra.MAX_ROWS_PER_FLOW_FILE, "10");
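// With 1,000 rows loaded and a cap of 10 rows per FlowFile, the query should emit 100 FlowFiles.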
queryCassandraTestRunner.enqueue("");
queryCassandraTestRunner.run();
Assertions.assertEquals(100, queryCassandraTestRunner.getFlowFilesForRelationship(QueryCassandra.REL_SUCCESS).size());
queryCassandraTestRunner.clearTransferState();
}
@Test
public void testWithDefaults() {
queryCassandraTestRunner.removeProperty(QueryCassandra.MAX_ROWS_PER_FLOW_FILE);
queryCassandraTestRunner.removeProperty(QueryCassandra.OUTPUT_BATCH_SIZE);
queryCassandraTestRunner.removeProperty(QueryCassandra.FETCH_SIZE);
queryCassandraTestRunner.enqueue("");
queryCassandraTestRunner.run();
Assertions.assertEquals(1, queryCassandraTestRunner.getFlowFilesForRelationship(QueryCassandra.REL_SUCCESS).size());
queryCassandraTestRunner.clearTransferState();
}
private static int getRecordsCount() {
Select selectQuery = QueryBuilder.select().all().from(KEYSPACE, TABLE);
ResultSet result = session.execute(selectQuery);
List<Integer> resultsList = result.all()
.stream()
.map(r -> r.getInt(0))
.collect(Collectors.toList());
return resultsList.size();
}
private void dropRecords() {
Truncate query = QueryBuilder.truncate(KEYSPACE, TABLE);
session.execute(query);
}
@AfterAll
public static void shutdown() {
String dropKeyspace = "DROP KEYSPACE " + KEYSPACE;
String dropTable = "DROP TABLE IF EXISTS " + KEYSPACE + "." + TABLE;
session.execute(dropTable);
session.execute(dropKeyspace);
session.close();
cluster.close();
}
}

View File

@ -1,594 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.cassandra;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Configuration;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SniEndPoint;
import com.datastax.driver.core.exceptions.InvalidQueryException;
import com.datastax.driver.core.exceptions.NoHostAvailableException;
import com.datastax.driver.core.exceptions.ReadTimeoutException;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.avro.Schema;
import org.apache.nifi.processor.exception.ProcessException;
import org.apache.nifi.util.MockFlowFile;
import org.apache.nifi.util.MockProcessContext;
import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import javax.net.ssl.SSLContext;
import java.io.ByteArrayOutputStream;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class QueryCassandraTest {
private TestRunner testRunner;
private MockQueryCassandra processor;
@BeforeEach
public void setUp() {
processor = new MockQueryCassandra();
testRunner = TestRunners.newTestRunner(processor);
}
@Test
public void testProcessorConfigValid() {
testRunner.setProperty(AbstractCassandraProcessor.CONSISTENCY_LEVEL, "ONE");
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "localhost:9042");
testRunner.assertNotValid();
testRunner.setProperty(QueryCassandra.CQL_SELECT_QUERY, "select * from test");
testRunner.assertValid();
testRunner.setProperty(AbstractCassandraProcessor.PASSWORD, "password");
testRunner.assertNotValid();
testRunner.setProperty(AbstractCassandraProcessor.USERNAME, "username");
testRunner.assertValid();
testRunner.setProperty(QueryCassandra.TIMESTAMP_FORMAT_PATTERN, "invalid format");
testRunner.assertNotValid();
testRunner.setProperty(QueryCassandra.TIMESTAMP_FORMAT_PATTERN, "yyyy-MM-dd HH:mm:ss.SSSZ");
testRunner.assertValid();
}
@Test
public void testProcessorELConfigValid() {
testRunner.setProperty(AbstractCassandraProcessor.CONSISTENCY_LEVEL, "ONE");
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "${hosts}");
testRunner.setProperty(QueryCassandra.CQL_SELECT_QUERY, "${query}");
testRunner.setProperty(AbstractCassandraProcessor.PASSWORD, "${pass}");
testRunner.setProperty(AbstractCassandraProcessor.USERNAME, "${user}");
testRunner.assertValid();
}
@Test
public void testProcessorNoInputFlowFileAndExceptions() {
setUpStandardProcessorConfig();
// Test no input flowfile
testRunner.setIncomingConnection(false);
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(QueryCassandra.REL_SUCCESS, 1);
testRunner.clearTransferState();
// Test exceptions
processor.setExceptionToThrow(new NoHostAvailableException(new HashMap<>()));
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(QueryCassandra.REL_RETRY, 1);
testRunner.clearTransferState();
processor.setExceptionToThrow(
new ReadTimeoutException(new SniEndPoint(new InetSocketAddress("localhost", 9042), ""), ConsistencyLevel.ANY, 0, 1, false));
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(QueryCassandra.REL_RETRY, 1);
testRunner.clearTransferState();
processor.setExceptionToThrow(
new InvalidQueryException(new SniEndPoint(new InetSocketAddress("localhost", 9042), ""), "invalid query"));
testRunner.run(1, true, true);
// No files transferred to failure if there was no incoming connection
testRunner.assertAllFlowFilesTransferred(QueryCassandra.REL_FAILURE, 0);
testRunner.clearTransferState();
processor.setExceptionToThrow(new ProcessException());
testRunner.run(1, true, true);
// No files transferred to failure if there was no incoming connection
testRunner.assertAllFlowFilesTransferred(QueryCassandra.REL_FAILURE, 0);
testRunner.clearTransferState();
processor.setExceptionToThrow(null);
}
@Test
public void testProcessorJsonOutput() {
setUpStandardProcessorConfig();
testRunner.setIncomingConnection(false);
// Test JSON output
testRunner.setProperty(QueryCassandra.OUTPUT_FORMAT, QueryCassandra.JSON_FORMAT);
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(QueryCassandra.REL_SUCCESS, 1);
List<MockFlowFile> files = testRunner.getFlowFilesForRelationship(QueryCassandra.REL_SUCCESS);
assertNotNull(files);
assertEquals(1, files.size(), "One file should be transferred to success");
assertEquals("{\"results\":[{\"user_id\":\"user1\",\"first_name\":\"Joe\",\"last_name\":\"Smith\","
+ "\"emails\":[\"jsmith@notareal.com\"],\"top_places\":[\"New York, NY\",\"Santa Clara, CA\"],"
+ "\"todo\":{\"2016-01-03 05:00:00+0000\":\"Set my alarm \\\"for\\\" a month from now\"},"
+ "\"registered\":\"false\",\"scale\":1.0,\"metric\":2.0},"
+ "{\"user_id\":\"user2\",\"first_name\":\"Mary\",\"last_name\":\"Jones\","
+ "\"emails\":[\"mjones@notareal.com\"],\"top_places\":[\"Orlando, FL\"],"
+ "\"todo\":{\"2016-02-03 05:00:00+0000\":\"Get milk and bread\"},"
+ "\"registered\":\"true\",\"scale\":3.0,\"metric\":4.0}]}",
new String(files.get(0).toByteArray()));
}
@Test
public void testProcessorJsonOutputFragmentAttributes() {
processor = new MockQueryCassandraTwoRounds();
testRunner = TestRunners.newTestRunner(processor);
setUpStandardProcessorConfig();
testRunner.setIncomingConnection(false);
testRunner.setProperty(QueryCassandra.MAX_ROWS_PER_FLOW_FILE, "1");
// Test JSON output
testRunner.setProperty(QueryCassandra.OUTPUT_FORMAT, QueryCassandra.JSON_FORMAT);
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(QueryCassandra.REL_SUCCESS, 2);
List<MockFlowFile> files = testRunner.getFlowFilesForRelationship(QueryCassandra.REL_SUCCESS);
assertNotNull(files);
assertEquals(2, files.size(), "Two files should be transferred to success");
String indexIdentifier = null;
for (int i = 0; i < files.size(); i++) {
MockFlowFile flowFile = files.get(i);
flowFile.assertAttributeEquals(QueryCassandra.FRAGMENT_INDEX, String.valueOf(i));
if (indexIdentifier == null) {
indexIdentifier = flowFile.getAttribute(QueryCassandra.FRAGMENT_ID);
} else {
flowFile.assertAttributeEquals(QueryCassandra.FRAGMENT_ID, indexIdentifier);
}
flowFile.assertAttributeEquals(QueryCassandra.FRAGMENT_COUNT, String.valueOf(files.size()));
}
}
@Test
public void testProcessorELConfigJsonOutput() {
setUpStandardProcessorConfig();
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "${hosts}");
testRunner.setProperty(QueryCassandra.CQL_SELECT_QUERY, "${query}");
testRunner.setProperty(AbstractCassandraProcessor.PASSWORD, "${pass}");
testRunner.setProperty(AbstractCassandraProcessor.USERNAME, "${user}");
testRunner.setProperty(AbstractCassandraProcessor.CHARSET, "${charset}");
testRunner.setProperty(QueryCassandra.QUERY_TIMEOUT, "${timeout}");
testRunner.setProperty(QueryCassandra.FETCH_SIZE, "${fetch}");
testRunner.setProperty(QueryCassandra.MAX_ROWS_PER_FLOW_FILE, "${max-rows-per-flow}");
testRunner.setIncomingConnection(false);
testRunner.assertValid();
testRunner.setEnvironmentVariableValue("hosts", "localhost:9042");
testRunner.setEnvironmentVariableValue("user", "username");
testRunner.setEnvironmentVariableValue("pass", "password");
testRunner.setEnvironmentVariableValue("charset", "UTF-8");
testRunner.setEnvironmentVariableValue("timeout", "30 sec");
testRunner.setEnvironmentVariableValue("fetch", "0");
testRunner.setEnvironmentVariableValue("max-rows-per-flow", "0");
// Test JSON output
testRunner.setProperty(QueryCassandra.OUTPUT_FORMAT, QueryCassandra.JSON_FORMAT);
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(QueryCassandra.REL_SUCCESS, 1);
List<MockFlowFile> files = testRunner.getFlowFilesForRelationship(QueryCassandra.REL_SUCCESS);
assertNotNull(files);
assertEquals(1, files.size(), "One file should be transferred to success");
assertEquals("{\"results\":[{\"user_id\":\"user1\",\"first_name\":\"Joe\",\"last_name\":\"Smith\","
+ "\"emails\":[\"jsmith@notareal.com\"],\"top_places\":[\"New York, NY\",\"Santa Clara, CA\"],"
+ "\"todo\":{\"2016-01-03 05:00:00+0000\":\"Set my alarm \\\"for\\\" a month from now\"},"
+ "\"registered\":\"false\",\"scale\":1.0,\"metric\":2.0},"
+ "{\"user_id\":\"user2\",\"first_name\":\"Mary\",\"last_name\":\"Jones\","
+ "\"emails\":[\"mjones@notareal.com\"],\"top_places\":[\"Orlando, FL\"],"
+ "\"todo\":{\"2016-02-03 05:00:00+0000\":\"Get milk and bread\"},"
+ "\"registered\":\"true\",\"scale\":3.0,\"metric\":4.0}]}",
new String(files.get(0).toByteArray()));
}
@Test
public void testProcessorJsonOutputWithQueryTimeout() {
setUpStandardProcessorConfig();
testRunner.setProperty(QueryCassandra.QUERY_TIMEOUT, "5 sec");
testRunner.setIncomingConnection(false);
// Test JSON output
testRunner.setProperty(QueryCassandra.OUTPUT_FORMAT, QueryCassandra.JSON_FORMAT);
testRunner.run(1, true, true);
testRunner.assertAllFlowFilesTransferred(QueryCassandra.REL_SUCCESS, 1);
List<MockFlowFile> files = testRunner.getFlowFilesForRelationship(QueryCassandra.REL_SUCCESS);
assertNotNull(files);
assertEquals(1, files.size(), "One file should be transferred to success");
}
@Test
public void testProcessorEmptyFlowFile() {
setUpStandardProcessorConfig();
// Run with empty flowfile
testRunner.setIncomingConnection(true);
processor.setExceptionToThrow(null);
testRunner.enqueue("".getBytes());
testRunner.run(1, true, true);
testRunner.assertTransferCount(QueryCassandra.REL_SUCCESS, 1);
testRunner.clearTransferState();
}
@Test
public void testProcessorEmptyFlowFileMaxRowsPerFlowFileEqOne() {
processor = new MockQueryCassandraTwoRounds();
testRunner = TestRunners.newTestRunner(processor);
setUpStandardProcessorConfig();
testRunner.setIncomingConnection(true);
testRunner.setProperty(QueryCassandra.MAX_ROWS_PER_FLOW_FILE, "1");
processor.setExceptionToThrow(null);
testRunner.enqueue("".getBytes());
testRunner.run(1, true, true);
testRunner.assertTransferCount(QueryCassandra.REL_SUCCESS, 2);
testRunner.clearTransferState();
}
@Test
public void testProcessorEmptyFlowFileAndNoHostAvailableException() {
setUpStandardProcessorConfig();
// Test exceptions
processor.setExceptionToThrow(new NoHostAvailableException(new HashMap<>()));
testRunner.enqueue("".getBytes());
testRunner.run(1, true, true);
testRunner.assertTransferCount(QueryCassandra.REL_RETRY, 1);
testRunner.clearTransferState();
}
@Test
public void testProcessorEmptyFlowFileAndInetSocketAddressConsistencyLevelANY() {
setUpStandardProcessorConfig();
processor.setExceptionToThrow(
new ReadTimeoutException(new SniEndPoint(new InetSocketAddress("localhost", 9042), ""), ConsistencyLevel.ANY, 0, 1, false));
testRunner.enqueue("".getBytes());
testRunner.run(1, true, true);
testRunner.assertTransferCount(QueryCassandra.REL_RETRY, 1);
testRunner.clearTransferState();
}
@Test
public void testProcessorEmptyFlowFileAndInetSocketAddressDefault() {
setUpStandardProcessorConfig();
processor.setExceptionToThrow(
new InvalidQueryException(new SniEndPoint(new InetSocketAddress("localhost", 9042), ""), "invalid query"));
testRunner.enqueue("".getBytes());
testRunner.run(1, true, true);
testRunner.assertTransferCount(QueryCassandra.REL_FAILURE, 1);
testRunner.clearTransferState();
}
@Test
public void testProcessorEmptyFlowFileAndExceptionsProcessException() {
setUpStandardProcessorConfig();
processor.setExceptionToThrow(new ProcessException());
testRunner.enqueue("".getBytes());
testRunner.run(1, true, true);
testRunner.assertTransferCount(QueryCassandra.REL_FAILURE, 1);
}
@Test
public void testCreateSchemaOneColumn() throws Exception {
ResultSet rs = CassandraQueryTestUtil.createMockResultSetOneColumn();
Schema schema = QueryCassandra.createSchema(rs);
assertNotNull(schema);
assertEquals("users", schema.getName());
}
@Test
public void testCreateSchema() throws Exception {
ResultSet rs = CassandraQueryTestUtil.createMockResultSet(true);
Schema schema = QueryCassandra.createSchema(rs);
assertNotNull(schema);
assertEquals(Schema.Type.RECORD, schema.getType());
// Check record fields, starting with user_id
Schema.Field field = schema.getField("user_id");
assertNotNull(field);
Schema fieldSchema = field.schema();
Schema.Type type = fieldSchema.getType();
assertEquals(Schema.Type.UNION, type);
// Assert individual union types, first is null
assertEquals(Schema.Type.NULL, fieldSchema.getTypes().get(0).getType());
assertEquals(Schema.Type.STRING, fieldSchema.getTypes().get(1).getType());
field = schema.getField("first_name");
assertNotNull(field);
fieldSchema = field.schema();
type = fieldSchema.getType();
assertEquals(Schema.Type.UNION, type);
// Assert individual union types, first is null
assertEquals(Schema.Type.NULL, fieldSchema.getTypes().get(0).getType());
assertEquals(Schema.Type.STRING, fieldSchema.getTypes().get(1).getType());
field = schema.getField("last_name");
assertNotNull(field);
fieldSchema = field.schema();
type = fieldSchema.getType();
assertEquals(Schema.Type.UNION, type);
// Assert individual union types, first is null
assertEquals(Schema.Type.NULL, fieldSchema.getTypes().get(0).getType());
assertEquals(Schema.Type.STRING, fieldSchema.getTypes().get(1).getType());
field = schema.getField("emails");
assertNotNull(field);
fieldSchema = field.schema();
type = fieldSchema.getType();
// Should be a union of null and array
assertEquals(Schema.Type.UNION, type);
assertEquals(Schema.Type.NULL, fieldSchema.getTypes().get(0).getType());
assertEquals(Schema.Type.ARRAY, fieldSchema.getTypes().get(1).getType());
Schema arraySchema = fieldSchema.getTypes().get(1);
// Assert individual array element types are unions of null and String
Schema elementSchema = arraySchema.getElementType();
assertEquals(Schema.Type.UNION, elementSchema.getType());
assertEquals(Schema.Type.NULL, elementSchema.getTypes().get(0).getType());
assertEquals(Schema.Type.STRING, elementSchema.getTypes().get(1).getType());
field = schema.getField("top_places");
assertNotNull(field);
fieldSchema = field.schema();
type = fieldSchema.getType();
// Should be a union of null and array
assertEquals(Schema.Type.UNION, type);
assertEquals(Schema.Type.ARRAY, fieldSchema.getTypes().get(1).getType());
arraySchema = fieldSchema.getTypes().get(1);
// Assert individual array element types are unions of null and String
elementSchema = arraySchema.getElementType();
assertEquals(Schema.Type.UNION, elementSchema.getType());
assertEquals(Schema.Type.NULL, elementSchema.getTypes().get(0).getType());
assertEquals(Schema.Type.STRING, elementSchema.getTypes().get(1).getType());
field = schema.getField("todo");
assertNotNull(field);
fieldSchema = field.schema();
type = fieldSchema.getType();
// Should be a union of null and map
assertEquals(Schema.Type.UNION, type);
assertEquals(Schema.Type.MAP, fieldSchema.getTypes().get(1).getType());
Schema mapSchema = fieldSchema.getTypes().get(1);
// Assert individual map value types are unions of null and String
Schema valueSchema = mapSchema.getValueType();
assertEquals(Schema.Type.NULL, valueSchema.getTypes().get(0).getType());
assertEquals(Schema.Type.STRING, valueSchema.getTypes().get(1).getType());
field = schema.getField("registered");
assertNotNull(field);
fieldSchema = field.schema();
type = fieldSchema.getType();
assertEquals(Schema.Type.UNION, type);
// Assert individual union types, first is null
assertEquals(Schema.Type.NULL, fieldSchema.getTypes().get(0).getType());
assertEquals(Schema.Type.BOOLEAN, fieldSchema.getTypes().get(1).getType());
field = schema.getField("scale");
assertNotNull(field);
fieldSchema = field.schema();
type = fieldSchema.getType();
assertEquals(Schema.Type.UNION, type);
// Assert individual union types, first is null
assertEquals(Schema.Type.NULL, fieldSchema.getTypes().get(0).getType());
assertEquals(Schema.Type.FLOAT, fieldSchema.getTypes().get(1).getType());
field = schema.getField("metric");
assertNotNull(field);
fieldSchema = field.schema();
type = fieldSchema.getType();
assertEquals(Schema.Type.UNION, type);
// Assert individual union types, first is null
assertEquals(Schema.Type.NULL, fieldSchema.getTypes().get(0).getType());
assertEquals(Schema.Type.DOUBLE, fieldSchema.getTypes().get(1).getType());
}
@Test
public void testConvertToAvroStream() throws Exception {
processor = new MockQueryCassandraTwoRounds();
testRunner = TestRunners.newTestRunner(processor);
setUpStandardProcessorConfig();
ResultSet rs = CassandraQueryTestUtil.createMockResultSet(false);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
long numberOfRows = QueryCassandra.convertToAvroStream(rs, 0, baos, 0, null);
assertEquals(2, numberOfRows);
}
@Test
public void testConvertToJSONStream() throws Exception {
setUpStandardProcessorConfig();
ResultSet rs = CassandraQueryTestUtil.createMockResultSet();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
long numberOfRows = QueryCassandra.convertToJsonStream(rs, 0, baos, StandardCharsets.UTF_8,
0, null);
assertEquals(2, numberOfRows);
}
@Test
public void testDefaultDateFormatInConvertToJSONStream() throws Exception {
ResultSet rs = CassandraQueryTestUtil.createMockDateResultSet();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
final DateTimeFormatter formatter = DateTimeFormatter.ofPattern(QueryCassandra.TIMESTAMP_FORMAT_PATTERN.getDefaultValue());
long numberOfRows = QueryCassandra.convertToJsonStream(Optional.of(testRunner.getProcessContext()), rs, 0, baos,
StandardCharsets.UTF_8, 0, null);
assertEquals(1, numberOfRows);
Map<String, List<Map<String, String>>> map = new ObjectMapper().readValue(baos.toByteArray(), HashMap.class);
String date = map.get("results").get(0).get("date");
assertEquals(formatter.format(CassandraQueryTestUtil.TEST_DATE.toInstant().atOffset(ZoneOffset.UTC)), date);
}
@Test
public void testCustomDateFormatInConvertToJSONStream() throws Exception {
MockProcessContext context = (MockProcessContext) testRunner.getProcessContext();
ResultSet rs = CassandraQueryTestUtil.createMockDateResultSet();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
final String customDateFormat = "yyyy-MM-dd HH:mm:ss.SSSZ";
context.setProperty(QueryCassandra.TIMESTAMP_FORMAT_PATTERN, customDateFormat);
final DateTimeFormatter formatter = DateTimeFormatter.ofPattern(customDateFormat);
long numberOfRows = QueryCassandra.convertToJsonStream(Optional.of(context), rs, 0, baos, StandardCharsets.UTF_8, 0, null);
assertEquals(1, numberOfRows);
Map<String, List<Map<String, String>>> map = new ObjectMapper().readValue(baos.toByteArray(), HashMap.class);
String date = map.get("results").get(0).get("date");
assertEquals(formatter.format(CassandraQueryTestUtil.TEST_DATE.toInstant().atOffset(ZoneOffset.UTC)), date);
}
private void setUpStandardProcessorConfig() {
testRunner.setProperty(AbstractCassandraProcessor.CONSISTENCY_LEVEL, "ONE");
testRunner.setProperty(AbstractCassandraProcessor.CONTACT_POINTS, "localhost:9042");
testRunner.setProperty(QueryCassandra.CQL_SELECT_QUERY, "select * from test");
testRunner.setProperty(AbstractCassandraProcessor.PASSWORD, "password");
testRunner.setProperty(AbstractCassandraProcessor.USERNAME, "username");
testRunner.setProperty(QueryCassandra.MAX_ROWS_PER_FLOW_FILE, "0");
}
/**
* Provides a stubbed processor instance for testing
*/
private static class MockQueryCassandra extends QueryCassandra {
private Exception exceptionToThrow = null;
@Override
protected Cluster createCluster(List<InetSocketAddress> contactPoints, SSLContext sslContext,
String username, String password, String compressionType) {
Cluster mockCluster = mock(Cluster.class);
try {
Metadata mockMetadata = mock(Metadata.class);
when(mockMetadata.getClusterName()).thenReturn("cluster1");
when(mockCluster.getMetadata()).thenReturn(mockMetadata);
Session mockSession = mock(Session.class);
when(mockCluster.connect()).thenReturn(mockSession);
when(mockCluster.connect(anyString())).thenReturn(mockSession);
Configuration config = Configuration.builder().build();
when(mockCluster.getConfiguration()).thenReturn(config);
ResultSetFuture future = mock(ResultSetFuture.class);
ResultSet rs = CassandraQueryTestUtil.createMockResultSet(false);
when(future.getUninterruptibly()).thenReturn(rs);
try {
doReturn(rs).when(future).getUninterruptibly(anyLong(), any(TimeUnit.class));
} catch (TimeoutException te) {
throw new IllegalArgumentException("Mocked cluster doesn't time out");
}
if (exceptionToThrow != null) {
when(mockSession.execute(anyString(), any(), any())).thenThrow(exceptionToThrow);
when(mockSession.execute(anyString())).thenThrow(exceptionToThrow);
} else {
when(mockSession.execute(anyString(), any(), any())).thenReturn(rs);
when(mockSession.execute(anyString())).thenReturn(rs);
}
} catch (Exception e) {
fail(e.getMessage());
}
return mockCluster;
}
public void setExceptionToThrow(Exception e) {
this.exceptionToThrow = e;
}
}
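/**
 * Variant of MockQueryCassandra whose mocked session returns a two-round
 * result set (createMockResultSet(true)), exercising the multi-flow-file
 * and fragment-attribute code paths.
 */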
private static class MockQueryCassandraTwoRounds extends MockQueryCassandra {
private Exception exceptionToThrow = null;
@Override
protected Cluster createCluster(List<InetSocketAddress> contactPoints, SSLContext sslContext,
String username, String password, String compressionType) {
Cluster mockCluster = mock(Cluster.class);
try {
Metadata mockMetadata = mock(Metadata.class);
when(mockMetadata.getClusterName()).thenReturn("cluster1");
when(mockCluster.getMetadata()).thenReturn(mockMetadata);
Session mockSession = mock(Session.class);
when(mockCluster.connect()).thenReturn(mockSession);
when(mockCluster.connect(anyString())).thenReturn(mockSession);
Configuration config = Configuration.builder().build();
when(mockCluster.getConfiguration()).thenReturn(config);
ResultSetFuture future = mock(ResultSetFuture.class);
ResultSet rs = CassandraQueryTestUtil.createMockResultSet(true);
when(future.getUninterruptibly()).thenReturn(rs);
try {
doReturn(rs).when(future).getUninterruptibly(anyLong(), any(TimeUnit.class));
} catch (TimeoutException te) {
throw new IllegalArgumentException("Mocked cluster doesn't time out");
}
if (exceptionToThrow != null) {
when(mockSession.execute(anyString(), any(), any())).thenThrow(exceptionToThrow);
when(mockSession.execute(anyString())).thenThrow(exceptionToThrow);
} else {
when(mockSession.execute(anyString(), any(), any())).thenReturn(rs);
when(mockSession.execute(anyString())).thenReturn(rs);
}
} catch (Exception e) {
fail(e.getMessage());
}
return mockCluster;
}
public void setExceptionToThrow(Exception e) {
this.exceptionToThrow = e;
}
}
}

View File

@ -1,40 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>nifi-cassandra-bundle</artifactId>
<groupId>org.apache.nifi</groupId>
<version>2.0.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>nifi-cassandra-services-api-nar</artifactId>
<packaging>nar</packaging>
<dependencies>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-standard-shared-nar</artifactId>
<type>nar</type>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-services-api</artifactId>
<version>2.0.0-SNAPSHOT</version>
<scope>compile</scope>
</dependency>
</dependencies>
</project>

View File

@ -1,266 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
APACHE NIFI SUBCOMPONENTS:
The Apache NiFi project contains subcomponents with separate copyright
notices and license terms. Your use of the source code for these
subcomponents is subject to the terms and conditions of the following
licenses.
This product bundles 'asm' which is available under a 3-Clause BSD style license.
For details see http://asm.ow2.org/asmdex-license.html
Copyright (c) 2012 France Télécom
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
The binary distribution of this product bundles 'JNR x86asm' under an MIT
style license.
Copyright (C) 2010 Wayne Meissner
Copyright (c) 2008-2009, Petr Kobalicek <kobalicek.petr@gmail.com>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,227 +0,0 @@
nifi-cassandra-services-api-nar
Copyright 2016-2020 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
******************
Apache Software License v2
******************
(ASLv2) The Netty Project
The following NOTICE information applies:
Copyright 2014 The Netty Project
-------------------------------------------------------------------------------
This product contains the extensions to Java Collections Framework which has
been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
* LICENSE:
* license/LICENSE.jsr166y.txt (Public Domain)
* HOMEPAGE:
* http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
* http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
This product contains a modified version of Robert Harder's Public Domain
Base64 Encoder and Decoder, which can be obtained at:
* LICENSE:
* license/LICENSE.base64.txt (Public Domain)
* HOMEPAGE:
* http://iharder.sourceforge.net/current/java/base64/
This product contains a modified portion of 'Webbit', an event based
WebSocket and HTTP server, which can be obtained at:
* LICENSE:
* license/LICENSE.webbit.txt (BSD License)
* HOMEPAGE:
* https://github.com/joewalnes/webbit
This product contains a modified portion of 'SLF4J', a simple logging
facade for Java, which can be obtained at:
* LICENSE:
* license/LICENSE.slf4j.txt (MIT License)
* HOMEPAGE:
* http://www.slf4j.org/
This product contains a modified portion of 'Apache Harmony', an open source
Java SE, which can be obtained at:
* LICENSE:
* license/LICENSE.harmony.txt (Apache License 2.0)
* HOMEPAGE:
* http://archive.apache.org/dist/harmony/
This product contains a modified portion of 'jbzip2', a Java bzip2 compression
and decompression library written by Matthew J. Francis. It can be obtained at:
* LICENSE:
* license/LICENSE.jbzip2.txt (MIT License)
* HOMEPAGE:
* https://code.google.com/p/jbzip2/
This product contains a modified portion of 'libdivsufsort', a C API library to construct
the suffix array and the Burrows-Wheeler transformed string for any input string of
a constant-size alphabet written by Yuta Mori. It can be obtained at:
* LICENSE:
* license/LICENSE.libdivsufsort.txt (MIT License)
* HOMEPAGE:
* https://github.com/y-256/libdivsufsort
This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM,
which can be obtained at:
* LICENSE:
* license/LICENSE.jctools.txt (ASL2 License)
* HOMEPAGE:
* https://github.com/JCTools/JCTools
This product optionally depends on 'JZlib', a re-implementation of zlib in
pure Java, which can be obtained at:
* LICENSE:
* license/LICENSE.jzlib.txt (BSD style License)
* HOMEPAGE:
* http://www.jcraft.com/jzlib/
This product optionally depends on 'Compress-LZF', a Java library for encoding and
decoding data in LZF format, written by Tatu Saloranta. It can be obtained at:
* LICENSE:
* license/LICENSE.compress-lzf.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/ning/compress
This product optionally depends on 'lz4', a LZ4 Java compression
and decompression library written by Adrien Grand. It can be obtained at:
* LICENSE:
* license/LICENSE.lz4.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/jpountz/lz4-java
This product optionally depends on 'lzma-java', a LZMA Java compression
and decompression library, which can be obtained at:
* LICENSE:
* license/LICENSE.lzma-java.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/jponge/lzma-java
This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression
and decompression library written by William Kinney. It can be obtained at:
* LICENSE:
* license/LICENSE.jfastlz.txt (MIT License)
* HOMEPAGE:
* https://code.google.com/p/jfastlz/
This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data
interchange format, which can be obtained at:
* LICENSE:
* license/LICENSE.protobuf.txt (New BSD License)
* HOMEPAGE:
* https://github.com/google/protobuf
This product optionally depends on 'Bouncy Castle Crypto APIs' to generate
a temporary self-signed X.509 certificate when the JVM does not provide the
equivalent functionality. It can be obtained at:
* LICENSE:
* license/LICENSE.bouncycastle.txt (MIT License)
* HOMEPAGE:
* http://www.bouncycastle.org/
This product optionally depends on 'Snappy', a compression library produced
by Google Inc, which can be obtained at:
* LICENSE:
* license/LICENSE.snappy.txt (New BSD License)
* HOMEPAGE:
* https://github.com/google/snappy
This product optionally depends on 'JBoss Marshalling', an alternative Java
serialization API, which can be obtained at:
* LICENSE:
* license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1)
* HOMEPAGE:
* http://www.jboss.org/jbossmarshalling
This product optionally depends on 'Caliper', Google's micro-
benchmarking framework, which can be obtained at:
* LICENSE:
* license/LICENSE.caliper.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/google/caliper
This product optionally depends on 'Apache Log4J', a logging framework, which
can be obtained at:
* LICENSE:
* license/LICENSE.log4j.txt (Apache License 2.0)
* HOMEPAGE:
* http://logging.apache.org/log4j/
This product optionally depends on 'Aalto XML', an ultra-high performance
non-blocking XML processor, which can be obtained at:
* LICENSE:
* license/LICENSE.aalto-xml.txt (Apache License 2.0)
* HOMEPAGE:
* http://wiki.fasterxml.com/AaltoHome
This product contains a modified version of 'HPACK', a Java implementation of
the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at:
* LICENSE:
* license/LICENSE.hpack.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/twitter/hpack
This product contains a modified portion of 'Apache Commons Lang', a Java library
provides utilities for the java.lang API, which can be obtained at:
* LICENSE:
* license/LICENSE.commons-lang.txt (Apache License 2.0)
* HOMEPAGE:
* https://commons.apache.org/proper/commons-lang/
This product contains a forked and modified version of Tomcat Native
* LICENSE:
* ASL2
* HOMEPAGE:
* http://tomcat.apache.org/native-doc/
* https://svn.apache.org/repos/asf/tomcat/native/
(ASLv2) Guava
The following NOTICE information applies:
Guava
Copyright 2015 The Guava Authors
(ASLv2) Dropwizard Metrics
The following NOTICE information applies:
Copyright (c) 2010-2013 Coda Hale, Yammer.com
This product includes software developed by Coda Hale and Yammer, Inc.
This product includes code derived from the JSR-166 project (ThreadLocalRandom, Striped64,
LongAdder), which was released with the following comments:
Written by Doug Lea with assistance from members of JCP JSR-166
Expert Group and released to the public domain, as explained at
http://creativecommons.org/publicdomain/zero/1.0/
************************
Eclipse Public License 1.0
************************
The following binary components are provided under the Eclipse Public License 1.0. See project link for details.
(EPL 2.0)(GPL 2)(LGPL 2.1) JNR Posix ( jnr.posix ) https://github.com/jnr/jnr-posix/blob/master/LICENSE.txt

View File

@ -1,40 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>nifi-cassandra-bundle</artifactId>
<groupId>org.apache.nifi</groupId>
<version>2.0.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>nifi-cassandra-services-api</artifactId>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-api</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.datastax.cassandra</groupId>
<artifactId>cassandra-driver-core</artifactId>
<version>${cassandra.sdk.version}</version>
</dependency>
</dependencies>
</project>

View File

@ -1,35 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.cassandra;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import org.apache.nifi.controller.ControllerService;
public interface CassandraSessionProviderService extends ControllerService {
/**
* Obtains a Cassandra session instance
* @return {@link Session}
*/
Session getCassandraSession();
/**
* Obtains a Cassandra cluster instance
* @return {@link Cluster}
*/
Cluster getCluster();
}
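A minimal sketch of how a processor might consume this controller service; the processor class, property name, and query below are illustrative assumptions, not part of the removed sources:
import com.datastax.driver.core.Session;
import org.apache.nifi.cassandra.CassandraSessionProviderService;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.exception.ProcessException;
public class ExampleCassandraProcessor extends AbstractProcessor {
    // Hypothetical property that references a CassandraSessionProviderService instance.
    static final PropertyDescriptor SESSION_PROVIDER = new PropertyDescriptor.Builder()
            .name("cassandra-session-provider") // illustrative name
            .displayName("Cassandra Session Provider")
            .identifiesControllerService(CassandraSessionProviderService.class)
            .required(true)
            .build();
    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
        final CassandraSessionProviderService provider = context.getProperty(SESSION_PROVIDER)
                .asControllerService(CassandraSessionProviderService.class);
        final Session cassandraSession = provider.getCassandraSession();
        // Example statement; any CQL could be executed against the shared session.
        cassandraSession.execute("SELECT release_version FROM system.local");
    }
}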

View File

@ -1,56 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>nifi-cassandra-bundle</artifactId>
<groupId>org.apache.nifi</groupId>
<version>2.0.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>nifi-cassandra-services-nar</artifactId>
<packaging>nar</packaging>
<dependencyManagement>
<dependencies>
<!-- Provided in nifi-cassandra-services-api-nar -->
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<scope>provided</scope>
</dependency>
</dependencies>
</dependencyManagement>
<dependencies>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-services-api-nar</artifactId>
<version>2.0.0-SNAPSHOT</version>
<type>nar</type>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-services</artifactId>
<version>2.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-distributedmapcache-service</artifactId>
<version>2.0.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>

View File

@ -1,352 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
APACHE NIFI SUBCOMPONENTS:
The Apache NiFi project contains subcomponents with separate copyright
notices and license terms. Your use of the source code for these
subcomponents is subject to the terms and conditions of the following
licenses.
This product bundles 'libffi' which is available under an MIT style license.
libffi - Copyright (c) 1996-2014 Anthony Green, Red Hat, Inc and others.
see https://github.com/java-native-access/jna/blob/master/native/libffi/LICENSE
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This product bundles 'asm' which is available under a 3-Clause BSD style license.
For details see http://asm.ow2.org/asmdex-license.html
Copyright (c) 2012 France Télécom
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
The binary distribution of this product bundles 'Bouncy Castle JDK 1.5'
under an MIT style license.
Copyright (c) 2000 - 2015 The Legion of the Bouncy Castle Inc. (http://www.bouncycastle.org)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
The binary distribution of this product bundles 'JNR x86asm' under an MIT
style license.
Copyright (C) 2010 Wayne Meissner
Copyright (c) 2008-2009, Petr Kobalicek <kobalicek.petr@gmail.com>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
This product bundles 'logback' which is dual-licensed under the EPL v1.0
and the LGPL 2.1.
Logback: the reliable, generic, fast and flexible logging framework.
Copyright (C) 1999-2017, QOS.ch. All rights reserved.
This program and the accompanying materials are dual-licensed under
either the terms of the Eclipse Public License v1.0 as published by
the Eclipse Foundation or (per the licensee's choosing) under the
terms of the GNU Lesser General Public License version 2.1 as
published by the Free Software Foundation.
The binary distribution of this product bundles 'ANTLR 3' which is available
under a "3-clause BSD" license. For details see http://www.antlr.org/license.html
Copyright (c) 2012 Terence Parr and Sam Harwell
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of
conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
Neither the name of the author nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,292 +0,0 @@
nifi-cassandra-services-nar
Copyright 2016-2020 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
******************
Apache Software License v2
******************
The following binary components are provided under the Apache Software License v2
(ASLv2) DataStax Java Driver for Apache Cassandra - Core
The following NOTICE information applies:
DataStax Java Driver for Apache Cassandra - Core
Copyright (C) 2012-2017 DataStax Inc.
(ASLv2) Jackson JSON processor
The following NOTICE information applies:
# Jackson JSON processor
Jackson is a high-performance, Free/Open Source JSON processing library.
It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has
been in development since 2007.
It is currently developed by a community of developers, as well as supported
commercially by FasterXML.com.
## Licensing
Jackson core and extension components may be licensed under different licenses.
To find the details that apply to this artifact see the accompanying LICENSE file.
For more information, including possible other licensing options, contact
FasterXML.com (http://fasterxml.com).
## Credits
A list of contributors may be found from CREDITS file, which is included
in some artifacts (usually source distributions); but is always available
from the source code management (SCM) system project uses.
(ASLv2) Apache Commons Codec
The following NOTICE information applies:
Apache Commons Codec
Copyright 2002-2014 The Apache Software Foundation
src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java
contains test data from http://aspell.net/test/orig/batch0.tab.
Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org)
===============================================================================
The content of package org.apache.commons.codec.language.bm has been translated
from the original php source code available at http://stevemorse.org/phoneticinfo.htm
with permission from the original authors.
Original source copyright:
Copyright (c) 2008 Alexander Beider & Stephen P. Morse.
(ASLv2) Apache Commons Lang
The following NOTICE information applies:
Apache Commons Lang
Copyright 2001-2017 The Apache Software Foundation
This product includes software from the Spring Framework,
under the Apache License 2.0 (see: StringUtils.containsWhitespace())
(ASLv2) Guava
The following NOTICE information applies:
Guava
Copyright 2015 The Guava Authors
(ASLv2) JSON-SMART
The following NOTICE information applies:
Copyright 2011 JSON-SMART authors
(ASLv2) Dropwizard Metrics
The following NOTICE information applies:
Copyright (c) 2010-2013 Coda Hale, Yammer.com
This product includes software developed by Coda Hale and Yammer, Inc.
This product includes code derived from the JSR-166 project (ThreadLocalRandom, Striped64,
LongAdder), which was released with the following comments:
Written by Doug Lea with assistance from members of JCP JSR-166
Expert Group and released to the public domain, as explained at
http://creativecommons.org/publicdomain/zero/1.0/
(ASLv2) The Netty Project
The following NOTICE information applies:
Copyright 2014 The Netty Project
-------------------------------------------------------------------------------
This product contains the extensions to Java Collections Framework which has
been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
* LICENSE:
* license/LICENSE.jsr166y.txt (Public Domain)
* HOMEPAGE:
* http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
* http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
This product contains a modified version of Robert Harder's Public Domain
Base64 Encoder and Decoder, which can be obtained at:
* LICENSE:
* license/LICENSE.base64.txt (Public Domain)
* HOMEPAGE:
* http://iharder.sourceforge.net/current/java/base64/
This product contains a modified portion of 'Webbit', an event based
WebSocket and HTTP server, which can be obtained at:
* LICENSE:
* license/LICENSE.webbit.txt (BSD License)
* HOMEPAGE:
* https://github.com/joewalnes/webbit
This product contains a modified portion of 'SLF4J', a simple logging
facade for Java, which can be obtained at:
* LICENSE:
* license/LICENSE.slf4j.txt (MIT License)
* HOMEPAGE:
* http://www.slf4j.org/
This product contains a modified portion of 'Apache Harmony', an open source
Java SE, which can be obtained at:
* LICENSE:
* license/LICENSE.harmony.txt (Apache License 2.0)
* HOMEPAGE:
* http://archive.apache.org/dist/harmony/
This product contains a modified portion of 'jbzip2', a Java bzip2 compression
and decompression library written by Matthew J. Francis. It can be obtained at:
* LICENSE:
* license/LICENSE.jbzip2.txt (MIT License)
* HOMEPAGE:
* https://code.google.com/p/jbzip2/
This product contains a modified portion of 'libdivsufsort', a C API library to construct
the suffix array and the Burrows-Wheeler transformed string for any input string of
a constant-size alphabet written by Yuta Mori. It can be obtained at:
* LICENSE:
* license/LICENSE.libdivsufsort.txt (MIT License)
* HOMEPAGE:
* https://github.com/y-256/libdivsufsort
This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM,
which can be obtained at:
* LICENSE:
* license/LICENSE.jctools.txt (ASL2 License)
* HOMEPAGE:
* https://github.com/JCTools/JCTools
This product optionally depends on 'JZlib', a re-implementation of zlib in
pure Java, which can be obtained at:
* LICENSE:
* license/LICENSE.jzlib.txt (BSD style License)
* HOMEPAGE:
* http://www.jcraft.com/jzlib/
This product optionally depends on 'Compress-LZF', a Java library for encoding and
decoding data in LZF format, written by Tatu Saloranta. It can be obtained at:
* LICENSE:
* license/LICENSE.compress-lzf.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/ning/compress
This product optionally depends on 'lz4', a LZ4 Java compression
and decompression library written by Adrien Grand. It can be obtained at:
* LICENSE:
* license/LICENSE.lz4.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/jpountz/lz4-java
This product optionally depends on 'lzma-java', a LZMA Java compression
and decompression library, which can be obtained at:
* LICENSE:
* license/LICENSE.lzma-java.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/jponge/lzma-java
This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression
and decompression library written by William Kinney. It can be obtained at:
* LICENSE:
* license/LICENSE.jfastlz.txt (MIT License)
* HOMEPAGE:
* https://code.google.com/p/jfastlz/
This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data
interchange format, which can be obtained at:
* LICENSE:
* license/LICENSE.protobuf.txt (New BSD License)
* HOMEPAGE:
* https://github.com/google/protobuf
This product optionally depends on 'Bouncy Castle Crypto APIs' to generate
a temporary self-signed X.509 certificate when the JVM does not provide the
equivalent functionality. It can be obtained at:
* LICENSE:
* license/LICENSE.bouncycastle.txt (MIT License)
* HOMEPAGE:
* http://www.bouncycastle.org/
This product optionally depends on 'Snappy', a compression library produced
by Google Inc, which can be obtained at:
* LICENSE:
* license/LICENSE.snappy.txt (New BSD License)
* HOMEPAGE:
* https://github.com/google/snappy
This product optionally depends on 'JBoss Marshalling', an alternative Java
serialization API, which can be obtained at:
* LICENSE:
* license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1)
* HOMEPAGE:
* http://www.jboss.org/jbossmarshalling
This product optionally depends on 'Caliper', Google's micro-
benchmarking framework, which can be obtained at:
* LICENSE:
* license/LICENSE.caliper.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/google/caliper
This product optionally depends on 'Apache Log4J', a logging framework, which
can be obtained at:
* LICENSE:
* license/LICENSE.log4j.txt (Apache License 2.0)
* HOMEPAGE:
* http://logging.apache.org/log4j/
This product optionally depends on 'Aalto XML', an ultra-high performance
non-blocking XML processor, which can be obtained at:
* LICENSE:
* license/LICENSE.aalto-xml.txt (Apache License 2.0)
* HOMEPAGE:
* http://wiki.fasterxml.com/AaltoHome
This product contains a modified version of 'HPACK', a Java implementation of
the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at:
* LICENSE:
* license/LICENSE.hpack.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/twitter/hpack
This product contains a modified portion of 'Apache Commons Lang', a Java library
that provides utilities for the java.lang API, which can be obtained at:
* LICENSE:
* license/LICENSE.commons-lang.txt (Apache License 2.0)
* HOMEPAGE:
* https://commons.apache.org/proper/commons-lang/
This product contains a forked and modified version of Tomcat Native
* LICENSE:
* ASL2
* HOMEPAGE:
* http://tomcat.apache.org/native-doc/
* https://svn.apache.org/repos/asf/tomcat/native/
(ASLv2) Objenesis
The following NOTICE information applies:
Objenesis
Copyright 2006-2013 Joe Walnes, Henri Tremblay, Leonardo Mesquita
************************
Eclipse Public License 1.0
************************
The following binary components are provided under the Eclipse Public License 1.0. See project link for details.
(EPL 2.0)(GPL 2)(LGPL 2.1) JNR Posix ( jnr.posix ) https://github.com/jnr/jnr-posix/blob/master/LICENSE.txt
(EPL 1.0)(LGPL 2.1) Logback Classic (ch.qos.logback:logback-classic:jar:1.2.6 - http://logback.qos.ch/)
(EPL 1.0)(LGPL 2.1) Logback Core (ch.qos.logback:logback-core:jar:1.2.6 - http://logback.qos.ch/)

View File

@@ -1,62 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>nifi-cassandra-bundle</artifactId>
<groupId>org.apache.nifi</groupId>
<version>2.0.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>nifi-cassandra-services</artifactId>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-api</artifactId>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-utils</artifactId>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-services-api</artifactId>
<scope>provided</scope>
</dependency>
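<!-- Provided scope: the driver is expected to come from the parent NAR classpath (services API NAR) rather than being bundled here -->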
<dependency>
<groupId>com.datastax.cassandra</groupId>
<artifactId>cassandra-driver-core</artifactId>
<version>${cassandra.sdk.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-ssl-context-service-api</artifactId>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-framework-api</artifactId>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-mock</artifactId>
</dependency>
</dependencies>
</project>

View File

@@ -1,315 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.service;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.ProtocolOptions;
import com.datastax.driver.core.RemoteEndpointAwareJdkSSLOptions;
import com.datastax.driver.core.SSLOptions;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SocketOptions;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import javax.net.ssl.SSLContext;
import org.apache.nifi.annotation.documentation.CapabilityDescription;
import org.apache.nifi.annotation.documentation.Tags;
import org.apache.nifi.annotation.lifecycle.OnDisabled;
import org.apache.nifi.annotation.lifecycle.OnEnabled;
import org.apache.nifi.cassandra.CassandraSessionProviderService;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.PropertyValue;
import org.apache.nifi.controller.AbstractControllerService;
import org.apache.nifi.controller.ConfigurationContext;
import org.apache.nifi.controller.ControllerServiceInitializationContext;
import org.apache.nifi.expression.ExpressionLanguageScope;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.exception.ProcessException;
import org.apache.nifi.processor.util.StandardValidators;
import org.apache.nifi.security.util.ClientAuth;
import org.apache.nifi.ssl.SSLContextService;
@Tags({"cassandra", "dbcp", "database", "connection", "pooling"})
@CapabilityDescription("Provides connection session for Cassandra processors to work with Apache Cassandra.")
public class CassandraSessionProvider extends AbstractControllerService implements CassandraSessionProviderService {
public static final int DEFAULT_CASSANDRA_PORT = 9042;
// Common descriptors
public static final PropertyDescriptor CONTACT_POINTS = new PropertyDescriptor.Builder()
.name("Cassandra Contact Points")
.description("Contact points are addresses of Cassandra nodes. The list of contact points should be "
+ "comma-separated and in hostname:port format. Example node1:port,node2:port,...."
+ " The default client port for Cassandra is 9042, but the port(s) must be explicitly specified.")
.required(true)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.addValidator(StandardValidators.HOSTNAME_PORT_LIST_VALIDATOR)
.build();
public static final PropertyDescriptor KEYSPACE = new PropertyDescriptor.Builder()
.name("Keyspace")
.description("The Cassandra Keyspace to connect to. If no keyspace is specified, the query will need to " +
"include the keyspace name before any table reference, in case of 'query' native processors or " +
"if the processor supports the 'Table' property, the keyspace name has to be provided with the " +
"table name in the form of <KEYSPACE>.<TABLE>")
.required(false)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
.build();
public static final PropertyDescriptor PROP_SSL_CONTEXT_SERVICE = new PropertyDescriptor.Builder()
.name("SSL Context Service")
.description("The SSL Context Service used to provide client certificate information for TLS/SSL "
+ "connections.")
.required(false)
.identifiesControllerService(SSLContextService.class)
.build();
public static final PropertyDescriptor CLIENT_AUTH = new PropertyDescriptor.Builder()
.name("Client Auth")
.description("Client authentication policy when connecting to secure (TLS/SSL) cluster. "
+ "Possible values are REQUIRED, WANT, NONE. This property is only used when an SSL Context "
+ "has been defined and enabled.")
.required(false)
.allowableValues(ClientAuth.values())
.defaultValue("REQUIRED")
.build();
public static final PropertyDescriptor USERNAME = new PropertyDescriptor.Builder()
.name("Username")
.description("Username to access the Cassandra cluster")
.required(false)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
.build();
public static final PropertyDescriptor PASSWORD = new PropertyDescriptor.Builder()
.name("Password")
.description("Password to access the Cassandra cluster")
.required(false)
.sensitive(true)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
.build();
public static final PropertyDescriptor CONSISTENCY_LEVEL = new PropertyDescriptor.Builder()
.name("Consistency Level")
.description("The strategy for how many replicas must respond before results are returned.")
.required(true)
.allowableValues(ConsistencyLevel.values())
.defaultValue("ONE")
.build();
static final PropertyDescriptor COMPRESSION_TYPE = new PropertyDescriptor.Builder()
.name("Compression Type")
.description("Enable compression at transport-level requests and responses")
.required(false)
.allowableValues(ProtocolOptions.Compression.values())
.addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
.defaultValue("NONE")
.build();
static final PropertyDescriptor READ_TIMEOUT_MS = new PropertyDescriptor.Builder()
.name("read-timeout-ms")
.displayName("Read Timout (ms)")
.description("Read timeout (in milliseconds). 0 means no timeout. If no value is set, the underlying default will be used.")
.required(false)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR)
.build();
static final PropertyDescriptor CONNECT_TIMEOUT_MS = new PropertyDescriptor.Builder()
.name("connect-timeout-ms")
.displayName("Connect Timeout (ms)")
.description("Connection timeout (in milliseconds). 0 means no timeout. If no value is set, the underlying default will be used.")
.required(false)
.expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
.addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR)
.build();
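// Supported descriptors plus shared driver state; the Cluster and Session are created in onEnabled and closed in onDisabled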
private List<PropertyDescriptor> properties;
private Cluster cluster;
private Session cassandraSession;
@Override
public void init(final ControllerServiceInitializationContext context) {
List<PropertyDescriptor> props = new ArrayList<>();
props.add(CONTACT_POINTS);
props.add(CLIENT_AUTH);
props.add(CONSISTENCY_LEVEL);
props.add(COMPRESSION_TYPE);
props.add(KEYSPACE);
props.add(USERNAME);
props.add(PASSWORD);
props.add(PROP_SSL_CONTEXT_SERVICE);
props.add(READ_TIMEOUT_MS);
props.add(CONNECT_TIMEOUT_MS);
properties = props;
}
@Override
public List<PropertyDescriptor> getSupportedPropertyDescriptors() {
return properties;
}
@OnEnabled
public void onEnabled(final ConfigurationContext context) {
connectToCassandra(context);
}
@OnDisabled
public void onDisabled() {
if (cassandraSession != null) {
cassandraSession.close();
cassandraSession = null;
}
if (cluster != null) {
cluster.close();
cluster = null;
}
}
@Override
public Cluster getCluster() {
if (cluster != null) {
return cluster;
} else {
throw new ProcessException("Unable to get the Cassandra cluster detail.");
}
}
@Override
public Session getCassandraSession() {
if (cassandraSession != null) {
return cassandraSession;
} else {
throw new ProcessException("Unable to get the Cassandra session.");
}
}
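// Builds the Cluster and Session from the configured properties; does nothing if a cluster already exists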
private void connectToCassandra(ConfigurationContext context) {
if (cluster == null) {
ComponentLog log = getLogger();
final String contactPointList = context.getProperty(CONTACT_POINTS).evaluateAttributeExpressions().getValue();
final String consistencyLevel = context.getProperty(CONSISTENCY_LEVEL).getValue();
final String compressionType = context.getProperty(COMPRESSION_TYPE).getValue();
List<InetSocketAddress> contactPoints = getContactPoints(contactPointList);
// Set up the client for secure (SSL/TLS) communications if configured to do so
final SSLContextService sslService =
context.getProperty(PROP_SSL_CONTEXT_SERVICE).asControllerService(SSLContextService.class);
final SSLContext sslContext;
if (sslService == null) {
sslContext = null;
} else {
sslContext = sslService.createContext();
}
final String username, password;
PropertyValue usernameProperty = context.getProperty(USERNAME).evaluateAttributeExpressions();
PropertyValue passwordProperty = context.getProperty(PASSWORD).evaluateAttributeExpressions();
if (usernameProperty != null && passwordProperty != null) {
username = usernameProperty.getValue();
password = passwordProperty.getValue();
} else {
username = null;
password = null;
}
final Integer readTimeoutMillis = context.getProperty(READ_TIMEOUT_MS).evaluateAttributeExpressions().asInteger();
final Integer connectTimeoutMillis = context.getProperty(CONNECT_TIMEOUT_MS).evaluateAttributeExpressions().asInteger();
// Create the cluster and connect to it
Cluster newCluster = createCluster(contactPoints, sslContext, username, password, compressionType, readTimeoutMillis, connectTimeoutMillis);
PropertyValue keyspaceProperty = context.getProperty(KEYSPACE).evaluateAttributeExpressions();
final Session newSession;
if (keyspaceProperty != null) {
newSession = newCluster.connect(keyspaceProperty.getValue());
} else {
newSession = newCluster.connect();
}
newCluster.getConfiguration().getQueryOptions().setConsistencyLevel(ConsistencyLevel.valueOf(consistencyLevel));
Metadata metadata = newCluster.getMetadata();
log.info("Connected to Cassandra cluster: {}", metadata.getClusterName());
cluster = newCluster;
cassandraSession = newSession;
}
}
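// Parses a comma-separated list of host:port entries; an entry without an explicit port falls back to DEFAULT_CASSANDRA_PORT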
private List<InetSocketAddress> getContactPoints(String contactPointList) {
if (contactPointList == null) {
return null;
}
final String[] contactPointStringList = contactPointList.split(",");
List<InetSocketAddress> contactPoints = new ArrayList<>();
for (String contactPointEntry : contactPointStringList) {
String[] addresses = contactPointEntry.split(":");
final String hostName = addresses[0].trim();
final int port = (addresses.length > 1) ? Integer.parseInt(addresses[1].trim()) : DEFAULT_CASSANDRA_PORT;
contactPoints.add(new InetSocketAddress(hostName, port));
}
return contactPoints;
}
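// Assembles the driver Cluster, applying TLS, credentials, compression, and socket timeouts only when configured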
private Cluster createCluster(final List<InetSocketAddress> contactPoints, final SSLContext sslContext,
final String username, final String password, final String compressionType,
final Integer readTimeoutMillis, final Integer connectTimeoutMillis) {
Cluster.Builder builder = Cluster.builder().addContactPointsWithPorts(contactPoints);
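// Wrap the NiFi-provided SSLContext in the driver's SSLOptions so the connection uses TLS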
if (sslContext != null) {
final SSLOptions sslOptions = RemoteEndpointAwareJdkSSLOptions.builder()
.withSSLContext(sslContext)
.build();
builder = builder.withSSL(sslOptions);
}
if (username != null && password != null) {
builder = builder.withCredentials(username, password);
}
if (ProtocolOptions.Compression.SNAPPY.name().equals(compressionType)) {
builder = builder.withCompression(ProtocolOptions.Compression.SNAPPY);
} else if (ProtocolOptions.Compression.LZ4.name().equals(compressionType)) {
builder = builder.withCompression(ProtocolOptions.Compression.LZ4);
}
SocketOptions socketOptions = new SocketOptions();
if (readTimeoutMillis != null) {
socketOptions.setReadTimeoutMillis(readTimeoutMillis);
}
if (connectTimeoutMillis != null) {
socketOptions.setConnectTimeoutMillis(connectTimeoutMillis);
}
builder.withSocketOptions(socketOptions);
return builder.build();
}
}

View File

@@ -1,16 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
org.apache.nifi.service.CassandraSessionProvider

View File

@@ -1,51 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.service;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.exception.ProcessException;
import org.apache.nifi.processor.util.StandardValidators;
import java.util.Collections;
import java.util.List;
/**
* Mock Cassandra processor for testing CassandraSessionProvider
*/
public class MockCassandraProcessor extends AbstractProcessor {
private static final PropertyDescriptor CASSANDRA_SESSION_PROVIDER = new PropertyDescriptor.Builder()
.name("cassandra-session-provider")
.displayName("Cassandra Session Provider")
.required(true)
.description("Controller Service to obtain a Cassandra connection session")
.addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
.identifiesControllerService(CassandraSessionProvider.class)
.build();
@Override
public List<PropertyDescriptor> getSupportedPropertyDescriptors() {
return Collections.singletonList(CASSANDRA_SESSION_PROVIDER);
}
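// No-op trigger; this mock exists only so tests can register and enable the session provider service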
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
}
}

View File

@@ -1,59 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.service;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.reporting.InitializationException;
import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestCassandraSessionProvider {
private static TestRunner runner;
private static CassandraSessionProvider sessionProvider;
@BeforeAll
public static void setup() throws InitializationException {
MockCassandraProcessor mockCassandraProcessor = new MockCassandraProcessor();
sessionProvider = new CassandraSessionProvider();
runner = TestRunners.newTestRunner(mockCassandraProcessor);
runner.addControllerService("cassandra-session-provider", sessionProvider);
}
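// The provider registers ten descriptors in init(); spot-check a representative subset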
@Test
public void testGetPropertyDescriptors() {
List<PropertyDescriptor> properties = sessionProvider.getPropertyDescriptors();
assertEquals(10, properties.size());
assertTrue(properties.contains(CassandraSessionProvider.CLIENT_AUTH));
assertTrue(properties.contains(CassandraSessionProvider.CONSISTENCY_LEVEL));
assertTrue(properties.contains(CassandraSessionProvider.CONTACT_POINTS));
assertTrue(properties.contains(CassandraSessionProvider.KEYSPACE));
assertTrue(properties.contains(CassandraSessionProvider.PASSWORD));
assertTrue(properties.contains(CassandraSessionProvider.PROP_SSL_CONTEXT_SERVICE));
assertTrue(properties.contains(CassandraSessionProvider.USERNAME));
}
}

View File

@@ -1,58 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>nifi-standard-shared-bom</artifactId>
<groupId>org.apache.nifi</groupId>
<version>2.0.0-SNAPSHOT</version>
<relativePath>../nifi-standard-shared-bundle/nifi-standard-shared-bom</relativePath>
</parent>
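<!-- Dependency versions shared across the Cassandra modules -->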
<properties>
<cassandra.sdk.version>3.11.5</cassandra.sdk.version>
<cassandra.guava.version>19.0</cassandra.guava.version>
</properties>
<artifactId>nifi-cassandra-bundle</artifactId>
<packaging>pom</packaging>
<modules>
<module>nifi-cassandra-processors</module>
<module>nifi-cassandra-nar</module>
<module>nifi-cassandra-distributedmapcache-service</module>
<module>nifi-cassandra-services-api</module>
<module>nifi-cassandra-services-api-nar</module>
<module>nifi-cassandra-services</module>
<module>nifi-cassandra-services-nar</module>
</modules>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-processors</artifactId>
<version>2.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${cassandra.guava.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
</project>

View File

@@ -61,7 +61,6 @@
<module>nifi-splunk-bundle</module>
<module>nifi-jms-bundle</module>
<module>nifi-beats-bundle</module>
<module>nifi-cassandra-bundle</module>
<module>nifi-hive-bundle</module>
<module>nifi-site-to-site-reporting-bundle</module>
<module>nifi-mqtt-bundle</module>
@@ -231,11 +230,6 @@
<artifactId>nifi-http-context-map</artifactId>
<version>2.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-cassandra-services-api</artifactId>
<version>2.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-volatile-provenance-repository</artifactId>