mirror of https://github.com/apache/nifi.git
NIFI-10975 Add Kubernetes Leader Election and State Provider (#6779)
* NIFI-10975 Added Kubernetes Leader Election and State Provider
  - Added Kubernetes Leader Election Manager based on Kubernetes Leases
  - Added Kubernetes State Provider based on Kubernetes ConfigMaps
  - Added nifi-kubernetes-client for generalized access to Fabric8 Kubernetes Client
  - Added nifi.cluster.leader.election.implementation Property defaulting to CuratorLeaderElectionManager
  - Refactored LeaderElectionManager to nifi-framework-api for Extension Discovering Manager
  - Refactored shared ZooKeeper configuration to nifi-framework-cluster-zookeeper
* NIFI-10975 Updated Kubernetes Client and StateMap
  - Upgraded Kubernetes Client from 6.2.0 to 6.3.0
  - Added getStateVersion to StateMap and deprecated getVersion
  - Updated Docker start.sh with additional properties
* NIFI-10975 Corrected MockStateManager.assertStateSet()
* NIFI-10975 Upgraded Kubernetes Client from 6.3.0 to 6.3.1
* NIFI-10975 Corrected unregister leader and disabled release on cancel
* NIFI-10975 Corrected findLeader handling of Lease expiration
  - Changed LeaderElectionManager.getLeader() return to Optional String
* NIFI-10975 Corrected StandardNiFiServiceFacade handling of Optional Leader
* NIFI-10975 Changed getLeader() to call findLeader() to avoid stale cached values
* NIFI-10975 Updated LeaderElectionCommand to run LeaderElector in loop
* NIFI-10975 Rebased on project version 2.0.0-SNAPSHOT
* NIFI-10975 Corrected Gson and AspectJ versions
  - Updated versions to match current main branch and avoid reverting
This commit is contained in:
parent d3908dede8
commit 512155ba2f
@@ -18,6 +18,7 @@
 package org.apache.nifi.components.state;
 
 import java.util.Map;
+import java.util.Optional;
 
 /**
  * Provides a representation of a component's state at some point in time.
@@ -29,13 +30,27 @@ public interface StateMap {
      * Though this number is monotonically increasing, it should not be expected to increment always
      * from X to X+1. I.e., version numbers may be skipped.
      *
+     * @deprecated This method should be replaced with getStateVersion()
+     *
      * @return the version associated with the state
      */
+    @Deprecated
     long getVersion();
 
     /**
-     * Returns the value associated with the given key
+     * Get state version is not guaranteed to be numeric, but can be used to compare against an expected version.
+     * The default implementation uses the available version number and considers -1 as indicating an empty version
      *
+     * @return State version or empty when not known
+     */
+    default Optional<String> getStateVersion() {
+        final long version = getVersion();
+        return version == -1 ? Optional.empty() : Optional.of(String.valueOf(version));
+    }
+
+    /**
+     * Returns the value associated with the given key
+     *
      * @param key the key whose value should be retrieved
      * @return the value associated with the given key, or <code>null</code> if no value is associated
      * with this key.

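The default getStateVersion() bridges the deprecated numeric version to the new Optional-based contract. As a minimal illustration (the StateMap instance comes from the caller's StateManager; nothing beyond that is assumed), a caller can migrate away from the -1 sentinel like this:

import java.util.Optional;

import org.apache.nifi.components.state.StateMap;

class StateVersionExample {
    // Previously callers compared getVersion() against -1 to detect "no state stored"
    static String describeVersion(final StateMap stateMap) {
        final Optional<String> stateVersion = stateMap.getStateVersion();
        return stateVersion.orElse("empty");
    }
}
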
@@ -919,6 +919,12 @@ language governing permissions and limitations under the License. -->
             <version>2.0.0-SNAPSHOT</version>
             <type>nar</type>
         </dependency>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-framework-kubernetes-nar</artifactId>
+            <version>2.0.0-SNAPSHOT</version>
+            <type>nar</type>
+        </dependency>
         <!-- AspectJ library needed by the Java Agent used for native library loading (see bootstrap.conf) -->
         <dependency>
             <groupId>org.aspectj</groupId>

@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License. You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.apache.nifi</groupId>
+        <artifactId>nifi-commons</artifactId>
+        <version>2.0.0-SNAPSHOT</version>
+    </parent>
+    <artifactId>nifi-kubernetes-client</artifactId>
+    <description>Minimal abstraction for access Kubernetes REST API Resources</description>
+    <dependencies>
+        <dependency>
+            <groupId>io.fabric8</groupId>
+            <artifactId>kubernetes-client-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>io.fabric8</groupId>
+            <artifactId>kubernetes-client</artifactId>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+</project>

@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.kubernetes.client;
+
+import io.fabric8.kubernetes.client.KubernetesClient;
+
+/**
+ * Abstraction for providing a configured Kubernetes Client
+ */
+public interface KubernetesClientProvider {
+    /**
+     * Get configured Kubernetes Client
+     *
+     * @return Kubernetes Client
+     */
+    KubernetesClient getKubernetesClient();
+}

@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.kubernetes.client;
+
+/**
+ * Kubernetes Namespace Provider
+ */
+public interface NamespaceProvider {
+    /**
+     * Get Namespace
+     *
+     * @return Kubernetes Namespace
+     */
+    String getNamespace();
+}

@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.kubernetes.client;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+
+/**
+ * Service Account Namespace Provider based on standard file location
+ */
+public class ServiceAccountNamespaceProvider implements NamespaceProvider {
+    protected static final String NAMESPACE_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/namespace";
+
+    protected static final String DEFAULT_NAMESPACE = "default";
+
+    /**
+     * Get Namespace from Service Account location or return default namespace when not found
+     *
+     * @return Kubernetes Namespace
+     */
+    @Override
+    public String getNamespace() {
+        final Path namespacePath = Paths.get(NAMESPACE_PATH);
+        return Files.isReadable(namespacePath) ? getNamespace(namespacePath) : DEFAULT_NAMESPACE;
+    }
+
+    private String getNamespace(final Path namespacePath) {
+        try {
+            final byte[] bytes = Files.readAllBytes(namespacePath);
+            return new String(bytes, StandardCharsets.UTF_8).trim();
+        } catch (final IOException e) {
+            throw new UncheckedIOException("Read Service Account namespace failed", e);
+        }
+    }
+}

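A short usage sketch for the provider added above: inside a Pod it returns the Service Account namespace read from the standard file location, and it falls back to "default" anywhere that file is not readable (for example on a developer workstation). Nothing outside this new module is assumed.

import org.apache.nifi.kubernetes.client.NamespaceProvider;
import org.apache.nifi.kubernetes.client.ServiceAccountNamespaceProvider;

class NamespaceExample {
    public static void main(final String[] args) {
        final NamespaceProvider namespaceProvider = new ServiceAccountNamespaceProvider();
        // Prints the resolved Kubernetes namespace for the running process
        System.out.println("Namespace: " + namespaceProvider.getNamespace());
    }
}
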
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.kubernetes.client;
+
+import io.fabric8.kubernetes.client.KubernetesClient;
+import io.fabric8.kubernetes.client.KubernetesClientBuilder;
+
+/**
+ * Standard implementation of Kubernetes Client Provider with default properties
+ */
+public class StandardKubernetesClientProvider implements KubernetesClientProvider {
+    private volatile KubernetesClient kubernetesClient;
+
+    /**
+     * Get Kubernetes Client with default configuration discovery
+     *
+     * @return Kubernetes Client
+     */
+    @Override
+    public KubernetesClient getKubernetesClient() {
+        if (kubernetesClient == null) {
+            kubernetesClient = buildKubernetesClient();
+        }
+        return kubernetesClient;
+    }
+
+    private KubernetesClient buildKubernetesClient() {
+        return new KubernetesClientBuilder().build();
+    }
+}

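The two providers are designed to be combined: the client provider supplies a Fabric8 KubernetesClient built with default configuration discovery (kubeconfig or in-cluster Service Account), and the namespace provider scopes requests. The sketch below lists ConfigMaps, the resource type the new Kubernetes State Provider is based on; the listing itself is only an illustration and is not part of this commit.

import io.fabric8.kubernetes.client.KubernetesClient;
import org.apache.nifi.kubernetes.client.KubernetesClientProvider;
import org.apache.nifi.kubernetes.client.NamespaceProvider;
import org.apache.nifi.kubernetes.client.ServiceAccountNamespaceProvider;
import org.apache.nifi.kubernetes.client.StandardKubernetesClientProvider;

class KubernetesClientExample {
    public static void main(final String[] args) {
        final KubernetesClientProvider clientProvider = new StandardKubernetesClientProvider();
        final NamespaceProvider namespaceProvider = new ServiceAccountNamespaceProvider();

        final KubernetesClient client = clientProvider.getKubernetesClient();
        // List ConfigMap names in the resolved namespace
        client.configMaps().inNamespace(namespaceProvider.getNamespace()).list()
                .getItems()
                .forEach(configMap -> System.out.println(configMap.getMetadata().getName()));
    }
}
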
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.kubernetes.client;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
+class ServiceAccountNamespaceProviderTest {
+    ServiceAccountNamespaceProvider provider;
+
+    @BeforeEach
+    void setProvider() {
+        provider = new ServiceAccountNamespaceProvider();
+    }
+
+    @Test
+    void testGetNamespace() {
+        final String namespace = provider.getNamespace();
+
+        final Path namespacePath = Paths.get(ServiceAccountNamespaceProvider.NAMESPACE_PATH);
+        if (Files.isReadable(namespacePath)) {
+            assertNotNull(namespace);
+        } else {
+            assertEquals(ServiceAccountNamespaceProvider.DEFAULT_NAMESPACE, namespace);
+        }
+    }
+}

@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.kubernetes.client;
+
+import io.fabric8.kubernetes.client.KubernetesClient;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
+class StandardKubernetesClientProviderTest {
+    StandardKubernetesClientProvider provider;
+
+    @BeforeEach
+    void setProvider() {
+        provider = new StandardKubernetesClientProvider();
+    }
+
+    @Timeout(5)
+    @Test
+    void testGetKubernetesClient() {
+        final KubernetesClient kubernetesClient = provider.getKubernetesClient();
+
+        assertNotNull(kubernetesClient);
+    }
+}

@@ -252,6 +252,7 @@ public class NiFiProperties extends ApplicationProperties {
     public static final String CLUSTER_PROTOCOL_HEARTBEAT_INTERVAL = "nifi.cluster.protocol.heartbeat.interval";
     public static final String CLUSTER_PROTOCOL_HEARTBEAT_MISSABLE_MAX = "nifi.cluster.protocol.heartbeat.missable.max";
     public static final String CLUSTER_PROTOCOL_IS_SECURE = "nifi.cluster.protocol.is.secure";
+    public static final String CLUSTER_LEADER_ELECTION_IMPLEMENTATION = "nifi.cluster.leader.election.implementation";
 
     // cluster node properties
     public static final String CLUSTER_IS_NODE = "nifi.cluster.is.node";
@@ -405,6 +406,7 @@ public class NiFiProperties extends ApplicationProperties {
     public static final String DEFAULT_CLUSTER_NODE_READ_TIMEOUT = "5 sec";
     public static final String DEFAULT_CLUSTER_NODE_CONNECTION_TIMEOUT = "5 sec";
     public static final int DEFAULT_CLUSTER_NODE_MAX_CONCURRENT_REQUESTS = 100;
+    public static final String DEFAULT_CLUSTER_LEADER_ELECTION_IMPLEMENTATION = "CuratorLeaderElectionManager";
 
     // cluster node defaults
     public static final int DEFAULT_CLUSTER_NODE_PROTOCOL_THREADS = 10;

@@ -33,6 +33,7 @@
         <module>nifi-hl7-query-language</module>
         <module>nifi-json-utils</module>
         <module>nifi-jetty-configuration</module>
+        <module>nifi-kubernetes-client</module>
         <module>nifi-logging-utils</module>
         <module>nifi-metrics</module>
         <module>nifi-parameter</module>

@@ -89,6 +89,10 @@ prop_replace 'nifi.cluster.flow.election.max.wait.time' "${NIFI_ELECTION_MAX
 prop_replace 'nifi.cluster.flow.election.max.candidates' "${NIFI_ELECTION_MAX_CANDIDATES:-}"
 prop_replace 'nifi.web.proxy.context.path' "${NIFI_WEB_PROXY_CONTEXT_PATH:-}"
 
+# Set leader election and state management properties
+prop_replace 'nifi.cluster.leader.election.implementation' "${NIFI_LEADER_ELECTION_IMPLEMENTATION:-CuratorLeaderElectionManager}"
+prop_replace 'nifi.state.management.provider.cluster' "${NIFI_STATE_MANAGEMENT_CLUSTER_PROVIDER:-zk-provider}"
+
 # Set analytics properties
 prop_replace 'nifi.analytics.predict.enabled' "${NIFI_ANALYTICS_PREDICT_ENABLED:-false}"
 prop_replace 'nifi.analytics.predict.interval' "${NIFI_ANALYTICS_PREDICT_INTERVAL:-3 mins}"

@@ -4193,6 +4193,16 @@ Configure these properties for cluster nodes.
 |====
 |*Property*|*Description*
 |`nifi.cluster.is.node`|Set this to `true` if the instance is a node in a cluster. The default value is `false`.
+|`nifi.cluster.leader.election.implementation`|The Cluster Leader Election implementation class name or simple class
+name.
+
+The default value is `CuratorLeaderElectionManager` for ZooKeeper Leader Election using `nifi.zookeeper` settings.
+
+The implementation can be set to `KubernetesLeaderElectionManager` for Leader Election using
+link:https://kubernetes.io/docs/concepts/architecture/leases/[Kubernetes Leases]. The Kubernetes namespace for Leases
+will be read from the
+link:https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/[Service Account] namespace secret.
+The Kubernetes namespace will be set to `default` if the Service Account secret is not found.
 |`nifi.cluster.node.address`|The fully qualified address of the node. It is blank by default.
 |`nifi.cluster.node.protocol.port`|The node's protocol port. It is blank by default.
 |`nifi.cluster.node.protocol.max.threads`|The maximum number of threads that should be used to communicate with other nodes in the cluster. This property defaults to `50`. When a request is made to one node, it must be forwarded to the coordinator. The coordinator then replicates it to all nodes. There could be up to `n+2` threads for a given request, where `n` = number of nodes in your cluster. As an example, if 4 requests are made, a 5 node cluster will use `4 * 7 = 28` threads.

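For illustration only, a hedged sketch of how the property described above might be resolved in code; the java.util.Properties object stands in for nifi.properties, and the key and default value mirror the constants added to NiFiProperties in this commit.

import java.util.Properties;

class LeaderElectionConfigExample {
    static final String IMPLEMENTATION_PROPERTY = "nifi.cluster.leader.election.implementation";
    static final String DEFAULT_IMPLEMENTATION = "CuratorLeaderElectionManager";

    public static void main(final String[] args) {
        final Properties properties = new Properties(); // stands in for nifi.properties
        // Uncomment to opt into Kubernetes Lease based election:
        // properties.setProperty(IMPLEMENTATION_PROPERTY, "KubernetesLeaderElectionManager");
        final String implementation = properties.getProperty(IMPLEMENTATION_PROPERTY, DEFAULT_IMPLEMENTATION);
        System.out.println("Leader Election implementation: " + implementation);
    }
}
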
@@ -14,12 +14,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 package org.apache.nifi.controller.leader.election;
 
 import java.util.Map;
+import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 
+/**
+ * Leader Election Manager abstracts cluster election registration and status
+ */
 public interface LeaderElectionManager {
     /**
      * Starts managing leader elections for all registered roles
@@ -62,10 +65,10 @@ public interface LeaderElectionManager {
      * known or if the role was registered without providing a Participant ID, this will return <code>null</code>.
      *
      * @param roleName the name of the role
-     * @return the Participant ID of the node that is elected leader, or <code>null</code> if either no leader is known or the leader
+     * @return the Participant ID of the node that is elected leader, or <code>empty</code> if either no leader is known or the leader
      * did not register with a Participant ID.
      */
-    String getLeader(String roleName);
+    Optional<String> getLeader(String roleName);
 
     /**
      * Removes the role with the given name from this manager. If this
@@ -85,11 +88,6 @@ public interface LeaderElectionManager {
      */
     boolean isLeader(String roleName);
 
-    /**
-     * @return <code>true</code> if the manager is stopped, false otherwise.
-     */
-    boolean isStopped();
-
     /**
      * Stops managing leader elections and relinquishes the role as leader
      * for all registered roles. If the LeaderElectionManager is later started
@@ -97,14 +95,6 @@ public interface LeaderElectionManager {
      */
     void stop();
 
-    /**
-     * Returns <code>true</code> if a leader has been elected for the given role, <code>false</code> otherwise.
-     *
-     * @param roleName the name of the role
-     * @return <code>true</code> if a leader has been elected, <code>false</code> otherwise.
-     */
-    boolean isLeaderElected(String roleName);
-
     /**
      * Returns a Map of Role Name to the number of times that the leader has been detected as changing in the given time period. Note that
      * the amount of time that these counts is stored and the precision is implementation specific.

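Because getLeader() now returns Optional<String>, callers no longer need null or blank checks. A minimal sketch, assuming only an available LeaderElectionManager instance and an illustrative role name:

import java.util.Optional;

import org.apache.nifi.controller.leader.election.LeaderElectionManager;

class LeaderLookupExample {
    static String describeLeader(final LeaderElectionManager electionManager, final String roleName) {
        final Optional<String> leader = electionManager.getLeader(roleName);
        // map()/orElse() replaces the null handling required by the previous String-returning contract
        return leader.map(address -> "Leader for " + roleName + ": " + address)
                .orElse("No leader elected for " + roleName);
    }
}
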
@@ -14,7 +14,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.nifi.controller.leader.election;
 
 /**
@@ -24,12 +23,12 @@ package org.apache.nifi.controller.leader.election;
  */
 public interface LeaderElectionStateChangeListener {
     /**
-     * This method is invoked whenever this node is elected leader
+     * Invoked when the running node is elected as leader
      */
-    void onLeaderElection();
+    void onStartLeading();
 
     /**
-     * This method is invoked whenever this node no longer is the elected leader.
+     * Invoked when the running node is no longer elected as leader
      */
-    void onLeaderRelinquish();
+    void onStopLeading();
 }

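A sketch of a listener written against the renamed callbacks; how the listener is registered with a LeaderElectionManager is intentionally left out because the registration signature is not part of this hunk.

import org.apache.nifi.controller.leader.election.LeaderElectionStateChangeListener;

class LoggingStateChangeListener implements LeaderElectionStateChangeListener {
    @Override
    public void onStartLeading() {
        System.out.println("This node is now the elected leader");
    }

    @Override
    public void onStopLeading() {
        System.out.println("This node is no longer the elected leader");
    }
}
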
@@ -29,6 +29,9 @@ import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 public class MockStateManager implements StateManager {
     private final AtomicInteger versionIndex = new AtomicInteger(0);
 
@@ -260,7 +263,7 @@ public class MockStateManager implements StateManager {
      */
     public void assertStateSet(final Scope scope) {
         final StateMap stateMap = (scope == Scope.CLUSTER) ? clusterStateMap : localStateMap;
-        Assertions.assertNotSame(-1L, stateMap.getVersion(), "Expected state to be set for Scope " + scope + ", but it was not set");
+        assertTrue(stateMap.getStateVersion().isPresent(), "Expected state to be set for Scope " + scope + ", but it was not set");
     }
 
     /**
@@ -278,7 +281,7 @@ public class MockStateManager implements StateManager {
      */
     public void assertStateNotSet(final Scope scope) {
         final StateMap stateMap = (scope == Scope.CLUSTER) ? clusterStateMap : localStateMap;
-        Assertions.assertEquals(-1L, stateMap.getVersion(), "Expected state not to be set for Scope " + scope + ", but it was set");
+        assertFalse(stateMap.getStateVersion().isPresent(), "Expected state not to be set for Scope " + scope + ", but it was set");
     }
 
     /**

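A hedged sketch of how a processor test might exercise the updated assertions through the nifi-mock TestRunner; MyProcessor is a hypothetical processor class used only for this example.

import org.apache.nifi.components.state.Scope;
import org.apache.nifi.state.MockStateManager;
import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;

class MyProcessorStateTest {
    void assertClusterStateWritten() {
        final TestRunner runner = TestRunners.newTestRunner(new MyProcessor()); // MyProcessor is hypothetical
        runner.run();

        final MockStateManager stateManager = runner.getStateManager();
        // Now backed by StateMap.getStateVersion().isPresent() rather than a -1 version check
        stateManager.assertStateSet(Scope.CLUSTER);
    }
}
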
@@ -414,7 +414,7 @@ public class ListS3 extends AbstractS3Processor implements VerifiableProcessor {
 
     private void restoreState(final ProcessSession session) throws IOException {
         final StateMap stateMap = session.getState(Scope.CLUSTER);
-        if (stateMap.getVersion() == -1L || stateMap.get(CURRENT_TIMESTAMP) == null || stateMap.get(CURRENT_KEY_PREFIX+"0") == null) {
+        if (!stateMap.getStateVersion().isPresent() || stateMap.get(CURRENT_TIMESTAMP) == null || stateMap.get(CURRENT_KEY_PREFIX+"0") == null) {
             forcefullyUpdateListing(0L, Collections.emptySet());
         } else {
             final long timestamp = Long.parseLong(stateMap.get(CURRENT_TIMESTAMP));

@@ -398,7 +398,7 @@ public abstract class AbstractListProcessor<T extends ListableEntity> extends Ab
 
         // Check if state already exists for this path. If so, we have already migrated the state.
         final StateMap stateMap = context.getStateManager().getState(getStateScope(context));
-        if (stateMap.getVersion() == -1L) {
+        if (!stateMap.getStateVersion().isPresent()) {
             try {
                 // Migrate state from the old way of managing state (distributed cache service and local file)
                 // to the new mechanism (State Manager).

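Both processors above follow the same migration pattern: treat an absent state version as "no state stored yet" and initialize, otherwise read the stored keys. A generic sketch with hypothetical key names, assuming only a StateManager supplied by the framework:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.nifi.components.state.Scope;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.components.state.StateMap;

class StatePresenceExample {
    void restore(final StateManager stateManager) throws IOException {
        final StateMap stateMap = stateManager.getState(Scope.CLUSTER);
        if (!stateMap.getStateVersion().isPresent()) {
            // No state has ever been stored: write an initial value instead of reading keys
            final Map<String, String> initial = new HashMap<>();
            initial.put("timestamp", "0"); // hypothetical key
            stateManager.setState(initial, Scope.CLUSTER);
        } else {
            final String timestamp = stateMap.get("timestamp"); // hypothetical key
            System.out.println("Restoring from stored timestamp: " + timestamp);
        }
    }
}
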
@@ -0,0 +1,74 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License. You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.apache.nifi</groupId>
+        <artifactId>nifi-framework</artifactId>
+        <version>2.0.0-SNAPSHOT</version>
+    </parent>
+    <artifactId>nifi-framework-cluster-zookeeper</artifactId>
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-properties</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-utils</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.zookeeper</groupId>
+            <artifactId>zookeeper</artifactId>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-log4j12</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>log4j</groupId>
+                    <artifactId>log4j</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>ch.qos.logback</groupId>
+                    <artifactId>logback-classic</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.curator</groupId>
+            <artifactId>curator-framework</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.curator</groupId>
+            <artifactId>curator-recipes</artifactId>
+        </dependency>
+        <!-- metrics-core required for ZooKeeper Server -->
+        <dependency>
+            <groupId>io.dropwizard.metrics</groupId>
+            <artifactId>metrics-core</artifactId>
+        </dependency>
+        <!-- snappy-java required for ZooKeeper Server -->
+        <dependency>
+            <groupId>org.xerial.snappy</groupId>
+            <artifactId>snappy-java</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+    </dependencies>
+</project>

@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.nifi.controller.cluster;
+package org.apache.nifi.framework.cluster.zookeeper;
 
 import org.apache.curator.utils.ZookeeperFactory;
 import org.apache.zookeeper.Watcher;

@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.nifi.controller.cluster;
+package org.apache.nifi.framework.cluster.zookeeper;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.nifi.util.FormatUtils;

@@ -28,7 +28,7 @@ import org.apache.nifi.cluster.protocol.AbstractNodeProtocolSender;
 import org.apache.nifi.cluster.protocol.ProtocolContext;
 import org.apache.nifi.cluster.protocol.ProtocolException;
 import org.apache.nifi.cluster.protocol.message.ProtocolMessage;
-import org.apache.nifi.controller.cluster.ZooKeeperClientConfig;
+import org.apache.nifi.framework.cluster.zookeeper.ZooKeeperClientConfig;
 import org.apache.nifi.io.socket.SocketConfiguration;
 import org.apache.nifi.util.NiFiProperties;
 import org.apache.zookeeper.KeeperException.NoNodeException;

@@ -17,10 +17,9 @@
 
 package org.apache.nifi.cluster.coordination.node;
 
-import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.util.Optional;
 
-import org.apache.commons.lang3.StringUtils;
 import org.apache.nifi.cluster.exception.NoClusterCoordinatorException;
 import org.apache.nifi.cluster.protocol.AbstractNodeProtocolSender;
 import org.apache.nifi.cluster.protocol.ProtocolContext;
@@ -42,16 +41,17 @@ public class LeaderElectionNodeProtocolSender extends AbstractNodeProtocolSender
     }
 
     @Override
-    protected InetSocketAddress getServiceAddress() throws IOException {
-        final String address = electionManager.getLeader(ClusterRoles.CLUSTER_COORDINATOR);
+    protected InetSocketAddress getServiceAddress() {
+        final Optional<String> leaderAddress = electionManager.getLeader(ClusterRoles.CLUSTER_COORDINATOR);
 
-        if (StringUtils.isEmpty(address)) {
-            throw new NoClusterCoordinatorException("No node has yet been elected Cluster Coordinator. Cannot establish connection to cluster yet.");
+        if (!leaderAddress.isPresent()) {
+            throw new NoClusterCoordinatorException("No node has yet been elected Cluster Coordinator. Cannot establish connection to cluster");
         }
 
+        final String address = leaderAddress.get();
         final String[] splits = address.split(":");
         if (splits.length != 2) {
-            final String message = String.format("Attempted to determine Cluster Coordinator address. Zookeeper indicates "
+            final String message = String.format("Attempted to determine Cluster Coordinator address. Manager indicates "
                     + "that address is %s, but this is not in the expected format of <hostname>:<port>", address);
             logger.error(message);
             throw new ProtocolException(message);
@@ -67,14 +67,13 @@ public class LeaderElectionNodeProtocolSender extends AbstractNodeProtocolSender
                 throw new NumberFormatException("Port must be in the range of 1 - 65535 but got " + port);
             }
         } catch (final NumberFormatException nfe) {
-            final String message = String.format("Attempted to determine Cluster Coordinator address. Zookeeper indicates "
+            final String message = String.format("Attempted to determine Cluster Coordinator address. Manager indicates "
                     + "that address is %s, but the port is not a valid port number", address);
             logger.error(message);
             throw new ProtocolException(message);
         }
 
-        final InetSocketAddress socketAddress = InetSocketAddress.createUnresolved(hostname, port);
-        return socketAddress;
+        return InetSocketAddress.createUnresolved(hostname, port);
     }
 
 }

@@ -84,6 +84,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.CompletableFuture;
@@ -146,7 +147,7 @@ public class NodeClusterCoordinator implements ClusterCoordinator, ProtocolHandl
         this.leaderElectionManager = leaderElectionManager;
         this.flowElection = flowElection;
         this.nodeProtocolSender = nodeProtocolSender;
-        this.stateManager = stateManagerProvider.getStateManager("Cluster Coordinator");
+        this.stateManager = stateManagerProvider.getStateManager(ClusterRoles.CLUSTER_COORDINATOR);
 
         recoverState();
 
@@ -250,7 +251,7 @@ public class NodeClusterCoordinator implements ClusterCoordinator, ProtocolHandl
         if (localId != null) {
             final NodeConnectionStatus shutdownStatus = new NodeConnectionStatus(localId, DisconnectionCode.NODE_SHUTDOWN);
             updateNodeStatus(shutdownStatus, false);
-            logger.info("Successfully notified other nodes that I am shutting down");
+            logger.info("Node ID [{}] Disconnection Code [{}] send completed", localId, DisconnectionCode.NODE_SHUTDOWN);
         }
     }
 
@@ -297,7 +298,7 @@ public class NodeClusterCoordinator implements ClusterCoordinator, ProtocolHandl
         return localNodeId;
     }
 
-    private String getElectedActiveCoordinatorAddress() {
+    private Optional<String> getElectedActiveCoordinatorAddress() {
         return leaderElectionManager.getLeader(ClusterRoles.CLUSTER_COORDINATOR);
     }
 
@@ -726,14 +727,14 @@ public class NodeClusterCoordinator implements ClusterCoordinator, ProtocolHandl
 
     @Override
     public NodeIdentifier getPrimaryNode() {
-        final String primaryNodeAddress = leaderElectionManager.getLeader(ClusterRoles.PRIMARY_NODE);
-        if (primaryNodeAddress == null) {
+        final Optional<String> primaryNodeLeader = leaderElectionManager.getLeader(ClusterRoles.PRIMARY_NODE);
+        if (!primaryNodeLeader.isPresent()) {
             return null;
         }
 
         return nodeStatuses.values().stream()
                 .map(NodeConnectionStatus::getNodeIdentifier)
-                .filter(nodeId -> primaryNodeAddress.equals(nodeId.getSocketAddress() + ":" + nodeId.getSocketPort()))
+                .filter(nodeId -> primaryNodeLeader.get().equals(nodeId.getSocketAddress() + ":" + nodeId.getSocketPort()))
                 .findFirst()
                 .orElse(null);
     }
@@ -744,25 +745,25 @@ public class NodeClusterCoordinator implements ClusterCoordinator, ProtocolHandl
     }
 
     private NodeIdentifier getElectedActiveCoordinatorNode(final boolean warnOnError) {
-        String electedNodeAddress;
+        final Optional<String> electedActiveCoordinatorAddress;
         try {
-            electedNodeAddress = getElectedActiveCoordinatorAddress();
+            electedActiveCoordinatorAddress = getElectedActiveCoordinatorAddress();
         } catch (final NoClusterCoordinatorException ncce) {
             logger.debug("There is currently no elected active Cluster Coordinator");
             return null;
         }
 
-        if (electedNodeAddress == null || electedNodeAddress.trim().isEmpty()) {
+        if (!electedActiveCoordinatorAddress.isPresent()) {
            logger.debug("There is currently no elected active Cluster Coordinator");
            return null;
        }
 
-        electedNodeAddress = electedNodeAddress.trim();
+        final String electedNodeAddress = electedActiveCoordinatorAddress.get().trim();
 
         final int colonLoc = electedNodeAddress.indexOf(':');
         if (colonLoc < 1) {
             if (warnOnError) {
-                logger.warn("Failed to determine which node is elected active Cluster Coordinator: ZooKeeper reports the address as {}, but this is not a valid address", electedNodeAddress);
+                logger.warn("Failed to determine which node is elected active Cluster Coordinator: Manager reports the address as {}, but this is not a valid address", electedNodeAddress);
             }
 
             return null;
@@ -775,7 +776,7 @@ public class NodeClusterCoordinator implements ClusterCoordinator, ProtocolHandl
             electedNodePort = Integer.parseInt(portString);
         } catch (final NumberFormatException nfe) {
             if (warnOnError) {
-                logger.warn("Failed to determine which node is elected active Cluster Coordinator: ZooKeeper reports the address as {}, but this is not a valid address", electedNodeAddress);
+                logger.warn("Failed to determine which node is elected active Cluster Coordinator: Manager reports the address as {}, but this is not a valid address", electedNodeAddress);
             }
 
             return null;
@@ -788,7 +789,7 @@ public class NodeClusterCoordinator implements ClusterCoordinator, ProtocolHandl
                 .orElse(null);
 
         if (electedNodeId == null && warnOnError) {
-            logger.debug("Failed to determine which node is elected active Cluster Coordinator: ZooKeeper reports the address as {},"
+            logger.debug("Failed to determine which node is elected active Cluster Coordinator: Manager reports the address as {},"
                     + "but there is no node with this address. Will attempt to communicate with node to determine its information", electedNodeAddress);
 
             try {
@@ -807,7 +808,7 @@ public class NodeClusterCoordinator implements ClusterCoordinator, ProtocolHandl
                 return existingStatus.getNodeIdentifier();
             }
         } catch (final Exception e) {
-            logger.warn("Failed to determine which node is elected active Cluster Coordinator: ZooKeeper reports the address as {}, but there is no node with this address. "
+            logger.warn("Failed to determine which node is elected active Cluster Coordinator: Manager reports the address as {}, but there is no node with this address. "
                     + "Attempted to determine the node's information but failed to retrieve its information due to {}", electedNodeAddress, e.toString());
 
             if (logger.isDebugEnabled()) {
@@ -1446,7 +1447,7 @@ public class NodeClusterCoordinator implements ClusterCoordinator, ProtocolHandl
     }
 
     @Override
-    public Map<NodeIdentifier, NodeWorkload> getClusterWorkload() throws IOException {
+    public Map<NodeIdentifier, NodeWorkload> getClusterWorkload() {
         final ClusterWorkloadRequestMessage request = new ClusterWorkloadRequestMessage();
         final ClusterWorkloadResponseMessage response = nodeProtocolSender.clusterWorkload(request);
         return response.getNodeWorkloads();

@@ -37,8 +37,8 @@
 
     <!-- Leader Election Manager -->
     <bean id="leaderElectionManager" class="org.apache.nifi.spring.LeaderElectionManagerFactoryBean">
-        <property name="numThreads" value="4" />
         <property name="properties" ref="nifiProperties" />
+        <property name="extensionManager" ref="extensionManager" />
     </bean>
 
     <bean id="flowElection" class="org.apache.nifi.cluster.coordination.flow.PopularVoteFlowElectionFactoryBean">

@ -1,99 +0,0 @@
|
||||||
/*
|
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
* contributor license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright ownership.
|
|
||||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
* (the "License"); you may not use this file except in compliance with
|
|
||||||
* the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
package org.apache.nifi.cluster.coordination.node
|
|
||||||
|
|
||||||
import org.apache.nifi.cluster.coordination.flow.FlowElection
|
|
||||||
import org.apache.nifi.cluster.firewall.ClusterNodeFirewall
|
|
||||||
import org.apache.nifi.cluster.protocol.NodeIdentifier
|
import org.apache.nifi.cluster.protocol.NodeProtocolSender
import org.apache.nifi.cluster.protocol.impl.ClusterCoordinationProtocolSenderListener
import org.apache.nifi.cluster.protocol.message.OffloadMessage
import org.apache.nifi.components.state.Scope
import org.apache.nifi.components.state.StateManager
import org.apache.nifi.components.state.StateManagerProvider
import org.apache.nifi.controller.leader.election.LeaderElectionManager
import org.apache.nifi.events.EventReporter
import org.apache.nifi.reporting.Severity
import org.apache.nifi.state.MockStateMap
import org.apache.nifi.util.NiFiProperties
import org.apache.nifi.web.revision.RevisionManager
import spock.lang.Specification
import spock.util.concurrent.BlockingVariable

import java.util.concurrent.TimeUnit

class NodeClusterCoordinatorSpec extends Specification {

    def "requestNodeOffload"() {
        given: 'mocked collaborators'
        def clusterCoordinationProtocolSenderListener = Mock(ClusterCoordinationProtocolSenderListener)
        def eventReporter = Mock EventReporter
        def stateManager = Mock StateManager
        def stateMap = new MockStateMap([:], 1)
        stateManager.getState(_ as Scope) >> stateMap
        def stateManagerProvider = Mock StateManagerProvider
        stateManagerProvider.getStateManager(_ as String) >> stateManager

        and: 'a NodeClusterCoordinator that manages node status in a synchronized list'
        List<NodeConnectionStatus> nodeStatuses = [].asSynchronized()
        def clusterCoordinator = new NodeClusterCoordinator(clusterCoordinationProtocolSenderListener, eventReporter, Mock(LeaderElectionManager),
                Mock(FlowElection), Mock(ClusterNodeFirewall),
                Mock(RevisionManager), NiFiProperties.createBasicNiFiProperties('src/test/resources/conf/nifi.properties', [:]),
                Mock(NodeProtocolSender), stateManagerProvider) {
            @Override
            void notifyOthersOfNodeStatusChange(NodeConnectionStatus updatedStatus, boolean notifyAllNodes, boolean waitForCoordinator) {
                nodeStatuses.add(updatedStatus)
            }
        }

        and: 'two nodes'
        def nodeIdentifier1 = createNodeIdentifier 1
        def nodeIdentifier2 = createNodeIdentifier 2

        and: 'node 1 is connected, node 2 is disconnected'
        clusterCoordinator.updateNodeStatus new NodeConnectionStatus(nodeIdentifier1, NodeConnectionState.CONNECTED)
        clusterCoordinator.updateNodeStatus new NodeConnectionStatus(nodeIdentifier2, NodeConnectionState.DISCONNECTED)
        while (nodeStatuses.size() < 2) {
            Thread.sleep(10)
        }
        nodeStatuses.clear()

        def waitForReportEvent = new BlockingVariable(5, TimeUnit.SECONDS)

        when: 'a node is requested to offload'
        clusterCoordinator.requestNodeOffload nodeIdentifier2, OffloadCode.OFFLOADED, 'unit test for offloading node'
        waitForReportEvent.get()

        then: 'no exceptions are thrown'
        noExceptionThrown()

        and: 'expected methods on collaborators are invoked'
        1 * clusterCoordinationProtocolSenderListener.offload({ OffloadMessage msg -> msg.nodeId == nodeIdentifier2 } as OffloadMessage)
        1 * eventReporter.reportEvent(Severity.INFO, 'Clustering', { msg -> msg.contains "$nodeIdentifier2.apiAddress:$nodeIdentifier2.apiPort" } as String) >> {
            waitForReportEvent.set(it)
        }

        and: 'the status of the offloaded node is known by the cluster coordinator to be offloading'
        nodeStatuses[0].nodeIdentifier == nodeIdentifier2
        nodeStatuses[0].state == NodeConnectionState.OFFLOADING
    }

    private static NodeIdentifier createNodeIdentifier(final int index) {
        new NodeIdentifier("node-id-$index", "localhost", 8000 + index, "localhost", 9000 + index,
                "localhost", 10000 + index, 11000 + index, false)
    }
}
@@ -1,50 +0,0 @@
package org.apache.nifi.cluster.integration

import org.apache.nifi.cluster.coordination.node.DisconnectionCode
import org.apache.nifi.cluster.coordination.node.OffloadCode
import spock.lang.Specification

import java.util.concurrent.TimeUnit

class OffloadNodeITSpec extends Specification {

    def "requestNodeOffload"() {
        given: 'a cluster with 3 nodes'
        System.setProperty 'nifi.properties.file.path', 'src/test/resources/conf/nifi.properties'
        def cluster = new Cluster()
        cluster.start()
        cluster.createNode()
        def nodeToOffload = cluster.createNode()
        cluster.createNode()
        cluster.waitUntilAllNodesConnected 20, TimeUnit.SECONDS

        when: 'the node to offload is disconnected successfully'
        cluster.currentClusterCoordinator.clusterCoordinator.requestNodeDisconnect nodeToOffload.identifier, DisconnectionCode.USER_DISCONNECTED,
                'integration test user disconnect'
        cluster.currentClusterCoordinator.assertNodeDisconnects nodeToOffload.identifier, 10, TimeUnit.SECONDS

        and: 'the node to offload is requested to offload'
        nodeToOffload.getClusterCoordinator().requestNodeOffload nodeToOffload.identifier, OffloadCode.OFFLOADED, 'integration test offload'

        then: 'the node has been successfully offloaded'
        cluster.currentClusterCoordinator.assertNodeIsOffloaded nodeToOffload.identifier, 10, TimeUnit.SECONDS

        cleanup:
        cluster.stop()
    }
}
@@ -1,172 +0,0 @@
package org.apache.nifi.cluster.integration;

import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryNTimes;
import org.apache.curator.test.TestingServer;
import org.apache.nifi.cluster.coordination.flow.FlowElection;
import org.apache.nifi.cluster.coordination.flow.PopularVoteFlowElection;
import org.apache.nifi.cluster.coordination.node.ClusterRoles;
import org.apache.nifi.controller.status.history.StatusHistoryRepository;
import org.apache.nifi.controller.status.history.VolatileComponentStatusRepository;
import org.apache.nifi.nar.ExtensionDiscoveringManager;
import org.apache.nifi.nar.StandardExtensionDiscoveringManager;
import org.apache.nifi.util.NiFiProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;

public class Cluster {
    private static final Logger logger = LoggerFactory.getLogger(Cluster.class);

    private final Set<Node> nodes = new HashSet<>();
    private final TestingServer zookeeperServer;

    private final long flowElectionTimeoutMillis;
    private final Integer flowElectionMaxNodes;

    public Cluster() throws IOException {
        this(3, TimeUnit.SECONDS, 3);
    }

    public Cluster(final long flowElectionTimeout, final TimeUnit flowElectionTimeUnit, final Integer flowElectionMaxNodes) throws IOException {
        try {
            zookeeperServer = new TestingServer();
        } catch (final Exception e) {
            throw new RuntimeException(e);
        }

        this.flowElectionTimeoutMillis = flowElectionTimeUnit.toMillis(flowElectionTimeout);
        this.flowElectionMaxNodes = flowElectionMaxNodes;
    }

    public void start() {
        try {
            zookeeperServer.start();
        } catch (final RuntimeException e) {
            throw e;
        } catch (final Exception e) {
            throw new RuntimeException(e);
        }

        while (getZooKeeperConnectString() == null) {
            try {
                Thread.sleep(100L);
            } catch (InterruptedException e) {
            }
        }

        logger.info("Start ZooKeeper Server on Port {}, with temporary directory {}", zookeeperServer.getPort(), zookeeperServer.getTempDirectory());
    }

    public void stop() {
        for (final Node node : nodes) {
            try {
                if (node.isRunning()) {
                    node.stop();
                }
            } catch (Exception e) {
                logger.error("Failed to shut down " + node, e);
            }
        }

        try {
            zookeeperServer.stop();
            zookeeperServer.close();
        } catch (final Exception e) {
        }
    }

    public String getZooKeeperConnectString() {
        return zookeeperServer.getConnectString();
    }

    public Set<Node> getNodes() {
        return Collections.unmodifiableSet(nodes);
    }

    public CuratorFramework createCuratorClient() {
        final RetryPolicy retryPolicy = new RetryNTimes(20, 500);
        final CuratorFramework curatorClient = CuratorFrameworkFactory.builder()
            .connectString(getZooKeeperConnectString())
            .sessionTimeoutMs(3000)
            .connectionTimeoutMs(3000)
            .retryPolicy(retryPolicy)
            .defaultData(new byte[0])
            .build();

        curatorClient.start();
        return curatorClient;
    }

    public Node createNode() {
        final Map<String, String> addProps = new HashMap<>();
        addProps.put(NiFiProperties.ZOOKEEPER_CONNECT_STRING, getZooKeeperConnectString());
        addProps.put(NiFiProperties.CLUSTER_IS_NODE, "true");

        final NiFiProperties nifiProperties = NiFiProperties.createBasicNiFiProperties("src/test/resources/conf/nifi.properties", addProps);

        final ExtensionDiscoveringManager extensionManager = new StandardExtensionDiscoveringManager();
        final FlowElection flowElection = new PopularVoteFlowElection(flowElectionTimeoutMillis, TimeUnit.MILLISECONDS, flowElectionMaxNodes);
        final StatusHistoryRepository statusHistoryRepository = new VolatileComponentStatusRepository(nifiProperties);
        final Node node = new Node(nifiProperties, extensionManager, flowElection, statusHistoryRepository);
        node.start();
        nodes.add(node);

        return node;
    }

    public Node getCurrentClusterCoordinator() {
        return getNodes().stream().filter(node -> node.hasRole(ClusterRoles.CLUSTER_COORDINATOR)).findFirst().orElse(null);
    }

    public Node waitForClusterCoordinator(final long time, final TimeUnit timeUnit) {
        return ClusterUtils.waitUntilNonNull(time, timeUnit,
            () -> getNodes().stream().filter(node -> node.hasRole(ClusterRoles.CLUSTER_COORDINATOR)).findFirst().orElse(null));
    }

    public Node waitForPrimaryNode(final long time, final TimeUnit timeUnit) {
        return ClusterUtils.waitUntilNonNull(time, timeUnit,
            () -> getNodes().stream().filter(node -> node.hasRole(ClusterRoles.PRIMARY_NODE)).findFirst().orElse(null));
    }

    /**
     * Waits for each node in the cluster to connect. The time given is the maximum amount of time to wait for each node to connect, not for
     * the entire cluster to connect.
     *
     * @param time the max amount of time to wait for a node to connect
     * @param timeUnit the unit of time that the given <code>time</code> value represents
     */
    public void waitUntilAllNodesConnected(final long time, final TimeUnit timeUnit) {
        for (final Node node : nodes) {
            node.waitUntilConnected(time, timeUnit);
        }
    }
}
@@ -1,248 +0,0 @@
package org.apache.nifi.cluster.integration;

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;

import org.apache.nifi.cluster.coordination.node.ClusterRoles;
import org.apache.nifi.cluster.coordination.node.DisconnectionCode;
import org.apache.nifi.cluster.coordination.node.NodeConnectionState;
import org.apache.nifi.cluster.coordination.node.NodeConnectionStatus;
import org.apache.nifi.cluster.protocol.NodeIdentifier;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

public class ClusterConnectionIT {
    private Cluster cluster;

    @BeforeAll
    public static void setup() {
        System.setProperty("nifi.properties.file.path", "src/test/resources/conf/nifi.properties");
    }

    @BeforeEach
    public void createCluster() throws IOException {
        cluster = new Cluster();
        cluster.start();
    }

    @AfterEach
    public void destroyCluster() {
        if (cluster != null) {
            cluster.stop();
        }
    }

    @Test
    @Timeout(value = 20)
    public void testSingleNode() {
        final Node firstNode = cluster.createNode();
        firstNode.waitUntilConnected(10, TimeUnit.SECONDS);

        firstNode.waitUntilElectedForRole(ClusterRoles.CLUSTER_COORDINATOR, 10, TimeUnit.SECONDS);
        firstNode.waitUntilElectedForRole(ClusterRoles.PRIMARY_NODE, 10, TimeUnit.SECONDS);
    }

    @Test
    @Timeout(value = 60)
    public void testThreeNodeCluster() {
        cluster.createNode();
        cluster.createNode();
        cluster.createNode();

        cluster.waitUntilAllNodesConnected(10, TimeUnit.SECONDS);

        final Node clusterCoordinator = cluster.waitForClusterCoordinator(10, TimeUnit.SECONDS);
        final Node primaryNode = cluster.waitForPrimaryNode(10, TimeUnit.SECONDS);
        System.out.println("\n\n");
        System.out.println("Cluster Coordinator = " + clusterCoordinator);
        System.out.println("Primary Node = " + primaryNode);
        System.out.println("\n\n");
    }

    @Test
    @Timeout(value = 60)
    public void testNewCoordinatorElected() throws IOException {
        final Node firstNode = cluster.createNode();
        final Node secondNode = cluster.createNode();

        cluster.waitUntilAllNodesConnected(10, TimeUnit.SECONDS);

        final Node clusterCoordinator = cluster.waitForClusterCoordinator(10, TimeUnit.SECONDS);
        clusterCoordinator.stop();

        final Node otherNode = firstNode == clusterCoordinator ? secondNode : firstNode;
        otherNode.waitUntilElectedForRole(ClusterRoles.CLUSTER_COORDINATOR, 10, TimeUnit.SECONDS);
    }

    @Test
    @Timeout(value = 60)
    public void testReconnectGetsCorrectClusterTopology() throws IOException {
        final Node firstNode = cluster.createNode();
        final Node secondNode = cluster.createNode();
        final Node thirdNode = cluster.createNode();

        cluster.waitUntilAllNodesConnected(10, TimeUnit.SECONDS);

        // shutdown node
        secondNode.stop();

        System.out.println("\n\nNode 2 Shut Down\n\n");

        // wait for node 1 and 3 to recognize that node 2 is gone
        Stream.of(firstNode, thirdNode).forEach(node -> {
            node.assertNodeDisconnects(secondNode.getIdentifier(), 10, TimeUnit.SECONDS);
        });

        // restart node
        secondNode.start();
        System.out.println("\n\nNode 2 Restarted\n\n");

        secondNode.waitUntilConnected(20, TimeUnit.SECONDS);
        System.out.println("\n\nNode 2 Reconnected\n\n");

        // wait for all 3 nodes to agree that node 2 is connected
        Stream.of(firstNode, secondNode, thirdNode).forEach(node -> {
            ClusterUtils.waitUntilConditionMet(5, TimeUnit.SECONDS,
                () -> firstNode.getClusterCoordinator().getConnectionStatus(secondNode.getIdentifier()).getState() == NodeConnectionState.CONNECTED);
        });

        // Ensure that all 3 nodes see a cluster of 3 connected nodes.
        Stream.of(firstNode, secondNode, thirdNode).forEach(node -> {
            node.assertNodeIsConnected(firstNode.getIdentifier());
            node.assertNodeIsConnected(secondNode.getIdentifier());
            node.assertNodeIsConnected(thirdNode.getIdentifier());
        });

        // Ensure that we get both a cluster coordinator and a primary node elected
        cluster.waitForClusterCoordinator(10, TimeUnit.SECONDS);
        cluster.waitForPrimaryNode(10, TimeUnit.SECONDS);
    }

    @Test
    @Timeout(value = 60)
    public void testRestartAllNodes() throws IOException {
        final Node firstNode = cluster.createNode();
        final Node secondNode = cluster.createNode();
        final Node thirdNode = cluster.createNode();

        firstNode.waitUntilConnected(10, TimeUnit.SECONDS);
        System.out.println("**** Node 1 Connected ****");
        secondNode.waitUntilConnected(10, TimeUnit.SECONDS);
        System.out.println("**** Node 2 Connected ****");
        thirdNode.waitUntilConnected(10, TimeUnit.SECONDS);
        System.out.println("**** Node 3 Connected ****");

        // shutdown nodes
        firstNode.stop();
        secondNode.stop();
        thirdNode.stop();

        System.out.println("\n\nRestarting all nodes\n\n");
        thirdNode.start();
        firstNode.start();
        secondNode.start();

        firstNode.waitUntilConnected(20, TimeUnit.SECONDS);
        System.out.println("\n\n\n**** Node 1 Re-Connected ****\n\n\n");
        secondNode.waitUntilConnected(10, TimeUnit.SECONDS);
        System.out.println("**** Node 2 Re-Connected ****");
        thirdNode.waitUntilConnected(10, TimeUnit.SECONDS);
        System.out.println("**** Node 3 Re-Connected ****");

        // wait for all 3 nodes to agree that node 2 is connected
        Stream.of(firstNode, secondNode, thirdNode).forEach(node -> {
            ClusterUtils.waitUntilConditionMet(5, TimeUnit.SECONDS,
                () -> firstNode.getClusterCoordinator().getConnectionStatus(secondNode.getIdentifier()).getState() == NodeConnectionState.CONNECTED);
        });

        // Ensure that all 3 nodes see a cluster of 3 connected nodes.
        Stream.of(firstNode, secondNode, thirdNode).forEach(node -> {
            node.assertNodeConnects(firstNode.getIdentifier(), 10, TimeUnit.SECONDS);
            node.assertNodeConnects(secondNode.getIdentifier(), 10, TimeUnit.SECONDS);
            node.assertNodeConnects(thirdNode.getIdentifier(), 10, TimeUnit.SECONDS);
        });

        // Ensure that we get both a cluster coordinator and a primary node elected
        cluster.waitForClusterCoordinator(10, TimeUnit.SECONDS);
        cluster.waitForPrimaryNode(10, TimeUnit.SECONDS);
    }

    @Test
    @Timeout(value = 30)
    public void testHeartbeatsMonitored() {
        final Node firstNode = cluster.createNode();
        final Node secondNode = cluster.createNode();

        cluster.waitUntilAllNodesConnected(10, TimeUnit.SECONDS);

        final Node nodeToSuspend = firstNode;
        final Node otherNode = secondNode;

        nodeToSuspend.suspendHeartbeating();

        // Heartbeat interval in nifi.properties is set to 1 sec. This means that the node should be kicked out
        // due to lack of heartbeat after 8 times this amount of time, or 8 seconds.
        otherNode.assertNodeDisconnects(nodeToSuspend.getIdentifier(), 12, TimeUnit.SECONDS);

        nodeToSuspend.resumeHeartbeating();
        otherNode.assertNodeConnects(nodeToSuspend.getIdentifier(), 10, TimeUnit.SECONDS);
    }

    @Test
    @Timeout(value = 60)
    public void testNodeInheritsClusterTopologyOnHeartbeat() throws InterruptedException {
        final Node node1 = cluster.createNode();
        final Node node2 = cluster.createNode();
        final Node node3 = cluster.createNode();

        cluster.waitUntilAllNodesConnected(10, TimeUnit.SECONDS);
        final Node coordinator = cluster.waitForClusterCoordinator(10, TimeUnit.SECONDS);

        final NodeIdentifier node4NotReallyInCluster = new NodeIdentifier(UUID.randomUUID().toString(), "localhost", 9283, "localhost", 9284, "localhost", 9286, "localhost", 9285, null, false, null);

        final Map<NodeIdentifier, NodeConnectionStatus> replacementStatuses = new HashMap<>();
        replacementStatuses.put(node1.getIdentifier(), new NodeConnectionStatus(node1.getIdentifier(), DisconnectionCode.USER_DISCONNECTED));
        replacementStatuses.put(node4NotReallyInCluster, new NodeConnectionStatus(node4NotReallyInCluster, NodeConnectionState.CONNECTING));

        // reset coordinator status so that other nodes will get its now-fake view of the cluster
        coordinator.getClusterCoordinator().resetNodeStatuses(replacementStatuses);
        final List<NodeConnectionStatus> expectedStatuses = coordinator.getClusterCoordinator().getConnectionStatuses();

        // give nodes a bit to heartbeat in. We need to wait long enough that each node heartbeats.
        // But we need to not wait more than 8 seconds because that's when nodes start getting kicked out.
        Thread.sleep(6000L);

        for (final Node node : new Node[] {node1, node2, node3}) {
            assertEquals(expectedStatuses, node.getClusterCoordinator().getConnectionStatuses());
        }
    }
}
@@ -1,60 +0,0 @@
package org.apache.nifi.cluster.integration;

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;
import java.util.function.Supplier;

public class ClusterUtils {

    public static void waitUntilConditionMet(final long time, final TimeUnit timeUnit, final BooleanSupplier test) {
        waitUntilConditionMet(time, timeUnit, test, null);
    }

    public static void waitUntilConditionMet(final long time, final TimeUnit timeUnit, final BooleanSupplier test, final Supplier<String> errorMessageSupplier) {
        final long nanosToWait = timeUnit.toNanos(time);
        final long start = System.nanoTime();
        final long maxTime = start + nanosToWait;

        while (!test.getAsBoolean()) {
            if (System.nanoTime() > maxTime) {
                if (errorMessageSupplier == null) {
                    throw new AssertionError("Condition never occurred after waiting " + time + " " + timeUnit);
                } else {
                    throw new AssertionError("Condition never occurred after waiting " + time + " " + timeUnit + " : " + errorMessageSupplier.get());
                }
            }
        }
    }

    public static <T> T waitUntilNonNull(final long time, final TimeUnit timeUnit, final Supplier<T> test) {
        final long nanosToWait = timeUnit.toNanos(time);
        final long start = System.nanoTime();
        final long maxTime = start + nanosToWait;

        T returnVal;
        while ((returnVal = test.get()) == null) {
            if (System.nanoTime() > maxTime) {
                throw new AssertionError("Condition never occurred after waiting " + time + " " + timeUnit);
            }
        }

        return returnVal;
    }
}
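Editor's note: the two helpers above are the polling primitives the integration tests lean on: they spin until a condition holds (or a value becomes non-null) and fail with an AssertionError on timeout. The following minimal sketch is not part of this commit; it assumes it compiles alongside the ClusterUtils class shown above, and the flag and timings are illustrative.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

class ClusterUtilsUsageSketch {
    public static void main(final String[] args) {
        final AtomicBoolean connected = new AtomicBoolean(false);

        // Simulate background work that flips the flag after a short delay.
        new Thread(() -> {
            try {
                Thread.sleep(500L);
            } catch (final InterruptedException ignored) {
            }
            connected.set(true);
        }).start();

        // Poll for up to 5 seconds; on timeout an AssertionError with the supplied message is thrown.
        ClusterUtils.waitUntilConditionMet(5, TimeUnit.SECONDS,
            connected::get,
            () -> "node never reported as connected");
    }
}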
@@ -1,401 +0,0 @@
package org.apache.nifi.cluster.integration;

import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.nifi.authorization.Authorizer;
import org.apache.nifi.bundle.Bundle;
import org.apache.nifi.cluster.ReportedEvent;
import org.apache.nifi.cluster.coordination.flow.FlowElection;
import org.apache.nifi.cluster.coordination.heartbeat.ClusterProtocolHeartbeatMonitor;
import org.apache.nifi.cluster.coordination.heartbeat.HeartbeatMonitor;
import org.apache.nifi.cluster.coordination.node.LeaderElectionNodeProtocolSender;
import org.apache.nifi.cluster.coordination.node.NodeClusterCoordinator;
import org.apache.nifi.cluster.coordination.node.NodeConnectionState;
import org.apache.nifi.cluster.coordination.node.NodeConnectionStatus;
import org.apache.nifi.cluster.protocol.ClusterCoordinationProtocolSender;
import org.apache.nifi.cluster.protocol.NodeIdentifier;
import org.apache.nifi.cluster.protocol.NodeProtocolSender;
import org.apache.nifi.cluster.protocol.ProtocolContext;
import org.apache.nifi.cluster.protocol.ProtocolListener;
import org.apache.nifi.cluster.protocol.impl.ClusterCoordinationProtocolSenderListener;
import org.apache.nifi.cluster.protocol.impl.NodeProtocolSenderListener;
import org.apache.nifi.cluster.protocol.impl.SocketProtocolListener;
import org.apache.nifi.cluster.protocol.impl.StandardClusterCoordinationProtocolSender;
import org.apache.nifi.cluster.protocol.jaxb.JaxbProtocolContext;
import org.apache.nifi.cluster.protocol.jaxb.message.JaxbProtocolUtils;
import org.apache.nifi.cluster.protocol.message.ProtocolMessage;
import org.apache.nifi.components.state.Scope;
import org.apache.nifi.controller.FlowController;
import org.apache.nifi.controller.StandardFlowService;
import org.apache.nifi.controller.leader.election.CuratorLeaderElectionManager;
import org.apache.nifi.controller.leader.election.LeaderElectionManager;
import org.apache.nifi.controller.repository.FlowFileEventRepository;
import org.apache.nifi.controller.status.history.StatusHistoryRepository;
import org.apache.nifi.encrypt.PropertyEncryptorFactory;
import org.apache.nifi.engine.FlowEngine;
import org.apache.nifi.events.EventReporter;
import org.apache.nifi.io.socket.ServerSocketConfiguration;
import org.apache.nifi.io.socket.SocketConfiguration;
import org.apache.nifi.nar.ExtensionDiscoveringManager;
import org.apache.nifi.nar.ExtensionManager;
import org.apache.nifi.nar.SystemBundle;
import org.apache.nifi.registry.VariableRegistry;
import org.apache.nifi.reporting.BulletinRepository;
import org.apache.nifi.reporting.Severity;
import org.apache.nifi.util.NiFiProperties;
import org.apache.nifi.web.revision.RevisionManager;
import org.apache.nifi.web.revision.RevisionSnapshot;
import org.mockito.Mockito;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import static org.junit.jupiter.api.Assertions.assertEquals;

public class Node {
    private final NodeIdentifier nodeId;
    private final NiFiProperties nodeProperties;
    private final ExtensionManager extensionManager;

    private final List<ReportedEvent> reportedEvents = Collections.synchronizedList(new ArrayList<ReportedEvent>());
    private final RevisionManager revisionManager;
    private final FlowElection flowElection;
    private final StatusHistoryRepository statusHistoryRepository;

    private NodeClusterCoordinator clusterCoordinator;
    private NodeProtocolSender protocolSender;
    private FlowController flowController;
    private StandardFlowService flowService;
    private LeaderElectionManager electionManager;

    private ProtocolListener protocolListener;

    private volatile boolean running = false;

    private ScheduledExecutorService executor = new FlowEngine(8, "Node tasks", true);

    public Node(final NiFiProperties properties, final ExtensionDiscoveringManager extensionManager, final FlowElection flowElection,
                final StatusHistoryRepository statusHistoryRepository) {
        this(createNodeId(), properties, extensionManager, flowElection, statusHistoryRepository);
    }

    public Node(final NodeIdentifier nodeId, final NiFiProperties properties, final ExtensionDiscoveringManager extensionManager,
                final FlowElection flowElection, final StatusHistoryRepository statusHistoryRepository) {
        this.nodeId = nodeId;
        this.nodeProperties = new NiFiProperties() {
            @Override
            public String getProperty(String key) {
                if (key.equals(NiFiProperties.CLUSTER_NODE_PROTOCOL_PORT)) {
                    return String.valueOf(nodeId.getSocketPort());
                } else if (key.equals(NiFiProperties.WEB_HTTP_PORT)) {
                    return String.valueOf(nodeId.getApiPort());
                } else if (key.equals(NiFiProperties.LOAD_BALANCE_PORT)) {
                    return String.valueOf(nodeId.getLoadBalancePort());
                } else {
                    return properties.getProperty(key);
                }
            }

            @Override
            public Set<String> getPropertyKeys() {
                final Set<String> keys = new HashSet<>(properties.getPropertyKeys());
                keys.add(NiFiProperties.CLUSTER_NODE_PROTOCOL_PORT);
                keys.add(NiFiProperties.WEB_HTTP_PORT);
                return keys;
            }
        };

        final Bundle systemBundle = SystemBundle.create(properties);
        extensionManager.discoverExtensions(systemBundle, Collections.emptySet());
        this.extensionManager = extensionManager;

        revisionManager = Mockito.mock(RevisionManager.class);
        RevisionSnapshot revisionSnapshot = new RevisionSnapshot(Collections.emptyList(), 0L);
        Mockito.when(revisionManager.getAllRevisions()).thenReturn(revisionSnapshot);

        electionManager = new CuratorLeaderElectionManager(4, nodeProperties);
        this.flowElection = flowElection;
        this.statusHistoryRepository = statusHistoryRepository;
    }

    private static NodeIdentifier createNodeId() {
        return new NodeIdentifier(UUID.randomUUID().toString(), "localhost", createPort(), "localhost", createPort(), "localhost", createPort(), "localhost", null, null, false, null);
    }

    public synchronized void start() {
        running = true;

        protocolSender = createNodeProtocolSender();
        clusterCoordinator = createClusterCoordinator();
        clusterCoordinator.setLocalNodeIdentifier(nodeId);
        // clusterCoordinator.setConnected(true);

        final HeartbeatMonitor heartbeatMonitor = createHeartbeatMonitor();
        flowController = FlowController.createClusteredInstance(Mockito.mock(FlowFileEventRepository.class), nodeProperties,
            null, null, PropertyEncryptorFactory.getPropertyEncryptor(nodeProperties), protocolSender, Mockito.mock(BulletinRepository.class), clusterCoordinator,
            heartbeatMonitor, electionManager, VariableRegistry.EMPTY_REGISTRY, extensionManager, revisionManager, statusHistoryRepository);

        try {
            flowController.initializeFlow();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }

        final NodeProtocolSenderListener senderListener = new NodeProtocolSenderListener(protocolSender, protocolListener);
        try {
            flowController.getStateManagerProvider().getStateManager("Cluster Node Configuration").setState(Collections.singletonMap("Node UUID", nodeId.getId()), Scope.LOCAL);

            flowService = StandardFlowService.createClusteredInstance(flowController, nodeProperties, senderListener, clusterCoordinator,
                revisionManager, Mockito.mock(Authorizer.class));

            flowService.start();

            flowService.load(null);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public void stop() throws IOException {
        running = false;

        flowController.shutdown(true);
        flowService.stop(true);

        clusterCoordinator.shutdown();
        executor.shutdownNow();

        // protocol listener is closed by flow controller
    }

    public void suspendHeartbeating() {
        flowController.suspendHeartbeats();
    }

    public void resumeHeartbeating() {
        flowController.resumeHeartbeats();
    }

    public NodeIdentifier getIdentifier() {
        return nodeId;
    }

    @Override
    public int hashCode() {
        return new HashCodeBuilder().append(nodeId).build();
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (obj == this) {
            return true;
        }
        if (!(obj instanceof Node)) {
            return false;
        }

        return getIdentifier().equals(((Node) obj).getIdentifier());
    }

    @Override
    public String toString() {
        return "Node[id=" + getIdentifier() + ", started=" + isRunning() + "]";
    }

    public boolean isRunning() {
        return running;
    }

    private static int createPort() {
        // get an unused port
        while (true) {
            try (ServerSocket ss = new ServerSocket(0)) {
                return ss.getLocalPort();
            } catch (final IOException ioe) {
            }
        }
    }

    public NodeConnectionStatus getConnectionStatus() {
        return clusterCoordinator.getConnectionStatus(nodeId);
    }

    @SuppressWarnings("unchecked")
    private NodeProtocolSender createNodeProtocolSender() {
        final SocketConfiguration socketConfig = new SocketConfiguration();
        socketConfig.setSocketTimeout(3000);
        socketConfig.setReuseAddress(true);

        final ProtocolContext<ProtocolMessage> protocolContext = new JaxbProtocolContext<>(JaxbProtocolUtils.JAXB_CONTEXT);
        final NodeProtocolSender protocolSender = new LeaderElectionNodeProtocolSender(socketConfig, protocolContext, electionManager);
        return protocolSender;
    }

    @SuppressWarnings("unchecked")
    private ClusterCoordinationProtocolSender createCoordinatorProtocolSender() {
        final SocketConfiguration socketConfig = new SocketConfiguration();
        socketConfig.setSocketTimeout(3000);
        socketConfig.setReuseAddress(true);

        final ProtocolContext<ProtocolMessage> protocolContext = new JaxbProtocolContext<>(JaxbProtocolUtils.JAXB_CONTEXT);
        return new StandardClusterCoordinationProtocolSender(socketConfig, protocolContext, 1);
    }

    private HeartbeatMonitor createHeartbeatMonitor() {
        return new ClusterProtocolHeartbeatMonitor(clusterCoordinator, protocolListener, nodeProperties);
    }

    @SuppressWarnings("unchecked")
    private NodeClusterCoordinator createClusterCoordinator() {
        final EventReporter eventReporter = new EventReporter() {
            @Override
            public void reportEvent(Severity severity, String category, String message) {
                reportedEvents.add(new ReportedEvent(nodeId, severity, message));
            }
        };

        final ServerSocketConfiguration serverSocketConfiguration = new ServerSocketConfiguration();
        serverSocketConfiguration.setSocketTimeout(5000);
        final ProtocolContext<ProtocolMessage> protocolContext = new JaxbProtocolContext<>(JaxbProtocolUtils.JAXB_CONTEXT);

        protocolListener = new SocketProtocolListener(3, Integer.parseInt(nodeProperties.getProperty(NiFiProperties.CLUSTER_NODE_PROTOCOL_PORT)), serverSocketConfiguration, protocolContext);
        try {
            protocolListener.start();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }

        final ClusterCoordinationProtocolSenderListener protocolSenderListener = new ClusterCoordinationProtocolSenderListener(createCoordinatorProtocolSender(), protocolListener);
        try {
            return new NodeClusterCoordinator(protocolSenderListener, eventReporter, electionManager, flowElection, null,
                revisionManager, nodeProperties, extensionManager, protocolSender);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    public NodeClusterCoordinator getClusterCoordinator() {
        return clusterCoordinator;
    }

    //
    // Methods for checking conditions
    //
    public boolean isConnected() {
        final NodeConnectionStatus status = getConnectionStatus();
        if (status == null) {
            return false;
        }

        return status.getState() == NodeConnectionState.CONNECTED;
    }

    //
    // Methods to wait for conditions
    //
    public void waitUntilConnected(final long time, final TimeUnit timeUnit) {
        ClusterUtils.waitUntilConditionMet(time, timeUnit, () -> isConnected());
    }

    private String getClusterAddress() {
        final InetSocketAddress address = nodeProperties.getClusterNodeProtocolAddress();
        return address.getHostName() + ":" + address.getPort();
    }

    public boolean hasRole(final String roleName) {
        final String leaderAddress = electionManager.getLeader(roleName);
        if (leaderAddress == null) {
            return false;
        }

        return leaderAddress.equals(getClusterAddress());
    }

    public void waitUntilElectedForRole(final String roleName, final long time, final TimeUnit timeUnit) {
        ClusterUtils.waitUntilConditionMet(time, timeUnit, () -> hasRole(roleName));
    }

    // Assertions
    /**
     * Assert that the node with the given ID connects (According to this node!) within the given amount of time
     *
     * @param nodeId id of the node
     * @param time how long to wait
     * @param timeUnit unit of time provided by the 'time' argument
     */
    public void assertNodeConnects(final NodeIdentifier nodeId, final long time, final TimeUnit timeUnit) {
        ClusterUtils.waitUntilConditionMet(time, timeUnit,
            () -> getClusterCoordinator().getConnectionStatus(nodeId).getState() == NodeConnectionState.CONNECTED,
            () -> "Connection Status is " + getClusterCoordinator().getConnectionStatus(nodeId).toString());
    }

    /**
     * Assert that the node with the given ID disconnects (According to this node!) within the given amount of time
     *
     * @param nodeId id of the node
     * @param time how long to wait
     * @param timeUnit unit of time provided by the 'time' argument
     */
    public void assertNodeDisconnects(final NodeIdentifier nodeId, final long time, final TimeUnit timeUnit) {
        ClusterUtils.waitUntilConditionMet(time, timeUnit,
            () -> getClusterCoordinator().getConnectionStatus(nodeId).getState() == NodeConnectionState.DISCONNECTED,
            () -> "Connection Status is " + getClusterCoordinator().getConnectionStatus(nodeId).toString());
    }

    /**
     * Asserts that the node with the given ID is currently connected (According to this node!)
     *
     * @param nodeId id of the node
     */
    public void assertNodeIsConnected(final NodeIdentifier nodeId) {
        assertEquals(NodeConnectionState.CONNECTED, getClusterCoordinator().getConnectionStatus(nodeId).getState());
    }

    /**
     * Assert that the node with the given ID is offloaded (according to this node!) within the given amount of time
     *
     * @param nodeId id of the node
     * @param time how long to wait
     * @param timeUnit unit of time provided by the 'time' argument
     */
    public void assertNodeIsOffloaded(final NodeIdentifier nodeId, final long time, final TimeUnit timeUnit) {
        ClusterUtils.waitUntilConditionMet(time, timeUnit,
            () -> getClusterCoordinator().getConnectionStatus(nodeId).getState() == NodeConnectionState.OFFLOADED,
            () -> "Connection Status is " + getClusterCoordinator().getConnectionStatus(nodeId).toString());
    }
}
@@ -1,116 +0,0 @@
package org.apache.nifi.cluster.integration;

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.ValidationContext;
import org.apache.nifi.components.ValidationResult;
import org.apache.nifi.components.state.Scope;
import org.apache.nifi.components.state.StateMap;
import org.apache.nifi.components.state.StateProvider;
import org.apache.nifi.components.state.StateProviderInitializationContext;
import org.apache.nifi.controller.state.StandardStateMap;

public class NopStateProvider implements StateProvider {
    private final String id = UUID.randomUUID().toString();
    private final Map<String, Map<String, String>> componentStateMap = new HashMap<>();

    @Override
    public Collection<ValidationResult> validate(ValidationContext context) {
        return Collections.emptyList();
    }

    @Override
    public PropertyDescriptor getPropertyDescriptor(String name) {
        return null;
    }

    @Override
    public void onPropertyModified(PropertyDescriptor descriptor, String oldValue, String newValue) {
    }

    @Override
    public List<PropertyDescriptor> getPropertyDescriptors() {
        return Collections.emptyList();
    }

    @Override
    public String getIdentifier() {
        return id;
    }

    @Override
    public void initialize(StateProviderInitializationContext context) throws IOException {
    }

    @Override
    public void shutdown() {
    }

    @Override
    public synchronized void setState(Map<String, String> state, String componentId) throws IOException {
        final Map<String, String> stateMap = componentStateMap.computeIfAbsent(componentId, compId -> new HashMap<String, String>());
        stateMap.clear();
        stateMap.putAll(state);
    }

    @Override
    public synchronized StateMap getState(String componentId) throws IOException {
        return new StandardStateMap(componentStateMap.computeIfAbsent(componentId, compId -> new HashMap<String, String>()), 0L);
    }

    @Override
    public synchronized boolean replace(StateMap oldValue, Map<String, String> newValue, String componentId) throws IOException {
        return false;
    }

    @Override
    public void clear(String componentId) throws IOException {
    }

    @Override
    public void onComponentRemoved(String componentId) throws IOException {
    }

    @Override
    public void enable() {
    }

    @Override
    public void disable() {
    }

    @Override
    public boolean isEnabled() {
        return true;
    }

    @Override
    public Scope[] getSupportedScopes() {
        return new Scope[] {Scope.LOCAL};
    }

}
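Editor's note: the removed NopStateProvider above still built its StateMap with a numeric version (0L). Under the Optional-based StandardStateMap constructor introduced in the hunks below, an equivalent no-op getState would look roughly like the sketch that follows. This is an illustration of the new constructor signature only, not code from this commit; the class name and the choice of Optional.empty() (to mark "no committed version") are assumptions.

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

import org.apache.nifi.components.state.StateMap;
import org.apache.nifi.controller.state.StandardStateMap;

class NopStateSketch {
    private final Map<String, Map<String, String>> componentStateMap = new HashMap<>();

    // Returns the stored values with an empty state version, signalling state that has never been committed.
    synchronized StateMap getState(final String componentId) {
        final Map<String, String> values = componentStateMap.computeIfAbsent(componentId, id -> new HashMap<>());
        return new StandardStateMap(values, Optional.empty());
    }
}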
@ -93,6 +93,7 @@ import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
import java.util.NoSuchElementException;
|
import java.util.NoSuchElementException;
|
||||||
import java.util.Objects;
|
import java.util.Objects;
|
||||||
|
import java.util.Optional;
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
import java.util.UUID;
|
import java.util.UUID;
|
||||||
import java.util.concurrent.ConcurrentHashMap;
|
import java.util.concurrent.ConcurrentHashMap;
|
||||||
|
@ -116,9 +117,11 @@ import java.util.stream.Collectors;
|
||||||
* <p/>
|
* <p/>
|
||||||
*/
|
*/
|
||||||
public class StandardProcessSession implements ProcessSession, ProvenanceEventEnricher {
|
public class StandardProcessSession implements ProcessSession, ProvenanceEventEnricher {
|
||||||
|
private static final long VERSION_INCREMENT = 1;
|
||||||
|
private static final String INITIAL_VERSION = String.valueOf(VERSION_INCREMENT);
|
||||||
private static final AtomicLong idGenerator = new AtomicLong(0L);
|
private static final AtomicLong idGenerator = new AtomicLong(0L);
|
||||||
private static final AtomicLong enqueuedIndex = new AtomicLong(0L);
|
private static final AtomicLong enqueuedIndex = new AtomicLong(0L);
|
||||||
private static final StateMap EMPTY_STATE_MAP = new StandardStateMap(Collections.emptyMap(), -1L);
|
private static final StateMap EMPTY_STATE_MAP = new StandardStateMap(Collections.emptyMap(), Optional.empty());
|
||||||
|
|
||||||
// determines how many things must be transferred, removed, modified in order to avoid logging the FlowFile ID's on commit/rollback
|
// determines how many things must be transferred, removed, modified in order to avoid logging the FlowFile ID's on commit/rollback
|
||||||
public static final int VERBOSE_LOG_THRESHOLD = 10;
|
public static final int VERBOSE_LOG_THRESHOLD = 10;
|
||||||
|
@ -685,7 +688,8 @@ public class StandardProcessSession implements ProcessSession, ProvenanceEventEn
|
||||||
final StateManager stateManager = context.getStateManager();
|
final StateManager stateManager = context.getStateManager();
|
||||||
if (checkpoint.localState != null) {
|
if (checkpoint.localState != null) {
|
||||||
final StateMap stateMap = stateManager.getState(Scope.LOCAL);
|
final StateMap stateMap = stateManager.getState(Scope.LOCAL);
|
||||||
if (stateMap.getVersion() < checkpoint.localState.getVersion()) {
|
final Optional<String> stateVersion = stateMap.getStateVersion();
|
||||||
|
if (!stateVersion.equals(checkpoint.localState.getStateVersion())) {
|
||||||
LOG.debug("Updating State Manager's Local State");
|
LOG.debug("Updating State Manager's Local State");
|
||||||
|
|
||||||
try {
|
try {
|
||||||
|
@ -695,14 +699,15 @@ public class StandardProcessSession implements ProcessSession, ProvenanceEventEn
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
LOG.debug("Will not update State Manager's Local State because the State Manager reports the latest version as {}, which is newer than the session's known version of {}.",
|
LOG.debug("Will not update State Manager's Local State because the State Manager reports the latest version as {}, which is newer than the session's known version of {}.",
|
||||||
stateMap.getVersion(), checkpoint.localState.getVersion());
|
stateVersion, checkpoint.localState.getStateVersion());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update cluster state
|
// Update cluster state
|
||||||
if (checkpoint.clusterState != null) {
|
if (checkpoint.clusterState != null) {
|
||||||
final StateMap stateMap = stateManager.getState(Scope.CLUSTER);
|
final StateMap stateMap = stateManager.getState(Scope.CLUSTER);
- if (stateMap.getVersion() < checkpoint.clusterState.getVersion()) {
+ final Optional<String> stateVersion = stateMap.getStateVersion();
+ if (!stateVersion.equals(checkpoint.clusterState.getStateVersion())) {
LOG.debug("Updating State Manager's Cluster State");

try {

@@ -712,7 +717,7 @@ public class StandardProcessSession implements ProcessSession, ProvenanceEventEn
}
} else {
LOG.debug("Will not update State Manager's Cluster State because the State Manager reports the latest version as {}, which is newer than the session's known version of {}.",
- stateMap.getVersion(), checkpoint.clusterState.getVersion());
+ stateVersion, checkpoint.clusterState.getStateVersion());
}
}

@@ -3816,8 +3821,9 @@ public class StandardProcessSession implements ProcessSession, ProvenanceEventEn
@Override
public void setState(final Map<String, String> state, final Scope scope) throws IOException {
- final long currentVersion = getState(scope).getVersion();
+ final Optional<String> currentVersion = getState(scope).getStateVersion();
- final StateMap stateMap = new StandardStateMap(state, currentVersion + 1);
+ final String version = currentVersion.map(this::getIncrementedVersion).orElse(INITIAL_VERSION);
+ final StateMap stateMap = new StandardStateMap(state, Optional.of(version));
setState(stateMap, scope);
}

@@ -3858,8 +3864,8 @@ public class StandardProcessSession implements ProcessSession, ProvenanceEventEn
@Override
public boolean replaceState(final StateMap oldValue, final Map<String, String> newValue, final Scope scope) throws IOException {
final StateMap current = getState(scope);
- if (current.getVersion() == -1 && (oldValue == null || oldValue.getVersion() == -1)) {
+ if (!current.getStateVersion().isPresent() && (oldValue == null || !oldValue.getStateVersion().isPresent())) {
- final StateMap stateMap = new StandardStateMap(newValue, 1L);
+ final StateMap stateMap = new StandardStateMap(newValue, Optional.of(INITIAL_VERSION));
setState(stateMap, scope);
return true;
}

@@ -3868,8 +3874,9 @@ public class StandardProcessSession implements ProcessSession, ProvenanceEventEn
return false;
}

- if (current.getVersion() == oldValue.getVersion() && current.toMap().equals(oldValue.toMap())) {
+ if (current.getStateVersion().equals(oldValue.getStateVersion()) && current.toMap().equals(oldValue.toMap())) {
- final StateMap stateMap = new StandardStateMap(newValue, current.getVersion() + 1);
+ final String version = current.getStateVersion().map(this::getIncrementedVersion).orElse(INITIAL_VERSION);
+ final StateMap stateMap = new StandardStateMap(newValue, Optional.of(version));
setState(stateMap, scope);
return true;
}

@@ -3887,6 +3894,12 @@ public class StandardProcessSession implements ProcessSession, ProvenanceEventEn
return "StandardProcessSession[id=" + sessionId + "]";
}

+ private String getIncrementedVersion(final String currentVersion) {
+ final long versionNumber = Long.parseLong(currentVersion);
+ final long version = versionNumber + VERSION_INCREMENT;
+ return String.valueOf(version);
+ }
+
/**
* Callback interface used to poll a FlowFileQueue, in order to perform
* functional programming-type of polling a queue
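The hunks above replace numeric version arithmetic with Optional equality checks. A minimal sketch of how a caller migrates, assuming a cached previous StateMap (the stateMap, previousStateMap, and refresh names are illustrative, not from the commit):

    // Before: numeric versions, with -1 meaning no state stored yet
    // if (stateMap.getVersion() < knownVersion) { refresh(); }

    // After: opaque String versions wrapped in Optional
    final Optional<String> currentVersion = stateMap.getStateVersion();
    final Optional<String> knownVersion = previousStateMap.getStateVersion();
    if (!currentVersion.equals(knownVersion)) {
        // the versions differ, or one side has no stored state: refresh the cached view
        refresh();
    }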
@@ -19,21 +19,30 @@ package org.apache.nifi.controller.state;

import java.util.Collections;
import java.util.Map;
+ import java.util.Optional;

import org.apache.nifi.components.state.StateMap;

public class StandardStateMap implements StateMap {
- private final Map<String, String> stateValues;
- private final long version;
-
- public StandardStateMap(final Map<String, String> stateValues, final long version) {
- this.stateValues = Collections.unmodifiableMap(stateValues == null ? Collections.<String, String> emptyMap() : stateValues);
- this.version = version;
+ private static final int EMPTY_VERSION = -1;
+
+ private final Map<String, String> stateValues;
+
+ private final Optional<String> stateVersion;
+
+ public StandardStateMap(final Map<String, String> stateValues, final Optional<String> stateVersion) {
+ this.stateValues = Collections.unmodifiableMap(stateValues == null ? Collections.emptyMap() : stateValues);
+ this.stateVersion = stateVersion;
}

@Override
public long getVersion() {
- return version;
+ return stateVersion.map(version -> version.hashCode()).orElse(EMPTY_VERSION);
+ }
+
+ @Override
+ public Optional<String> getStateVersion() {
+ return stateVersion;
}

@Override

@@ -48,6 +57,6 @@ public class StandardStateMap implements StateMap {

@Override
public String toString() {
- return "StandardStateMap[version=" + version + ", values=" + stateValues + "]";
+ return "StandardStateMap[version=" + stateVersion + ", values=" + stateValues + "]";
}
}
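Because the deprecated getVersion() is now derived from the String version, it is only meaningful as a presence check, not an ordering. A brief illustration of the new constructor and the bridge behavior (the literal values are examples only):

    final StateMap empty = new StandardStateMap(Collections.emptyMap(), Optional.empty());
    empty.getStateVersion();   // Optional.empty()
    empty.getVersion();        // -1, the EMPTY_VERSION sentinel

    final StateMap stored = new StandardStateMap(Collections.singletonMap("key", "value"), Optional.of("3"));
    stored.getStateVersion();  // Optional.of("3")
    stored.getVersion();       // "3".hashCode(), usable for change detection but not for ordering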
@@ -283,7 +283,7 @@ public class StandardDataValve implements DataValve {
return;
}

- if (stateMap.getVersion() < 0) {
+ if (!stateMap.getStateVersion().isPresent()) {
logger.debug("No state to recover for {}", this);
return;
}
@@ -52,6 +52,21 @@
<artifactId>nifi-framework-components</artifactId>
<version>2.0.0-SNAPSHOT</version>
</dependency>
+ <dependency>
+ <groupId>org.apache.nifi</groupId>
+ <artifactId>nifi-framework-leader-election-shared</artifactId>
+ <version>2.0.0-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.nifi</groupId>
+ <artifactId>nifi-framework-zookeeper-leader-election</artifactId>
+ <version>2.0.0-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.nifi</groupId>
+ <artifactId>nifi-framework-cluster-zookeeper</artifactId>
+ <version>2.0.0-SNAPSHOT</version>
+ </dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-expression-language</artifactId>
@@ -732,10 +732,10 @@ public class FlowController implements ReportingTaskProvider, Authorizable, Node
// flow that is different from the rest of the cluster (especially an empty flow) and then
// kicking everyone out. This way, we instead inherit the cluster flow before we attempt to be
// the coordinator.
- LOG.info("Checking if there is already a Cluster Coordinator Elected...");
+ LOG.info("Checking for elected Cluster Coordinator...");
- final String clusterCoordinatorAddress = leaderElectionManager.getLeader(ClusterRoles.CLUSTER_COORDINATOR);
+ final Optional<String> clusterCoordinatorLeader = leaderElectionManager.getLeader(ClusterRoles.CLUSTER_COORDINATOR);
- if (StringUtils.isEmpty(clusterCoordinatorAddress)) {
+ if (!clusterCoordinatorLeader.isPresent()) {
- LOG.info("It appears that no Cluster Coordinator has been Elected yet. Registering for Cluster Coordinator Role.");
+ LOG.info("No Cluster Coordinator elected: Registering for Cluster Coordinator election");
registerForClusterCoordinator(true);
} else {
// At this point, we have determined that there is a Cluster Coordinator elected. It is important to note, though,

@@ -746,8 +746,8 @@ public class FlowController implements ReportingTaskProvider, Authorizable, Node
// to that address has not started. ZooKeeper/Curator will recognize this after a while and delete the ZNode. As a result,
// we may later determine that there is in fact no Cluster Coordinator. If this happens, we will automatically register for
// Cluster Coordinator through the StandardFlowService.
- LOG.info("The Election for Cluster Coordinator has already begun (Leader is {}). Will not register to be elected for this role until after connecting "
+ LOG.info("Cluster Coordinator [{}] elected: Not registering for election until after connecting "
- + "to the cluster and inheriting the cluster's flow.", clusterCoordinatorAddress);
+ + "to the cluster and inheriting the flow", clusterCoordinatorLeader.get());
registerForClusterCoordinator(false);
}

@@ -2377,9 +2377,10 @@ public class FlowController implements ReportingTaskProvider, Authorizable, Node

leaderElectionManager.register(ClusterRoles.CLUSTER_COORDINATOR, new LeaderElectionStateChangeListener() {
@Override
- public synchronized void onLeaderRelinquish() {
+ public synchronized void onStopLeading() {
- LOG.info("This node is no longer the elected Active Cluster Coordinator");
+ LOG.info("This node is no longer the elected Active {}", ClusterRoles.CLUSTER_COORDINATOR);
- bulletinRepository.addBulletin(BulletinFactory.createBulletin("Cluster Coordinator", Severity.INFO.name(), participantId + " is no longer the Cluster Coordinator"));
+ final String message = String.format("%s is no longer the elected Active %s", participantId, ClusterRoles.CLUSTER_COORDINATOR);
+ bulletinRepository.addBulletin(BulletinFactory.createBulletin(ClusterRoles.CLUSTER_COORDINATOR, Severity.INFO.name(), message));

// We do not want to stop the heartbeat monitor. This is because even though ZooKeeper offers guarantees
// that watchers will see changes on a ZNode in the order they happened, there does not seem to be any

@@ -2392,9 +2393,10 @@ public class FlowController implements ReportingTaskProvider, Authorizable, Node
}

@Override
- public synchronized void onLeaderElection() {
+ public synchronized void onStartLeading() {
- LOG.info("This node elected Active Cluster Coordinator");
+ LOG.info("This node has been elected Active {}", ClusterRoles.CLUSTER_COORDINATOR);
- bulletinRepository.addBulletin(BulletinFactory.createBulletin("Cluster Coordinator", Severity.INFO.name(), participantId + " has been elected the Cluster Coordinator"));
+ final String message = String.format("%s has been elected Active %s", participantId, ClusterRoles.CLUSTER_COORDINATOR);
+ bulletinRepository.addBulletin(BulletinFactory.createBulletin(ClusterRoles.CLUSTER_COORDINATOR, Severity.INFO.name(), message));

// Purge any heartbeats that we already have. If we don't do this, we can have a scenario where we receive heartbeats
// from a node, and then another node becomes Cluster Coordinator. As a result, we stop receiving heartbeats. Now that

@@ -2411,12 +2413,12 @@ public class FlowController implements ReportingTaskProvider, Authorizable, Node

leaderElectionManager.register(ClusterRoles.PRIMARY_NODE, new LeaderElectionStateChangeListener() {
@Override
- public void onLeaderElection() {
+ public void onStartLeading() {
setPrimary(true);
}

@Override
- public void onLeaderRelinquish() {
+ public void onStopLeading() {
setPrimary(false);
}
}, participantId);

@@ -2546,7 +2548,7 @@ public class FlowController implements ReportingTaskProvider, Authorizable, Node
// Emit a bulletin detailing the fact that the primary node state has changed
if (oldBean == null || oldBean.isPrimary() != primary) {
final String message = primary ? "This node has been elected Primary Node" : "This node is no longer Primary Node";
- final Bulletin bulletin = BulletinFactory.createBulletin("Primary Node", Severity.INFO.name(), message);
+ final Bulletin bulletin = BulletinFactory.createBulletin(ClusterRoles.PRIMARY_NODE, Severity.INFO.name(), message);
bulletinRepository.addBulletin(bulletin);
LOG.info(message);
}
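The renamed listener callbacks and the Optional-returning getLeader() are used together when wiring cluster roles. A condensed sketch of the registration pattern shown in the hunks above (logging simplified):

    leaderElectionManager.register(ClusterRoles.PRIMARY_NODE, new LeaderElectionStateChangeListener() {
        @Override
        public void onStartLeading() {
            setPrimary(true);   // this node was just elected for the role
        }

        @Override
        public void onStopLeading() {
            setPrimary(false);  // this node lost or relinquished the role
        }
    }, participantId);

    // "No leader elected" is now an empty Optional rather than a null or empty String
    final Optional<String> coordinator = leaderElectionManager.getLeader(ClusterRoles.CLUSTER_COORDINATOR);
    coordinator.ifPresent(address -> LOG.info("Cluster Coordinator [{}] elected", address));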
@@ -37,6 +37,7 @@ import java.util.Date;
import java.util.List;
import java.util.Locale;
import java.util.Map;
+ import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

@@ -62,12 +63,8 @@ public class ClusterProtocolHeartbeater implements Heartbeater {

@Override
public String getHeartbeatAddress() {
- final String heartbeatAddress = electionManager.getLeader(ClusterRoles.CLUSTER_COORDINATOR);
- if (heartbeatAddress == null) {
- throw new ProtocolException("Cannot send heartbeat because there is no Cluster Coordinator currently elected");
- }
-
- return heartbeatAddress;
+ final Optional<String> clusterCoordinator = electionManager.getLeader(ClusterRoles.CLUSTER_COORDINATOR);
+ return clusterCoordinator.orElseThrow(() -> new ProtocolException("Unable to send heartbeat: Cluster Coordinator not found"));
}
@@ -19,6 +19,7 @@ package org.apache.nifi.controller.leader.election;

import java.util.Collections;
import java.util.Map;
+ import java.util.Optional;
import java.util.concurrent.TimeUnit;

/**

@@ -46,8 +47,8 @@ public class StandaloneLeaderElectionManager implements LeaderElectionManager {
}

@Override
- public String getLeader(final String roleName) {
+ public Optional<String> getLeader(final String roleName) {
- return null;
+ return Optional.empty();
}

@Override

@@ -59,11 +60,6 @@ public class StandaloneLeaderElectionManager implements LeaderElectionManager {
return false;
}

- @Override
- public boolean isStopped() {
- return false;
- }
-
@Override
public void stop() {
}

@@ -92,9 +88,4 @@ public class StandaloneLeaderElectionManager implements LeaderElectionManager {
public long getPollCount() {
return -1L;
}
-
- @Override
- public boolean isLeaderElected(String roleName) {
- return false;
- }
}
@@ -22,12 +22,15 @@ import java.io.DataOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
+ import java.util.Optional;

import org.apache.nifi.components.state.StateMap;
import org.wali.SerDe;
import org.wali.UpdateType;

public class StateMapSerDe implements SerDe<StateMapUpdate> {
+ private static final long EMPTY_VERSION = -1;
+
private static final int VERSION = 0;

@Override

@@ -44,7 +47,7 @@ public class StateMapSerDe implements SerDe<StateMapUpdate> {
}

final StateMap stateMap = record.getStateMap();
- final long recordVersion = stateMap.getVersion();
+ final long recordVersion = stateMap.getStateVersion().map(Long::parseLong).orElse(EMPTY_VERSION);
out.writeLong(recordVersion);

final Map<String, String> map = stateMap.toMap();

@@ -89,7 +92,8 @@ public class StateMapSerDe implements SerDe<StateMapUpdate> {
stateValues.put(key, value);
}

- return new StateMapUpdate(new StandardStateMap(stateValues, recordVersion), componentId, updateType);
+ final String stateVersion = String.valueOf(recordVersion);
+ return new StateMapUpdate(new StandardStateMap(stateValues, Optional.of(stateVersion)), componentId, updateType);
}

@Override
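The write-ahead log still stores the version as a long, so the String state version is parsed on serialization and re-wrapped on deserialization. A small sketch of that round trip, using the same -1 sentinel as the hunk above (out, in, and stateValues stand for the record's streams and recovered map):

    // Serialize: a missing version becomes the -1 sentinel before the long is written
    final long recordVersion = stateMap.getStateVersion().map(Long::parseLong).orElse(-1L);
    out.writeLong(recordVersion);

    // Deserialize: the recovered long becomes the String state version again
    final long recoveredVersion = in.readLong();
    final StateMap restored = new StandardStateMap(stateValues, Optional.of(String.valueOf(recoveredVersion)));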
@@ -25,6 +25,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+ import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executors;

@@ -52,6 +53,8 @@ import org.wali.WriteAheadRepository;
* Provides state management for local (standalone) state, backed by a write-ahead log
*/
public class WriteAheadLocalStateProvider extends AbstractStateProvider {
+ private static final long EMPTY_VERSION = -1;
+
private static final Logger logger = LoggerFactory.getLogger(WriteAheadLocalStateProvider.class);

private volatile boolean alwaysSync;

@@ -131,14 +134,15 @@ public class WriteAheadLocalStateProvider extends AbstractStateProvider {
writeAheadLog = new MinimalLockingWriteAheadLog<>(basePath.toPath(), numPartitions, serde, null);

final Collection<StateMapUpdate> updates = writeAheadLog.recoverRecords();
- long maxRecordVersion = -1L;
+ long maxRecordVersion = EMPTY_VERSION;

for (final StateMapUpdate update : updates) {
if (update.getUpdateType() == UpdateType.DELETE) {
continue;
}

- final long recordVersion = update.getStateMap().getVersion();
+ final Optional<String> stateVersion = update.getStateMap().getStateVersion();
+ final long recordVersion = stateVersion.map(Long::parseLong).orElse(EMPTY_VERSION);
if (recordVersion > maxRecordVersion) {
maxRecordVersion = recordVersion;
}

@@ -180,7 +184,7 @@ public class WriteAheadLocalStateProvider extends AbstractStateProvider {
private ComponentProvider getProvider(final String componentId) {
ComponentProvider componentProvider = componentProviders.get(componentId);
if (componentProvider == null) {
- final StateMap stateMap = new StandardStateMap(Collections.<String, String> emptyMap(), -1L);
+ final StateMap stateMap = new StandardStateMap(Collections.emptyMap(), Optional.empty());
componentProvider = new ComponentProvider(writeAheadLog, versionGenerator, componentId, stateMap, alwaysSync);

final ComponentProvider existingComponentProvider = componentProviders.putIfAbsent(componentId, componentProvider);

@@ -248,14 +252,14 @@ public class WriteAheadLocalStateProvider extends AbstractStateProvider {
// repository at a time for a record with the same key. I.e., many threads can update the repository at once, as long as they
// are not updating the repository with records that have the same identifier.
public synchronized void setState(final Map<String, String> state) throws IOException {
- stateMap = new StandardStateMap(state, versionGenerator.incrementAndGet());
+ stateMap = new StandardStateMap(state, Optional.of(getIncrementedVersion()));
final StateMapUpdate updateRecord = new StateMapUpdate(stateMap, componentId, UpdateType.UPDATE);
wal.update(Collections.singleton(updateRecord), alwaysSync);
}

// see above explanation as to why this method is synchronized.
public synchronized boolean replace(final StateMap oldValue, final Map<String, String> newValue) throws IOException {
- if (stateMap.getVersion() == -1L) {
+ if (!stateMap.getStateVersion().isPresent()) {
// state has never been set so return false
return false;
}

@@ -264,17 +268,21 @@ public class WriteAheadLocalStateProvider extends AbstractStateProvider {
return false;
}

- stateMap = new StandardStateMap(new HashMap<>(newValue), versionGenerator.incrementAndGet());
+ stateMap = new StandardStateMap(new HashMap<>(newValue), Optional.of(getIncrementedVersion()));
final StateMapUpdate updateRecord = new StateMapUpdate(stateMap, componentId, UpdateType.UPDATE);
wal.update(Collections.singleton(updateRecord), alwaysSync);
return true;
}

public synchronized void clear() throws IOException {
- stateMap = new StandardStateMap(null, versionGenerator.incrementAndGet());
+ stateMap = new StandardStateMap(null, Optional.of(getIncrementedVersion()));
final StateMapUpdate update = new StateMapUpdate(stateMap, componentId, UpdateType.UPDATE);
wal.update(Collections.singleton(update), alwaysSync);
}

+ private String getIncrementedVersion() {
+ return String.valueOf(versionGenerator.incrementAndGet());
+ }
}

private class CheckpointTask implements Runnable {
@@ -28,8 +28,8 @@ import org.apache.nifi.components.state.Scope;
import org.apache.nifi.components.state.StateMap;
import org.apache.nifi.components.state.StateProviderInitializationContext;
import org.apache.nifi.components.state.exception.StateTooLargeException;
- import org.apache.nifi.controller.cluster.ZooKeeperClientConfig;
+ import org.apache.nifi.framework.cluster.zookeeper.ZooKeeperClientConfig;
- import org.apache.nifi.controller.cluster.SecureClientZooKeeperFactory;
+ import org.apache.nifi.framework.cluster.zookeeper.SecureClientZooKeeperFactory;
import org.apache.nifi.controller.state.StandardStateMap;
import org.apache.nifi.controller.state.providers.AbstractStateProvider;
import org.apache.nifi.processor.util.StandardValidators;

@@ -61,6 +61,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+ import java.util.Optional;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeUnit;

@@ -72,6 +73,8 @@ import java.util.stream.Collectors;
* consistency across configuration interactions.
*/
public class ZooKeeperStateProvider extends AbstractStateProvider {
+ private static final int EMPTY_VERSION = -1;
+
private static final Logger logger = LoggerFactory.getLogger(ZooKeeperStateProvider.class);
private NiFiProperties nifiProperties;

@@ -344,7 +347,8 @@ public class ZooKeeperStateProvider extends AbstractStateProvider {
stateValues.put(key, value);
}

- return new StandardStateMap(stateValues, recordVersion);
+ final String stateVersion = String.valueOf(recordVersion);
+ return new StandardStateMap(stateValues, Optional.of(stateVersion));
}
}

@@ -470,7 +474,7 @@ public class ZooKeeperStateProvider extends AbstractStateProvider {
} catch (final KeeperException ke) {
final Code exceptionCode = ke.code();
if (Code.NONODE == exceptionCode) {
- return new StandardStateMap(null, -1L);
+ return new StandardStateMap(null, Optional.empty());
}
if (Code.SESSIONEXPIRED == exceptionCode) {
invalidateClient();

@@ -488,8 +492,9 @@ public class ZooKeeperStateProvider extends AbstractStateProvider {
public boolean replace(final StateMap oldValue, final Map<String, String> newValue, final String componentId) throws IOException {
verifyEnabled();

+ final int version = oldValue.getStateVersion().map(Integer::parseInt).orElse(EMPTY_VERSION);
try {
- setState(newValue, (int) oldValue.getVersion(), componentId, false);
+ setState(newValue, version, componentId, false);
return true;
} catch (final NoNodeException nne) {
return false;
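ZooKeeper tracks each ZNode with an int version, so the provider parses the String state version back to an int for the conditional update. A short sketch of the optimistic replace, assuming the version string was written by this same provider:

    final int expectedVersion = oldValue.getStateVersion().map(Integer::parseInt).orElse(-1);
    // the conditional update succeeds only if the ZNode is still at expectedVersion
    setState(newValue, expectedVersion, componentId, false);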
@@ -17,7 +17,7 @@

package org.apache.nifi.controller.state.server;

- import org.apache.nifi.controller.cluster.ZooKeeperClientConfig;
+ import org.apache.nifi.framework.cluster.zookeeper.ZooKeeperClientConfig;
import org.apache.nifi.util.NiFiProperties;
import org.apache.zookeeper.common.X509Util;
import org.apache.zookeeper.server.DatadirCleanupManager;
@@ -14,24 +14,32 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.nifi.spring;

- import org.apache.nifi.controller.leader.election.CuratorLeaderElectionManager;
import org.apache.nifi.controller.leader.election.LeaderElectionManager;
import org.apache.nifi.controller.leader.election.StandaloneLeaderElectionManager;
+ import org.apache.nifi.nar.ExtensionDefinition;
+ import org.apache.nifi.nar.ExtensionManager;
+ import org.apache.nifi.nar.NarThreadContextClassLoader;
import org.apache.nifi.util.NiFiProperties;
import org.springframework.beans.factory.FactoryBean;

+ import java.util.Optional;
+ import java.util.Set;
+
+ import static org.apache.nifi.util.NiFiProperties.CLUSTER_LEADER_ELECTION_IMPLEMENTATION;
+ import static org.apache.nifi.util.NiFiProperties.DEFAULT_CLUSTER_LEADER_ELECTION_IMPLEMENTATION;
+
public class LeaderElectionManagerFactoryBean implements FactoryBean<LeaderElectionManager> {
- private int numThreads;
+ private ExtensionManager extensionManager;

private NiFiProperties properties;

@Override
public LeaderElectionManager getObject() throws Exception {
final boolean isNode = properties.isNode();
if (isNode) {
- return new CuratorLeaderElectionManager(numThreads, properties);
+ return loadClusterLeaderElectionManager();
} else {
return new StandaloneLeaderElectionManager();
}

@@ -47,11 +55,29 @@ public class LeaderElectionManagerFactoryBean implements FactoryBean<LeaderElect
return true;
}

- public void setNumThreads(final int numThreads) {
- this.numThreads = numThreads;
+ public void setExtensionManager(final ExtensionManager extensionManager) {
+ this.extensionManager = extensionManager;
}

public void setProperties(final NiFiProperties properties) {
this.properties = properties;
}

+ private LeaderElectionManager loadClusterLeaderElectionManager() throws ClassNotFoundException, InstantiationException, IllegalAccessException {
+ final String leaderElectionImplementation = properties.getProperty(CLUSTER_LEADER_ELECTION_IMPLEMENTATION, DEFAULT_CLUSTER_LEADER_ELECTION_IMPLEMENTATION);
+ final Set<ExtensionDefinition> extensions = extensionManager.getExtensions(LeaderElectionManager.class);
+ final Optional<ExtensionDefinition> extensionFound = extensions.stream()
+ .filter(extensionDefinition -> {
+ final String extensionClassName = extensionDefinition.getImplementationClassName();
+ return extensionClassName.equals(leaderElectionImplementation) || extensionClassName.endsWith(leaderElectionImplementation);
+ })
+ .findFirst();
+ final ExtensionDefinition extension = extensionFound.orElseThrow(() -> {
+ final String message = String.format("No Extensions Found for %s", LeaderElectionManager.class.getName());
+ return new IllegalStateException(message);
+ });
+ final String extensionImplementationClass = extension.getImplementationClassName();
+
+ return NarThreadContextClassLoader.createInstance(extensionManager, extensionImplementationClass, LeaderElectionManager.class, properties);
+ }
}
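The factory now resolves the implementation named by the nifi.cluster.leader.election.implementation property (defaulting to CuratorLeaderElectionManager, per the commit summary), accepting either the fully qualified class name or a simple-name suffix. A toy illustration of that matching rule (the class names below are hypothetical):

    final String configured = "KubernetesLeaderElectionManager";
    final String candidate = "org.apache.nifi.kubernetes.leader.election.KubernetesLeaderElectionManager";
    final boolean selected = candidate.equals(configured) || candidate.endsWith(configured);
    // selected == true: a simple class name is accepted as a suffix of the implementation class name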
@@ -16,7 +16,7 @@
*/
package org.apache.nifi.cluster;

- import org.apache.nifi.controller.cluster.ZooKeeperClientConfig;
+ import org.apache.nifi.framework.cluster.zookeeper.ZooKeeperClientConfig;
import org.apache.nifi.util.NiFiProperties;
import org.junit.jupiter.api.Test;
@@ -22,7 +22,8 @@ import java.util.Map;

import org.apache.curator.framework.api.ACLProvider;
import org.apache.curator.framework.imps.DefaultACLProvider;
- import org.apache.nifi.controller.cluster.ZooKeeperClientConfig;
+ import org.apache.nifi.framework.cluster.leader.zookeeper.CuratorACLProviderFactory;
+ import org.apache.nifi.framework.cluster.zookeeper.ZooKeeperClientConfig;
import org.apache.nifi.util.NiFiProperties;
import org.apache.zookeeper.data.ACL;
import org.junit.jupiter.api.BeforeEach;
@@ -2495,7 +2495,7 @@ public class StandardProcessSessionIT {
public void testStateRetrievedHasVersion() throws IOException {
StateMap retrieved = session.getState(Scope.LOCAL);
assertNotNull(retrieved);
- assertEquals(-1, retrieved.getVersion());
+ assertFalse(retrieved.getStateVersion().isPresent());
assertEquals(1, stateManager.getRetrievalCount(Scope.LOCAL));
assertEquals(0, stateManager.getRetrievalCount(Scope.CLUSTER));

@@ -2505,13 +2505,13 @@ public class StandardProcessSessionIT {

retrieved = session.getState(Scope.LOCAL);
assertNotNull(retrieved);
- assertEquals(0, retrieved.getVersion());
+ assertTrue(retrieved.getStateVersion().isPresent());
assertEquals(Collections.singletonMap("abc", "123"), retrieved.toMap());

session.setState(Collections.singletonMap("abc", "222"), Scope.LOCAL);
retrieved = session.getState(Scope.LOCAL);
assertNotNull(retrieved);
- assertEquals(1, retrieved.getVersion());
+ assertTrue(retrieved.getStateVersion().isPresent());

session.commit();
stateManager.assertStateEquals("abc", "222", Scope.LOCAL);

@@ -2519,7 +2519,7 @@ public class StandardProcessSessionIT {

retrieved = session.getState(Scope.LOCAL);
assertNotNull(retrieved);
- assertEquals(1, retrieved.getVersion());
+ assertTrue(retrieved.getStateVersion().isPresent());
}

@Test
@@ -69,6 +69,7 @@ import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
+ import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;

@@ -86,7 +87,7 @@ public class TestStandardControllerServiceProvider {
@Override
public StateManager getStateManager(final String componentId) {
final StateManager stateManager = Mockito.mock(StateManager.class);
- final StateMap emptyStateMap = new StandardStateMap(Collections.emptyMap(), -1);
+ final StateMap emptyStateMap = new StandardStateMap(Collections.emptyMap(), Optional.empty());
try {
Mockito.when(stateManager.getState(any(Scope.class))).thenReturn(emptyStateMap);
} catch (IOException e) {
@@ -28,9 +28,11 @@ import java.io.DataOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
+ import java.util.Optional;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
+ import static org.junit.jupiter.api.Assertions.assertTrue;

public class TestStateMapSerDe {

@@ -42,7 +44,9 @@ public class TestStateMapSerDe {
final Map<String, String> stateValues = new HashMap<>();
stateValues.put("abc", "xyz");
stateValues.put("cba", "zyx");
- final StateMap stateMap = new StandardStateMap(stateValues, 3L);
+ String version = "3";
+ final StateMap stateMap = new StandardStateMap(stateValues, Optional.of(version));
final StateMapUpdate record = new StateMapUpdate(stateMap, componentId, UpdateType.CREATE);

final ByteArrayOutputStream baos = new ByteArrayOutputStream();

@@ -61,7 +65,9 @@ public class TestStateMapSerDe {
assertEquals(UpdateType.CREATE, update.getUpdateType());
final StateMap recoveredStateMap = update.getStateMap();

- assertEquals(3L, recoveredStateMap.getVersion());
+ final Optional<String> stateVersion = recoveredStateMap.getStateVersion();
+ assertTrue(stateVersion.isPresent());
+ assertEquals(version, stateVersion.get());
assertEquals(stateValues, recoveredStateMap.toMap());
}
}
@@ -59,7 +59,7 @@ public abstract class AbstractTestStateProvider {

StateMap map = provider.getState(componentId);
assertNotNull(map);
- assertEquals(-1, map.getVersion());
+ assertFalse(map.getStateVersion().isPresent());

assertNotNull(map.toMap());
assertTrue(map.toMap().isEmpty());

@@ -67,7 +67,7 @@ public abstract class AbstractTestStateProvider {

map = provider.getState(componentId);
assertNotNull(map);
- assertEquals(0, map.getVersion());
+ assertTrue(map.getStateVersion().isPresent());
assertEquals("value1", map.get(key));
assertEquals("value1", map.toMap().get(key));

@@ -77,7 +77,7 @@ public abstract class AbstractTestStateProvider {

map = provider.getState(componentId);
assertEquals("value2", map.get(key));
- assertEquals(1L, map.getVersion());
+ assertTrue(map.getStateVersion().isPresent());
}

@Test

@@ -89,7 +89,7 @@ public abstract class AbstractTestStateProvider {
StateMap stateMap = provider.getState(componentId);
assertNotNull(stateMap);
assertEquals("value1", stateMap.get(key));
- assertEquals(0, stateMap.getVersion());
+ assertTrue(stateMap.getStateVersion().isPresent());

provider.setState(Collections.singletonMap(key, "intermediate value"), componentId);

@@ -98,7 +98,7 @@ public abstract class AbstractTestStateProvider {
assertEquals(key, stateMap.toMap().keySet().iterator().next());
assertEquals(1, stateMap.toMap().size());
assertEquals("intermediate value", stateMap.get(key));
- assertEquals(1, stateMap.getVersion());
+ assertTrue(stateMap.getStateVersion().isPresent());
}

@@ -116,13 +116,13 @@ public abstract class AbstractTestStateProvider {
assertEquals(1, map.size());
assertEquals("value", map.get(key));

- provider.setState(Collections.<String, String> emptyMap(), componentId);
+ provider.setState(Collections.emptyMap(), componentId);

final StateMap stateMap = provider.getState(componentId);
map = stateMap.toMap();
assertNotNull(map);
assertTrue(map.isEmpty());
- assertEquals(1, stateMap.getVersion());
+ assertTrue(stateMap.getStateVersion().isPresent());
}

@Test

@@ -130,21 +130,21 @@ public abstract class AbstractTestStateProvider {
final StateProvider provider = getProvider();
StateMap stateMap = provider.getState(componentId);
assertNotNull(stateMap);
- assertEquals(-1L, stateMap.getVersion());
+ assertFalse(stateMap.getStateVersion().isPresent());
assertTrue(stateMap.toMap().isEmpty());

provider.setState(Collections.singletonMap("testClear", "value"), componentId);

stateMap = provider.getState(componentId);
assertNotNull(stateMap);
- assertEquals(0, stateMap.getVersion());
+ assertTrue(stateMap.getStateVersion().isPresent());
assertEquals("value", stateMap.get("testClear"));

provider.clear(componentId);

stateMap = provider.getState(componentId);
assertNotNull(stateMap);
- assertEquals(1L, stateMap.getVersion());
+ assertTrue(stateMap.getStateVersion().isPresent());
assertTrue(stateMap.toMap().isEmpty());
}

@@ -196,7 +196,7 @@ public abstract class AbstractTestStateProvider {

provider.setState(newValue, componentId);
final StateMap stateMap = provider.getState(componentId);
- assertEquals(0L, stateMap.getVersion());
+ assertTrue(stateMap.getStateVersion().isPresent());

provider.onComponentRemoved(componentId);

@@ -205,8 +205,8 @@ public abstract class AbstractTestStateProvider {

final StateMap stateMapAfterRemoval = provider.getState(componentId);

- // version should be -1 because the state has been removed entirely.
+ // version should not be present because the state has been removed entirely.
- assertEquals(-1L, stateMapAfterRemoval.getVersion());
+ assertFalse(stateMapAfterRemoval.getStateVersion().isPresent());
}

protected abstract StateProvider getProvider();
@@ -1,277 +0,0 @@
- /*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.nifi.controller.state.providers.zookeeper;
-
- import org.apache.commons.io.FileUtils;
- import org.apache.curator.test.InstanceSpec;
- import org.apache.nifi.attribute.expression.language.StandardPropertyValue;
- import org.apache.nifi.components.PropertyDescriptor;
- import org.apache.nifi.components.PropertyValue;
- import org.apache.nifi.components.state.StateProvider;
- import org.apache.nifi.components.state.StateProviderInitializationContext;
- import org.apache.nifi.components.state.exception.StateTooLargeException;
- import org.apache.nifi.controller.state.providers.AbstractTestStateProvider;
- import org.apache.nifi.logging.ComponentLog;
- import org.apache.nifi.mock.MockComponentLogger;
- import org.apache.nifi.parameter.ParameterLookup;
- import org.apache.nifi.security.util.TemporaryKeyStoreBuilder;
- import org.apache.nifi.security.util.TlsConfiguration;
- import org.apache.nifi.util.NiFiProperties;
- import org.apache.zookeeper.server.ServerCnxnFactory;
- import org.apache.zookeeper.server.ZooKeeperServer;
- import org.junit.jupiter.api.AfterEach;
- import org.junit.jupiter.api.BeforeAll;
- import org.junit.jupiter.api.BeforeEach;
- import org.junit.jupiter.api.Test;
- import org.junit.jupiter.api.Timeout;
- import org.slf4j.Logger;
- import org.slf4j.LoggerFactory;
-
- import javax.net.ssl.SSLContext;
- import java.io.File;
- import java.io.IOException;
- import java.nio.file.Files;
- import java.nio.file.Path;
- import java.nio.file.Paths;
- import java.util.HashMap;
- import java.util.LinkedHashMap;
- import java.util.Map;
-
- import static org.apache.nifi.leader.election.ITSecureClientZooKeeperFactory.createAndStartServer;
- import static org.apache.nifi.leader.election.ITSecureClientZooKeeperFactory.createSecureClientProperties;
- import static org.junit.jupiter.api.Assertions.assertThrows;
- import static org.junit.jupiter.api.Assertions.fail;
-
- public class ITZooKeeperStateProvider extends AbstractTestStateProvider {
-
- private static final Logger logger = LoggerFactory.getLogger(ITZooKeeperStateProvider.class);
-
- private volatile StateProvider provider;
- private volatile ZooKeeperServer zkServer;
- private final static Map<PropertyDescriptor, String> stateProviderProperties = new HashMap<>();
- private static Path tempDir;
- private static NiFiProperties nifiProperties;
-
- private static TlsConfiguration tlsConfiguration;
-
- @BeforeAll
- public static void setTlsConfiguration() {
- tlsConfiguration = new TemporaryKeyStoreBuilder().build();
- }
-
- @BeforeEach
- public void setup() throws Exception {
- tempDir = Paths.get("target/TestZooKeeperStateProvider");
- final Path dataDir = tempDir.resolve("state");
- final int clientPort = InstanceSpec.getRandomPort();
-
- Files.createDirectory(tempDir);
-
- // Set up the testing server
- final ServerCnxnFactory serverConnectionFactory = createAndStartServer(
- dataDir,
- tempDir,
- clientPort,
- Paths.get(tlsConfiguration.getKeystorePath()),
- tlsConfiguration.getKeystorePassword(),
- Paths.get(tlsConfiguration.getTruststorePath()),
- tlsConfiguration.getTruststorePassword()
- );
- zkServer = serverConnectionFactory.getZooKeeperServer();
-
- // Set up state provider (client) TLS properties, normally injected through StateProviderContext annotation
- nifiProperties = createSecureClientProperties(
- clientPort,
- Paths.get(tlsConfiguration.getKeystorePath()),
- tlsConfiguration.getKeystoreType().getType(),
- tlsConfiguration.getKeystorePassword(),
- Paths.get(tlsConfiguration.getTruststorePath()),
- tlsConfiguration.getTruststoreType().getType(),
- tlsConfiguration.getTruststorePassword()
- );
-
- // Set up state provider properties
- stateProviderProperties.put(ZooKeeperStateProvider.SESSION_TIMEOUT, "15 secs");
- stateProviderProperties.put(ZooKeeperStateProvider.ROOT_NODE, "/nifi/team1/testing");
- stateProviderProperties.put(ZooKeeperStateProvider.ACCESS_CONTROL, ZooKeeperStateProvider.OPEN_TO_WORLD.getValue());
- final Map<PropertyDescriptor, String> properties = new HashMap<>(stateProviderProperties);
- properties.put(ZooKeeperStateProvider.CONNECTION_STRING, "localhost:".concat(String.valueOf(clientPort)));
- this.provider = createProvider(properties);
- }
-
- private void initializeProvider(final ZooKeeperStateProvider provider, final Map<PropertyDescriptor, String> properties) throws IOException {
- provider.setNiFiProperties(nifiProperties);
- provider.initialize(new StateProviderInitializationContext() {
- @Override
- public String getIdentifier() {
- return "Unit Test Provider Initialization Context";
- }
-
- @Override
- public Map<PropertyDescriptor, PropertyValue> getProperties() {
- final Map<PropertyDescriptor, PropertyValue> propValueMap = new HashMap<>();
- for (final Map.Entry<PropertyDescriptor, String> entry : properties.entrySet()) {
- propValueMap.put(entry.getKey(), new StandardPropertyValue(entry.getValue(), null, ParameterLookup.EMPTY));
- }
- return propValueMap;
- }
-
- @Override
- public Map<String,String> getAllProperties() {
- final Map<String,String> propValueMap = new LinkedHashMap<>();
- for (final Map.Entry<PropertyDescriptor, PropertyValue> entry : getProperties().entrySet()) {
- propValueMap.put(entry.getKey().getName(), entry.getValue().getValue());
- }
-
- propValueMap.put(NiFiProperties.ZOOKEEPER_CLIENT_SECURE, Boolean.TRUE.toString());
- propValueMap.put(NiFiProperties.ZOOKEEPER_SECURITY_KEYSTORE, tlsConfiguration.getKeystorePath());
- propValueMap.put(NiFiProperties.ZOOKEEPER_SECURITY_KEYSTORE_PASSWD, tlsConfiguration.getKeystorePassword());
- propValueMap.put(NiFiProperties.ZOOKEEPER_SECURITY_KEYSTORE_TYPE, tlsConfiguration.getKeystoreType().getType());
- propValueMap.put(NiFiProperties.ZOOKEEPER_SECURITY_TRUSTSTORE, tlsConfiguration.getTruststorePath());
- propValueMap.put(NiFiProperties.ZOOKEEPER_SECURITY_TRUSTSTORE_PASSWD, tlsConfiguration.getTruststorePassword());
- propValueMap.put(NiFiProperties.ZOOKEEPER_SECURITY_TRUSTSTORE_TYPE, tlsConfiguration.getTruststoreType().getType());
-
- return propValueMap;
- }
-
- @Override
- public PropertyValue getProperty(final PropertyDescriptor property) {
- final String prop = properties.get(property);
- return new StandardPropertyValue(prop, null, ParameterLookup.EMPTY);
- }
-
- // This won't be used by the ZooKeeper State Provider. I don't believe there's a way to pass an SSLContext
- // directly to ZooKeeper anyway.
- @Override
- public SSLContext getSSLContext() {
- return null;
- }
-
- @Override
- public ComponentLog getLogger() {
- return new MockComponentLogger();
- }
- });
- }
-
- private ZooKeeperStateProvider createProvider(final Map<PropertyDescriptor, String> properties) throws Exception {
- final ZooKeeperStateProvider provider = new ZooKeeperStateProvider();
- initializeProvider(provider, properties);
- provider.enable();
- return provider;
- }
-
- @AfterEach
- public void clear() throws IOException {
- try {
- if (provider != null) {
- provider.onComponentRemoved(componentId);
- provider.disable();
- provider.shutdown();
- }
- } finally {
- if (zkServer != null) {
- zkServer.shutdown(true);
- clearDirectories();
- }
- }
- }
-
- private static void clearDirectories() {
- try {
- FileUtils.deleteDirectory(new File(tempDir.toString()));
- } catch (IOException e) {
- logger.error("Failed to delete: " + tempDir.toString(), e);
- }
- }
-
- @Override
- protected StateProvider getProvider() {
- return provider;
- }
-
- @Timeout(30)
- @Test
- public void testStateTooLargeExceptionThrownOnSetState() throws InterruptedException {
- final Map<String, String> state = new HashMap<>();
- final StringBuilder sb = new StringBuilder();
-
- // Build a string that is a little less than 64 KB, because that's
- // the largest value available for DataOutputStream.writeUTF
- for (int i = 0; i < 6500; i++) {
- sb.append("0123456789");
- }
-
- for (int i = 0; i < 20; i++) {
- state.put("numbers." + i, sb.toString());
- }
-
- while (true) {
- try {
- getProvider().setState(state, componentId);
- fail("Expected StateTooLargeException");
- } catch (final StateTooLargeException stle) {
- // expected behavior.
- break;
- } catch (final IOException ioe) {
- // If we attempt to interact with the server too quickly, we will get a
- // ZooKeeper ConnectionLoss Exception, which the provider wraps in an IOException.
- // We will wait 1 second in this case and try again. The test will timeout if this
- // does not succeeed within 30 seconds.
- Thread.sleep(1000L);
- } catch (final Exception e) {
- logger.error("Something went wrong attempting to set the state in testStateTooLargeExceptionThrownOnSetState()");
- fail("Expected StateTooLargeException but " + e.getClass() + " was thrown", e);
- }
- }
- }
-
- @Timeout(30)
- @Test
- public void testStateTooLargeExceptionThrownOnReplace() throws InterruptedException {
- final Map<String, String> state = new HashMap<>();
- final StringBuilder sb = new StringBuilder();
-
- // Build a string that is a little less than 64 KB, because that's
- // the largest value available for DataOutputStream.writeUTF
- for (int i = 0; i < 6500; i++) {
- sb.append("0123456789");
- }
-
- for (int i = 0; i < 20; i++) {
- state.put("numbers." + i, sb.toString());
- }
-
- final Map<String, String> smallState = new HashMap<>();
- smallState.put("abc", "xyz");
-
- while (true) {
- try {
- getProvider().setState(smallState, componentId);
- break;
- } catch (final IOException ioe) {
- // If we attempt to interact with the server too quickly, we will get a
- // ZooKeeper ConnectionLoss Exception, which the provider wraps in an IOException.
// We will wait 1 second in this case and try again. The test will timeout if this
|
|
||||||
// does not succeeed within 30 seconds.
|
|
||||||
Thread.sleep(1000L);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
assertThrows(StateTooLargeException.class, () -> getProvider().replace(getProvider().getState(componentId), state, componentId));
|
|
||||||
}
|
|
||||||
}
|
|
|
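For context on the sizes used in the two tests above: each value is 6,500 × 10 = 65,000 characters, just under the 65,535-byte limit of DataOutputStream.writeUTF, and twenty such entries total roughly 1.3 MB of state. That comfortably exceeds ZooKeeper's default znode size limit (jute.maxbuffer, 1 MB by default), which is why setState and replace are expected to fail with StateTooLargeException rather than succeed.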
@@ -22,8 +22,8 @@ import org.apache.curator.framework.CuratorFrameworkFactory;
 import org.apache.curator.retry.RetryOneTime;
 import org.apache.curator.utils.DefaultZookeeperFactory;
 import org.apache.curator.utils.ZookeeperFactory;
-import org.apache.nifi.controller.cluster.SecureClientZooKeeperFactory;
-import org.apache.nifi.controller.cluster.ZooKeeperClientConfig;
+import org.apache.nifi.framework.cluster.zookeeper.SecureClientZooKeeperFactory;
+import org.apache.nifi.framework.cluster.zookeeper.ZooKeeperClientConfig;
 import org.apache.nifi.security.util.TemporaryKeyStoreBuilder;
 import org.apache.nifi.security.util.TlsConfiguration;
 import org.apache.nifi.util.NiFiProperties;
@@ -40,7 +40,7 @@ import org.apache.nifi.controller.ProcessorNode;
 import org.apache.nifi.controller.StandardSnippet;
 import org.apache.nifi.controller.XmlFlowSynchronizer;
 import org.apache.nifi.controller.flow.StandardFlowManager;
-import org.apache.nifi.controller.leader.election.CuratorLeaderElectionManager;
+import org.apache.nifi.framework.cluster.leader.zookeeper.CuratorLeaderElectionManager;
 import org.apache.nifi.controller.leader.election.LeaderElectionManager;
 import org.apache.nifi.controller.queue.ConnectionEventListener;
 import org.apache.nifi.controller.queue.FlowFileQueue;
@@ -240,7 +240,8 @@ public class FrameworkIntegrationTest {
         clusterCoordinator = Mockito.mock(ClusterCoordinator.class);
         final HeartbeatMonitor heartbeatMonitor = Mockito.mock(HeartbeatMonitor.class);
         final NodeProtocolSender protocolSender = Mockito.mock(NodeProtocolSender.class);
-        final LeaderElectionManager leaderElectionManager = new CuratorLeaderElectionManager(2, nifiProperties);
+        final LeaderElectionManager leaderElectionManager = new CuratorLeaderElectionManager(nifiProperties);
+
         final NodeIdentifier localNodeId = new NodeIdentifier(UUID.randomUUID().toString(), "localhost", 8111, "localhost", 8081,
                 "localhost", 8082, "localhost", 8083, 8084, false, Collections.emptySet());
@@ -20,8 +20,8 @@ import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.CuratorFrameworkFactory;
 import org.apache.curator.retry.RetryOneTime;
 import org.apache.curator.test.InstanceSpec;
-import org.apache.nifi.controller.cluster.SecureClientZooKeeperFactory;
-import org.apache.nifi.controller.cluster.ZooKeeperClientConfig;
+import org.apache.nifi.framework.cluster.zookeeper.SecureClientZooKeeperFactory;
+import org.apache.nifi.framework.cluster.zookeeper.ZooKeeperClientConfig;
 import org.apache.nifi.security.util.CertificateUtils;
 import org.apache.nifi.util.NiFiProperties;
 import org.apache.zookeeper.common.ClientX509Util;
@@ -0,0 +1,85 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.nifi.spring;

import org.apache.nifi.bundle.Bundle;
import org.apache.nifi.controller.leader.election.LeaderElectionManager;
import org.apache.nifi.controller.leader.election.StandaloneLeaderElectionManager;
import org.apache.nifi.nar.ExtensionDefinition;
import org.apache.nifi.nar.ExtensionManager;
import org.apache.nifi.util.NiFiProperties;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.when;

@ExtendWith(MockitoExtension.class)
class LeaderElectionManagerFactoryBeanTest {
    @Mock
    ExtensionManager extensionManager;

    @Mock
    Bundle bundle;

    LeaderElectionManagerFactoryBean bean;

    @BeforeEach
    void setBean() {
        bean = new LeaderElectionManagerFactoryBean();
        bean.setExtensionManager(extensionManager);
    }

    @Test
    void testGetObjectStandalone() throws Exception {
        final NiFiProperties properties = NiFiProperties.createBasicNiFiProperties(null, Collections.emptyMap());

        bean.setProperties(properties);

        final LeaderElectionManager leaderElectionManager = bean.getObject();

        assertInstanceOf(StandaloneLeaderElectionManager.class, leaderElectionManager);
    }

    @Test
    void testGetObjectCluster() throws Exception {
        final Map<String, String> clusterProperties = new LinkedHashMap<>();
        clusterProperties.put(NiFiProperties.CLUSTER_IS_NODE, Boolean.TRUE.toString());
        clusterProperties.put(NiFiProperties.CLUSTER_LEADER_ELECTION_IMPLEMENTATION, MockLeaderElectionManager.class.getSimpleName());

        final NiFiProperties properties = NiFiProperties.createBasicNiFiProperties(null, clusterProperties);

        bean.setProperties(properties);

        when(bundle.getClassLoader()).thenReturn(Thread.currentThread().getContextClassLoader());
        when(extensionManager.getBundles(eq(MockLeaderElectionManager.class.getName()))).thenReturn(Collections.singletonList(bundle));
        final ExtensionDefinition extension = new ExtensionDefinition(MockLeaderElectionManager.class.getName(), bundle, LeaderElectionManager.class);
        when(extensionManager.getExtensions(eq(LeaderElectionManager.class))).thenReturn(Collections.singleton(extension));

        final LeaderElectionManager leaderElectionManager = bean.getObject();

        assertInstanceOf(MockLeaderElectionManager.class, leaderElectionManager);
    }
}
@@ -0,0 +1,22 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.nifi.spring;

import org.apache.nifi.controller.leader.election.StandaloneLeaderElectionManager;

public class MockLeaderElectionManager extends StandaloneLeaderElectionManager {
}
@@ -0,0 +1,48 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements. See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License. You may obtain a copy of the License at
      http://www.apache.org/licenses/LICENSE-2.0
  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.apache.nifi</groupId>
        <artifactId>nifi-framework-kubernetes-bundle</artifactId>
        <version>2.0.0-SNAPSHOT</version>
    </parent>
    <artifactId>nifi-framework-kubernetes-leader-election</artifactId>
    <dependencies>
        <dependency>
            <groupId>org.apache.nifi</groupId>
            <artifactId>nifi-framework-api</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.nifi</groupId>
            <artifactId>nifi-utils</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.nifi</groupId>
            <artifactId>nifi-framework-leader-election-shared</artifactId>
            <version>2.0.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>org.apache.nifi</groupId>
            <artifactId>nifi-kubernetes-client</artifactId>
            <version>2.0.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>io.fabric8</groupId>
            <artifactId>kubernetes-client-api</artifactId>
        </dependency>
    </dependencies>
</project>
@@ -0,0 +1,316 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.nifi.kubernetes.leader.election;

import org.apache.nifi.controller.leader.election.LeaderElectionRole;
import org.apache.nifi.controller.leader.election.LeaderElectionStateChangeListener;
import org.apache.nifi.controller.leader.election.TrackedLeaderElectionManager;
import org.apache.nifi.kubernetes.client.KubernetesClientProvider;
import org.apache.nifi.kubernetes.client.NamespaceProvider;
import org.apache.nifi.kubernetes.client.ServiceAccountNamespaceProvider;
import org.apache.nifi.kubernetes.client.StandardKubernetesClientProvider;
import org.apache.nifi.kubernetes.leader.election.command.LeaderElectionCommandProvider;
import org.apache.nifi.kubernetes.leader.election.command.StandardLeaderElectionCommandProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Kubernetes Leader Election Manager implementation using Kubernetes Lease Resources
 */
public class KubernetesLeaderElectionManager extends TrackedLeaderElectionManager {
    private static final boolean INTERRUPT_ENABLED = true;

    private static final int SERVICE_THREADS = 4;

    private static final Logger logger = LoggerFactory.getLogger(KubernetesLeaderElectionManager.class);

    private static final Map<String, String> ROLE_NAMES;

    static {
        final Map<String, String> roleNames = new LinkedHashMap<>();
        for (final LeaderElectionRole leaderElectionRole : LeaderElectionRole.values()) {
            roleNames.put(leaderElectionRole.getRoleName(), leaderElectionRole.getRoleId());
        }
        ROLE_NAMES = Collections.unmodifiableMap(roleNames);
    }

    private final ExecutorService executorService;

    private final AtomicBoolean started = new AtomicBoolean();

    private final Map<String, Future<?>> roleCommands = new ConcurrentHashMap<>();

    private final Map<String, ParticipantRegistration> roleRegistrations = new ConcurrentHashMap<>();

    private final Map<String, String> roleLeaders = new ConcurrentHashMap<>();

    private final LeaderElectionCommandProvider leaderElectionCommandProvider;

    /**
     * Kubernetes Leader Election Manager default constructor
     */
    public KubernetesLeaderElectionManager() {
        executorService = createExecutorService();
        leaderElectionCommandProvider = createLeaderElectionCommandProvider();
    }

    /**
     * Start Manager and register current roles
     */
    @Override
    public void start() {
        if (started.get()) {
            logger.debug("Start requested when running");
        } else {
            started.getAndSet(true);
            logger.debug("Started");

            for (final ParticipantRegistration roleRegistration : roleRegistrations.values()) {
                register(roleRegistration.roleName, roleRegistration.listener, roleRegistration.participantId);
            }
        }
    }

    /**
     * Stop Manager and shutdown running commands
     */
    @Override
    public void stop() {
        try {
            leaderElectionCommandProvider.close();
        } catch (final IOException e) {
            logger.warn("Leader Election Command Factory close failed", e);
        }
        roleLeaders.clear();
        executorService.shutdown();
        started.getAndSet(false);
        logger.debug("Stopped");
    }

    /**
     * Register for Election or Observation based on presence of Participant ID and register for Leader when started
     *
     * @param roleName Role Name for registration
     * @param listener State Change Listener for Leader Events
     * @param participantId Participant ID or null when registering for Observation
     */
    @Override
    public synchronized void register(final String roleName, final LeaderElectionStateChangeListener listener, final String participantId) {
        requireRoleName(roleName);
        Objects.requireNonNull(listener, "Change Listener required");

        final ParticipantRegistration roleRegistration = new ParticipantRegistration(roleName, participantId, listener);
        roleRegistrations.put(roleName, roleRegistration);

        final boolean participating = isParticipating(participantId);
        if (participating) {
            logger.debug("Registered Participation for Election Role [{}] ID [{}]", roleName, participantId);
            if (started.get()) {
                registerLeaderElectionCommand(roleName, listener, participantId);
            }
        } else {
            logger.info("Registered Observation for Election Role [{}]", roleName);
        }
    }

    /**
     * Unregister for Leader Election of specified Role and cancel running command
     *
     * @param roleName Role Name to be removed from registration
     */
    @Override
    public synchronized void unregister(final String roleName) {
        requireRoleName(roleName);

        roleLeaders.remove(roleName);

        final ParticipantRegistration roleRegistration = roleRegistrations.remove(roleName);
        if (roleRegistration == null) {
            logger.info("Not registered for Election Role [{}]", roleName);
        } else {
            final Future<?> roleCommand = roleCommands.remove(roleName);
            if (roleCommand == null) {
                logger.warn("Leader Election Command not found Role [{}] ID [{}]", roleName, roleRegistration.participantId);
            } else {
                roleCommand.cancel(INTERRUPT_ENABLED);
            }

            logger.info("Unregistered for Election Role [{}] ID [{}]", roleName, roleRegistration.participantId);
        }
    }

    /**
     * Determine whether current node is participating in Leader Election for specified Role
     *
     * @param roleName Role Name to be evaluated
     * @return Participation status in Leader Election
     */
    @Override
    public boolean isActiveParticipant(final String roleName) {
        requireRoleName(roleName);
        final String participantId = getParticipantId(roleName);
        return isParticipating(participantId);
    }

    /**
     * Get Leader Identifier for Role
     *
     * @param roleName Role Name for requested Leader Identifier
     * @return Leader Identifier or empty when not found
     */
    @Override
    public Optional<String> getLeader(final String roleName) {
        requireRoleName(roleName);

        final String roleId = getRoleId(roleName);

        final long pollStarted = System.nanoTime();
        try {
            final Optional<String> leader = leaderElectionCommandProvider.findLeader(roleId);
            leader.ifPresent(leaderId -> setRoleLeader(roleName, leaderId));
            return leader;
        } finally {
            final long elapsed = System.nanoTime() - pollStarted;
            registerPollTime(elapsed);
        }
    }

    /**
     * Determine whether current node is the Leader for the specified Role
     *
     * @param roleName Role Name to be evaluated
     * @return Leader Status
     */
    @Override
    public boolean isLeader(final String roleName) {
        requireRoleName(roleName);
        final boolean leader;

        final String participantId = getParticipantId(roleName);
        if (participantId == null) {
            logger.debug("Role [{}] not participating in Leader election", roleName);
            leader = false;
        } else {
            final Optional<String> leaderAddress = getLeader(roleName);
            final String leaderId = leaderAddress.orElse(null);
            leader = participantId.equals(leaderId);
            if (leader) {
                logger.debug("Role [{}] Participant ID [{}] is Leader", roleName, participantId);
            } else {
                logger.debug("Role [{}] Participant ID [{}] not Leader", roleName, leaderId);
            }
        }
        return leader;
    }

    protected ExecutorService createExecutorService() {
        return Executors.newFixedThreadPool(SERVICE_THREADS, new NamedThreadFactory());
    }

    protected LeaderElectionCommandProvider createLeaderElectionCommandProvider() {
        final NamespaceProvider namespaceProvider = new ServiceAccountNamespaceProvider();
        final String namespace = namespaceProvider.getNamespace();
        final KubernetesClientProvider kubernetesClientProvider = new StandardKubernetesClientProvider();
        return new StandardLeaderElectionCommandProvider(kubernetesClientProvider, namespace);
    }

    private synchronized void registerLeaderElectionCommand(final String roleName, final LeaderElectionStateChangeListener listener, final String participantId) {
        final Future<?> currentRoleCommand = roleCommands.get(roleName);
        if (currentRoleCommand == null) {
            final String roleId = getRoleId(roleName);
            final Runnable leaderElectionCommand = leaderElectionCommandProvider.getCommand(
                    roleId,
                    participantId,
                    listener::onStartLeading,
                    listener::onStopLeading,
                    leaderId -> setRoleLeader(roleName, leaderId)
            );

            final Future<?> roleCommand = executorService.submit(leaderElectionCommand);
            roleCommands.put(roleName, roleCommand);
            logger.info("Registered command for Election Role [{}] ID [{}]", roleName, participantId);
        }
    }

    private void setRoleLeader(final String roleName, final String leaderId) {
        final String previousLeaderId = roleLeaders.put(roleName, leaderId);
        if (leaderId.equals(previousLeaderId)) {
            logger.debug("Role [{}] Leader [{}] not changed", roleName, leaderId);
        } else {
            logger.debug("Role [{}] Leader [{}] Previous [{}] changed", roleName, leaderId, previousLeaderId);
            onLeaderChanged(roleName);
        }
    }

    private String getParticipantId(final String roleName) {
        final ParticipantRegistration roleRegistration = roleRegistrations.get(roleName);
        return roleRegistration == null ? null : roleRegistration.participantId;
    }

    private void requireRoleName(final String roleName) {
        if (roleName == null || roleName.isEmpty()) {
            throw new IllegalArgumentException("Role Name required");
        }
    }

    private String getRoleId(final String roleName) {
        final String roleId = ROLE_NAMES.get(roleName);
        if (roleId == null) {
            throw new IllegalArgumentException(String.format("Role Name [%s] not supported", roleName));
        }
        return roleId;
    }

    private static class ParticipantRegistration {
        private final String roleName;

        private final String participantId;

        private final LeaderElectionStateChangeListener listener;

        private ParticipantRegistration(final String roleName, final String participantId, final LeaderElectionStateChangeListener listener) {
            this.roleName = roleName;
            this.participantId = participantId;
            this.listener = listener;
        }
    }

    private static class NamedThreadFactory implements ThreadFactory {
        private final ThreadFactory defaultFactory = Executors.defaultThreadFactory();

        @Override
        public Thread newThread(final Runnable runnable) {
            final Thread thread = defaultFactory.newThread(runnable);
            thread.setName(KubernetesLeaderElectionManager.class.getSimpleName());
            thread.setDaemon(true);
            return thread;
        }
    }
}
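The class above exposes a small lifecycle: register participation per role, start the manager, poll leadership, then unregister and stop. The following is a minimal usage sketch, not part of the commit: it assumes the code runs inside a Kubernetes Pod whose Service Account can read and write Lease resources (the constructor resolves the namespace from the Service Account), it takes the role name from LeaderElectionRole.CLUSTER_COORDINATOR in the framework API, and the participant identifier and listener are hypothetical placeholders.

import org.apache.nifi.controller.leader.election.LeaderElectionRole;
import org.apache.nifi.controller.leader.election.LeaderElectionStateChangeListener;
import org.apache.nifi.kubernetes.leader.election.KubernetesLeaderElectionManager;

import java.util.Optional;

class KubernetesLeaderElectionSketch {
    void participate(final LeaderElectionStateChangeListener listener) {
        // Constructing the manager resolves the namespace from the Service Account,
        // so this sketch must run inside a Kubernetes Pod with Lease permissions
        final KubernetesLeaderElectionManager manager = new KubernetesLeaderElectionManager();

        final String roleName = LeaderElectionRole.CLUSTER_COORDINATOR.getRoleName();
        final String participantId = "node-0.example.internal"; // hypothetical node address

        // Registrations made before start() are replayed when start() is called
        manager.register(roleName, listener, participantId);
        manager.start();

        // Poll election state backed by the Kubernetes Lease for the role
        final boolean leader = manager.isLeader(roleName);
        final Optional<String> leaderId = manager.getLeader(roleName);

        manager.unregister(roleName);
        manager.stop();
    }
}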
@@ -0,0 +1,104 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.nifi.kubernetes.leader.election.command;

import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.extended.leaderelection.LeaderCallbacks;
import io.fabric8.kubernetes.client.extended.leaderelection.LeaderElectionConfig;
import io.fabric8.kubernetes.client.extended.leaderelection.LeaderElectionConfigBuilder;
import io.fabric8.kubernetes.client.extended.leaderelection.LeaderElector;
import io.fabric8.kubernetes.client.extended.leaderelection.resourcelock.LeaseLock;
import io.fabric8.kubernetes.client.extended.leaderelection.resourcelock.Lock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.Objects;
import java.util.function.Consumer;

/**
 * Runnable command for starting the Leader Election process
 */
class LeaderElectionCommand implements Runnable {
    /** Leader Lease Duration based on kube-scheduler default setting */
    private static final Duration LEASE_DURATION = Duration.ofSeconds(15);

    /** Leader Lease Renew Deadline less than Lease Duration based on kube-scheduler default setting */
    private static final Duration RENEW_DEADLINE = Duration.ofSeconds(10);

    /** Lease Retry Period based on kube-scheduler default setting */
    private static final Duration RETRY_PERIOD = Duration.ofSeconds(2);

    private static final Logger logger = LoggerFactory.getLogger(LeaderElectionCommand.class);

    private final KubernetesClient kubernetesClient;

    private final LeaderCallbacks leaderCallbacks;

    private final String name;

    private final Lock lock;

    LeaderElectionCommand(
            final KubernetesClient kubernetesClient,
            final String namespace,
            final String name,
            final String identity,
            final Runnable onStartLeading,
            final Runnable onStopLeading,
            final Consumer<String> onNewLeader
    ) {
        this.kubernetesClient = kubernetesClient;
        this.name = Objects.requireNonNull(name, "Name required");
        this.lock = new LeaseLock(namespace, name, identity);
        this.leaderCallbacks = new LeaderCallbacks(onStartLeading, onStopLeading, onNewLeader);
    }

    @Override
    public void run() {
        logger.info("Election Name [{}] ID [{}] Participation STARTED", name, lock.identity());

        while (!Thread.currentThread().isInterrupted()) {
            runLeaderElector();
        }

        logger.info("Election Name [{}] ID [{}] Participation STOPPED", name, lock.identity());
    }

    private void runLeaderElector() {
        logger.info("Election Name [{}] ID [{}] Command STARTED", name, lock.identity());
        try {
            final LeaderElectionConfig leaderElectionConfig = getLeaderElectionConfig();
            final LeaderElector leaderElector = kubernetesClient.leaderElector().withConfig(leaderElectionConfig).build();
            leaderElector.run();
            logger.info("Election Name [{}] ID [{}] Command STOPPED", name, lock.identity());
        } catch (final RuntimeException e) {
            logger.error("Election Name [{}] ID [{}] Command FAILED", name, lock.identity(), e);
        }
    }

    private LeaderElectionConfig getLeaderElectionConfig() {
        return new LeaderElectionConfigBuilder()
                .withName(name)
                .withLeaderCallbacks(leaderCallbacks)
                .withLock(lock)
                .withLeaseDuration(LEASE_DURATION)
                .withRenewDeadline(RENEW_DEADLINE)
                .withRetryPeriod(RETRY_PERIOD)
                .build();
    }
}
@@ -0,0 +1,52 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.nifi.kubernetes.leader.election.command;

import java.io.Closeable;
import java.util.Optional;
import java.util.function.Consumer;

/**
 * Provider abstraction for Kubernetes Leader Election Commands with callbacks
 */
public interface LeaderElectionCommandProvider extends Closeable {
    /**
     * Get Command with required properties
     *
     * @param name Election Name
     * @param identity Election Participant Identity
     * @param onStartLeading Callback run when elected as leader
     * @param onStopLeading Callback run when no longer elected as leader
     * @param onNewLeader Callback run with identification of new leader
     * @return Runnable Command
     */
    Runnable getCommand(
            String name,
            String identity,
            Runnable onStartLeading,
            Runnable onStopLeading,
            Consumer<String> onNewLeader
    );

    /**
     * Find Leader Identifier for specified Election Name
     *
     * @param name Election Name
     * @return Leader Identifier or empty when not found
     */
    Optional<String> findLeader(String name);
}
@@ -0,0 +1,130 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.nifi.kubernetes.leader.election.command;

import io.fabric8.kubernetes.api.model.coordination.v1.Lease;
import io.fabric8.kubernetes.api.model.coordination.v1.LeaseSpec;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientException;
import org.apache.nifi.kubernetes.client.KubernetesClientProvider;

import java.net.HttpURLConnection;
import java.time.ZonedDateTime;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Consumer;

/**
 * Standard implementation of Leader Election Command Provider with configurable namespace property
 */
public class StandardLeaderElectionCommandProvider implements LeaderElectionCommandProvider {
    private final KubernetesClient kubernetesClient;

    private final String namespace;

    public StandardLeaderElectionCommandProvider(final KubernetesClientProvider kubernetesClientProvider, final String namespace) {
        this.kubernetesClient = Objects.requireNonNull(kubernetesClientProvider).getKubernetesClient();
        this.namespace = Objects.requireNonNull(namespace);
    }

    /**
     * Get Leader Election Command with configured namespace and client provider
     *
     * @param name Election Name
     * @param identity Election Participant Identity
     * @param onStartLeading Callback run when elected as leader
     * @param onStopLeading Callback run when no longer elected as leader
     * @param onNewLeader Callback run with identification of new leader
     * @return Leader Election Command
     */
    @Override
    public Runnable getCommand(
            final String name,
            final String identity,
            final Runnable onStartLeading,
            final Runnable onStopLeading,
            final Consumer<String> onNewLeader
    ) {
        return new LeaderElectionCommand(
                kubernetesClient,
                namespace,
                name,
                identity,
                onStartLeading,
                onStopLeading,
                onNewLeader
        );
    }

    /**
     * Find Leader Identifier for specified Election Name
     *
     * @param name Election Name
     * @return Leader Identifier or empty when not found or lease expired
     */
    @Override
    public Optional<String> findLeader(final String name) {
        try {
            final Lease lease = kubernetesClient.leases().inNamespace(namespace).withName(name).get();
            final String currentHolderIdentity = getCurrentHolderIdentity(lease);
            return Optional.ofNullable(currentHolderIdentity);
        } catch (final KubernetesClientException e) {
            if (isNotFound(e)) {
                return Optional.empty();
            } else {
                throw e;
            }
        }
    }

    /**
     * Close Kubernetes Client
     */
    @Override
    public void close() {
        kubernetesClient.close();
    }

    private boolean isNotFound(final KubernetesClientException e) {
        return HttpURLConnection.HTTP_NOT_FOUND == e.getCode();
    }

    private String getCurrentHolderIdentity(final Lease lease) {
        final String holderIdentity;

        if (lease == null) {
            holderIdentity = null;
        } else {
            final LeaseSpec spec = lease.getSpec();
            final ZonedDateTime expiration = getExpiration(spec);
            final ZonedDateTime now = ZonedDateTime.now();
            if (now.isAfter(expiration)) {
                holderIdentity = null;
            } else {
                holderIdentity = spec.getHolderIdentity();
            }
        }

        return holderIdentity;
    }

    private ZonedDateTime getExpiration(final LeaseSpec leaseSpec) {
        final ZonedDateTime renewTime = leaseSpec.getRenewTime();
        final Integer leaseDuration = leaseSpec.getLeaseDurationSeconds();
        return renewTime.plusSeconds(leaseDuration);
    }
}
@@ -0,0 +1,15 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
org.apache.nifi.kubernetes.leader.election.KubernetesLeaderElectionManager
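The entry above appears to follow the java.util.ServiceLoader provider-configuration convention: a file named after the org.apache.nifi.controller.leader.election.LeaderElectionManager interface, listing the implementation class so extension discovery can instantiate it through its no-arg constructor. As a rough illustration of that general mechanism (NiFi's ExtensionManager performs the actual discovery across NARs rather than calling ServiceLoader directly), a minimal sketch:

import java.util.ServiceLoader;

import org.apache.nifi.controller.leader.election.LeaderElectionManager;

class LeaderElectionManagerDiscoverySketch {
    static void listImplementations() {
        // Reads META-INF/services/org.apache.nifi.controller.leader.election.LeaderElectionManager
        // from the classpath and instantiates each listed implementation
        for (final LeaderElectionManager manager : ServiceLoader.load(LeaderElectionManager.class)) {
            System.out.println(manager.getClass().getName());
        }
    }
}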
@@ -0,0 +1,286 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.nifi.kubernetes.leader.election;

import org.apache.nifi.controller.leader.election.LeaderElectionRole;
import org.apache.nifi.controller.leader.election.LeaderElectionStateChangeListener;
import org.apache.nifi.kubernetes.leader.election.command.LeaderElectionCommandProvider;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;

import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.function.Consumer;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.isA;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.verify;

@ExtendWith(MockitoExtension.class)
class KubernetesLeaderElectionManagerTest {

    private static final LeaderElectionRole LEADER_ELECTION_ROLE = LeaderElectionRole.CLUSTER_COORDINATOR;

    private static final String ROLE = LEADER_ELECTION_ROLE.getRoleName();

    private static final String PARTICIPANT_ID = "Node-0";

    @Mock
    LeaderElectionStateChangeListener changeListener;

    @Mock
    ExecutorService executorService;

    @Mock
    Future<?> future;

    @Captor
    ArgumentCaptor<Runnable> commandCaptor;

    ManagedLeaderElectionCommandProvider leaderElectionCommandProvider;

    KubernetesLeaderElectionManager manager;

    @BeforeEach
    void setManager() {
        leaderElectionCommandProvider = new ManagedLeaderElectionCommandProvider();
        manager = new MockKubernetesLeaderElectionManager();
    }

    @Test
    void testStartStartStop() {
        manager.start();
        manager.start();
        manager.stop();

        assertTrue(leaderElectionCommandProvider.closed);
    }

    @Test
    void testStartIsLeaderFalseStop() {
        manager.start();

        final boolean leader = manager.isLeader(ROLE);
        assertFalse(leader);

        manager.stop();

        assertTrue(leaderElectionCommandProvider.closed);
    }

    @Test
    void testStartRegisterParticipatingStartLeading() {
        manager.start();

        setSubmitStartLeading();

        manager.register(ROLE, changeListener, PARTICIPANT_ID);

        captureRunCommand();
        assertActiveParticipantLeader();
    }

    @Test
    void testRegisterParticipatingStartLeading() {
        manager.register(ROLE, changeListener, PARTICIPANT_ID);

        setSubmitStartLeading();

        manager.start();

        captureRunCommand();
        assertActiveParticipantLeader();
    }

    @Test
    void testRegisterParticipatingStartLeadingUnregister() {
        manager.register(ROLE, changeListener, PARTICIPANT_ID);

        setSubmitStartLeading();

        manager.start();

        captureRunCommand();
        assertActiveParticipantLeader();

        manager.unregister(ROLE);
        leaderElectionCommandProvider.leader = null;

        assertNotActiveParticipantNotLeader();

        assertEquals(LEADER_ELECTION_ROLE.getRoleId(), leaderElectionCommandProvider.findLeaderName);
    }

    @Test
    void testIsLeaderNotRegistered() {
        final boolean leader = manager.isLeader(ROLE);

        assertFalse(leader);
    }

    @Test
    void testRegisterParticipatingIsActiveParticipantTrue() {
        manager.register(ROLE, changeListener, PARTICIPANT_ID);

        final boolean activeParticipant = manager.isActiveParticipant(ROLE);
        assertTrue(activeParticipant);
    }

    @Test
    void testRegisterParticipatingIsActiveParticipantTrueUnregister() {
        manager.register(ROLE, changeListener, PARTICIPANT_ID);

        final boolean registeredActiveParticipant = manager.isActiveParticipant(ROLE);
        assertTrue(registeredActiveParticipant);

        manager.unregister(ROLE);

        final boolean unregisteredActiveParticipant = manager.isActiveParticipant(ROLE);
        assertFalse(unregisteredActiveParticipant);
    }

    @Test
    void testRegisterNotParticipatingIsActiveParticipantFalse() {
        manager.register(ROLE, changeListener);

        final boolean activeParticipant = manager.isActiveParticipant(ROLE);
        assertFalse(activeParticipant);
    }

    @Test
    void testUnregisterRoleNameRequired() {
        assertThrows(IllegalArgumentException.class, () -> manager.unregister(null));
    }

    @Test
    void testUnregisterNotRegistered() {
        manager.unregister(ROLE);

        final boolean unregisteredActiveParticipant = manager.isActiveParticipant(ROLE);
        assertFalse(unregisteredActiveParticipant);
    }

    private void setSubmitStartLeading() {
        doReturn(future).when(executorService).submit(isA(Runnable.class));
        leaderElectionCommandProvider.runStartLeading = true;
        leaderElectionCommandProvider.runNewLeader = true;
        leaderElectionCommandProvider.runStopLeading = true;
        leaderElectionCommandProvider.leader = PARTICIPANT_ID;
    }

    private void captureRunCommand() {
        verify(executorService).submit(commandCaptor.capture());
        commandCaptor.getValue().run();
    }

    private void assertActiveParticipantLeader() {
        final boolean activeParticipant = manager.isActiveParticipant(ROLE);
        assertTrue(activeParticipant);

        final boolean leader = manager.isLeader(ROLE);
        assertTrue(leader);

        final Optional<String> leaderId = manager.getLeader(ROLE);
        assertTrue(leaderId.isPresent());
        assertEquals(PARTICIPANT_ID, leaderId.get());

        assertEquals(LEADER_ELECTION_ROLE.getRoleId(), leaderElectionCommandProvider.name);
    }

    private void assertNotActiveParticipantNotLeader() {
        final boolean activeParticipant = manager.isActiveParticipant(ROLE);
        assertFalse(activeParticipant);

        final boolean leader = manager.isLeader(ROLE);
        assertFalse(leader);

        final Optional<String> leaderId = manager.getLeader(ROLE);
        assertFalse(leaderId.isPresent(), "Leader found for unregistered election");
    }

    private class MockKubernetesLeaderElectionManager extends KubernetesLeaderElectionManager {
        @Override
        protected ExecutorService createExecutorService() {
            return executorService;
        }

        @Override
        protected LeaderElectionCommandProvider createLeaderElectionCommandProvider() {
            return leaderElectionCommandProvider;
        }
    }

    private static class ManagedLeaderElectionCommandProvider implements LeaderElectionCommandProvider {

        private String name;

        private String findLeaderName;

        private boolean runStartLeading;

        private boolean runStopLeading;

        private boolean runNewLeader;

        private boolean closed;

        private String leader;

        @Override
        public Runnable getCommand(
                final String name,
                final String identity,
                final Runnable onStartLeading,
                final Runnable onStopLeading,
                final Consumer<String> onNewLeader
        ) {
            this.name = name;
            return () -> {
                if (runStartLeading) {
                    onStartLeading.run();
                }
                if (runNewLeader) {
                    onNewLeader.accept(identity);
                }
                if (runStopLeading) {
                    onStopLeading.run();
                }
            };
        }

        @Override
        public Optional<String> findLeader(final String name) {
            this.findLeaderName = name;
            return Optional.ofNullable(leader);
        }

        @Override
        public void close() {
            closed = true;
        }
    }
}
@ -0,0 +1,46 @@
|
||||||
|
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||||
|
<!--
|
||||||
|
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||||
|
contributor license agreements. See the NOTICE file distributed with
|
||||||
|
this work for additional information regarding copyright ownership.
|
||||||
|
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||||
|
(the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
-->
|
||||||
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
<parent>
|
||||||
|
<groupId>org.apache.nifi</groupId>
|
||||||
|
<artifactId>nifi-framework-kubernetes-bundle</artifactId>
|
||||||
|
<version>2.0.0-SNAPSHOT</version>
|
||||||
|
</parent>
|
||||||
|
<artifactId>nifi-framework-kubernetes-nar</artifactId>
|
||||||
|
<packaging>nar</packaging>
|
||||||
|
<dependencies>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.nifi</groupId>
|
||||||
|
<artifactId>nifi-framework-nar</artifactId>
|
||||||
|
<version>2.0.0-SNAPSHOT</version>
|
||||||
|
<type>nar</type>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.nifi</groupId>
|
||||||
|
<artifactId>nifi-framework-kubernetes-leader-election</artifactId>
|
||||||
|
<version>2.0.0-SNAPSHOT</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.nifi</groupId>
|
||||||
|
<artifactId>nifi-framework-kubernetes-state-provider</artifactId>
|
||||||
|
<version>2.0.0-SNAPSHOT</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>io.fabric8</groupId>
|
||||||
|
<artifactId>kubernetes-client</artifactId>
|
||||||
|
</dependency>
|
||||||
|
</dependencies>
|
||||||
|
</project>
@@ -0,0 +1,52 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements. See the NOTICE file distributed with
    this work for additional information regarding copyright ownership.
    The ASF licenses this file to You under the Apache License, Version 2.0
    (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at
        http://www.apache.org/licenses/LICENSE-2.0
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.apache.nifi</groupId>
        <artifactId>nifi-framework-kubernetes-bundle</artifactId>
        <version>2.0.0-SNAPSHOT</version>
    </parent>
    <artifactId>nifi-framework-kubernetes-state-provider</artifactId>
    <dependencies>
        <dependency>
            <groupId>org.apache.nifi</groupId>
            <artifactId>nifi-api</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.nifi</groupId>
            <artifactId>nifi-utils</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.nifi</groupId>
            <artifactId>nifi-framework-api</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.nifi</groupId>
            <artifactId>nifi-kubernetes-client</artifactId>
            <version>2.0.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>io.fabric8</groupId>
            <artifactId>kubernetes-client-api</artifactId>
        </dependency>
        <dependency>
            <groupId>io.fabric8</groupId>
            <artifactId>kubernetes-server-mock</artifactId>
            <scope>test</scope>
        </dependency>
    </dependencies>
</project>
|
|
@ -0,0 +1,312 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||||
|
* contributor license agreements. See the NOTICE file distributed with
|
||||||
|
* this work for additional information regarding copyright ownership.
|
||||||
|
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||||
|
* (the "License"); you may not use this file except in compliance with
|
||||||
|
* the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.nifi.kubernetes.state.provider;
|
||||||
|
|
||||||
|
import io.fabric8.kubernetes.api.model.ConfigMap;
|
||||||
|
import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
|
||||||
|
import io.fabric8.kubernetes.api.model.ObjectMeta;
|
||||||
|
import io.fabric8.kubernetes.api.model.StatusDetails;
|
||||||
|
import io.fabric8.kubernetes.client.KubernetesClient;
|
||||||
|
import io.fabric8.kubernetes.client.KubernetesClientException;
|
||||||
|
import io.fabric8.kubernetes.client.dsl.Resource;
|
||||||
|
import org.apache.nifi.components.AbstractConfigurableComponent;
|
||||||
|
import org.apache.nifi.components.state.Scope;
|
||||||
|
import org.apache.nifi.components.state.StateMap;
|
||||||
|
import org.apache.nifi.components.state.StateProvider;
|
||||||
|
import org.apache.nifi.components.state.StateProviderInitializationContext;
|
||||||
|
import org.apache.nifi.kubernetes.client.ServiceAccountNamespaceProvider;
|
||||||
|
import org.apache.nifi.kubernetes.client.StandardKubernetesClientProvider;
|
||||||
|
import org.apache.nifi.logging.ComponentLog;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.net.HttpURLConnection;
|
||||||
|
import java.nio.charset.Charset;
|
||||||
|
import java.nio.charset.StandardCharsets;
|
||||||
|
import java.util.Base64;
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.LinkedHashMap;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Optional;
|
||||||
|
import java.util.concurrent.atomic.AtomicBoolean;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* State Provider implementation based on Kubernetes ConfigMaps with Base64 encoded keys to meet Kubernetes constraints
|
||||||
|
*/
|
||||||
|
public class KubernetesConfigMapStateProvider extends AbstractConfigurableComponent implements StateProvider {
|
||||||
|
private static final Scope[] SUPPORTED_SCOPES = { Scope.CLUSTER };
|
||||||
|
|
||||||
|
private static final Charset KEY_CHARACTER_SET = StandardCharsets.UTF_8;
|
||||||
|
|
||||||
|
private static final String CONFIG_MAP_NAME_FORMAT = "nifi-component-%s";
|
||||||
|
|
||||||
|
/** Encode ConfigMap keys using URL Encoder without padding characters for compliance with Kubernetes naming */
|
||||||
|
private static final Base64.Encoder encoder = Base64.getUrlEncoder().withoutPadding();
|
||||||
|
|
||||||
|
private static final Base64.Decoder decoder = Base64.getUrlDecoder();
|
||||||
|
|
||||||
|
private final AtomicBoolean enabled = new AtomicBoolean();
|
||||||
|
|
||||||
|
private KubernetesClient kubernetesClient;
|
||||||
|
|
||||||
|
private String namespace;
|
||||||
|
|
||||||
|
private String identifier;
|
||||||
|
|
||||||
|
private ComponentLog logger;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get configured component identifier
|
||||||
|
*
|
||||||
|
* @return Component Identifier
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public String getIdentifier() {
|
||||||
|
return identifier;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Initialize Provider using configured properties
|
||||||
|
*
|
||||||
|
* @param context Initialization Context
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public void initialize(final StateProviderInitializationContext context) {
|
||||||
|
this.identifier = context.getIdentifier();
|
||||||
|
this.logger = context.getLogger();
|
||||||
|
this.kubernetesClient = getKubernetesClient();
|
||||||
|
this.namespace = new ServiceAccountNamespaceProvider().getNamespace();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Shutdown Provider
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public void shutdown() {
|
||||||
|
kubernetesClient.close();
|
||||||
|
logger.info("Provider shutdown");
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Set State as ConfigMap based on Component Identifier
|
||||||
|
*
|
||||||
|
* @param state State Map
|
||||||
|
* @param componentId Component Identifier
|
||||||
|
* @throws IOException Thrown on failure to set State Map
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public void setState(final Map<String, String> state, final String componentId) throws IOException {
|
||||||
|
try {
|
||||||
|
final ConfigMap configMap = createConfigMapBuilder(state, componentId).build();
|
||||||
|
final ConfigMap configMapCreated = kubernetesClient.configMaps().resource(configMap).createOrReplace();
|
||||||
|
final Optional<String> version = getVersion(configMapCreated);
|
||||||
|
logger.debug("Set State Component ID [{}] Version [{}]", componentId, version);
|
||||||
|
} catch (final KubernetesClientException e) {
|
||||||
|
if (isNotFound(e.getCode())) {
|
||||||
|
logger.debug("State not found for Component ID [{}]", componentId, e);
|
||||||
|
} else {
|
||||||
|
throw new IOException(String.format("Set failed for Component ID [%s]", componentId), e);
|
||||||
|
}
|
||||||
|
} catch (final RuntimeException e) {
|
||||||
|
throw new IOException(String.format("Set failed for Component ID [%s]", componentId), e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get State Map for Component Identifier
|
||||||
|
*
|
||||||
|
* @param componentId Component Identifier of State to be retrieved
|
||||||
|
* @return State Map
|
||||||
|
* @throws IOException Thrown on failure to get State Map
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public StateMap getState(final String componentId) throws IOException {
|
||||||
|
try {
|
||||||
|
final ConfigMap configMap = configMapResource(componentId).get();
|
||||||
|
final Map<String, String> data = configMap == null ? Collections.emptyMap() : getDecodedMap(configMap.getData());
|
||||||
|
final Optional<String> version = configMap == null ? Optional.empty() : getVersion(configMap);
|
||||||
|
return new StandardStateMap(data, version);
|
||||||
|
} catch (final RuntimeException e) {
|
||||||
|
throw new IOException(String.format("Get failed for Component ID [%s]", componentId), e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Replace State ConfigMap with new State based on current resource version
|
||||||
|
*
|
||||||
|
* @param currentState Current State Map with version
|
||||||
|
* @param state New State Map
|
||||||
|
* @param componentId Component Identifier
|
||||||
|
* @return Replace operation status
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public boolean replace(final StateMap currentState, final Map<String, String> state, final String componentId) throws IOException {
|
||||||
|
final ConfigMapBuilder configMapBuilder = createConfigMapBuilder(state, componentId);
|
||||||
|
final Optional<String> stateVersion = currentState.getStateVersion();
|
||||||
|
if (stateVersion.isPresent()) {
|
||||||
|
final String resourceVersion = stateVersion.get();
|
||||||
|
configMapBuilder.editOrNewMetadata().withResourceVersion(resourceVersion);
|
||||||
|
}
|
||||||
|
final ConfigMap configMap = configMapBuilder.build();
|
||||||
|
|
||||||
|
try {
|
||||||
|
final ConfigMap configMapReplaced = kubernetesClient.configMaps().resource(configMap).replace();
|
||||||
|
final Optional<String> version = getVersion(configMapReplaced);
|
||||||
|
logger.debug("Replaced State Component ID [{}] Version [{}]", componentId, version);
|
||||||
|
return true;
|
||||||
|
} catch (final KubernetesClientException e) {
|
||||||
|
if (isNotFoundOrConflict(e.getCode())) {
|
||||||
|
logger.debug("Replace State Failed Component ID [{}] Version [{}]", componentId, stateVersion, e);
|
||||||
|
return false;
|
||||||
|
} else {
|
||||||
|
throw new IOException(String.format("Replace failed for Component ID [%s]", componentId), e);
|
||||||
|
}
|
||||||
|
} catch (final RuntimeException e) {
|
||||||
|
throw new IOException(String.format("Replace failed for Component ID [%s]", componentId), e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Clear state information for specified Component Identifier
|
||||||
|
*
|
||||||
|
* @param componentId the id of the component for which state is being cleared
|
||||||
|
* @throws IOException Thrown on failure to clear state for Component Identifier
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public void clear(final String componentId) throws IOException {
|
||||||
|
try {
|
||||||
|
setState(Collections.emptyMap(), componentId);
|
||||||
|
} catch (final RuntimeException e) {
|
||||||
|
throw new IOException(String.format("Clear failed for Component ID [%s]", componentId), e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Remove state information for specified Component Identifier
|
||||||
|
*
|
||||||
|
* @param componentId Identifier of component removed from the configuration
|
||||||
|
* @throws IOException Thrown on failure to remove state for Component Identifier
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public void onComponentRemoved(final String componentId) throws IOException {
|
||||||
|
try {
|
||||||
|
final List<StatusDetails> deleteStatus = configMapResource(componentId).delete();
|
||||||
|
logger.debug("Config Map [{}] deleted {}", componentId, deleteStatus);
|
||||||
|
} catch (final RuntimeException e) {
|
||||||
|
throw new IOException(String.format("Remove failed for Component ID [%s]", componentId), e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Enable Provider
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public void enable() {
|
||||||
|
enabled.getAndSet(true);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Disable Provider
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public void disable() {
|
||||||
|
enabled.getAndSet(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get Enabled status
|
||||||
|
*
|
||||||
|
* @return Enabled status
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public boolean isEnabled() {
|
||||||
|
return enabled.get();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get Supported Scopes returns CLUSTER
|
||||||
|
*
|
||||||
|
* @return Supported Scopes including CLUSTER
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public Scope[] getSupportedScopes() {
|
||||||
|
return SUPPORTED_SCOPES;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get Kubernetes Client using standard configuration
|
||||||
|
*
|
||||||
|
* @return Kubernetes Client
|
||||||
|
*/
|
||||||
|
protected KubernetesClient getKubernetesClient() {
|
||||||
|
return new StandardKubernetesClientProvider().getKubernetesClient();
|
||||||
|
}
|
||||||
|
|
||||||
|
private Resource<ConfigMap> configMapResource(final String componentId) {
|
||||||
|
final String name = getConfigMapName(componentId);
|
||||||
|
return kubernetesClient.configMaps().inNamespace(namespace).withName(name);
|
||||||
|
}
|
||||||
|
|
||||||
|
private ConfigMapBuilder createConfigMapBuilder(final Map<String, String> state, final String componentId) {
|
||||||
|
final Map<String, String> encodedData = getEncodedMap(state);
|
||||||
|
final String name = getConfigMapName(componentId);
|
||||||
|
return new ConfigMapBuilder()
|
||||||
|
.withNewMetadata()
|
||||||
|
.withNamespace(namespace)
|
||||||
|
.withName(name)
|
||||||
|
.endMetadata()
|
||||||
|
.withData(encodedData);
|
||||||
|
}
|
||||||
|
|
||||||
|
private String getConfigMapName(final String componentId) {
|
||||||
|
return String.format(CONFIG_MAP_NAME_FORMAT, componentId);
|
||||||
|
}
|
||||||
|
|
||||||
|
private Optional<String> getVersion(final ConfigMap configMap) {
|
||||||
|
final ObjectMeta metadata = configMap.getMetadata();
|
||||||
|
final String resourceVersion = metadata.getResourceVersion();
|
||||||
|
return Optional.ofNullable(resourceVersion);
|
||||||
|
}
|
||||||
|
|
||||||
|
private Map<String, String> getEncodedMap(final Map<String, String> stateMap) {
|
||||||
|
final Map<String, String> encodedMap = new LinkedHashMap<>();
|
||||||
|
stateMap.forEach((key, value) -> {
|
||||||
|
final byte[] keyBytes = key.getBytes(KEY_CHARACTER_SET);
|
||||||
|
final String encodedKey = encoder.encodeToString(keyBytes);
|
||||||
|
encodedMap.put(encodedKey, value);
|
||||||
|
});
|
||||||
|
return encodedMap;
|
||||||
|
}
|
||||||
|
|
||||||
|
private Map<String, String> getDecodedMap(final Map<String, String> configMap) {
|
||||||
|
final Map<String, String> decodedMap = new LinkedHashMap<>();
|
||||||
|
configMap.forEach((key, value) -> {
|
||||||
|
final byte[] keyBytes = decoder.decode(key);
|
||||||
|
final String decodedKey = new String(keyBytes, KEY_CHARACTER_SET);
|
||||||
|
decodedMap.put(decodedKey, value);
|
||||||
|
});
|
||||||
|
return decodedMap;
|
||||||
|
}
|
||||||
|
|
||||||
|
private boolean isNotFound(final int code) {
|
||||||
|
return HttpURLConnection.HTTP_NOT_FOUND == code;
|
||||||
|
}
|
||||||
|
|
||||||
|
private boolean isNotFoundOrConflict(final int code) {
|
||||||
|
return isNotFound(code) || HttpURLConnection.HTTP_CONFLICT == code;
|
||||||
|
}
|
||||||
|
}
@@ -0,0 +1,80 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.nifi.kubernetes.state.provider;

import org.apache.nifi.components.state.StateMap;

import java.util.Collections;
import java.util.Map;
import java.util.Optional;

/**
 * Standard implementation of StateMap
 */
class StandardStateMap implements StateMap {
    private static final int EMPTY_VERSION = -1;

    private final Map<String, String> data;

    private final Optional<String> version;

    StandardStateMap(final Map<String, String> data, final Optional<String> version) {
        this.data = Collections.unmodifiableMap(data);
        this.version = version;
    }

    /**
     * Get Version returns String.hashCode() or -1 on empty for compatibility
     *
     * @return Version
     */
    @Override
    public long getVersion() {
        return version.map(stateVersion -> stateVersion.hashCode()).orElse(EMPTY_VERSION);
    }

    /**
     * Get State Version
     *
     * @return State Version or empty when not known
     */
    @Override
    public Optional<String> getStateVersion() {
        return version;
    }

    /**
     * Get Value from State Map
     *
     * @param key the key whose value should be retrieved
     * @return Value or null when not found
     */
    @Override
    public String get(final String key) {
        return data.get(key);
    }

    /**
     * Get State Map
     *
     * @return State Map
     */
    @Override
    public Map<String, String> toMap() {
        return data;
    }
}
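
The Optional state version surfaced by StandardStateMap is what lets StateProvider.replace() act as an optimistic compare-and-swap against the stored version. The following caller-side sketch illustrates the intended read-modify-replace pattern; the provider variable, the "count" key, and the retry loop are illustrative assumptions rather than part of this commit, and the sketch assumes the component state was previously initialized with setState().

// Illustrative sketch only: assumes a configured StateProvider named "provider" and existing state for the component.
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.nifi.components.state.StateMap;
import org.apache.nifi.components.state.StateProvider;

class OptimisticStateUpdateSketch {
    static void incrementCounter(final StateProvider provider, final String componentId) throws IOException {
        boolean replaced = false;
        while (!replaced) {
            final StateMap current = provider.getState(componentId);              // read current values and version
            final Map<String, String> updated = new HashMap<>(current.toMap());
            final long count = Long.parseLong(updated.getOrDefault("count", "0"));
            updated.put("count", Long.toString(count + 1));
            // replace() returns true only when the stored version still matches current.getStateVersion();
            // on a conflict the loop re-reads the state and retries
            replaced = provider.replace(current, updated, componentId);
        }
    }
}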
@@ -0,0 +1,15 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
org.apache.nifi.kubernetes.state.provider.KubernetesConfigMapStateProvider
|
|
@ -0,0 +1,257 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||||
|
* contributor license agreements. See the NOTICE file distributed with
|
||||||
|
* this work for additional information regarding copyright ownership.
|
||||||
|
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||||
|
* (the "License"); you may not use this file except in compliance with
|
||||||
|
* the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.nifi.kubernetes.state.provider;
|
||||||
|
|
||||||
|
import io.fabric8.kubernetes.api.model.ConfigMap;
|
||||||
|
import io.fabric8.kubernetes.api.model.ConfigMapList;
|
||||||
|
import io.fabric8.kubernetes.client.KubernetesClient;
|
||||||
|
import io.fabric8.kubernetes.client.server.mock.EnableKubernetesMockClient;
|
||||||
|
import io.fabric8.kubernetes.client.server.mock.KubernetesMockServer;
|
||||||
|
import io.fabric8.kubernetes.client.server.mock.KubernetesMockServerExtension;
|
||||||
|
import io.fabric8.mockwebserver.dsl.HttpMethod;
|
||||||
|
import okhttp3.mockwebserver.RecordedRequest;
|
||||||
|
import org.apache.nifi.components.state.Scope;
|
||||||
|
import org.apache.nifi.components.state.StateMap;
|
||||||
|
import org.apache.nifi.components.state.StateProviderInitializationContext;
|
||||||
|
import org.apache.nifi.logging.ComponentLog;
|
||||||
|
import org.junit.jupiter.api.BeforeEach;
|
||||||
|
import org.junit.jupiter.api.Test;
|
||||||
|
import org.junit.jupiter.api.extension.ExtendWith;
|
||||||
|
import org.mockito.Mock;
|
||||||
|
import org.mockito.junit.jupiter.MockitoExtension;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Optional;
|
||||||
|
|
||||||
|
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
|
||||||
|
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||||
|
import static org.junit.jupiter.api.Assertions.assertFalse;
|
||||||
|
import static org.junit.jupiter.api.Assertions.assertNotNull;
|
||||||
|
import static org.junit.jupiter.api.Assertions.assertTrue;
|
||||||
|
import static org.mockito.Mockito.when;
|
||||||
|
|
||||||
|
@EnableKubernetesMockClient(crud = true)
|
||||||
|
@ExtendWith(MockitoExtension.class)
|
||||||
|
@ExtendWith(KubernetesMockServerExtension.class)
|
||||||
|
class KubernetesConfigMapStateProviderTest {
|
||||||
|
private static final String IDENTIFIER = KubernetesConfigMapStateProvider.class.getSimpleName();
|
||||||
|
|
||||||
|
private static final String FIRST_VERSION = "1";
|
||||||
|
|
||||||
|
private static final String SECOND_VERSION = "2";
|
||||||
|
|
||||||
|
private static final String DEFAULT_NAMESPACE = "default";
|
||||||
|
|
||||||
|
private static final String COMPONENT_ID = "COMPONENT-ID";
|
||||||
|
|
||||||
|
private static final String STATE_PROPERTY = "started";
|
||||||
|
|
||||||
|
private static final String STATE_PROPERTY_ENCODED = "c3RhcnRlZA";
|
||||||
|
|
||||||
|
private static final String STATE_VALUE = "now";
|
||||||
|
|
||||||
|
@Mock
|
||||||
|
StateProviderInitializationContext context;
|
||||||
|
|
||||||
|
@Mock
|
||||||
|
ComponentLog logger;
|
||||||
|
|
||||||
|
KubernetesMockServer kubernetesMockServer;
|
||||||
|
|
||||||
|
KubernetesClient kubernetesClient;
|
||||||
|
|
||||||
|
KubernetesConfigMapStateProvider provider;
|
||||||
|
|
||||||
|
@BeforeEach
|
||||||
|
void setProvider() {
|
||||||
|
provider = new MockKubernetesConfigMapStateProvider();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void testGetSupportedScopes() {
|
||||||
|
final Scope[] supportedScopes = provider.getSupportedScopes();
|
||||||
|
|
||||||
|
assertArrayEquals(new Scope[]{Scope.CLUSTER}, supportedScopes);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void testInitializeShutdown() {
|
||||||
|
setContext();
|
||||||
|
provider.initialize(context);
|
||||||
|
|
||||||
|
assertEquals(IDENTIFIER, provider.getIdentifier());
|
||||||
|
|
||||||
|
provider.shutdown();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void testInitializeEnableDisable() {
|
||||||
|
setContext();
|
||||||
|
provider.initialize(context);
|
||||||
|
|
||||||
|
assertEquals(IDENTIFIER, provider.getIdentifier());
|
||||||
|
|
||||||
|
assertFalse(provider.isEnabled());
|
||||||
|
|
||||||
|
provider.enable();
|
||||||
|
assertTrue(provider.isEnabled());
|
||||||
|
|
||||||
|
provider.disable();
|
||||||
|
assertFalse(provider.isEnabled());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void testGetStateNotFound() throws IOException {
|
||||||
|
setContext();
|
||||||
|
provider.initialize(context);
|
||||||
|
|
||||||
|
final StateMap stateMap = provider.getState(COMPONENT_ID);
|
||||||
|
|
||||||
|
assertTrue(stateMap.toMap().isEmpty());
|
||||||
|
assertFalse(stateMap.getStateVersion().isPresent());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void testSetStateGetState() throws IOException {
|
||||||
|
setContext();
|
||||||
|
provider.initialize(context);
|
||||||
|
|
||||||
|
final Map<String, String> state = Collections.singletonMap(STATE_PROPERTY, STATE_VALUE);
|
||||||
|
|
||||||
|
provider.setState(state, COMPONENT_ID);
|
||||||
|
|
||||||
|
final StateMap stateMap = provider.getState(COMPONENT_ID);
|
||||||
|
|
||||||
|
assertNotNull(stateMap);
|
||||||
|
final Map<String, String> stateRetrieved = stateMap.toMap();
|
||||||
|
assertEquals(state, stateRetrieved);
|
||||||
|
|
||||||
|
assertConfigMapFound();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void testSetStateOnComponentRemoved() throws IOException {
|
||||||
|
setContext();
|
||||||
|
provider.initialize(context);
|
||||||
|
|
||||||
|
final Map<String, String> state = Collections.singletonMap(STATE_PROPERTY, STATE_VALUE);
|
||||||
|
|
||||||
|
provider.setState(state, COMPONENT_ID);
|
||||||
|
|
||||||
|
final StateMap stateMap = provider.getState(COMPONENT_ID);
|
||||||
|
assertStateEquals(state, stateMap);
|
||||||
|
|
||||||
|
provider.onComponentRemoved(COMPONENT_ID);
|
||||||
|
|
||||||
|
final StateMap removedStateMap = provider.getState(COMPONENT_ID);
|
||||||
|
assertStateEquals(Collections.emptyMap(), removedStateMap);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void testClearGetState() throws IOException {
|
||||||
|
setContext();
|
||||||
|
provider.initialize(context);
|
||||||
|
|
||||||
|
provider.clear(COMPONENT_ID);
|
||||||
|
|
||||||
|
final StateMap stateMap = provider.getState(COMPONENT_ID);
|
||||||
|
|
||||||
|
assertNotNull(stateMap);
|
||||||
|
final Map<String, String> stateRetrieved = stateMap.toMap();
|
||||||
|
assertTrue(stateRetrieved.isEmpty());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void testReplaceNotFound() throws IOException {
|
||||||
|
setContext();
|
||||||
|
provider.initialize(context);
|
||||||
|
|
||||||
|
final StateMap stateMap = new StandardStateMap(Collections.emptyMap(), Optional.empty());
|
||||||
|
final boolean replaced = provider.replace(stateMap, Collections.emptyMap(), COMPONENT_ID);
|
||||||
|
|
||||||
|
assertFalse(replaced);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void testSetStateReplace() throws IOException {
|
||||||
|
setContext();
|
||||||
|
provider.initialize(context);
|
||||||
|
|
||||||
|
final Map<String, String> state = Collections.singletonMap(STATE_PROPERTY, STATE_VALUE);
|
||||||
|
provider.setState(state, COMPONENT_ID);
|
||||||
|
|
||||||
|
final StateMap initialStateMap = provider.getState(COMPONENT_ID);
|
||||||
|
final Optional<String> initialVersion = initialStateMap.getStateVersion();
|
||||||
|
assertTrue(initialVersion.isPresent());
|
||||||
|
assertEquals(FIRST_VERSION, initialVersion.get());
|
||||||
|
|
||||||
|
final boolean replaced = provider.replace(initialStateMap, Collections.emptyMap(), COMPONENT_ID);
|
||||||
|
|
||||||
|
assertTrue(replaced);
|
||||||
|
|
||||||
|
final StateMap replacedStateMap = provider.getState(COMPONENT_ID);
|
||||||
|
final Optional<String> replacedVersion = replacedStateMap.getStateVersion();
|
||||||
|
assertTrue(replacedVersion.isPresent());
|
||||||
|
assertEquals(SECOND_VERSION, replacedVersion.get());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void testOnComponentRemovedNotFound() throws IOException, InterruptedException {
|
||||||
|
setContext();
|
||||||
|
provider.initialize(context);
|
||||||
|
|
||||||
|
provider.onComponentRemoved(COMPONENT_ID);
|
||||||
|
|
||||||
|
final RecordedRequest request = kubernetesMockServer.getLastRequest();
|
||||||
|
assertEquals(HttpMethod.DELETE.name(), request.getMethod());
|
||||||
|
}
|
||||||
|
|
||||||
|
private void setContext() {
|
||||||
|
when(context.getIdentifier()).thenReturn(IDENTIFIER);
|
||||||
|
when(context.getLogger()).thenReturn(logger);
|
||||||
|
}
|
||||||
|
|
||||||
|
private void assertStateEquals(final Map<String, String> expected, final StateMap stateMap) {
|
||||||
|
assertNotNull(stateMap);
|
||||||
|
final Map<String, String> stateRetrieved = stateMap.toMap();
|
||||||
|
assertEquals(expected, stateRetrieved);
|
||||||
|
}
|
||||||
|
|
||||||
|
private void assertConfigMapFound() {
|
||||||
|
final ConfigMapList configMapList = kubernetesClient.configMaps().inNamespace(DEFAULT_NAMESPACE).list();
|
||||||
|
final Optional<ConfigMap> configMapFound = configMapList.getItems()
|
||||||
|
.stream()
|
||||||
|
.filter(configMap -> configMap.getMetadata().getName().endsWith(COMPONENT_ID))
|
||||||
|
.findFirst();
|
||||||
|
assertTrue(configMapFound.isPresent());
|
||||||
|
|
||||||
|
final ConfigMap configMap = configMapFound.get();
|
||||||
|
final Map<String, String> configMapData = configMap.getData();
|
||||||
|
final Map<String, String> expectedData = Collections.singletonMap(STATE_PROPERTY_ENCODED, STATE_VALUE);
|
||||||
|
assertEquals(expectedData, configMapData);
|
||||||
|
}
|
||||||
|
|
||||||
|
private class MockKubernetesConfigMapStateProvider extends KubernetesConfigMapStateProvider {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected KubernetesClient getKubernetesClient() {
|
||||||
|
return kubernetesClient;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
@@ -0,0 +1,30 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements. See the NOTICE file distributed with
    this work for additional information regarding copyright ownership.
    The ASF licenses this file to You under the Apache License, Version 2.0
    (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at
        http://www.apache.org/licenses/LICENSE-2.0
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.apache.nifi</groupId>
        <artifactId>nifi-framework</artifactId>
        <version>2.0.0-SNAPSHOT</version>
    </parent>
    <artifactId>nifi-framework-kubernetes-bundle</artifactId>
    <packaging>pom</packaging>
    <modules>
        <module>nifi-framework-kubernetes-leader-election</module>
        <module>nifi-framework-kubernetes-state-provider</module>
        <module>nifi-framework-kubernetes-nar</module>
    </modules>
</project>
@@ -0,0 +1,34 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements. See the NOTICE file distributed with
    this work for additional information regarding copyright ownership.
    The ASF licenses this file to You under the Apache License, Version 2.0
    (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at
        http://www.apache.org/licenses/LICENSE-2.0
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.apache.nifi</groupId>
        <artifactId>nifi-framework</artifactId>
        <version>2.0.0-SNAPSHOT</version>
    </parent>
    <artifactId>nifi-framework-leader-election-shared</artifactId>
    <dependencies>
        <dependency>
            <groupId>org.apache.nifi</groupId>
            <artifactId>nifi-utils</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.nifi</groupId>
            <artifactId>nifi-framework-api</artifactId>
        </dependency>
    </dependencies>
</project>
@@ -0,0 +1,43 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.nifi.controller.leader.election;

/**
 * Leader Election Role enumeration for mapping public role to RFC 1123 subdomain-style names
 */
public enum LeaderElectionRole {
    CLUSTER_COORDINATOR("Cluster Coordinator", "cluster-coordinator"),

    PRIMARY_NODE("Primary Node", "primary-node");

    private final String roleName;

    private final String roleId;

    LeaderElectionRole(final String roleName, final String roleId) {
        this.roleName = roleName;
        this.roleId = roleId;
    }

    public String getRoleName() {
        return roleName;
    }

    public String getRoleId() {
        return roleId;
    }
}
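
Each role above pairs a display name with an RFC 1123 subdomain-style identifier, so code that receives only the identifier may need to map it back to a role. A minimal lookup sketch follows; the LeaderElectionRoles helper class and findByRoleId method are hypothetical and not part of this commit.

// Illustrative sketch: resolving a LeaderElectionRole from its RFC 1123 style identifier.
// Only LeaderElectionRole comes from this commit; the helper below is assumed for illustration.
import java.util.Arrays;
import java.util.Optional;
import org.apache.nifi.controller.leader.election.LeaderElectionRole;

class LeaderElectionRoles {
    static Optional<LeaderElectionRole> findByRoleId(final String roleId) {
        return Arrays.stream(LeaderElectionRole.values())
                .filter(role -> role.getRoleId().equals(roleId))
                .findFirst();
    }
}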
@@ -0,0 +1,147 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.nifi.controller.leader.election;

import org.apache.nifi.util.timebuffer.CountSumMinMaxAccess;
import org.apache.nifi.util.timebuffer.LongEntityAccess;
import org.apache.nifi.util.timebuffer.TimedBuffer;
import org.apache.nifi.util.timebuffer.TimestampedLong;
import org.apache.nifi.util.timebuffer.TimestampedLongAggregation;

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;

/**
 * Abstract implementation of Leader Election Manager supporting tracking of operations
 */
public abstract class TrackedLeaderElectionManager implements LeaderElectionManager {

    private final ConcurrentMap<String, TimedBuffer<TimestampedLong>> leaderChanges = new ConcurrentHashMap<>();

    private final TimedBuffer<TimestampedLongAggregation> pollTimes = new TimedBuffer<>(TimeUnit.SECONDS, 300, new CountSumMinMaxAccess());

    /**
     * Register as observer without Participant Identifier
     *
     * @param roleName Name of role to be registered for elections
     * @param listener Listener notified on leader state changes
     */
    @Override
    public void register(final String roleName, final LeaderElectionStateChangeListener listener) {
        register(roleName, listener, null);
    }

    @Override
    public Map<String, Integer> getLeadershipChangeCount(final long duration, final TimeUnit unit) {
        final Map<String, Integer> leadershipChangesPerRole = new LinkedHashMap<>();

        for (final Map.Entry<String, TimedBuffer<TimestampedLong>> entry : leaderChanges.entrySet()) {
            final String roleName = entry.getKey();
            final TimedBuffer<TimestampedLong> buffer = entry.getValue();

            final TimestampedLong aggregateValue = buffer.getAggregateValue(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(duration, unit));
            final int leadershipChanges = aggregateValue.getValue().intValue();
            leadershipChangesPerRole.put(roleName, leadershipChanges);
        }

        return leadershipChangesPerRole;
    }

    @Override
    public long getAveragePollTime(final TimeUnit timeUnit) {
        final long averageNanos;
        synchronized (pollTimes) {
            final TimestampedLongAggregation.TimestampedAggregation aggregation = pollTimes.getAggregateValue(0L).getAggregation();
            if (aggregation == null || aggregation.getCount() == 0) {
                return 0L;
            }
            averageNanos = aggregation.getSum() / aggregation.getCount();
        }
        return timeUnit.convert(averageNanos, TimeUnit.NANOSECONDS);
    }

    @Override
    public long getMinPollTime(final TimeUnit timeUnit) {
        final long minNanos;
        synchronized (pollTimes) {
            final TimestampedLongAggregation.TimestampedAggregation aggregation = pollTimes.getAggregateValue(0L).getAggregation();
            if (aggregation == null) {
                return 0L;
            }
            minNanos = aggregation.getMin();
        }
        return timeUnit.convert(minNanos, TimeUnit.NANOSECONDS);
    }

    @Override
    public long getMaxPollTime(final TimeUnit timeUnit) {
        final long maxNanos;
        synchronized (pollTimes) {
            final TimestampedLongAggregation.TimestampedAggregation aggregation = pollTimes.getAggregateValue(0L).getAggregation();
            if (aggregation == null) {
                return 0L;
            }
            maxNanos = aggregation.getMax();
        }
        return timeUnit.convert(maxNanos, TimeUnit.NANOSECONDS);
    }

    @Override
    public long getPollCount() {
        synchronized (pollTimes) {
            final TimestampedLongAggregation.TimestampedAggregation aggregation = pollTimes.getAggregateValue(0L).getAggregation();
            if (aggregation == null) {
                return 0L;
            }
            return aggregation.getCount();
        }
    }

    /**
     * Register Poll Time in nanoseconds
     *
     * @param nanos Elapsed System Time in nanoseconds
     */
    protected void registerPollTime(final long nanos) {
        synchronized (pollTimes) {
            pollTimes.add(TimestampedLongAggregation.newValue(nanos));
        }
    }

    /**
     * On Leader Changed register role name changes
     *
     * @param roleName Role Name for leader changes
     */
    protected void onLeaderChanged(final String roleName) {
        final TimedBuffer<TimestampedLong> buffer = leaderChanges.computeIfAbsent(roleName, key -> new TimedBuffer<>(TimeUnit.HOURS, 24, new LongEntityAccess()));
        buffer.add(new TimestampedLong(1L));
    }

    /**
     * Is specified identifier participating in the election based on null or empty participant identifier
     *
     * @param participantId Participant Identifier
     * @return Participating status
     */
    protected boolean isParticipating(final String participantId) {
        return participantId != null && !participantId.trim().isEmpty();
    }
}
@@ -28,6 +28,7 @@ import org.apache.nifi.components.ConfigurableComponent;
 import org.apache.nifi.components.PropertyDescriptor;
 import org.apache.nifi.components.state.StateProvider;
 import org.apache.nifi.controller.ControllerService;
+import org.apache.nifi.controller.leader.election.LeaderElectionManager;
 import org.apache.nifi.controller.repository.ContentRepository;
 import org.apache.nifi.controller.repository.FlowFileRepository;
 import org.apache.nifi.controller.repository.FlowFileSwapManager;
@@ -117,6 +118,7 @@ public class StandardExtensionDiscoveringManager implements ExtensionDiscovering
         definitionMap.put(NarProvider.class, new HashSet<>());
         definitionMap.put(ExternalResourceProvider.class, new HashSet<>());
         definitionMap.put(FlowRegistryClient.class, new HashSet<>());
+        definitionMap.put(LeaderElectionManager.class, new HashSet<>());

         additionalExtensionTypes.forEach(type -> definitionMap.putIfAbsent(type, new HashSet<>()));
     }
@@ -0,0 +1,84 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements. See the NOTICE file distributed with
    this work for additional information regarding copyright ownership.
    The ASF licenses this file to You under the Apache License, Version 2.0
    (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at
        http://www.apache.org/licenses/LICENSE-2.0
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.apache.nifi</groupId>
        <artifactId>nifi-framework</artifactId>
        <version>2.0.0-SNAPSHOT</version>
    </parent>
    <artifactId>nifi-framework-zookeeper-leader-election</artifactId>
    <dependencies>
        <dependency>
            <groupId>org.apache.nifi</groupId>
            <artifactId>nifi-framework-leader-election-shared</artifactId>
            <version>2.0.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>org.apache.nifi</groupId>
            <artifactId>nifi-framework-cluster-zookeeper</artifactId>
            <version>2.0.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>org.apache.nifi</groupId>
            <artifactId>nifi-properties</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.nifi</groupId>
            <artifactId>nifi-framework-core-api</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.zookeeper</groupId>
            <artifactId>zookeeper</artifactId>
            <exclusions>
                <exclusion>
                    <groupId>org.slf4j</groupId>
                    <artifactId>slf4j-log4j12</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>log4j</groupId>
                    <artifactId>log4j</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>ch.qos.logback</groupId>
                    <artifactId>logback-classic</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.apache.curator</groupId>
            <artifactId>curator-framework</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.curator</groupId>
            <artifactId>curator-recipes</artifactId>
        </dependency>
        <!-- metrics-core required for ZooKeeper Server -->
        <dependency>
            <groupId>io.dropwizard.metrics</groupId>
            <artifactId>metrics-core</artifactId>
        </dependency>
        <!-- snappy-java required for ZooKeeper Server -->
        <dependency>
            <groupId>org.xerial.snappy</groupId>
            <artifactId>snappy-java</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-lang3</artifactId>
        </dependency>
    </dependencies>
</project>
|
|
@ -15,7 +15,7 @@
|
||||||
* limitations under the License.
|
* limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package org.apache.nifi.controller.leader.election;
|
package org.apache.nifi.framework.cluster.leader.zookeeper;
|
||||||
|
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
import java.util.Arrays;
|
import java.util.Arrays;
|
||||||
|
@ -24,7 +24,7 @@ import java.util.List;
|
||||||
import org.apache.commons.lang3.StringUtils;
|
import org.apache.commons.lang3.StringUtils;
|
||||||
import org.apache.curator.framework.api.ACLProvider;
|
import org.apache.curator.framework.api.ACLProvider;
|
||||||
import org.apache.curator.framework.imps.DefaultACLProvider;
|
import org.apache.curator.framework.imps.DefaultACLProvider;
|
||||||
import org.apache.nifi.controller.cluster.ZooKeeperClientConfig;
|
import org.apache.nifi.framework.cluster.zookeeper.ZooKeeperClientConfig;
|
||||||
import org.apache.zookeeper.ZooDefs;
|
import org.apache.zookeeper.ZooDefs;
|
||||||
import org.apache.zookeeper.data.ACL;
|
import org.apache.zookeeper.data.ACL;
|
||||||
import org.apache.zookeeper.data.Id;
|
import org.apache.zookeeper.data.Id;
|
|
@ -14,7 +14,7 @@
|
||||||
* See the License for the specific language governing permissions and
|
* See the License for the specific language governing permissions and
|
||||||
* limitations under the License.
|
* limitations under the License.
|
||||||
*/
|
*/
|
||||||
package org.apache.nifi.controller.leader.election;
|
package org.apache.nifi.framework.cluster.leader.zookeeper;
|
||||||
|
|
||||||
import org.apache.commons.lang3.StringUtils;
|
import org.apache.commons.lang3.StringUtils;
|
||||||
import org.apache.curator.RetryPolicy;
|
import org.apache.curator.RetryPolicy;
|
||||||
|
@ -27,14 +27,11 @@ import org.apache.curator.framework.recipes.leader.Participant;
|
||||||
import org.apache.curator.framework.state.ConnectionState;
|
import org.apache.curator.framework.state.ConnectionState;
|
||||||
import org.apache.curator.retry.RetryNTimes;
|
import org.apache.curator.retry.RetryNTimes;
|
||||||
import org.apache.curator.utils.ZookeeperFactory;
|
import org.apache.curator.utils.ZookeeperFactory;
|
||||||
import org.apache.nifi.controller.cluster.ZooKeeperClientConfig;
|
import org.apache.nifi.controller.leader.election.TrackedLeaderElectionManager;
|
||||||
import org.apache.nifi.engine.FlowEngine;
|
import org.apache.nifi.engine.FlowEngine;
|
||||||
|
import org.apache.nifi.framework.cluster.zookeeper.ZooKeeperClientConfig;
|
||||||
|
import org.apache.nifi.controller.leader.election.LeaderElectionStateChangeListener;
|
||||||
import org.apache.nifi.util.NiFiProperties;
|
import org.apache.nifi.util.NiFiProperties;
|
||||||
import org.apache.nifi.util.timebuffer.CountSumMinMaxAccess;
|
|
||||||
import org.apache.nifi.util.timebuffer.LongEntityAccess;
|
|
||||||
import org.apache.nifi.util.timebuffer.TimedBuffer;
|
|
||||||
import org.apache.nifi.util.timebuffer.TimestampedLong;
|
|
||||||
import org.apache.nifi.util.timebuffer.TimestampedLongAggregation;
|
|
||||||
import org.apache.zookeeper.KeeperException;
|
import org.apache.zookeeper.KeeperException;
|
||||||
import org.apache.zookeeper.Watcher;
|
import org.apache.zookeeper.Watcher;
|
||||||
import org.apache.zookeeper.ZooKeeper;
|
import org.apache.zookeeper.ZooKeeper;
|
||||||
|
@ -47,17 +44,20 @@ import org.apache.zookeeper.common.ZKConfig;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
import java.util.HashMap;
|
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
import java.util.Optional;
|
||||||
import java.util.concurrent.ConcurrentHashMap;
|
import java.util.concurrent.ConcurrentHashMap;
|
||||||
import java.util.concurrent.ConcurrentMap;
|
import java.util.concurrent.ConcurrentMap;
|
||||||
|
import java.util.concurrent.ExecutorService;
|
||||||
import java.util.concurrent.TimeUnit;
|
import java.util.concurrent.TimeUnit;
|
||||||
|
|
||||||
public class CuratorLeaderElectionManager implements LeaderElectionManager {
|
public class CuratorLeaderElectionManager extends TrackedLeaderElectionManager {
|
||||||
|
private static final String OBSERVER_ID = "OBSERVER";
|
||||||
|
|
||||||
private static final Logger logger = LoggerFactory.getLogger(CuratorLeaderElectionManager.class);
|
private static final Logger logger = LoggerFactory.getLogger(CuratorLeaderElectionManager.class);
|
||||||
|
|
||||||
private final FlowEngine leaderElectionMonitorEngine;
|
private final ExecutorService leaderElectionMonitorEngine = new FlowEngine(4, CuratorLeaderElectionManager.class.getSimpleName());
|
||||||
|
|
||||||
private final ZooKeeperClientConfig zkConfig;
|
private final ZooKeeperClientConfig zkConfig;
|
||||||
|
|
||||||
private CuratorFramework curatorClient;
|
private CuratorFramework curatorClient;
|
||||||
|
@ -65,14 +65,17 @@ public class CuratorLeaderElectionManager implements LeaderElectionManager {
|
||||||
private volatile boolean stopped = true;
|
private volatile boolean stopped = true;
|
||||||
|
|
||||||
private final ConcurrentMap<String, LeaderRole> leaderRoles = new ConcurrentHashMap<>();
|
private final ConcurrentMap<String, LeaderRole> leaderRoles = new ConcurrentHashMap<>();
|
||||||
|
|
||||||
private final ConcurrentMap<String, RegisteredRole> registeredRoles = new ConcurrentHashMap<>();
|
private final ConcurrentMap<String, RegisteredRole> registeredRoles = new ConcurrentHashMap<>();
|
||||||
|
|
||||||
private final ConcurrentMap<String, TimedBuffer<TimestampedLong>> leaderChanges = new ConcurrentHashMap<>();
|
|
||||||
private final TimedBuffer<TimestampedLongAggregation> pollTimes = new TimedBuffer<>(TimeUnit.SECONDS, 300, new CountSumMinMaxAccess());
|
|
||||||
private final ConcurrentMap<String, String> lastKnownLeader = new ConcurrentHashMap<>();
|
private final ConcurrentMap<String, String> lastKnownLeader = new ConcurrentHashMap<>();
|
||||||
|
|
||||||
public CuratorLeaderElectionManager(final int threadPoolSize, final NiFiProperties properties) {
|
/**
|
||||||
leaderElectionMonitorEngine = new FlowEngine(threadPoolSize, "Leader Election Notification", true);
|
* Curator Leader Election Manager constructor with NiFi Properties for NarThreadContextClassLoader.createInstance()
|
||||||
|
*
|
||||||
|
* @param properties NiFi Properties
|
||||||
|
*/
|
||||||
|
public CuratorLeaderElectionManager(final NiFiProperties properties) {
|
||||||
zkConfig = ZooKeeperClientConfig.createConfig(properties);
|
zkConfig = ZooKeeperClientConfig.createConfig(properties);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -108,11 +111,6 @@ public class CuratorLeaderElectionManager implements LeaderElectionManager {
|
||||||
return participantId != null;
|
return participantId != null;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public void register(String roleName, LeaderElectionStateChangeListener listener) {
|
|
||||||
register(roleName, listener, null);
|
|
||||||
}
|
|
||||||
|
|
||||||
private String getElectionPath(final String roleName) {
|
private String getElectionPath(final String roleName) {
|
||||||
final String rootPath = zkConfig.getRootPath();
|
final String rootPath = zkConfig.getRootPath();
|
||||||
final String leaderPath = rootPath + (rootPath.endsWith("/") ? "" : "/") + "leaders/" + roleName;
|
final String leaderPath = rootPath + (rootPath.endsWith("/") ? "" : "/") + "leaders/" + roleName;
|
||||||
|
@ -141,27 +139,24 @@ public class CuratorLeaderElectionManager implements LeaderElectionManager {
|
||||||
|
|
||||||
registeredRoles.put(roleName, new RegisteredRole(participantId, listener));
|
registeredRoles.put(roleName, new RegisteredRole(participantId, listener));
|
||||||
|
|
||||||
final boolean isParticipant = participantId != null && !participantId.trim().isEmpty();
|
final boolean participating = isParticipating(participantId);
|
||||||
|
|
||||||
if (!isStopped()) {
|
if (!isStopped()) {
|
||||||
final ElectionListener electionListener = new ElectionListener(roleName, listener, participantId);
|
final ElectionListener electionListener = new ElectionListener(roleName, listener, participantId);
|
||||||
final LeaderSelector leaderSelector = new LeaderSelector(curatorClient, leaderPath, leaderElectionMonitorEngine, electionListener);
|
final LeaderSelector leaderSelector = new LeaderSelector(curatorClient, leaderPath, leaderElectionMonitorEngine, electionListener);
|
||||||
if (isParticipant) {
|
if (participating) {
|
||||||
leaderSelector.autoRequeue();
|
leaderSelector.autoRequeue();
|
||||||
leaderSelector.setId(participantId);
|
leaderSelector.setId(participantId);
|
||||||
leaderSelector.start();
|
leaderSelector.start();
|
||||||
}
|
}
|
||||||
|
|
||||||
final LeaderRole leaderRole = new LeaderRole(leaderSelector, electionListener, isParticipant);
|
final LeaderRole leaderRole = new LeaderRole(leaderSelector, electionListener, participating);
|
||||||
|
|
||||||
leaderRoles.put(roleName, leaderRole);
|
leaderRoles.put(roleName, leaderRole);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (isParticipant) {
|
final String registeredId = participating ? participantId : OBSERVER_ID;
|
||||||
logger.info("{} Registered new Leader Selector for role {}; this node is an active participant in the election.", this, roleName);
|
logger.info("Registered for Election: Role [{}] Registered ID [{}]", roleName, registeredId);
|
||||||
} else {
|
|
||||||
logger.info("{} Registered new Leader Selector for role {}; this node is a silent observer in the election.", this, roleName);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
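For orientation, the register logic above distinguishes active participants (non-blank participant ID, giving an auto-requeueing LeaderSelector) from silent observers (null or empty ID, logged under the OBSERVER id). A minimal caller-side sketch follows, assuming the three-argument register overload on the LeaderElectionManager interface and the pre-refactoring package name; the role name is illustrative.

import org.apache.nifi.controller.leader.election.LeaderElectionManager;
import org.apache.nifi.controller.leader.election.LeaderElectionStateChangeListener;

public class LeaderElectionRegistration {
    public void registerForRole(final LeaderElectionManager electionManager,
                                final LeaderElectionStateChangeListener listener,
                                final String participantId) {
        // A non-blank participantId makes this node an active participant in the election;
        // a null or empty value registers it as a silent observer of the role.
        // The listener's onStartLeading()/onStopLeading() callbacks are the ones invoked
        // by the ElectionListener further down in this class.
        electionManager.register("Cluster Coordinator", listener, participantId);
    }
}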
|
|
||||||
@Override
|
@Override
|
||||||
|
@ -183,7 +178,7 @@ public class CuratorLeaderElectionManager implements LeaderElectionManager {
|
||||||
leaderRole.getElectionListener().disable();
|
leaderRole.getElectionListener().disable();
|
||||||
|
|
||||||
leaderSelector.close();
|
leaderSelector.close();
|
||||||
logger.info("This node is no longer registered to be elected as the Leader for Role '{}'", roleName);
|
logger.info("Unregistered for Election: Role [{}]", roleName);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@ -208,11 +203,11 @@ public class CuratorLeaderElectionManager implements LeaderElectionManager {
|
||||||
curatorClient = null;
|
curatorClient = null;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
leaderElectionMonitorEngine.shutdown();
|
||||||
logger.info("{} stopped and closed", this);
|
logger.info("{} stopped and closed", this);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
private boolean isStopped() {
|
||||||
public boolean isStopped() {
|
|
||||||
return stopped;
|
return stopped;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -225,26 +220,6 @@ public class CuratorLeaderElectionManager implements LeaderElectionManager {
|
||||||
return leaderRoles.get(roleName);
|
return leaderRoles.get(roleName);
|
||||||
}
|
}
|
||||||
|
|
||||||
private void onLeaderChanged(final String roleName) {
|
|
||||||
final TimedBuffer<TimestampedLong> buffer = leaderChanges.computeIfAbsent(roleName, key -> new TimedBuffer<>(TimeUnit.HOURS, 24, new LongEntityAccess()));
|
|
||||||
buffer.add(new TimestampedLong(1L));
|
|
||||||
}
|
|
||||||
|
|
||||||
public Map<String, Integer> getLeadershipChangeCount(final long duration, final TimeUnit unit) {
|
|
||||||
final Map<String, Integer> leadershipChangesPerRole = new HashMap<>();
|
|
||||||
|
|
||||||
for (final Map.Entry<String, TimedBuffer<TimestampedLong>> entry : leaderChanges.entrySet()) {
|
|
||||||
final String roleName = entry.getKey();
|
|
||||||
final TimedBuffer<TimestampedLong> buffer = entry.getValue();
|
|
||||||
|
|
||||||
final TimestampedLong aggregateValue = buffer.getAggregateValue(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(duration, unit));
|
|
||||||
final int leadershipChanges = aggregateValue.getValue().intValue();
|
|
||||||
leadershipChangesPerRole.put(roleName, leadershipChanges);
|
|
||||||
}
|
|
||||||
|
|
||||||
return leadershipChangesPerRole;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean isLeader(final String roleName) {
|
public boolean isLeader(final String roleName) {
|
||||||
final boolean activeParticipant = isActiveParticipant(roleName);
|
final boolean activeParticipant = isActiveParticipant(roleName);
|
||||||
|
@ -263,7 +238,7 @@ public class CuratorLeaderElectionManager implements LeaderElectionManager {
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String getLeader(final String roleName) {
|
public Optional<String> getLeader(final String roleName) {
|
||||||
if (isStopped()) {
|
if (isStopped()) {
|
||||||
return determineLeaderExternal(roleName);
|
return determineLeaderExternal(roleName);
|
||||||
}
|
}
|
||||||
|
@ -280,19 +255,19 @@ public class CuratorLeaderElectionManager implements LeaderElectionManager {
|
||||||
try {
|
try {
|
||||||
participant = role.getLeaderSelector().getLeader();
|
participant = role.getLeaderSelector().getLeader();
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
logger.warn("Unable to determine leader for role '{}'; returning null", roleName, e);
|
logger.warn("Unable to determine leader for role [{}]", roleName, e);
|
||||||
return null;
|
return Optional.empty();
|
||||||
}
|
}
|
||||||
|
|
||||||
if (participant == null) {
|
if (participant == null) {
|
||||||
logger.debug("There is currently no elected leader for the {} role", roleName);
|
logger.debug("There is currently no elected leader for the {} role", roleName);
|
||||||
return null;
|
return Optional.empty();
|
||||||
}
|
}
|
||||||
|
|
||||||
final String participantId = participant.getId();
|
final String participantId = participant.getId();
|
||||||
if (StringUtils.isEmpty(participantId)) {
|
if (StringUtils.isEmpty(participantId)) {
|
||||||
logger.debug("Found leader participant for role {} but the participantId was empty", roleName);
|
logger.debug("Found leader participant for role [{}] but the participantId was empty", roleName);
|
||||||
return null;
|
return Optional.empty();
|
||||||
}
|
}
|
||||||
|
|
||||||
final String previousLeader = lastKnownLeader.put(roleName, participantId);
|
final String previousLeader = lastKnownLeader.put(roleName, participantId);
|
||||||
|
@ -300,86 +275,20 @@ public class CuratorLeaderElectionManager implements LeaderElectionManager {
|
||||||
onLeaderChanged(roleName);
|
onLeaderChanged(roleName);
|
||||||
}
|
}
|
||||||
|
|
||||||
return participantId;
|
return Optional.of(participantId);
|
||||||
} finally {
|
} finally {
|
||||||
registerPollTime(System.nanoTime() - startNanos);
|
registerPollTime(System.nanoTime() - startNanos);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private void registerPollTime(final long nanos) {
|
|
||||||
synchronized (pollTimes) {
|
|
||||||
pollTimes.add(TimestampedLongAggregation.newValue(nanos));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public long getAveragePollTime(final TimeUnit timeUnit) {
|
|
||||||
final long averageNanos;
|
|
||||||
synchronized (pollTimes) {
|
|
||||||
final TimestampedLongAggregation.TimestampedAggregation aggregation = pollTimes.getAggregateValue(0L).getAggregation();
|
|
||||||
if (aggregation == null || aggregation.getCount() == 0) {
|
|
||||||
return 0L;
|
|
||||||
}
|
|
||||||
averageNanos = aggregation.getSum() / aggregation.getCount();
|
|
||||||
}
|
|
||||||
return timeUnit.convert(averageNanos, TimeUnit.NANOSECONDS);
|
|
||||||
}
|
|
||||||
|
|
||||||
public long getMinPollTime(final TimeUnit timeUnit) {
|
|
||||||
final long minNanos;
|
|
||||||
synchronized (pollTimes) {
|
|
||||||
final TimestampedLongAggregation.TimestampedAggregation aggregation = pollTimes.getAggregateValue(0L).getAggregation();
|
|
||||||
if (aggregation == null) {
|
|
||||||
return 0L;
|
|
||||||
}
|
|
||||||
minNanos = aggregation.getMin();
|
|
||||||
}
|
|
||||||
return timeUnit.convert(minNanos, TimeUnit.NANOSECONDS);
|
|
||||||
}
|
|
||||||
|
|
||||||
public long getMaxPollTime(final TimeUnit timeUnit) {
|
|
||||||
final long maxNanos;
|
|
||||||
synchronized (pollTimes) {
|
|
||||||
final TimestampedLongAggregation.TimestampedAggregation aggregation = pollTimes.getAggregateValue(0L).getAggregation();
|
|
||||||
if (aggregation == null) {
|
|
||||||
return 0L;
|
|
||||||
}
|
|
||||||
maxNanos = aggregation.getMax();
|
|
||||||
}
|
|
||||||
return timeUnit.convert(maxNanos, TimeUnit.NANOSECONDS);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public long getPollCount() {
|
|
||||||
synchronized (pollTimes) {
|
|
||||||
final TimestampedLongAggregation.TimestampedAggregation aggregation = pollTimes.getAggregateValue(0L).getAggregation();
|
|
||||||
if (aggregation == null) {
|
|
||||||
return 0L;
|
|
||||||
}
|
|
||||||
return aggregation.getCount();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Determines whether or not leader election has already begun for the role with the given name
|
|
||||||
*
|
|
||||||
* @param roleName the role of interest
|
|
||||||
* @return <code>true</code> if leader election has already begun, <code>false</code> if it has not or if unable to determine this.
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
public boolean isLeaderElected(final String roleName) {
|
|
||||||
final String leaderAddress = determineLeaderExternal(roleName);
|
|
||||||
return !StringUtils.isEmpty(leaderAddress);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Use a new Curator client to determine which node is the elected leader for the given role.
|
* Use a new Curator client to determine which node is the elected leader for the given role.
|
||||||
*
|
*
|
||||||
* @param roleName the name of the role
|
* @param roleName the name of the role
|
||||||
* @return the id of the elected leader, or <code>null</code> if no leader has been selected or if unable to determine
|
* @return the id of the elected leader, or <code>Optional.empty()</code> if no leader has been selected or if unable to determine
|
||||||
* the leader from ZooKeeper
|
* the leader from ZooKeeper
|
||||||
*/
|
*/
|
||||||
private String determineLeaderExternal(final String roleName) {
|
private Optional<String> determineLeaderExternal(final String roleName) {
|
||||||
final long start = System.nanoTime();
|
final long start = System.nanoTime();
|
||||||
|
|
||||||
try (CuratorFramework client = createClient()) {
|
try (CuratorFramework client = createClient()) {
|
||||||
|
@ -401,17 +310,17 @@ public class CuratorLeaderElectionManager implements LeaderElectionManager {
|
||||||
|
|
||||||
try {
|
try {
|
||||||
final Participant leader = selector.getLeader();
|
final Participant leader = selector.getLeader();
|
||||||
return leader == null ? null : leader.getId();
|
return leader == null ? Optional.empty() : Optional.of(leader.getId());
|
||||||
} catch (final KeeperException.NoNodeException nne) {
|
} catch (final KeeperException.NoNodeException nne) {
|
||||||
// If there is no ZNode, then there is no elected leader.
|
// If there is no ZNode, then there is no elected leader.
|
||||||
return null;
|
return Optional.empty();
|
||||||
} catch (final Exception e) {
|
} catch (final Exception e) {
|
||||||
logger.warn("Unable to determine the Elected Leader for role '{}' due to {}; assuming no leader has been elected", roleName, e.toString());
|
logger.warn("Unable to determine the Elected Leader for role '{}' due to {}; assuming no leader has been elected", roleName, e.toString());
|
||||||
if (logger.isDebugEnabled()) {
|
if (logger.isDebugEnabled()) {
|
||||||
logger.warn("", e);
|
logger.warn("", e);
|
||||||
}
|
}
|
||||||
|
|
||||||
return null;
|
return Optional.empty();
|
||||||
}
|
}
|
||||||
} finally {
|
} finally {
|
||||||
registerPollTime(System.nanoTime() - start);
|
registerPollTime(System.nanoTime() - start);
|
||||||
|
@ -569,13 +478,14 @@ public class CuratorLeaderElectionManager implements LeaderElectionManager {
|
||||||
* @return <code>true</code> if this node is still the elected leader according to ZooKeeper, false otherwise
|
* @return <code>true</code> if this node is still the elected leader according to ZooKeeper, false otherwise
|
||||||
*/
|
*/
|
||||||
private boolean verifyLeader() {
|
private boolean verifyLeader() {
|
||||||
final String leader = getLeader(roleName);
|
final Optional<String> leaderAddress = getLeader(roleName);
|
||||||
if (leader == null) {
|
if (!leaderAddress.isPresent()) {
|
||||||
logger.debug("Reached out to ZooKeeper to determine which node is the elected leader for Role '{}' but found that there is no leader.", roleName);
|
logger.debug("Reached out to ZooKeeper to determine which node is the elected leader for Role '{}' but found that there is no leader.", roleName);
|
||||||
setLeader(false);
|
setLeader(false);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
final String leader = leaderAddress.get();
|
||||||
final boolean match = leader.equals(participantId);
|
final boolean match = leader.equals(participantId);
|
||||||
logger.debug("Reached out to ZooKeeper to determine which node is the elected leader for Role '{}'. Elected Leader = '{}', Participant ID = '{}', This Node Elected = {}",
|
logger.debug("Reached out to ZooKeeper to determine which node is the elected leader for Role '{}'. Elected Leader = '{}', Participant ID = '{}', This Node Elected = {}",
|
||||||
roleName, leader, participantId, match);
|
roleName, leader, participantId, match);
|
||||||
|
@ -591,7 +501,7 @@ public class CuratorLeaderElectionManager implements LeaderElectionManager {
|
||||||
|
|
||||||
if (listener != null) {
|
if (listener != null) {
|
||||||
try {
|
try {
|
||||||
listener.onLeaderElection();
|
listener.onStartLeading();
|
||||||
} catch (final Exception e) {
|
} catch (final Exception e) {
|
||||||
logger.error("This node was elected Leader for Role '{}' but failed to take leadership. Will relinquish leadership role. Failure was due to: {}", roleName, e);
|
logger.error("This node was elected Leader for Role '{}' but failed to take leadership. Will relinquish leadership role. Failure was due to: {}", roleName, e);
|
||||||
setLeader(false);
|
setLeader(false);
|
||||||
|
@ -644,7 +554,7 @@ public class CuratorLeaderElectionManager implements LeaderElectionManager {
|
||||||
|
|
||||||
if (listener != null) {
|
if (listener != null) {
|
||||||
try {
|
try {
|
||||||
listener.onLeaderRelinquish();
|
listener.onStopLeading();
|
||||||
} catch (final Exception e) {
|
} catch (final Exception e) {
|
||||||
logger.error("This node is no longer leader for role '{}' but failed to shutdown leadership responsibilities properly due to: {}", roleName, e.toString());
|
logger.error("This node is no longer leader for role '{}' but failed to shutdown leadership responsibilities properly due to: {}", roleName, e.toString());
|
||||||
if (logger.isDebugEnabled()) {
|
if (logger.isDebugEnabled()) {
|
|
@ -0,0 +1,15 @@
|
||||||
|
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||||
|
# contributor license agreements. See the NOTICE file distributed with
|
||||||
|
# this work for additional information regarding copyright ownership.
|
||||||
|
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||||
|
# (the "License"); you may not use this file except in compliance with
|
||||||
|
# the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
org.apache.nifi.framework.cluster.leader.zookeeper.CuratorLeaderElectionManager
|
|
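The new single-line resources file above registers the ZooKeeper-based implementation for extension discovery, which is what the nifi.cluster.leader.election.implementation property selects by simple class name. As a rough analogy for how such a META-INF/services provider file is resolved, the standard java.util.ServiceLoader lookup below lists declared implementations without instantiating them; NiFi's actual discovery goes through NAR class loading (NarThreadContextClassLoader.createInstance(), per the constructor Javadoc earlier), and the interface package in the import is an assumption.

import java.util.ServiceLoader;

import org.apache.nifi.controller.leader.election.LeaderElectionManager;

public class LeaderElectionManagerDiscovery {
    public static void main(final String[] args) {
        // Lists implementation class names declared in META-INF/services entries on the classpath.
        // Provider.type() avoids instantiation, since the real implementations require NiFiProperties.
        ServiceLoader.load(LeaderElectionManager.class).stream()
                .map(ServiceLoader.Provider::type)
                .map(Class::getName)
                .forEach(System.out::println);
    }
}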
@ -201,6 +201,7 @@
|
||||||
|
|
||||||
<!-- nifi.properties: cluster node properties (only configure for cluster nodes) -->
|
<!-- nifi.properties: cluster node properties (only configure for cluster nodes) -->
|
||||||
<nifi.cluster.is.node>false</nifi.cluster.is.node>
|
<nifi.cluster.is.node>false</nifi.cluster.is.node>
|
||||||
|
<nifi.cluster.leader.election.implementation>CuratorLeaderElectionManager</nifi.cluster.leader.election.implementation>
|
||||||
<nifi.cluster.node.address />
|
<nifi.cluster.node.address />
|
||||||
<nifi.cluster.node.protocol.port />
|
<nifi.cluster.node.protocol.port />
|
||||||
<nifi.cluster.node.protocol.max.threads>50</nifi.cluster.node.protocol.max.threads>
|
<nifi.cluster.node.protocol.max.threads>50</nifi.cluster.node.protocol.max.threads>
|
||||||
|
|
|
@ -257,6 +257,7 @@ nifi.cluster.protocol.is.secure=${nifi.cluster.protocol.is.secure}
|
||||||
|
|
||||||
# cluster node properties (only configure for cluster nodes) #
|
# cluster node properties (only configure for cluster nodes) #
|
||||||
nifi.cluster.is.node=${nifi.cluster.is.node}
|
nifi.cluster.is.node=${nifi.cluster.is.node}
|
||||||
|
nifi.cluster.leader.election.implementation=${nifi.cluster.leader.election.implementation}
|
||||||
nifi.cluster.node.address=${nifi.cluster.node.address}
|
nifi.cluster.node.address=${nifi.cluster.node.address}
|
||||||
nifi.cluster.node.protocol.port=${nifi.cluster.node.protocol.port}
|
nifi.cluster.node.protocol.port=${nifi.cluster.node.protocol.port}
|
||||||
nifi.cluster.node.protocol.max.threads=${nifi.cluster.node.protocol.max.threads}
|
nifi.cluster.node.protocol.max.threads=${nifi.cluster.node.protocol.max.threads}
|
||||||
|
|
|
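The two template additions above expose a single property for choosing the election backend. In a rendered nifi.properties this might look like the following sketch; CuratorLeaderElectionManager is the default named in the commit description, while the identifier for the Kubernetes implementation is an assumption rather than a value shown in this diff.

# Leader election backend; defaults to the ZooKeeper/Curator implementation
nifi.cluster.leader.election.implementation=CuratorLeaderElectionManager
# Presumed alternative when running on Kubernetes (identifier assumed, not shown here)
# nifi.cluster.leader.election.implementation=KubernetesLeaderElectionManager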
@ -64,6 +64,12 @@
|
||||||
<property name="Access Control">Open</property>
|
<property name="Access Control">Open</property>
|
||||||
</cluster-provider>
|
</cluster-provider>
|
||||||
|
|
||||||
|
<!-- Kubernetes ConfigMap implementation of State Provider -->
|
||||||
|
<cluster-provider>
|
||||||
|
<id>kubernetes-provider</id>
|
||||||
|
<class>org.apache.nifi.kubernetes.state.provider.KubernetesConfigMapStateProvider</class>
|
||||||
|
</cluster-provider>
|
||||||
|
|
||||||
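Declaring the kubernetes-provider element only makes the ConfigMap-backed provider available; a node still selects it through the cluster state provider reference in nifi.properties. A minimal sketch, assuming the standard nifi.state.management.provider.cluster property is used to point at the provider id:

nifi.state.management.provider.cluster=kubernetes-provider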
<!--
|
<!--
|
||||||
Cluster State Provider that stores state in Redis. This can be used as an alternative to the ZooKeeper State Provider.
|
Cluster State Provider that stores state in Redis. This can be used as an alternative to the ZooKeeper State Provider.
|
||||||
|
|
||||||
|
|
|
@ -6289,12 +6289,13 @@ public class StandardNiFiServiceFacade implements NiFiServiceFacade {
|
||||||
final String nodeAddress = nodeId.getSocketAddress() + ":" + nodeId.getSocketPort();
|
final String nodeAddress = nodeId.getSocketAddress() + ":" + nodeId.getSocketPort();
|
||||||
|
|
||||||
for (final String roleName : ClusterRoles.getAllRoles()) {
|
for (final String roleName : ClusterRoles.getAllRoles()) {
|
||||||
final String leader = leaderElectionManager.getLeader(roleName);
|
final Optional<String> leader = leaderElectionManager.getLeader(roleName);
|
||||||
if (leader == null) {
|
if (!leader.isPresent()) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (leader.equals(nodeAddress)) {
|
final String leaderAddress = leader.get();
|
||||||
|
if (leaderAddress.equals(nodeAddress)) {
|
||||||
roles.add(roleName);
|
roles.add(roleName);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -33,7 +33,11 @@
|
||||||
<module>nifi-framework-components</module>
|
<module>nifi-framework-components</module>
|
||||||
<module>nifi-framework-core</module>
|
<module>nifi-framework-core</module>
|
||||||
<module>nifi-framework-cluster-protocol</module>
|
<module>nifi-framework-cluster-protocol</module>
|
||||||
|
<module>nifi-framework-cluster-zookeeper</module>
|
||||||
<module>nifi-framework-cluster</module>
|
<module>nifi-framework-cluster</module>
|
||||||
|
<module>nifi-framework-leader-election-shared</module>
|
||||||
|
<module>nifi-framework-zookeeper-leader-election</module>
|
||||||
|
<module>nifi-framework-kubernetes-bundle</module>
|
||||||
<module>nifi-framework-nar-utils</module>
|
<module>nifi-framework-nar-utils</module>
|
||||||
<module>nifi-framework-nar-loading-utils</module>
|
<module>nifi-framework-nar-loading-utils</module>
|
||||||
<module>nifi-user-actions</module>
|
<module>nifi-user-actions</module>
|
||||||
|
|
|
@ -318,7 +318,7 @@ public class ListGCSBucket extends AbstractGCSProcessor {
|
||||||
|
|
||||||
void restoreState(final ProcessSession session) throws IOException {
|
void restoreState(final ProcessSession session) throws IOException {
|
||||||
final StateMap stateMap = session.getState(Scope.CLUSTER);
|
final StateMap stateMap = session.getState(Scope.CLUSTER);
|
||||||
if (stateMap.getVersion() == -1L || stateMap.get(CURRENT_TIMESTAMP) == null || stateMap.get(CURRENT_KEY_PREFIX+"0") == null) {
|
if (!stateMap.getStateVersion().isPresent() || stateMap.get(CURRENT_TIMESTAMP) == null || stateMap.get(CURRENT_KEY_PREFIX+"0") == null) {
|
||||||
currentTimestamp = 0L;
|
currentTimestamp = 0L;
|
||||||
currentKeys.clear();
|
currentKeys.clear();
|
||||||
} else {
|
} else {
|
||||||
|
|
|
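The ListGCSBucket change above is the first of several identical migrations in this commit (ListHDFS, GetHBase, GetSplunk, MonitorActivity and TailFile follow): the sentinel comparison against version -1 becomes a presence check on the Optional state version. A condensed sketch of the pattern with the processor-specific keys omitted; the class and method names here are illustrative.

import java.io.IOException;

import org.apache.nifi.components.state.Scope;
import org.apache.nifi.components.state.StateMap;
import org.apache.nifi.processor.ProcessSession;

public class StateRestoreSketch {
    void restore(final ProcessSession session) throws IOException {
        final StateMap stateMap = session.getState(Scope.CLUSTER);
        if (!stateMap.getStateVersion().isPresent()) {
            // No state has ever been stored: start from defaults, as the processors above do.
        } else {
            // State exists: read previously persisted entries with stateMap.get(key).
        }
    }
}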
@ -141,7 +141,7 @@ public class ListGCSBucketTest extends AbstractGCSTest {
|
||||||
addRequiredPropertiesToRunner(runner);
|
addRequiredPropertiesToRunner(runner);
|
||||||
runner.assertValid();
|
runner.assertValid();
|
||||||
|
|
||||||
assertEquals(-1L, runner.getProcessContext().getStateManager().getState(Scope.CLUSTER).getVersion(), "Cluster StateMap should be fresh (version -1L)");
|
assertFalse(runner.getProcessContext().getStateManager().getState(Scope.CLUSTER).getStateVersion().isPresent(), "Cluster StateMap should be fresh");
|
||||||
assertTrue(processor.getStateKeys().isEmpty());
|
assertTrue(processor.getStateKeys().isEmpty());
|
||||||
|
|
||||||
processor.restoreState(runner.getProcessSessionFactory().createSession());
|
processor.restoreState(runner.getProcessSessionFactory().createSession());
|
||||||
|
@ -187,9 +187,9 @@ public class ListGCSBucketTest extends AbstractGCSTest {
|
||||||
addRequiredPropertiesToRunner(runner);
|
addRequiredPropertiesToRunner(runner);
|
||||||
runner.assertValid();
|
runner.assertValid();
|
||||||
|
|
||||||
assertEquals(-1L,
|
assertFalse(
|
||||||
runner.getProcessContext().getStateManager().getState(Scope.CLUSTER).getVersion(),
|
runner.getProcessContext().getStateManager().getState(Scope.CLUSTER).getStateVersion().isPresent(),
|
||||||
"Cluster StateMap should be fresh (version -1L)"
|
"Cluster StateMap should be fresh"
|
||||||
);
|
);
|
||||||
|
|
||||||
final Set<String> keys = new LinkedHashSet<>(Arrays.asList("test-key-0", "test-key-1"));
|
final Set<String> keys = new LinkedHashSet<>(Arrays.asList("test-key-0", "test-key-1"));
|
||||||
|
@ -197,7 +197,7 @@ public class ListGCSBucketTest extends AbstractGCSTest {
|
||||||
processor.persistState(session, 4L, keys);
|
processor.persistState(session, 4L, keys);
|
||||||
|
|
||||||
final StateMap stateMap = runner.getStateManager().getState(Scope.CLUSTER);
|
final StateMap stateMap = runner.getStateManager().getState(Scope.CLUSTER);
|
||||||
assertEquals(1L, stateMap.getVersion(), "Cluster StateMap should have been written to");
|
assertTrue(stateMap.getStateVersion().isPresent(), "Cluster StateMap should have been written to");
|
||||||
|
|
||||||
final Map<String, String> state = new HashMap<>();
|
final Map<String, String> state = new HashMap<>();
|
||||||
state.put(ListGCSBucket.CURRENT_TIMESTAMP, String.valueOf(4L));
|
state.put(ListGCSBucket.CURRENT_TIMESTAMP, String.valueOf(4L));
|
||||||
|
@ -369,7 +369,7 @@ public class ListGCSBucketTest extends AbstractGCSTest {
|
||||||
runner.assertTransferCount(ListGCSBucket.REL_SUCCESS, 0);
|
runner.assertTransferCount(ListGCSBucket.REL_SUCCESS, 0);
|
||||||
verifyConfigVerification(runner, processor, 0);
|
verifyConfigVerification(runner, processor, 0);
|
||||||
|
|
||||||
assertEquals(-1L, runner.getStateManager().getState(Scope.CLUSTER).getVersion(), "No state should be persisted on an empty return");
|
assertFalse(runner.getStateManager().getState(Scope.CLUSTER).getStateVersion().isPresent(), "No state should be persisted on an empty return");
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
|
|
@ -420,7 +420,7 @@ public class ListHDFS extends AbstractHadoopProcessor {
|
||||||
// Ensure that we are using the latest listing information before we try to perform a listing of HDFS files.
|
// Ensure that we are using the latest listing information before we try to perform a listing of HDFS files.
|
||||||
try {
|
try {
|
||||||
final StateMap stateMap = session.getState(Scope.CLUSTER);
|
final StateMap stateMap = session.getState(Scope.CLUSTER);
|
||||||
if (stateMap.getVersion() == -1L) {
|
if (!stateMap.getStateVersion().isPresent()) {
|
||||||
latestTimestampEmitted = -1L;
|
latestTimestampEmitted = -1L;
|
||||||
latestTimestampListed = -1L;
|
latestTimestampListed = -1L;
|
||||||
getLogger().debug("Found no state stored");
|
getLogger().debug("Found no state stored");
|
||||||
|
|
|
@ -207,7 +207,7 @@ public class GetHBase extends AbstractProcessor implements VisibilityFetchSuppor
|
||||||
@OnScheduled
|
@OnScheduled
|
||||||
public void parseColumns(final ProcessContext context) throws IOException {
|
public void parseColumns(final ProcessContext context) throws IOException {
|
||||||
final StateMap stateMap = context.getStateManager().getState(Scope.CLUSTER);
|
final StateMap stateMap = context.getStateManager().getState(Scope.CLUSTER);
|
||||||
if (stateMap.getVersion() < 0) {
|
if (!stateMap.getStateVersion().isPresent()) {
|
||||||
// no state has been stored in the State Manager - check if we have state stored in the
|
// no state has been stored in the State Manager - check if we have state stored in the
|
||||||
// DistributedMapCacheClient service and migrate it if so
|
// DistributedMapCacheClient service and migrate it if so
|
||||||
final DistributedMapCacheClient client = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);
|
final DistributedMapCacheClient client = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);
|
||||||
|
@ -461,7 +461,7 @@ public class GetHBase extends AbstractProcessor implements VisibilityFetchSuppor
|
||||||
|
|
||||||
private ScanResult getState(final ProcessSession session) throws IOException {
|
private ScanResult getState(final ProcessSession session) throws IOException {
|
||||||
final StateMap stateMap = session.getState(Scope.CLUSTER);
|
final StateMap stateMap = session.getState(Scope.CLUSTER);
|
||||||
if (stateMap.getVersion() < 0) {
|
if (!stateMap.getStateVersion().isPresent()) {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -21,6 +21,7 @@ import org.apache.nifi.components.state.StateMap;
|
||||||
import java.util.Collections;
|
import java.util.Collections;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
import java.util.Objects;
|
import java.util.Objects;
|
||||||
|
import java.util.Optional;
|
||||||
import java.util.TreeMap;
|
import java.util.TreeMap;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -49,6 +50,17 @@ public class RedisStateMap implements StateMap {
|
||||||
return version;
|
return version;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Optional<String> getStateVersion() {
|
||||||
|
final String stateVersion;
|
||||||
|
if (DEFAULT_VERSION == version || version == null) {
|
||||||
|
stateVersion = null;
|
||||||
|
} else {
|
||||||
|
stateVersion = String.valueOf(version);
|
||||||
|
}
|
||||||
|
return Optional.ofNullable(stateVersion);
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String get(String key) {
|
public String get(String key) {
|
||||||
return stateValues.get(key);
|
return stateValues.get(key);
|
||||||
|
|
|
@ -25,6 +25,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
|
||||||
import java.io.ByteArrayOutputStream;
|
import java.io.ByteArrayOutputStream;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
import java.util.Optional;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* A RedisStateMapSerDe that uses JSON as the underlying representation.
|
* A RedisStateMapSerDe that uses JSON as the underlying representation.
|
||||||
|
@ -35,6 +36,8 @@ public class RedisStateMapJsonSerDe implements RedisStateMapSerDe {
|
||||||
public static final String FIELD_ENCODING = "encodingVersion";
|
public static final String FIELD_ENCODING = "encodingVersion";
|
||||||
public static final String FIELD_STATE_VALUES = "stateValues";
|
public static final String FIELD_STATE_VALUES = "stateValues";
|
||||||
|
|
||||||
|
static final long EMPTY_VERSION = -1;
|
||||||
|
|
||||||
private final JsonFactory jsonFactory = new JsonFactory(new ObjectMapper());
|
private final JsonFactory jsonFactory = new JsonFactory(new ObjectMapper());
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@ -46,7 +49,10 @@ public class RedisStateMapJsonSerDe implements RedisStateMapSerDe {
|
||||||
try (final ByteArrayOutputStream out = new ByteArrayOutputStream()) {
|
try (final ByteArrayOutputStream out = new ByteArrayOutputStream()) {
|
||||||
final JsonGenerator jsonGenerator = jsonFactory.createGenerator(out);
|
final JsonGenerator jsonGenerator = jsonFactory.createGenerator(out);
|
||||||
jsonGenerator.writeStartObject();
|
jsonGenerator.writeStartObject();
|
||||||
jsonGenerator.writeNumberField(FIELD_VERSION, stateMap.getVersion());
|
|
||||||
|
final Optional<String> stateVersion = stateMap.getStateVersion();
|
||||||
|
final long version = stateVersion.map(Long::parseLong).orElse(EMPTY_VERSION);
|
||||||
|
jsonGenerator.writeNumberField(FIELD_VERSION, version);
|
||||||
jsonGenerator.writeNumberField(FIELD_ENCODING, stateMap.getEncodingVersion());
|
jsonGenerator.writeNumberField(FIELD_ENCODING, stateMap.getEncodingVersion());
|
||||||
|
|
||||||
jsonGenerator.writeObjectFieldStart(FIELD_STATE_VALUES);
|
jsonGenerator.writeObjectFieldStart(FIELD_STATE_VALUES);
|
||||||
|
|
|
@ -41,6 +41,7 @@ import java.util.Collection;
|
||||||
import java.util.Collections;
|
import java.util.Collections;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
import java.util.Optional;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* A StateProvider backed by Redis.
|
* A StateProvider backed by Redis.
|
||||||
|
@ -212,14 +213,14 @@ public class RedisStateProvider extends AbstractConfigurableComponent implements
|
||||||
final byte[] key = getComponentKey(componentId).getBytes(StandardCharsets.UTF_8);
|
final byte[] key = getComponentKey(componentId).getBytes(StandardCharsets.UTF_8);
|
||||||
redisConnection.watch(key);
|
redisConnection.watch(key);
|
||||||
|
|
||||||
final long prevVersion = oldValue == null ? -1L : oldValue.getVersion();
|
final Optional<String> previousVersion = oldValue == null ? Optional.empty() : oldValue.getStateVersion();
|
||||||
|
|
||||||
final byte[] currValue = redisConnection.get(key);
|
final byte[] currValue = redisConnection.get(key);
|
||||||
final RedisStateMap currStateMap = serDe.deserialize(currValue);
|
final RedisStateMap currStateMap = serDe.deserialize(currValue);
|
||||||
final long currVersion = currStateMap == null ? -1L : currStateMap.getVersion();
|
final Optional<String> currentVersion = currStateMap == null ? Optional.empty() : currStateMap.getStateVersion();
|
||||||
|
|
||||||
// the replace API expects that you can't call replace on a non-existing value, so unwatch and return
|
// the replace API expects that you can't call replace on a non-existing value, so unwatch and return
|
||||||
if (!allowReplaceMissing && currVersion == -1) {
|
if (!allowReplaceMissing && !currentVersion.isPresent()) {
|
||||||
redisConnection.unwatch();
|
redisConnection.unwatch();
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
@ -228,10 +229,11 @@ public class RedisStateProvider extends AbstractConfigurableComponent implements
|
||||||
redisConnection.multi();
|
redisConnection.multi();
|
||||||
|
|
||||||
// compare-and-set
|
// compare-and-set
|
||||||
if (prevVersion == currVersion) {
|
if (previousVersion.equals(currentVersion)) {
|
||||||
// build the new RedisStateMap incrementing the version, using latest encoding, and using the passed in values
|
// build the new RedisStateMap incrementing the version, using latest encoding, and using the passed in values
|
||||||
|
final long currentVersionNumber = currentVersion.map(Long::parseLong).orElse(RedisStateMapJsonSerDe.EMPTY_VERSION);
|
||||||
final RedisStateMap newStateMap = new RedisStateMap.Builder()
|
final RedisStateMap newStateMap = new RedisStateMap.Builder()
|
||||||
.version(currVersion + 1)
|
.version(currentVersionNumber + 1)
|
||||||
.encodingVersion(ENCODING_VERSION)
|
.encodingVersion(ENCODING_VERSION)
|
||||||
.stateValues(newValue)
|
.stateValues(newValue)
|
||||||
.build();
|
.build();
|
||||||
|
@ -263,7 +265,7 @@ public class RedisStateProvider extends AbstractConfigurableComponent implements
|
||||||
updated = replace(currStateMap, Collections.emptyMap(), componentId, true);
|
updated = replace(currStateMap, Collections.emptyMap(), componentId, true);
|
||||||
|
|
||||||
final String result = updated ? "successful" : "unsuccessful";
|
final String result = updated ? "successful" : "unsuccessful";
|
||||||
logger.debug("Attempt # {} to clear state for component {} was {}", new Object[] { attempted + 1, componentId, result});
|
logger.debug("Attempt # {} to clear state for component {} was {}", attempted + 1, componentId, result);
|
||||||
|
|
||||||
attempted++;
|
attempted++;
|
||||||
}
|
}
|
||||||
|
|
|
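Because replace() above is a compare-and-set keyed on the (now string-valued) state version, callers that need their write to land retry the read-modify-replace cycle, much as clear() does with its attempt counter. A minimal retry-loop sketch against the StateProvider API; the update function and the retry limit are illustrative.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.function.UnaryOperator;

import org.apache.nifi.components.state.StateMap;
import org.apache.nifi.components.state.StateProvider;

public class OptimisticStateUpdate {
    // Re-reads and retries while another writer changes the state version underneath us.
    public boolean update(final StateProvider provider, final String componentId,
                          final UnaryOperator<Map<String, String>> updateFunction) throws IOException {
        for (int attempt = 0; attempt < 10; attempt++) {
            final StateMap current = provider.getState(componentId);
            final Map<String, String> newValue = updateFunction.apply(new HashMap<>(current.toMap()));
            if (provider.replace(current, newValue, componentId)) {
                return true;
            }
        }
        return false;
    }
}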
@ -101,7 +101,7 @@ public class ITRedisStateProvider {
|
||||||
|
|
||||||
StateMap map = provider.getState(componentId);
|
StateMap map = provider.getState(componentId);
|
||||||
assertNotNull(map);
|
assertNotNull(map);
|
||||||
assertEquals(-1, map.getVersion());
|
assertFalse(map.getStateVersion().isPresent());
|
||||||
|
|
||||||
assertNotNull(map.toMap());
|
assertNotNull(map.toMap());
|
||||||
assertTrue(map.toMap().isEmpty());
|
assertTrue(map.toMap().isEmpty());
|
||||||
|
@ -109,7 +109,7 @@ public class ITRedisStateProvider {
|
||||||
|
|
||||||
map = provider.getState(componentId);
|
map = provider.getState(componentId);
|
||||||
assertNotNull(map);
|
assertNotNull(map);
|
||||||
assertEquals(0, map.getVersion());
|
assertTrue(map.getStateVersion().isPresent());
|
||||||
assertEquals("value1", map.get(key));
|
assertEquals("value1", map.get(key));
|
||||||
assertEquals("value1", map.toMap().get(key));
|
assertEquals("value1", map.toMap().get(key));
|
||||||
|
|
||||||
|
@ -119,7 +119,7 @@ public class ITRedisStateProvider {
|
||||||
|
|
||||||
map = provider.getState(componentId);
|
map = provider.getState(componentId);
|
||||||
assertEquals("value2", map.get(key));
|
assertEquals("value2", map.get(key));
|
||||||
assertEquals(1L, map.getVersion());
|
assertTrue(map.getStateVersion().isPresent());
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@ -131,7 +131,7 @@ public class ITRedisStateProvider {
|
||||||
StateMap stateMap = provider.getState(componentId);
|
StateMap stateMap = provider.getState(componentId);
|
||||||
assertNotNull(stateMap);
|
assertNotNull(stateMap);
|
||||||
assertEquals("value1", stateMap.get(key));
|
assertEquals("value1", stateMap.get(key));
|
||||||
assertEquals(0, stateMap.getVersion());
|
assertTrue(stateMap.getStateVersion().isPresent());
|
||||||
|
|
||||||
provider.setState(Collections.singletonMap(key, "intermediate value"), componentId);
|
provider.setState(Collections.singletonMap(key, "intermediate value"), componentId);
|
||||||
|
|
||||||
|
@ -140,7 +140,7 @@ public class ITRedisStateProvider {
|
||||||
assertEquals(key, stateMap.toMap().keySet().iterator().next());
|
assertEquals(key, stateMap.toMap().keySet().iterator().next());
|
||||||
assertEquals(1, stateMap.toMap().size());
|
assertEquals(1, stateMap.toMap().size());
|
||||||
assertEquals("intermediate value", stateMap.get(key));
|
assertEquals("intermediate value", stateMap.get(key));
|
||||||
assertEquals(1, stateMap.getVersion());
|
assertTrue(stateMap.getStateVersion().isPresent());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -164,7 +164,7 @@ public class ITRedisStateProvider {
|
||||||
map = stateMap.toMap();
|
map = stateMap.toMap();
|
||||||
assertNotNull(map);
|
assertNotNull(map);
|
||||||
assertTrue(map.isEmpty());
|
assertTrue(map.isEmpty());
|
||||||
assertEquals(1, stateMap.getVersion());
|
assertTrue(stateMap.getStateVersion().isPresent());
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@ -172,21 +172,21 @@ public class ITRedisStateProvider {
|
||||||
final StateProvider provider = getProvider();
|
final StateProvider provider = getProvider();
|
||||||
StateMap stateMap = provider.getState(componentId);
|
StateMap stateMap = provider.getState(componentId);
|
||||||
assertNotNull(stateMap);
|
assertNotNull(stateMap);
|
||||||
assertEquals(-1L, stateMap.getVersion());
|
assertFalse(stateMap.getStateVersion().isPresent());
|
||||||
assertTrue(stateMap.toMap().isEmpty());
|
assertTrue(stateMap.toMap().isEmpty());
|
||||||
|
|
||||||
provider.setState(Collections.singletonMap("testClear", "value"), componentId);
|
provider.setState(Collections.singletonMap("testClear", "value"), componentId);
|
||||||
|
|
||||||
stateMap = provider.getState(componentId);
|
stateMap = provider.getState(componentId);
|
||||||
assertNotNull(stateMap);
|
assertNotNull(stateMap);
|
||||||
assertEquals(0, stateMap.getVersion());
|
assertTrue(stateMap.getStateVersion().isPresent());
|
||||||
assertEquals("value", stateMap.get("testClear"));
|
assertEquals("value", stateMap.get("testClear"));
|
||||||
|
|
||||||
provider.clear(componentId);
|
provider.clear(componentId);
|
||||||
|
|
||||||
stateMap = provider.getState(componentId);
|
stateMap = provider.getState(componentId);
|
||||||
assertNotNull(stateMap);
|
assertNotNull(stateMap);
|
||||||
assertEquals(1L, stateMap.getVersion());
|
assertTrue(stateMap.getStateVersion().isPresent());
|
||||||
assertTrue(stateMap.toMap().isEmpty());
|
assertTrue(stateMap.toMap().isEmpty());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -238,7 +238,7 @@ public class ITRedisStateProvider {
|
||||||
|
|
||||||
provider.setState(newValue, componentId);
|
provider.setState(newValue, componentId);
|
||||||
final StateMap stateMap = provider.getState(componentId);
|
final StateMap stateMap = provider.getState(componentId);
|
||||||
assertEquals(0L, stateMap.getVersion());
|
assertTrue(stateMap.getStateVersion().isPresent());
|
||||||
|
|
||||||
provider.onComponentRemoved(componentId);
|
provider.onComponentRemoved(componentId);
|
||||||
|
|
||||||
|
@ -247,8 +247,8 @@ public class ITRedisStateProvider {
|
||||||
|
|
||||||
final StateMap stateMapAfterRemoval = provider.getState(componentId);
|
final StateMap stateMapAfterRemoval = provider.getState(componentId);
|
||||||
|
|
||||||
// version should be -1 because the state has been removed entirely.
|
// version should be not present because the state has been removed entirely.
|
||||||
assertEquals(-1L, stateMapAfterRemoval.getVersion());
|
assertFalse(stateMapAfterRemoval.getStateVersion().isPresent());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -22,6 +22,7 @@ import org.junit.jupiter.api.Test;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
|
||||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||||
|
import static org.junit.jupiter.api.Assertions.assertFalse;
|
||||||
import static org.junit.jupiter.api.Assertions.assertNotNull;
|
import static org.junit.jupiter.api.Assertions.assertNotNull;
|
||||||
import static org.junit.jupiter.api.Assertions.assertNull;
|
import static org.junit.jupiter.api.Assertions.assertNull;
|
||||||
|
|
||||||
|
@ -49,7 +50,7 @@ public class TestRedisStateMapJsonSerDe {
|
||||||
|
|
||||||
final RedisStateMap deserialized = serDe.deserialize(serialized);
|
final RedisStateMap deserialized = serDe.deserialize(serialized);
|
||||||
assertNotNull(deserialized);
|
assertNotNull(deserialized);
|
||||||
assertEquals(stateMap.getVersion(), deserialized.getVersion());
|
assertEquals(stateMap.getStateVersion(), deserialized.getStateVersion());
|
||||||
assertEquals(stateMap.getEncodingVersion(), deserialized.getEncodingVersion());
|
assertEquals(stateMap.getEncodingVersion(), deserialized.getEncodingVersion());
|
||||||
assertEquals(stateMap.toMap(), deserialized.toMap());
|
assertEquals(stateMap.toMap(), deserialized.toMap());
|
||||||
}
|
}
|
||||||
|
@ -73,7 +74,7 @@ public class TestRedisStateMapJsonSerDe {
|
||||||
|
|
||||||
final RedisStateMap deserialized = serDe.deserialize(serialized);
|
final RedisStateMap deserialized = serDe.deserialize(serialized);
|
||||||
assertNotNull(deserialized);
|
assertNotNull(deserialized);
|
||||||
assertEquals(RedisStateMap.DEFAULT_VERSION.longValue(), stateMap.getVersion());
|
assertFalse(stateMap.getStateVersion().isPresent());
|
||||||
assertEquals(RedisStateMap.DEFAULT_ENCODING, stateMap.getEncodingVersion());
|
assertEquals(RedisStateMap.DEFAULT_ENCODING, stateMap.getEncodingVersion());
|
||||||
assertNotNull(deserialized.toMap());
|
assertNotNull(deserialized.toMap());
|
||||||
assertEquals(0, deserialized.toMap().size());
|
assertEquals(0, deserialized.toMap().size());
|
||||||
|
|
|
@ -590,7 +590,7 @@ public class GetSplunk extends AbstractProcessor {
|
||||||
private TimeRange loadState(final ProcessSession session) throws IOException {
|
private TimeRange loadState(final ProcessSession session) throws IOException {
|
||||||
final StateMap stateMap = session.getState(Scope.CLUSTER);
|
final StateMap stateMap = session.getState(Scope.CLUSTER);
|
||||||
|
|
||||||
if (stateMap.getVersion() < 0) {
|
if (!stateMap.getStateVersion().isPresent()) {
|
||||||
getLogger().debug("No previous state found");
|
getLogger().debug("No previous state found");
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
|
@ -329,7 +329,7 @@ public class TestGetSplunk {
|
||||||
|
|
||||||
final StateMap state = runner.getStateManager().getState(Scope.CLUSTER);
|
final StateMap state = runner.getStateManager().getState(Scope.CLUSTER);
|
||||||
assertNotNull(state);
|
assertNotNull(state);
|
||||||
assertTrue(state.getVersion() > 0);
|
assertTrue(state.getStateVersion().isPresent());
|
||||||
|
|
||||||
// save the latest time from the first run which should be earliest time of next run
|
// save the latest time from the first run which should be earliest time of next run
|
||||||
final String lastLatest = state.get(GetSplunk.LATEST_TIME_KEY);
|
final String lastLatest = state.get(GetSplunk.LATEST_TIME_KEY);
|
||||||
|
@ -377,7 +377,7 @@ public class TestGetSplunk {
|
||||||
|
|
||||||
final StateMap state = runner.getStateManager().getState(Scope.CLUSTER);
|
final StateMap state = runner.getStateManager().getState(Scope.CLUSTER);
|
||||||
assertNotNull(state);
|
assertNotNull(state);
|
||||||
assertTrue(state.getVersion() > 0);
|
assertTrue(state.getStateVersion().isPresent());
|
||||||
|
|
||||||
// save the latest time from the first run which should be earliest time of next run
|
// save the latest time from the first run which should be earliest time of next run
|
||||||
final String lastLatest = state.get(GetSplunk.LATEST_TIME_KEY);
|
final String lastLatest = state.get(GetSplunk.LATEST_TIME_KEY);
|
||||||
|
|
|
@ -346,7 +346,7 @@ public class MonitorActivity extends AbstractProcessor {
|
||||||
}
|
}
|
||||||
newValues.put(STATE_KEY_LATEST_SUCCESS_TRANSFER, String.valueOf(now));
|
newValues.put(STATE_KEY_LATEST_SUCCESS_TRANSFER, String.valueOf(now));
|
||||||
|
|
||||||
if (state == null || state.getVersion() == -1) {
|
if (state == null || !state.getStateVersion().isPresent()) {
|
||||||
session.setState(newValues, Scope.CLUSTER);
|
session.setState(newValues, Scope.CLUSTER);
|
||||||
} else {
|
} else {
|
||||||
final String existingTimestamp = state.get(STATE_KEY_LATEST_SUCCESS_TRANSFER);
|
final String existingTimestamp = state.get(STATE_KEY_LATEST_SUCCESS_TRANSFER);
|
||||||
|
|
|
@ -414,7 +414,7 @@ public class TailFile extends AbstractProcessor {
|
||||||
|
|
||||||
final String startPosition = context.getProperty(START_POSITION).getValue();
|
final String startPosition = context.getProperty(START_POSITION).getValue();
|
||||||
|
|
||||||
if (stateMap.getVersion() == -1L || stateMap.toMap().isEmpty()) {
|
if (!stateMap.getStateVersion().isPresent() || stateMap.toMap().isEmpty()) {
|
||||||
//state has been cleared or never stored so recover as 'empty state'
|
//state has been cleared or never stored so recover as 'empty state'
|
||||||
initStates(filesToTail, Collections.emptyMap(), true, startPosition);
|
initStates(filesToTail, Collections.emptyMap(), true, startPosition);
|
||||||
recoverState(context, filesToTail, Collections.emptyMap());
|
recoverState(context, filesToTail, Collections.emptyMap());
|
||||||
|
|
|
@ -28,9 +28,12 @@ import java.util.Collections;
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
import java.util.Optional;
|
||||||
|
|
||||||
public class HashMapStateProvider implements StateProvider {
|
public class HashMapStateProvider implements StateProvider {
|
||||||
private static final int UNKNOWN_STATE_VERSION = -1;
|
private static final long VERSION_INCREMENT = 1;
|
||||||
|
private static final String INITIAL_VERSION = String.valueOf(VERSION_INCREMENT);
|
||||||
|
|
||||||
private final Map<String, StateMap> committedStates = new HashMap<>();
|
private final Map<String, StateMap> committedStates = new HashMap<>();
|
||||||
private final Map<String, StateMap> activeStates = new HashMap<>();
|
private final Map<String, StateMap> activeStates = new HashMap<>();
|
||||||
|
|
||||||
|
@ -67,8 +70,9 @@ public class HashMapStateProvider implements StateProvider {
|
||||||
@Override
|
@Override
|
||||||
public synchronized void setState(final Map<String, String> state, final String componentId) {
|
public synchronized void setState(final Map<String, String> state, final String componentId) {
|
||||||
final StateMap existing = getState(componentId);
|
final StateMap existing = getState(componentId);
|
||||||
final long version = existing == null ? UNKNOWN_STATE_VERSION : existing.getVersion();
|
final Optional<String> existingVersion = existing.getStateVersion();
|
||||||
final StateMap updated = new StandardStateMap(state, version + 1);
|
final String version = existingVersion.map(this::getIncrementedVersion).orElse(INITIAL_VERSION);
|
||||||
|
final StateMap updated = new StandardStateMap(state, Optional.of(version));
|
||||||
activeStates.put(componentId, updated);
|
activeStates.put(componentId, updated);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -79,13 +83,13 @@ public class HashMapStateProvider implements StateProvider {
|
||||||
existing = committedStates.get(componentId);
|
existing = committedStates.get(componentId);
|
||||||
}
|
}
|
||||||
|
|
||||||
return existing == null ? new StandardStateMap(Collections.emptyMap(), -1) : existing;
|
return existing == null ? new StandardStateMap(Collections.emptyMap(), Optional.empty()) : existing;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public synchronized boolean replace(final StateMap oldValue, final Map<String, String> newValue, final String componentId) {
|
public synchronized boolean replace(final StateMap oldValue, final Map<String, String> newValue, final String componentId) {
|
||||||
final StateMap existing = getState(componentId);
|
final StateMap existing = getState(componentId);
|
||||||
if (oldValue.getVersion() == existing.getVersion() && oldValue.toMap().equals(existing.toMap())) {
|
if (oldValue.getStateVersion().equals(existing.getStateVersion()) && oldValue.toMap().equals(existing.toMap())) {
|
||||||
setState(newValue, componentId);
|
setState(newValue, componentId);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -148,4 +152,10 @@ public class HashMapStateProvider implements StateProvider {
|
||||||
public String getIdentifier() {
|
public String getIdentifier() {
|
||||||
return "stateless-state-provider";
|
return "stateless-state-provider";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private String getIncrementedVersion(final String currentVersion) {
|
||||||
|
final long versionNumber = Long.parseLong(currentVersion);
|
||||||
|
final long version = versionNumber + VERSION_INCREMENT;
|
||||||
|
return String.valueOf(version);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
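With getStateVersion() returning an opaque string, each provider is free to pick its own scheme: HashMapStateProvider above increments a numeric string purely for its own bookkeeping, and version comparison (as in replace()) relies on equality only, never ordering. A small, runnable illustration of that equality-only contract with made-up version values:

import java.util.Optional;

public class StateVersionComparison {
    public static void main(final String[] args) {
        final Optional<String> previous = Optional.of("2");
        final Optional<String> unchanged = Optional.of("2");
        final Optional<String> concurrent = Optional.of("3");

        // Equal versions mean no intervening write, so a compare-and-set may proceed.
        System.out.println("unchanged: " + previous.equals(unchanged));  // true
        // A different version means another writer got there first; the replace is rejected.
        System.out.println("changed:   " + previous.equals(concurrent)); // false
    }
}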
@ -667,14 +667,13 @@ public class StandardStatelessFlow implements StatelessDataflow {
|
||||||
for (final Map.Entry<String, StateMap> entry : stateMaps.entrySet()) {
|
for (final Map.Entry<String, StateMap> entry : stateMaps.entrySet()) {
|
||||||
final String componentId = entry.getKey();
|
final String componentId = entry.getKey();
|
||||||
final StateMap stateMap = entry.getValue();
|
final StateMap stateMap = entry.getValue();
|
||||||
if (stateMap.getVersion() == -1) {
|
if (!stateMap.getStateVersion().isPresent()) {
|
||||||
// Version of -1 indicates no state has been stored.
|
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
final SerializableStateMap serializableStateMap = new SerializableStateMap();
|
final SerializableStateMap serializableStateMap = new SerializableStateMap();
|
||||||
serializableStateMap.setStateValues(stateMap.toMap());
|
serializableStateMap.setStateValues(stateMap.toMap());
|
||||||
serializableStateMap.setVersion(stateMap.getVersion());
|
serializableStateMap.setVersion(stateMap.getStateVersion().orElse(null));
|
||||||
|
|
||||||
final String serialized;
|
final String serialized;
|
||||||
try {
|
try {
|
||||||
|
@ -716,7 +715,7 @@ public class StandardStatelessFlow implements StatelessDataflow {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
final StateMap stateMap = new StandardStateMap(deserialized.getStateValues(), deserialized.getVersion());
|
final StateMap stateMap = new StandardStateMap(deserialized.getStateValues(), Optional.ofNullable(deserialized.getVersion()));
|
||||||
deserializedStateMaps.put(componentId, stateMap);
|
deserializedStateMaps.put(componentId, stateMap);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -779,14 +778,14 @@ public class StandardStatelessFlow implements StatelessDataflow {
|
||||||
}
|
}
|
||||||
|
|
||||||
private static class SerializableStateMap {
|
private static class SerializableStateMap {
|
||||||
private long version;
|
private String version;
|
||||||
private Map<String, String> stateValues;
|
private Map<String, String> stateValues;
|
||||||
|
|
||||||
public long getVersion() {
|
public String getVersion() {
|
||||||
return version;
|
return version;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setVersion(final long version) {
|
public void setVersion(final String version) {
|
||||||
this.version = version;
|
this.version = version;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
pom.xml
|
@ -106,6 +106,7 @@
|
||||||
<com.amazonaws.version>1.12.371</com.amazonaws.version>
|
<com.amazonaws.version>1.12.371</com.amazonaws.version>
|
||||||
<software.amazon.awssdk.version>2.17.295</software.amazon.awssdk.version>
|
<software.amazon.awssdk.version>2.17.295</software.amazon.awssdk.version>
|
||||||
<gson.version>2.10.1</gson.version>
|
<gson.version>2.10.1</gson.version>
|
||||||
|
<io.fabric8.kubernetes.client.version>6.3.1</io.fabric8.kubernetes.client.version>
|
||||||
<kotlin.version>1.8.10</kotlin.version>
|
<kotlin.version>1.8.10</kotlin.version>
|
||||||
<okhttp.version>4.10.0</okhttp.version>
|
<okhttp.version>4.10.0</okhttp.version>
|
||||||
<org.apache.commons.cli.version>1.5.0</org.apache.commons.cli.version>
|
<org.apache.commons.cli.version>1.5.0</org.apache.commons.cli.version>
|
||||||
|
@ -588,6 +589,21 @@
|
||||||
<artifactId>swagger-annotations</artifactId>
|
<artifactId>swagger-annotations</artifactId>
|
||||||
<version>${swagger.annotations.version}</version>
|
<version>${swagger.annotations.version}</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>io.fabric8</groupId>
|
||||||
|
<artifactId>kubernetes-client-api</artifactId>
|
||||||
|
<version>${io.fabric8.kubernetes.client.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>io.fabric8</groupId>
|
||||||
|
<artifactId>kubernetes-client</artifactId>
|
||||||
|
<version>${io.fabric8.kubernetes.client.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>io.fabric8</groupId>
|
||||||
|
<artifactId>kubernetes-server-mock</artifactId>
|
||||||
|
<version>${io.fabric8.kubernetes.client.version}</version>
|
||||||
|
</dependency>
|
||||||
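The kubernetes-client artifacts added to dependency management above are what the new KubernetesConfigMapStateProvider and Lease-based leader election build on. As a hedged illustration of the Fabric8 API in play, the sketch below reads a ConfigMap's data map; the namespace and ConfigMap name are assumptions, not the provider's actual naming scheme.

import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;

public class ConfigMapStateRead {
    public static void main(final String[] args) {
        try (KubernetesClient client = new KubernetesClientBuilder().build()) {
            final ConfigMap configMap = client.configMaps()
                    .inNamespace("nifi")
                    .withName("nifi-component-state-example")
                    .get();
            if (configMap != null && configMap.getData() != null) {
                configMap.getData().forEach((key, value) -> System.out.println(key + "=" + value));
            }
        }
    }
}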
<dependency>
|
<dependency>
|
||||||
<groupId>org.yaml</groupId>
|
<groupId>org.yaml</groupId>
|
||||||
<artifactId>snakeyaml</artifactId>
|
<artifactId>snakeyaml</artifactId>
|
||||||
|
|