Merge branch 'NIFI-259'

This commit is contained in:
Aldrin Piri 2016-02-05 14:09:29 -05:00
commit 4df6512126
191 changed files with 8776 additions and 1344 deletions

View File

@ -33,5 +33,6 @@ public enum Operation {
Enable,
Disable,
Batch,
Purge;
Purge,
ClearState;
}

View File

@ -0,0 +1,54 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.annotation.behavior;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.apache.nifi.components.state.Scope;
import org.apache.nifi.components.state.StateManager;
/**
* <p>
* Annotation that a Processor, ReportingTask, or Controller Service can use to indicate
* that the component makes use of the {@link StateManager}. This annotation provides the
* user with a description of what information is being stored so that the user is able to
* understand what is shown to them and know what they are clearing should they choose to
* clear the state. Additionally, the UI will not show any state information to users if
* this annotation is not present.
* </p>
*/
@Documented
@Target({ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
@Inherited
public @interface Stateful {
/**
* Provides a description of what information is being stored in the {@link StateManager}
*/
String description();
/**
* Indicates the Scope(s) associated with the State that is stored and retrieved.
*/
Scope[] scopes();
}
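
For reference, a minimal sketch of how a component might apply this annotation follows. The processor class and its description are hypothetical and are not part of this change; they simply illustrate the `description` and `scopes` elements defined above.

[source,java]
----
import org.apache.nifi.annotation.behavior.Stateful;
import org.apache.nifi.components.state.Scope;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.exception.ProcessException;

// Hypothetical processor, shown only to illustrate the annotation defined above.
@Stateful(scopes = Scope.CLUSTER,
    description = "Stores the timestamp of the last entry that was processed so that processing can "
        + "resume from that point after a restart, or on another node in the cluster.")
public class ExampleListingProcessor extends AbstractProcessor {
    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
        // State would be stored and retrieved here via context.getStateManager().
    }
}
----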

View File

@ -0,0 +1,52 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.annotation.lifecycle;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* <p>
* Marker Annotation that a Processor, Reporting Task, or Controller Service can use to indicate
* that the method with this Annotation should be invoked whenever the component's configuration
* is restored after a restart of NiFi.
* </p>
*
* <p>
* Methods with this annotation must take zero arguments.
* </p>
*
* <p>
* Whenever a new component is added to the flow, this method will be called immediately, since
* there is no configuration to restore; in that case, all of the configuration is trivially
* considered to have been restored already.
* </p>
*
* @since 0.5.0
*/
@Documented
@Target({ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
@Inherited
public @interface OnConfigurationRestored {
}

View File

@ -19,6 +19,8 @@ package org.apache.nifi.components;
import java.util.Collection;
import java.util.List;
import org.apache.nifi.annotation.lifecycle.OnConfigurationRestored;
public interface ConfigurableComponent {
/**
@ -49,11 +51,19 @@ public interface ConfigurableComponent {
* necessary lazily evaluate it. Any throwable that escapes this method will
* simply be ignored.
*
When NiFi is restarted, this method will be called for each 'dynamic' property that is
added, as well as for each property that is not set to its default value (i.e., each property
whose value has been modified from the default). If it is undesirable for your use case
* to react to properties being modified in this situation, you can add the {@link OnConfigurationRestored}
* annotation to a method - this will allow the Processor to know when configuration has
* been restored, so that it can determine whether or not to perform some action in the
* onPropertyModified method.
*
* @param descriptor the descriptor for the property being modified
* @param oldValue the value that was previously set, or null if no value
* was previously set for this property
* was previously set for this property
* @param newValue the new property value or if null indicates the property
* was removed
* was removed
*/
void onPropertyModified(PropertyDescriptor descriptor, String oldValue, String newValue);

View File

@ -0,0 +1,41 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.components.state;
/**
* A Scope represents how a NiFi component's state is to be stored and retrieved when running in a cluster.
*/
public enum Scope {
/**
* State is to be treated as "global" across the cluster. I.e., the same component on all nodes will
* have access to the same state.
*/
CLUSTER,
/**
* State is to be treated local to the node. I.e., the same component will have different state on each
* node in the cluster.
*/
LOCAL;
@Override
public String toString() {
return name();
}
}

View File

@ -0,0 +1,99 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.components.state;
import java.io.IOException;
import java.util.Map;
import org.apache.nifi.annotation.behavior.Stateful;
/**
* <p>
* The StateManager is responsible for providing NiFi components a mechanism for storing
* and retrieving state.
* </p>
*
* <p>
* When calling methods in this class, the {@link Scope} is used in order to specify whether
* state should be stored/retrieved from the local state or the clustered state. However, if
* the instance of NiFi is not clustered (or is disconnected from its cluster), the Scope is
* not really relevant and the local state will be used. This allows component
* developers to not concern themselves with whether or not a particular instance of NiFi is
* clustered. Instead, developers should assume that the instance is indeed clustered and write
* the component accordingly. If not clustered, the component will still behave in the same
* manner, as a standalone node could be thought of as a "cluster of 1."
* </p>
*
* <p>
* This mechanism is designed to allow developers to easily store and retrieve small amounts of state.
* The storage mechanism is implementation-specific, but is typically either stored on a remote system
* or on disk. For that reason, one should consider the cost of storing and retrieving this data, and the
* amount of data should be kept to the minimum required.
* </p>
*
* <p>
* Any component that wishes to use the StateManager should also use the {@link Stateful} annotation to provide
* a description of what state is being stored and what Scope is used. If this annotation is not present, the UI
* will not expose such information or allow DFMs to clear the state.
* </p>
*/
public interface StateManager {
/**
* Updates the value of the component's state, setting it to the given value
*
* @param state the value to change the state to
* @param scope the scope to use when storing the state
*
* @throws IOException if unable to communicate with the underlying storage mechanism
*/
void setState(Map<String, String> state, Scope scope) throws IOException;
/**
* Returns the current state for the component. This return value will never be <code>null</code>.
* If the state has not yet been set, the StateMap's version will be -1, and the map of values will be empty.
*
* @param scope the scope to use when fetching the state
* @return the current state for the component
* @throws IOException if unable to communicate with the underlying storage mechanism
*/
StateMap getState(Scope scope) throws IOException;
/**
* Updates the value of the component's state to the new value if and only if the value currently
* is the same as the given oldValue.
*
* @param oldValue the old value to compare against
* @param newValue the new value to use if and only if the state's value is the same as the given oldValue
* @param scope the scope to use for storing the new state
* @return <code>true</code> if the state was updated to the new value, <code>false</code> if the state's value was not
* equal to oldValue
*
* @throws IOException if unable to communicate with the underlying storage mechanism
*/
boolean replace(StateMap oldValue, Map<String, String> newValue, Scope scope) throws IOException;
/**
* Clears all keys and values from the component's state
*
* @param scope the scope whose values should be cleared
*
* @throws IOException if unable to communicate with the underlying storage mechanism
*/
void clear(Scope scope) throws IOException;
}
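
To make the compare-and-set semantics of `replace` concrete, a hedged usage sketch follows. The key name and the counter logic are illustrative only and are not part of this change.

[source,java]
----
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.nifi.components.state.Scope;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.components.state.StateMap;

// Illustrative helper: atomically increments a counter stored in cluster-wide state
// by retrying until replace() observes an unchanged StateMap.
public class StateUpdateExample {
    public void incrementCounter(final StateManager stateManager) throws IOException {
        boolean updated = false;
        while (!updated) {
            final StateMap oldState = stateManager.getState(Scope.CLUSTER);
            final String previous = oldState.get("count");
            final long count = previous == null ? 0L : Long.parseLong(previous);

            final Map<String, String> newState = new HashMap<>(oldState.toMap());
            newState.put("count", String.valueOf(count + 1));

            // Succeeds only if the stored state still matches oldState.
            updated = stateManager.replace(oldState, newState, Scope.CLUSTER);
        }
    }
}
----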

View File

@ -0,0 +1,51 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.components.state;
import java.util.Map;
/**
* Provides a representation of a component's state at some point in time.
*/
public interface StateMap {
/**
* Each time that a component's state is updated, the state is assigned a new version.
* This version can then be used to atomically update state by the backing storage mechanism.
* Though this number is monotonically increasing, it should not be expected to always increment
* from X to X+1; i.e., version numbers may be skipped.
*
* @return the version associated with the state
*/
long getVersion();
/**
* Returns the value associated with the given key
*
* @param key the key whose value should be retrieved
* @return the value associated with the given key, or <code>null</code> if no value is associated
* with this key.
*/
String get(String key);
/**
* Returns an immutable Map representation of all keys and values for the state of a component.
*
* @return an immutable Map representation of all keys and values for the state of a component.
*/
Map<String, String> toMap();
}

View File

@ -0,0 +1,133 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.components.state;
import java.io.IOException;
import java.util.Map;
import org.apache.nifi.components.ConfigurableComponent;
/**
* <p>
* Provides a mechanism by which components can store and retrieve state. Depending on the Provider, the state
* may be stored locally, or it may be stored on a remote resource.
* </p>
*
* <p>
* Which implementation should be used for local and clustered state is configured in the NiFi properties file.
* It is therefore possible to provide custom implementations of this interface. Note, however, that this interface
* is new as of version 0.5.0 of Apache NiFi and may not be considered "stable" as of yet. Therefore, it is subject
* to change without notice, so providing custom implementations is cautioned against until the API becomes more stable.
* </p>
*
* @since 0.5.0
*/
public interface StateProvider extends ConfigurableComponent {
/**
* Initializes the StateProvider so that it is capable of being used. This method will be called
* once before any of the other methods are called and will not be called again until the {@link #shutdown()}
* method has been called
*
* @param context the initialization context that can be used to prepare the state provider for use
*/
void initialize(StateProviderInitializationContext context) throws IOException;
/**
* Shuts down the StateProvider and cleans up any resources held by it. Once this method has returned, the
* StateProvider may be initialized once again via the {@link #initialize(StateProviderInitializationContext)} method.
*/
void shutdown();
/**
* Updates the value of the component's state, setting it to the
* given value
*
* @param state the value to change the state to
* @param componentId the id of the component for which state is being set
*
* @throws IOException if unable to communicate with the underlying storage mechanism
*/
void setState(Map<String, String> state, String componentId) throws IOException;
/**
* Returns the currently configured state for the component. The returned StateMap will never be null.
* The version of the StateMap will be -1 and the state will contain no key/value pairs if the state has never been set.
*
* @param componentId the id of the component for which state is to be retrieved
* @return the currently configured value for the component's state
*
* @throws IOException if unable to communicate with the underlying storage mechanism
*/
StateMap getState(String componentId) throws IOException;
/**
* Updates the value of the component's state to the new value if and only if the value currently
* is the same as the given oldValue.
*
* @param oldValue the old value to compare against
* @param newValue the new value to use if and only if the state's value is the same as the given oldValue
* @param componentId the id of the component for which state is being retrieved
* @return <code>true</code> if the state was updated to the new value, <code>false</code> if the state's value was not
* equal to oldValue
*
* @throws IOException if unable to communicate with the underlying storage mechanism
*/
boolean replace(StateMap oldValue, Map<String, String> newValue, String componentId) throws IOException;
/**
* Removes all keys and values from the component's state
*
* @param componentId the id of the component for which state is being cleared
*
* @throws IOException if unable to communicate with the underlying storage mechanism
*/
void clear(String componentId) throws IOException;
/**
* This method is called whenever a component is removed from the NiFi instance. This allows the State Provider to
* perform tasks when a component is removed in order to clean up resources that may be associated with that component
*
* @param componentId the ID of the component that was removed from the NiFi instance
* @throws IOException if unable to perform the necessary cleanup
*/
void onComponentRemoved(String componentId) throws IOException;
/**
* Notifies the state provider that it should begin servicing requests to store and retrieve state
*/
void enable();
/**
* Notifies the state provider that it should stop servicing requests to store and retrieve state and instead throw a ProviderDisabledException if any request is made to do so
*/
void disable();
/**
* @return <code>true</code> if the provider is enabled, <code>false</code> otherwise.
*/
boolean isEnabled();
/**
* Provides a listing of {@link Scope}s supported by the StateProvider
* @return the {@link Scope}s supported by the configuration
*/
Scope[] getSupportedScopes();
}

View File

@ -0,0 +1,56 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.components.state;
import java.util.Map;
import javax.net.ssl.SSLContext;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.PropertyValue;
/**
* This interface defines an initialization context that is passed to a {@link StateProvider} when it
* is initialized.
*/
public interface StateProviderInitializationContext {
/**
* @return the identifier of the StateProvider
*/
String getIdentifier();
/**
* @return a Map of Property Descriptors to their configured values
*/
Map<PropertyDescriptor, PropertyValue> getProperties();
/**
* Returns the configured value for the given property
*
* @param property the property to retrieve the value for
*
* @return the configured value for the property.
*/
PropertyValue getProperty(PropertyDescriptor property);
/**
* @return the SSL Context that should be used to communicate with remote resources,
* or <code>null</code> if no SSLContext has been configured
*/
SSLContext getSSLContext();
}

View File

@ -21,6 +21,7 @@ import java.util.Map;
import org.apache.nifi.components.AbstractConfigurableComponent;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.PropertyValue;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.controller.annotation.OnConfigured;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.ProcessorInitializationContext;
@ -32,12 +33,14 @@ public abstract class AbstractControllerService extends AbstractConfigurableComp
private ControllerServiceLookup serviceLookup;
private volatile ConfigurationContext configContext;
private ComponentLog logger;
private StateManager stateManager;
@Override
public final void initialize(final ControllerServiceInitializationContext context) throws InitializationException {
this.identifier = context.getIdentifier();
serviceLookup = context.getControllerServiceLookup();
logger = context.getLogger();
stateManager = context.getStateManager();
init(context);
}
@ -93,4 +96,11 @@ public abstract class AbstractControllerService extends AbstractConfigurableComp
protected ComponentLog getLogger() {
return logger;
}
/**
* @return the StateManager that can be used to store and retrieve state for this Controller Service
*/
protected StateManager getStateManager() {
return stateManager;
}
}

View File

@ -16,6 +16,7 @@
*/
package org.apache.nifi.controller;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.logging.ComponentLog;
public interface ControllerServiceInitializationContext {
@ -37,4 +38,9 @@ public interface ControllerServiceInitializationContext {
* way and generate bulletins when appropriate
*/
ComponentLog getLogger();
/**
* @return the StateManager that can be used to store and retrieve state for this component
*/
StateManager getStateManager();
}

View File

@ -19,6 +19,7 @@ package org.apache.nifi.processor;
import java.util.Collections;
import java.util.Set;
import org.apache.nifi.annotation.lifecycle.OnConfigurationRestored;
import org.apache.nifi.annotation.lifecycle.OnScheduled;
import org.apache.nifi.annotation.lifecycle.OnUnscheduled;
import org.apache.nifi.components.AbstractConfigurableComponent;
@ -47,6 +48,7 @@ public abstract class AbstractSessionFactoryProcessor extends AbstractConfigurab
private String identifier;
private ProcessorLog logger;
private volatile boolean scheduled = false;
private volatile boolean configurationRestored = false;
private ControllerServiceLookup serviceLookup;
private String description;
@ -104,6 +106,22 @@ public abstract class AbstractSessionFactoryProcessor extends AbstractConfigurab
scheduled = false;
}
@OnConfigurationRestored
public final void updateConfiguredRestoredTrue() {
configurationRestored = true;
}
/**
* Returns a boolean indicating whether or not the configuration of the Processor has already been restored.
* See the {@link OnConfigurationRestored} annotation for more information about what it means for the configuration
* to be restored.
*
* @return <code>true</code> if configuration has been restored, <code>false</code> otherwise.
*/
protected boolean isConfigurationRestored() {
return configurationRestored;
}
@Override
public final String getIdentifier() {
return identifier;

View File

@ -21,6 +21,7 @@ import java.util.Set;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.PropertyValue;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.controller.ControllerServiceLookup;
/**
@ -154,4 +155,9 @@ public interface ProcessContext {
* does not allow the Expression Language, even if a seemingly valid Expression is present in the value.
*/
boolean isExpressionLanguagePresent(PropertyDescriptor property);
/**
* @return the StateManager that can be used to store and retrieve state for this component
*/
StateManager getStateManager();
}

View File

@ -20,6 +20,7 @@ import java.util.Map;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.PropertyValue;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.controller.ControllerServiceLookup;
/**
@ -86,4 +87,9 @@ public interface ReportingContext {
* Controller Services
*/
ControllerServiceLookup getControllerServiceLookup();
/**
* @return the StateManager that can be used to store and retrieve state for this component
*/
StateManager getStateManager();
}

View File

@ -303,6 +303,12 @@ language governing permissions and limitations under the License. -->
<nifi.templates.directory>./conf/templates</nifi.templates.directory>
<nifi.database.directory>./database_repository</nifi.database.directory>
<nifi.state.management.configuration.file>./conf/state-management.xml</nifi.state.management.configuration.file>
<nifi.state.management.embedded.zookeeper.start>false</nifi.state.management.embedded.zookeeper.start>
<nifi.state.management.embedded.zookeeper.properties>./conf/zookeeper.properties</nifi.state.management.embedded.zookeeper.properties>
<nifi.state.management.provider.local>local-provider</nifi.state.management.provider.local>
<nifi.state.management.provider.cluster>zk-provider</nifi.state.management.provider.cluster>
<nifi.flowfile.repository.implementation>org.apache.nifi.controller.repository.WriteAheadFlowFileRepository</nifi.flowfile.repository.implementation>
<nifi.flowfile.repository.directory>./flowfile_repository</nifi.flowfile.repository.directory>
<nifi.flowfile.repository.partitions>256</nifi.flowfile.repository.partitions>

View File

@ -189,6 +189,13 @@ public class NiFiProperties extends Properties {
// kerberos properties
public static final String KERBEROS_KRB5_FILE = "nifi.kerberos.krb5.file";
// state management
public static final String STATE_MANAGEMENT_CONFIG_FILE = "nifi.state.management.configuration.file";
public static final String STATE_MANAGEMENT_LOCAL_PROVIDER_ID = "nifi.state.management.provider.local";
public static final String STATE_MANAGEMENT_CLUSTER_PROVIDER_ID = "nifi.state.management.provider.cluster";
public static final String STATE_MANAGEMENT_START_EMBEDDED_ZOOKEEPER = "nifi.state.management.embedded.zookeeper.start";
public static final String STATE_MANAGEMENT_ZOOKEEPER_PROPERTIES = "nifi.state.management.embedded.zookeeper.properties";
// defaults
public static final String DEFAULT_TITLE = "NiFi";
public static final Boolean DEFAULT_AUTO_RESUME_STATE = true;
@ -236,6 +243,9 @@ public class NiFiProperties extends Properties {
public static final int DEFAULT_CLUSTER_MANAGER_PROTOCOL_THREADS = 10;
public static final String DEFAULT_CLUSTER_MANAGER_SAFEMODE_DURATION = "0 sec";
// state management defaults
public static final String DEFAULT_STATE_MANAGEMENT_CONFIG_FILE = "conf/state-management.xml";
private NiFiProperties() {
super();
}
@ -985,4 +995,25 @@ public class NiFiProperties extends Properties {
public String getBoredYieldDuration() {
return getProperty(BORED_YIELD_DURATION, DEFAULT_BORED_YIELD_DURATION);
}
public File getStateManagementConfigFile() {
return new File(getProperty(STATE_MANAGEMENT_CONFIG_FILE, DEFAULT_STATE_MANAGEMENT_CONFIG_FILE));
}
public String getLocalStateProviderId() {
return getProperty(STATE_MANAGEMENT_LOCAL_PROVIDER_ID);
}
public String getClusterStateProviderId() {
return getProperty(STATE_MANAGEMENT_CLUSTER_PROVIDER_ID);
}
public File getEmbeddedZooKeeperPropertiesFile() {
final String filename = getProperty(STATE_MANAGEMENT_ZOOKEEPER_PROPERTIES);
return filename == null ? null : new File(filename);
}
public boolean isStartEmbeddedZooKeeper() {
return Boolean.parseBoolean(getProperty(STATE_MANAGEMENT_START_EMBEDDED_ZOOKEEPER));
}
}

View File

@ -182,6 +182,7 @@ public class EndpointConnectionPool {
public Thread newThread(final Runnable r) {
final Thread thread = defaultFactory.newThread(r);
thread.setName("NiFi Site-to-Site Connection Pool Maintenance");
thread.setDaemon(true);
return thread;
}
});
@ -761,7 +762,7 @@ public class EndpointConnectionPool {
final int index = n % destinations.size();
PeerStatus status = destinations.get(index);
if (status == null) {
final PeerDescription description = new PeerDescription(nodeInfo.getHostname(), nodeInfo.getSiteToSitePort(), nodeInfo.isSiteToSiteSecure());
final PeerDescription description = new PeerDescription(nodeInfo.getSiteToSiteHostname(), nodeInfo.getSiteToSitePort(), nodeInfo.isSiteToSiteSecure());
status = new PeerStatus(description, nodeInfo.getTotalFlowFiles());
destinations.set(index, status);
break;

View File

@ -18,23 +18,23 @@ package org.apache.nifi.remote.cluster;
public class NodeInformation {
private final String hostname;
private final String siteToSiteHostname;
private final Integer siteToSitePort;
private final int apiPort;
private final boolean isSiteToSiteSecure;
private final int totalFlowFiles;
public NodeInformation(final String hostname, final Integer siteToSitePort, final int apiPort,
public NodeInformation(final String siteToSiteHostname, final Integer siteToSitePort, final int apiPort,
final boolean isSiteToSiteSecure, final int totalFlowFiles) {
this.hostname = hostname;
this.siteToSiteHostname = siteToSiteHostname;
this.siteToSitePort = siteToSitePort;
this.apiPort = apiPort;
this.isSiteToSiteSecure = isSiteToSiteSecure;
this.totalFlowFiles = totalFlowFiles;
}
public String getHostname() {
return hostname;
public String getSiteToSiteHostname() {
return siteToSiteHostname;
}
public int getAPIPort() {
@ -66,7 +66,7 @@ public class NodeInformation {
}
final NodeInformation other = (NodeInformation) obj;
if (!hostname.equals(other.hostname)) {
if (!siteToSiteHostname.equals(other.siteToSiteHostname)) {
return false;
}
if (siteToSitePort == null && other.siteToSitePort != null) {
@ -88,11 +88,11 @@ public class NodeInformation {
@Override
public int hashCode() {
return 83832 + hostname.hashCode() + (siteToSitePort == null ? 8 : siteToSitePort.hashCode()) + apiPort + (isSiteToSiteSecure ? 3829 : 0);
return 83832 + siteToSiteHostname.hashCode() + (siteToSitePort == null ? 8 : siteToSitePort.hashCode()) + apiPort + (isSiteToSiteSecure ? 3829 : 0);
}
@Override
public String toString() {
return "Node[" + hostname + ":" + apiPort + "]";
return "Node[" + siteToSiteHostname + ":" + apiPort + "]";
}
}

View File

@ -30,7 +30,7 @@ public class NodeInformationAdapter extends XmlAdapter<AdaptedNodeInformation, N
@Override
public AdaptedNodeInformation marshal(final NodeInformation nodeInformation) throws Exception {
final AdaptedNodeInformation adapted = new AdaptedNodeInformation();
adapted.setHostname(nodeInformation.getHostname());
adapted.setHostname(nodeInformation.getSiteToSiteHostname());
adapted.setSiteToSitePort(nodeInformation.getSiteToSitePort());
adapted.setApiPort(nodeInformation.getAPIPort());
adapted.setSiteToSiteSecure(nodeInformation.isSiteToSiteSecure());

View File

@ -560,6 +560,7 @@ If it is not possible to install the unlimited strength jurisdiction policies, t
It is preferable to request upstream/downstream systems to switch to https://cwiki.apache.org/confluence/display/NIFI/Encryption+Information[keyed encryption] or use a "strong" https://cwiki.apache.org/confluence/display/NIFI/Key+Derivation+Function+Explanations[Key Derivation Function (KDF) supported by NiFi].
[[clustering]]
Clustering Configuration
------------------------
@ -626,6 +627,7 @@ For the NCM, the minimum properties to configure are as follows:
For Node 1, the minimum properties to configure are as follows:
* Under the Web Properties, set either the http or https port that you want Node 1 to run on. If the NCM is running on the same server, choose a different web port for Node 1. Also, consider whether you need to set the http or https host property.
* Under the State Management section, set the `nifi.state.management.provider.cluster` property to the identifier of the Cluster State Provider. Ensure that the Cluster State Provider has been configured in the _state-management.xml_ file. See <<state_providers>> for more information.
* Under Cluster Node Properties, set the following:
** nifi.cluster.is.node - Set this to _true_.
** nifi.cluster.node.address - Set this to the fully qualified hostname of the node. If left blank, it defaults to "localhost".
@ -636,6 +638,7 @@ For Node 1, the minimum properties to configure are as follows:
For Node 2, the minimum properties to configure are as follows:
* Under the Web Properties, set either the http or https port that you want Node 2 to run on. Also, consider whether you need to set the http or https host property.
* Under the State Management section, set the `nifi.state.management.provider.cluster` property to the identifier of the Cluster State Provider. Ensure that the Cluster State Provider has been configured in the _state-management.xml_ file. See <<state_providers>> for more information.
* Under the Cluster Node Properties, set the following:
** nifi.cluster.is.node - Set this to _true_.
** nifi.cluster.node.address - Set this to the fully qualified hostname of the node. If left blank, it defaults to "localhost".
@ -660,6 +663,305 @@ additivity="false">
[[state_management]]
State Management
----------------
NiFi provides a mechanism for Processors, Reporting Tasks, Controller Services, and the framework itself to persist state. This
allows a Processor, for example, to resume from the place where it left off after NiFi is restarted. Additionally, it allows
a Processor to store some piece of information so that the Processor can access that information from all of the different nodes
in the cluster. This allows one node to pick up where another node left off, or to coordinate across all of the nodes in a cluster.
[[state_providers]]
=== Configuring State Providers
When a component decides to store or retrieve state, it does so by providing a "Scope" - either Node-local or Cluster-wide. The
mechanism that is used to store and retrieve this state is then determined based on this Scope, as well as the configured State
Providers. The _nifi.properties_ file contains three different properties that are relevant to configuring these State Providers.
The first, the `nifi.state.management.configuration.file` property, specifies an external XML file that is used for configuring
the local and cluster-wide State Providers. This XML file may contain configurations for multiple providers, so the
`nifi.state.management.provider.local` property provides the identifier of the local State Provider configured in this XML file.
Similarly, the `nifi.state.management.provider.cluster` property provides the identifier of the cluster-wide State Provider
configured in this XML file.
This XML file consists of a top-level `state-management` element, which has one or more `local-provider` and zero or more
`cluster-provider` elements. Each of these elements then contains an `id` element that is used to specify the identifier that can
be referenced in the _nifi.properties_ file, as well as a `class` element that specifies the fully-qualified class name to use
in order to instantiate the State Provider. Finally, each of these elements may have zero or more `property` elements. Each
`property` element has a `name` attribute, which is the name of the property that the State Provider supports. The textual content
of the `property` element is the value of the property.
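
As a rough illustration of that structure, a _state-management.xml_ file might look like the following. The identifiers match the defaults referenced in _nifi.properties_, but the fully-qualified class names and the `Directory` property shown here are assumptions; consult the file shipped with your distribution for the authoritative values.

[source]
<state-management>
    <local-provider>
        <id>local-provider</id>
        <class>org.apache.nifi.controller.state.providers.local.WriteAheadLocalStateProvider</class>
        <property name="Directory">./state/local</property>
    </local-provider>
    <cluster-provider>
        <id>zk-provider</id>
        <class>org.apache.nifi.controller.state.providers.zookeeper.ZooKeeperStateProvider</class>
        <property name="Connect String">my-zk-server1:2181,my-zk-server2:2181,my-zk-server3:2181</property>
        <property name="Root Node">/nifi</property>
        <property name="Access Control">Open</property>
    </cluster-provider>
</state-management>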
Once these State Providers have been configured in the _state-management.xml_ file (or whatever file is configured), those Providers
may be referenced by their identifiers. By default, the Local State Provider is configured to be a `WriteAheadLocalStateProvider` that
persists the data to the _$NIFI_HOME/state_ directory. The default Cluster State Provider is configured to be a `ZooKeeperStateProvider`.
The default ZooKeeper-based provider must have its `Connect String` property populated before it can be used. It is also advisable,
if multiple NiFi instances will use the same ZooKeeper instance, that the value of the `Root Node` property be changed. For instance,
one might set the value to `/nifi/<team name>/production`. A `Connect String` takes the form of comma separated <host>:<port> tuples,
such as my-zk-server1:2181,my-zk-server2:2181,my-zk-server3:2181. In the event a port is not specified for any of the hosts, the ZooKeeper
default of 2181 is assumed.
When adding data to ZooKeeper, there are two options for Access Control: `Open` and `CreatorOnly`. If the `Access Control` property is
set to `Open`, then anyone is allowed to log into ZooKeeper and have full permissions to see, change, delete, or administer the data.
If `CreatorOnly` is specified, then only the user that created the data is allowed to read, change, delete, or administer the data.
In order to use the `CreatorOnly` option, NiFi must provide some form of authentication. See the <<zk_access_control>>
section below for more information on how to configure authentication.
If NiFi is configured to run in a standalone mode, the `cluster-provider` element need not be populated in the _state-management.xml_
file and will be ignored if it is populated. However, the `local-provider` element must always be present and populated.
Additionally, if NiFi is run in a cluster, each node must also have the `cluster-provider` element present and properly configured.
Otherwise, NiFi will fail to start up.
While there are not many properties that need to be configured for these providers, they were externalized into a separate _state-management.xml_
file, rather than being configured via the _nifi.properties_ file, simply because different implementations may require different properties,
and it is easier to maintain and understand the configuration in an XML-based file such as this, than to mix the properties of the Provider
in with all of the other NiFi framework-specific properties.
It should be noted that if Processors and other components save state using the Clustered scope, the Local State Provider will be used
if the instance is a standalone instance (not in a cluster) or is disconnected from the cluster. This also means that if a standalone instance
is migrated to become a cluster, then that state will no longer be available, as the component will begin using the Clustered State Provider
instead of the Local State Provider.
[[embedded_zookeeper]]
=== Embedded ZooKeeper Server
As mentioned above, the default State Provider for cluster-wide state is the `ZooKeeperStateProvider`. At the time of this writing, this is the
only State Provider that exists for handling cluster-wide state. What this means is that NiFi has dependencies on ZooKeeper in order to
behave as a cluster. However, there are many environments in which NiFi is deployed where there is no existing ZooKeeper ensemble being maintained.
In order to avoid the burden of forcing administrators to also maintain a separate ZooKeeper instance, NiFi provides the option of starting an
embedded ZooKeeper server.
This can be accomplished by setting the `nifi.state.management.embedded.zookeeper.start` property in _nifi.properties_ to `true` on those nodes
that should run the embedded ZooKeeper server. Generally, it is advisable to run ZooKeeper on either 3 or 5 nodes. Running on fewer than 3 nodes
provides less durability in the face of failure. Running on more than 5 nodes generally produces more network traffic than is necessary. Additionally,
running ZooKeeper on 4 nodes provides no more benefit than running on 3 nodes, since ZooKeeper requires that a majority of nodes be active in order to function.
However, it is up to the administrator to determine the number of nodes most appropriate to the particular deployment of NiFi.
If the `nifi.state.management.embedded.zookeeper.start` property is set to `true`, the `nifi.state.management.embedded.zookeeper.properties` property
in _nifi.properties_ also becomes relevant. This specifies the ZooKeeper properties file to use. At a minimum, this properties file needs to be populated
with the list of ZooKeeper servers. The servers are specified as properties in the form of `server.1`, `server.2`, to `server.n`. Each of these servers is
configured as <hostname>:<quorum port>[:<leader election port>]. For example, `myhost:2888:3888`. This list of nodes should be the same nodes in the NiFi
cluster that have the `nifi.state.management.embedded.zookeeper.start` property set to `true`. Also note that because ZooKeeper will be listening on these
ports, the firewall may need to be configured to open these ports for incoming traffic, at least between nodes in the cluster. Additionally, the port to
listen on for client connections must be opened in the firewall. The default value for this is _2181_ but can be configured via the _clientPort_ property
in the _zookeeper.properties_ file.
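For example, a three-node embedded ZooKeeper might be described in _zookeeper.properties_ with entries along these lines (the hostnames are placeholders for this illustration):
[source]
clientPort=2181
server.1=nifi-node1.example.com:2888:3888
server.2=nifi-node2.example.com:2888:3888
server.3=nifi-node3.example.com:2888:3888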
When using an embedded ZooKeeper, the _conf/zookeeper.properties_ file has a property named `dataDir`. By default, this value is set to `./state/zookeeper`.
If more than one NiFi node is running an embedded ZooKeeper, it is important to tell the server which one it is. This is accomplished by creating a file named
_myid_ and placing it in ZooKeeper's data directory. The contents of this file should be the index of the server as specified by the `server.<number>` property. So for
one of the ZooKeeper servers, we will accomplish this by performing the following commands:
[source]
cd $NIFI_HOME
mkdir state
mkdir state/zookeeper
echo 1 > state/zookeeper/myid
For the next NiFi Node that will run ZooKeeper, we can accomplish this by performing the following commands:
[source]
cd $NIFI_HOME
mkdir state
mkdir state/zookeeper
echo 2 > state/zookeeper/myid
And so on.
For more information on the properties used to administer ZooKeeper, see the
link:https://zookeeper.apache.org/doc/current/zookeeperAdmin.html[ZooKeeper Admin Guide].
For information on securing the embedded ZooKeeper Server, see the <<securing_zookeeper>> section below.
[[zk_access_control]]
=== ZooKeeper Access Control
ZooKeeper provides Access Control to its data via an Access Control List (ACL) mechanism. When data is written to ZooKeeper, NiFi will provide an ACL
that indicates that any user is allowed to have full permissions to the data, or an ACL that indicates that only the user that created the data is
allowed to access the data. Which ACL is used depends on the value of the `Access Control` property for the `ZooKeeperStateProvider` (see the
<<state_providers>> section for more information).
In order to use an ACL that indicates that only the Creator is allowed to access the data, we need to tell ZooKeeper who the Creator is. There are two
mechanisms for accomplishing this. The first mechanism is to provide authentication using Kerberos. See <<zk_kerberos_client>> for more information.
The second option is to use a user name and password. This is configured by specifying a value for the `Username` and a value for the `Password` properties
for the `ZooKeeperStateProvider` (see the <<state_providers>> section for more information). The important thing to keep in mind here, though, is that ZooKeeper
will pass around the password in plain text. This means that a user name and password should not be used unless ZooKeeper is running on localhost as a
one-instance cluster, or unless communications with ZooKeeper occur only over an encrypted channel, such as a VPN or an SSL connection. ZooKeeper will be
providing support for SSL connections in version 3.5.0.
[[securing_zookeeper]]
=== Securing ZooKeeper
When NiFi communicates with ZooKeeper, all communications, by default, are non-secure, and anyone who logs into ZooKeeper is able to view and manipulate all
of the NiFi state that is stored in ZooKeeper. To prevent this, we can use Kerberos to manage the authentication. At this time, ZooKeeper does not provide
support for encryption via SSL. Support for SSL in ZooKeeper is being actively developed and is expected to be available in the 3.5.x release version.
In order to secure the communications, we need to ensure that both the client and the server support the same configuration. Instructions for configuring the
NiFi ZooKeeper client and embedded ZooKeeper server to use Kerberos are provided below.
[[zk_kerberos_client]]
==== Kerberizing NiFi's ZooKeeper Client
The preferred mechanism for authenticating users with ZooKeeper is to use Kerberos. In order to use Kerberos to authenticate, we must configure a few
system properties, so that the ZooKeeper client knows who the user is and where the KeyTab file is. All nodes configured to store cluster-wide state
using `ZooKeeperStateProvider` and using Kerberos should follow these steps.
First, we must create the Principal that we will use when communicating with ZooKeeper. This is generally done via the `kadmin` tool:
[source]
kadmin: addprinc "nifi@EXAMPLE.COM"
A Kerberos Principal is made up of three parts: the primary, the instance, and the realm. Here, we are creating a Principal with the primary `nifi`,
no instance, and the realm `EXAMPLE.COM`. The primary (`nifi`, in this case) is the identifier that will be used to identify the user when authenticating
via Kerberos.
After we have created our Principal, we will need to create a KeyTab for the Principal:
[source]
kadmin: xst -k nifi.keytab nifi@EXAMPLE.COM
This will create a file in the current directory named `nifi.keytab`. We can now copy that file into the _$NIFI_HOME/conf/_ directory. We should ensure
that only the user that will be running NiFi is allowed to read this file.
Next, we need to configure NiFi to use this KeyTab for authentication. Since ZooKeeper uses the Java Authentication and Authorization Service (JAAS), we need to
create a JAAS-compatible file. In the `$NIFI_HOME/conf/` directory, create a file named `zookeeper-jaas.conf` and add to it the following snippet:
[source]
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
keyTab="./conf/nifi.keytab"
storeKey=true
useTicketCache=false
principal="nifi@EXAMPLE.COM";
};
Finally, we need to tell NiFi to use this as our JAAS configuration. This is done by setting a JVM System Property, so we will edit the _conf/bootstrap.conf_ file.
We add the following line anywhere in this file in order to tell the NiFi JVM to use this configuration:
[source]
java.arg.15=-Djava.security.auth.login.config=./conf/zookeeper-jaas.conf
We can initialize our Kerberos ticket by running the following command:
[source]
kinit nifi
Note, the above `kinit` command requires that Kerberos client libraries be installed. This is accomplished in Fedora-based Linux distributions via:
[source]
yum install krb5-workstation krb5-libs krb5-auth-dialog
Once this is complete, the /etc/krb5.conf file will need to be configured appropriately for your organization's Kerberos environment.
Now, when we start NiFi, it will use Kerberos to authenticate as the `nifi` user when communicating with ZooKeeper.
[[zk_kerberos_server]]
==== Kerberizing Embedded ZooKeeper Server
When using the embedded ZooKeeper server, we may choose to secure the server by using Kerberos. All nodes configured to launch an embedded ZooKeeper
and using Kerberos should follow these steps.
If Kerberos is not already set up in your environment, you can find information on installing and setting up a Kerberos Server at
link:https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Managing_Smart_Cards/Configuring_a_Kerberos_5_Server.html[https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Managing_Smart_Cards/Configuring_a_Kerberos_5_Server.html].
This guide assumes that Kerberos has already been installed in the environment in which NiFi is running.
In order to use Kerberos, we first need to generate a Kerberos Principal for our ZooKeeper server. This is accomplished via the `kadmin` tool:
[source]
kadmin: addprinc "zookeeper/myHost.example.com@EXAMPLE.COM"
Here, we are creating a Principal with the primary `zookeeper/myHost.example.com`, using the realm `EXAMPLE.COM`. We need to use a Principal whose
name is `<service name>/<instance name>`. In this case, the service is `zookeeper` and the instance name is `myHost.example.com` (the fully qualified name of our host).
Next, we will need to create a KeyTab for this Principal:
[source]
kadmin: xst -k zookeeper-server.keytab zookeeper/myHost.example.com@EXAMPLE.COM
This will create a file in the current directory named `zookeeper-server.keytab`. We can now copy that file into the `$NIFI_HOME/conf/` directory. We should ensure
that only the user that will be running NiFi is allowed to read this file.
We will need to repeat the above steps for each of the instances of NiFi that will be running the embedded ZooKeeper server, being sure to replace _myHost.example.com_ with
_myHost2.example.com_, or whatever fully qualified hostname the ZooKeeper server will be run on.
Now that we have our KeyTab for each of the servers that will be running NiFi, we will need to configure NiFi's embedded ZooKeeper server to use this configuration.
ZooKeeper uses the Java Authentication and Authorization Service (JAAS), so we need to create a JAAS-compatible file. In the `$NIFI_HOME/conf/` directory, create a file
named `zookeeper-jaas.conf` (this file will already exist if the Client has already been configured to authenticate via Kerberos; that's okay, just add to the file).
We will add the following snippet to this file:
[source]
Server {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
keyTab="./conf/zookeeper-server.keytab"
storeKey=true
useTicketCache=false
principal="zookeeper/myHost.example.com@EXAMPLE.COM";
};
Be sure to replace the value of _principal_ above with the appropriate Principal, including the fully qualified domain name of the server.
Next, we need to tell NiFi to use this as our JAAS configuration. This is done by setting a JVM System Property, so we will edit the `conf/bootstrap.conf` file.
If the Client has already been configured to use Kerberos, this is not necessary, as it was done above. Otherwise, we will add the following line to our _bootstrap.conf_ file:
[source]
java.arg.15=-Djava.security.auth.login.config=./conf/zookeeper-jaas.conf
We will want to initialize our Kerberos ticket by running the following command:
[source]
kinit "zookeeper/myHost.example.com@EXAMPLE.COM"
Again, be sure to replace the Principal with the appropriate value, including your realm and your fully qualified hostname.
Finally, we need to tell the ZooKeeper server to use the SASL Authentication Provider. To do this, we edit the `$NIFI_HOME/conf/zookeeper.properties` file and add the following
lines:
[source]
authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
jaasLoginRenew=3600000
requireClientAuthScheme=sasl
The last line is optional but specifies that clients MUST use Kerberos to communicate with our ZooKeeper instance.
Now, we can start NiFi, and the embedded ZooKeeper server will use Kerberos as the authentication mechanism.
[[troubleshooting_kerberos]]
==== Troubleshooting Kerberos Configuration
When using Kerberos, it is important to use fully-qualified domain names and not _localhost_. Please ensure that the fully qualified hostname of each server is used
in the following locations:
- _conf/zookeeper.properties_ file should use FQDN for `server.1`, `server.2`, ..., `server.N` values.
- The `Connect String` property of the ZooKeeperStateProvider
- The /etc/hosts file should also resolve the FQDN to an IP address that is *not* _127.0.0.1_.
Failure to do so may result in errors similar to the following:
[source]
2016-01-08 16:08:57,888 ERROR [pool-26-thread-1-SendThread(localhost:2181)] o.a.zookeeper.client.ZooKeeperSaslClient An error: (java.security.PrivilegedActionException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Server not found in Kerberos database (7) - LOOKING_UP_SERVER)]) occurred when evaluating Zookeeper Quorum Member's received SASL token. Zookeeper Client will go to AUTH_FAILED state.
If there are problems communicating or authenticating with Kerberos,
link:http://docs.oracle.com/javase/7/docs/technotes/guides/security/jgss/tutorials/Troubleshooting.html[this Troubleshooting Guide] may be of value.
One of the most important notes in the above Troubleshooting guide is the mechanism for turning on Debug output for Kerberos.
This is done by setting the `sun.security.krb5.debug` Java system property to `true`.
In NiFi, this is accomplished by adding the following line to the _$NIFI_HOME/conf/bootstrap.conf_ file:
[source]
java.arg.16=-Dsun.security.krb5.debug=true
This will cause the debug output to be written to the NiFi Bootstrap log file. By default, this is located at _$NIFI_HOME/logs/nifi-bootstrap.log_.
This output can be rather verbose but provides extremely valuable information for troubleshooting Kerberos failures.
[[bootstrap_properties]]
Bootstrap Properties
--------------------
@ -824,6 +1126,22 @@ only consider if `nifi.security.user.login.identity.provider` configured with a
|nifi.documentation.working.directory|The documentation working directory. The default value is ./work/docs/components and probably should be left as is.
|====
*State Management* +
The State Management section of the Properties file provides a mechanism for configuring local and cluster-wide mechanisms
for components to persist state. See the <<state_management>> section for more information on how this is used.
|====
|*Property*|*Description*
|nifi.state.management.configuration.file|The XML file that contains configuration for the local and cluster-wide State Providers. The default value is _./conf/state-management.xml_
|nifi.state.management.provider.local|The ID of the Local State Provider to use. This value must match the value of the `id` element of one of the `local-provider` elements in the _state-management.xml_ file.
|nifi.state.management.provider.cluster|The ID of the Cluster State Provider to use. This value must match the value of the `id` element of one of the `cluster-provider` elements in the _state-management.xml_ file. This value is ignored if not clustered but is required for nodes in a cluster.
|nifi.state.management.embedded.zookeeper.start|Specifies whether or not this instance of NiFi should start an embedded ZooKeeper Server. This is used in conjunction with the ZooKeeperStateProvider.
|nifi.state.management.embedded.zookeeper.properties|Specifies a properties file that contains the configuration for the embedded ZooKeeper Server that is started (if the `nifi.state.management.embedded.zookeeper.start` property is set to `true`)
|====
*H2 Settings* +
The H2 Settings section defines the settings for the H2 database, which keeps track of user access and flow controller history.

View File

@ -216,6 +216,13 @@ class, calling the appropriate methods
to fill in the details of the Relationship, and finally calling the
`build` method.
[[state_manager]]
==== StateManager
The StateManager provides Processors, Reporting Tasks, and Controller Services a mechanism
for easily storing and retrieving state. The API is similar to that of ConcurrentHashMap
but requires a Scope for each operation. The Scope indicates whether the state is to be
retrieved/stored locally or in a cluster-wide manner. For more information, see the
<<state_manager>> section.
[[processor_initialization_context]]
==== ProcessorInitializationContext
@ -558,6 +565,64 @@ for instance, they should not be
relied upon for critical business logic.
[[state_manager]]
=== State Manager
From the ProcessContext, ReportingContext, and ControllerServiceInitializationContext, components are
able to call the `getStateManager()` method. This State Manager is responsible for providing a simple API
for storing and retrieving state. As such, the API is designed to be quite similar to the ConcurrentMap
API, which most Java developers are already familiar with.
[[state_scope]]
==== Scope
One very notable difference between the StateManager API and the ConcurrentMap API, however, is the presence
of a Scope object on each method call of the StateManager. This Scope will be either `Scope.LOCAL` or `Scope.CLUSTER`.
If NiFi is run in a cluster, this Scope provides important information to the framework about how the operation should
occur.
If state is stored using `Scope.CLUSTER`, then all nodes in the cluster will be communicating with the same
state storage mechanism, as if all nodes were to share a single ConcurrentMap. If state is stored and retrieved using
`Scope.LOCAL`, then each node will see a different representation of the state.
It is also worth noting that if NiFi is configured to run as a standalone instance, rather than running in a cluster,
a scope of `Scope.LOCAL` is always used. This is done in order to allow the developer of a NiFi component to write the code
in one consistent way, without worrying about whether or not the NiFi instance is clustered. The developer should instead assume
that the instance is clustered and write the code accordingly.
==== Storing and Retrieving State
State is stored using the StateManager's `setState` and `replace` methods and retrieved using the `getState` method; the `clear`
method removes all state for a given Scope. Each of these methods requires a Scope, and state is supplied as a Map of String keys to String values. A key is unique only to the same
instance of the component and for the same Scope. That is, if two Processors store a value using the key _My Key_, those Processors
will not conflict with each other, even if both Processors are of the same type (e.g., both are of type ListFile) and use the same Scope. Furthermore,
if a Processor stores a value with the key of _My Key_ using the `Scope.CLUSTER` scope, and then attempts to retrieve the value
using the `Scope.LOCAL` scope, the value retrieved will be `null`. Each Processor's state, then, is stored in isolation from other
Processors' state. A unique key can be thought of as a triple of <Processor Instance, Key, Scope>.
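As a minimal sketch of how this API might be used from within `onTrigger` (the processor class, the `last.timestamp` key, and the surrounding logic are illustrative only, not part of the NiFi API):

[source,java]
----
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.nifi.annotation.behavior.Stateful;
import org.apache.nifi.components.state.Scope;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.components.state.StateMap;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.exception.ProcessException;

@Stateful(scopes = Scope.CLUSTER, description = "Stores the timestamp of the most recently processed record")
public class ExampleStatefulProcessor extends AbstractProcessor {

    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
        final StateManager stateManager = context.getStateManager();
        try {
            // Retrieve the current state; a StateMap is always returned, even before any state has been stored.
            final StateMap stateMap = stateManager.getState(Scope.CLUSTER);
            final String lastTimestamp = stateMap.get("last.timestamp"); // null on the very first run

            // ... perform work based on lastTimestamp ...

            // Store the new state; setState replaces the entire previously stored map for this Scope.
            final Map<String, String> newState = new HashMap<>();
            newState.put("last.timestamp", String.valueOf(System.currentTimeMillis()));
            stateManager.setState(newState, Scope.CLUSTER);
        } catch (final IOException ioe) {
            getLogger().error("Failed to retrieve or update state", ioe);
            context.yield();
        }
    }
}
----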
It follows, then, that two Processors cannot share the same state. There are, however, circumstances in which it is
necessary to share state between two Processors of different types, or between two Processors of the same type. This can be accomplished
by using a Controller Service. By storing and retrieving state from a Controller Service, multiple Processors can use the same
Controller Service and the state can be exposed via the Controller Service's API.
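As an illustration only (the `WatermarkService` interface, its method names, and the `watermark` key are hypothetical, not part of the NiFi API), such a service might capture its StateManager during initialization and expose simple accessors:

[source,java]
----
import java.io.IOException;
import java.util.Collections;

import org.apache.nifi.annotation.behavior.Stateful;
import org.apache.nifi.components.state.Scope;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.controller.AbstractControllerService;
import org.apache.nifi.controller.ControllerService;
import org.apache.nifi.controller.ControllerServiceInitializationContext;
import org.apache.nifi.reporting.InitializationException;

// In a real project the interface and implementation live in separate files/modules;
// they are shown together here for brevity.
interface WatermarkService extends ControllerService {
    String getWatermark() throws IOException;

    void setWatermark(String watermark) throws IOException;
}

@Stateful(scopes = Scope.CLUSTER, description = "Stores a watermark value that is shared by all Processors using this service")
public class StandardWatermarkService extends AbstractControllerService implements WatermarkService {
    private volatile StateManager stateManager;

    @Override
    protected void init(final ControllerServiceInitializationContext config) throws InitializationException {
        // The initialization context exposes the service's own StateManager.
        this.stateManager = config.getStateManager();
    }

    @Override
    public String getWatermark() throws IOException {
        return stateManager.getState(Scope.CLUSTER).get("watermark");
    }

    @Override
    public void setWatermark(final String watermark) throws IOException {
        stateManager.setState(Collections.singletonMap("watermark", watermark), Scope.CLUSTER);
    }
}
----

Processors that reference such a service through a PropertyDescriptor then read and write the shared value through the service's methods rather than through their own StateManagers.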
==== Unit Tests
NiFi's Mock Framework provides an extensive collection of tools to perform unit testing of Processors. Processor unit tests typically
begin with the `TestRunner` class, so the `TestRunner` class exposes a `getStateManager` method of its own. The StateManager
that is returned, however, is of a specific type: `MockStateManager`. This implementation provides several methods in addition to those
defined by the `StateManager` interface that help developers more easily write unit tests.
First, the `MockStateManager` implements the `StateManager` interface, so all of the state can be examined from within a unit test.
Additionally, the `MockStateManager` exposes a handful of `assert*` methods to perform assertions that the State is set as expected.
There are times, however, when state is updated several times during a single invocation of a Processor's `onTrigger` method.
In this case, inspecting the values after running the Processor may not be sufficient, and remembering to check the stored state
at each step quickly becomes error-prone and burdensome for the developer.
For these reasons, the `MockStateManager` provides the `setFailOnStateSet` and `setFailOnStateGet` methods. These methods instruct
the State Manager to throw an `IOException` whenever state is stored or retrieved for the given Scope, making it easy to verify that a
component handles failures of the underlying state storage gracefully. Calling the same method with a value of `false` clears this
behavior and allows state to be stored or retrieved normally.
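A sketch of a unit test for the hypothetical processor shown earlier, exercising both the assertion methods and the failure-injection methods (class names and the `last.timestamp` key are placeholders):

[source,java]
----
import org.apache.nifi.components.state.Scope;
import org.apache.nifi.state.MockStateManager;
import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;
import org.junit.Test;

public class TestExampleStatefulProcessor {

    @Test
    public void testStateIsStoredAfterSuccessfulRun() throws Exception {
        final TestRunner runner = TestRunners.newTestRunner(ExampleStatefulProcessor.class);
        final MockStateManager stateManager = runner.getStateManager();

        // No state should exist before the Processor has been triggered.
        stateManager.assertStateNotSet("last.timestamp", Scope.CLUSTER);

        runner.run();

        // After a single invocation, some value should have been stored for the key.
        stateManager.assertStateSet("last.timestamp", Scope.CLUSTER);
    }

    @Test
    public void testFailureToStoreStateIsHandled() throws Exception {
        final TestRunner runner = TestRunners.newTestRunner(ExampleStatefulProcessor.class);

        // Simulate the underlying state provider being unavailable when state is stored.
        runner.getStateManager().setFailOnStateSet(Scope.CLUSTER, true);

        runner.run();
        // ... assert that the Processor yielded, routed data to failure, etc. ...
    }
}
----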
=== Reporting Processor Activity
@ -728,7 +793,7 @@ public final class InvokeHTTP extends AbstractProcessor {
----
=== Documenting Related Components
Often Processors and ControllerServices are related to one another. Sometimes its a put/get relation as in `PutFile` and `GetFile`.
Often Processors and ControllerServices are related to one another. Sometimes it is a put/get relation as in `PutFile` and `GetFile`.
Sometimes a Processor uses a ControllerService like `InvokeHTTP` and `StandardSSLContextService`. Sometimes one ControllerService uses another
like `DistributedMapCacheClientService` and `DistributedMapCacheServer`. Developers of these extension points may relate these
different components using the `SeeAlso` tag. This annotation links these components in the documentation.
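For example, a hypothetical `ExampleGet` Processor could point readers at its `ExamplePut` counterpart (both class names are placeholders, assuming the referenced class is available to the same bundle):

[source,java]
----
import org.apache.nifi.annotation.documentation.CapabilityDescription;
import org.apache.nifi.annotation.documentation.SeeAlso;
import org.apache.nifi.annotation.documentation.Tags;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.exception.ProcessException;

@Tags({"example", "get"})
@CapabilityDescription("Retrieves data that was previously stored by the ExamplePut Processor")
@SeeAlso(ExamplePut.class) // ExamplePut will be linked from this component's generated documentation
public class ExampleGet extends AbstractProcessor {

    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
        // ...
    }
}
----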

View File

@ -0,0 +1,278 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.state;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.nifi.annotation.behavior.Stateful;
import org.apache.nifi.components.state.Scope;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.components.state.StateMap;
import org.junit.Assert;
public class MockStateManager implements StateManager {
private final AtomicInteger versionIndex = new AtomicInteger(0);
private StateMap localStateMap = new MockStateMap(null, -1L);
private StateMap clusterStateMap = new MockStateMap(null, -1L);
private volatile boolean failToGetLocalState = false;
private volatile boolean failToSetLocalState = false;
private volatile boolean failToGetClusterState = false;
private volatile boolean failToSetClusterState = false;
private final boolean usesLocalState;
private final boolean usesClusterState;
public MockStateManager(final Object component) {
final Stateful stateful = component.getClass().getAnnotation(Stateful.class);
if (stateful == null) {
usesLocalState = false;
usesClusterState = false;
} else {
final Scope[] scopes = stateful.scopes();
boolean local = false;
boolean cluster = false;
for (final Scope scope : scopes) {
if (scope == Scope.LOCAL) {
local = true;
} else if (scope == Scope.CLUSTER) {
cluster = true;
}
}
usesLocalState = local;
usesClusterState = cluster;
}
}
@Override
public synchronized void setState(final Map<String, String> state, final Scope scope) throws IOException {
verifyAnnotation(scope);
verifyCanSet(scope);
final StateMap stateMap = new MockStateMap(state, versionIndex.incrementAndGet());
if (scope == Scope.CLUSTER) {
clusterStateMap = stateMap;
} else {
localStateMap = stateMap;
}
}
@Override
public synchronized StateMap getState(final Scope scope) throws IOException {
verifyAnnotation(scope);
verifyCanGet(scope);
return retrieveState(scope);
}
private synchronized StateMap retrieveState(final Scope scope) {
verifyAnnotation(scope);
if (scope == Scope.CLUSTER) {
return clusterStateMap;
} else {
return localStateMap;
}
}
@Override
public synchronized boolean replace(final StateMap oldValue, final Map<String, String> newValue, final Scope scope) throws IOException {
verifyAnnotation(scope);
if (scope == Scope.CLUSTER) {
if (oldValue == clusterStateMap) {
verifyCanSet(scope);
clusterStateMap = new MockStateMap(newValue, versionIndex.incrementAndGet());
return true;
}
return false;
} else {
if (oldValue == localStateMap) {
verifyCanSet(scope);
localStateMap = new MockStateMap(newValue, versionIndex.incrementAndGet());
return true;
}
return false;
}
}
@Override
public synchronized void clear(final Scope scope) throws IOException {
verifyAnnotation(scope);
setState(Collections.<String, String> emptyMap(), scope);
}
private void verifyCanSet(final Scope scope) throws IOException {
final boolean failToSet = (scope == Scope.LOCAL) ? failToSetLocalState : failToSetClusterState;
if (failToSet) {
throw new IOException("Unit Test configured to throw IOException if " + scope + " State is set");
}
}
private void verifyCanGet(final Scope scope) throws IOException {
final boolean failToGet = (scope == Scope.LOCAL) ? failToGetLocalState : failToGetClusterState;
if (failToGet) {
throw new IOException("Unit Test configured to throw IOException if " + scope + " State is retrieved");
}
}
private void verifyAnnotation(final Scope scope) {
// ensure that the @Stateful annotation is present with the appropriate Scope
if ((scope == Scope.LOCAL && !usesLocalState) || (scope == Scope.CLUSTER && !usesClusterState)) {
Assert.fail("Component is attempting to set or retrieve state with a scope of " + scope + " but does not declare that it will use "
+ scope + " state. A @Stateful annotation should be added to the component with a scope of " + scope);
}
}
private String getValue(final String key, final Scope scope) {
final StateMap stateMap;
if (scope == Scope.CLUSTER) {
stateMap = clusterStateMap;
} else {
stateMap = localStateMap;
}
return stateMap.get(key);
}
//
// assertion methods to make unit testing easier
//
/**
* Ensures that the state with the given key and scope is set to the given value, or else the test will fail
*
* @param key the state key
* @param value the expected value
* @param scope the scope
*/
public void assertStateEquals(final String key, final String value, final Scope scope) {
Assert.assertEquals(value, getValue(key, scope));
}
/**
* Ensures that the state is equal to the given values
*
* @param stateValues the values expected
* @param scope the scope to compare the stateValues against
*/
public void assertStateEquals(final Map<String, String> stateValues, final Scope scope) {
final StateMap stateMap = retrieveState(scope);
Assert.assertEquals(stateValues, stateMap.toMap());
}
/**
* Ensures that the state is not equal to the given values
*
* @param stateValues the unexpected values
* @param scope the scope to compare the stateValues against
*/
public void assertStateNotEquals(final Map<String, String> stateValues, final Scope scope) {
final StateMap stateMap = retrieveState(scope);
Assert.assertNotEquals(stateValues, stateMap.toMap());
}
/**
* Ensures that the state with the given key and scope is not set to the given value, or else the test will fail
*
* @param key the state key
* @param value the unexpected value
* @param scope the scope
*/
public void assertStateNotEquals(final String key, final String value, final Scope scope) {
Assert.assertNotEquals(value, getValue(key, scope));
}
/**
* Ensures that some value is set for the given key and scope, or else the test will fail
*
* @param key the state key
* @param scope the scope
*/
public void assertStateSet(final String key, final Scope scope) {
Assert.assertNotNull("Expected state to be set for key " + key + " and scope " + scope + ", but it was not set", getValue(key, scope));
}
/**
* Ensures that no value is set for the given key and scope, or else the test will fail
*
* @param key the state key
* @param scope the scope
*/
public void assertStateNotSet(final String key, final Scope scope) {
Assert.assertNull("Expected state not to be set for key " + key + " and scope " + scope + ", but it was set", getValue(key, scope));
}
/**
* Ensures that the state was set for the given scope, regardless of what the value was.
*
* @param scope the scope
*/
public void assertStateSet(final Scope scope) {
final StateMap stateMap = (scope == Scope.CLUSTER) ? clusterStateMap : localStateMap;
Assert.assertNotSame("Expected state to be set for Scope " + scope + ", but it was not set", -1L, stateMap.getVersion());
}
/**
* Ensures that the state was not set for the given scope
*
* @param scope the scope
*/
public void assertStateNotSet(final Scope scope) {
final StateMap stateMap = (scope == Scope.CLUSTER) ? clusterStateMap : localStateMap;
Assert.assertEquals("Expected state not to be set for Scope " + scope + ", but it was set", -1L, stateMap.getVersion());
}
/**
* Specifies whether or not the State Manager should throw an IOException when state is set for the given scope.
* Note that calls to {@link #replace(StateMap, Map, Scope)} will fail only if the state would be set (i.e., if
* we call replace and the StateMap does not match the old value, it will not fail).
*
* Also note that if setting state is set to fail, clearing will also fail, as clearing is thought of as setting the
* state to empty
*
* @param scope the scope that should (or should not) fail
* @param fail whether or not setting state should fail
*/
public void setFailOnStateSet(final Scope scope, final boolean fail) {
if (scope == Scope.LOCAL) {
failToSetLocalState = fail;
} else {
failToSetClusterState = fail;
}
}
/**
* Specifies whether or not the State Manager should throw an IOException when state is retrieved for the given scope.
*
* @param scope the scope that should (or should not) fail
* @param fail whether or not retrieving state should fail
*/
public void setFailOnStateGet(final Scope scope, final boolean fail) {
if (scope == Scope.LOCAL) {
failToGetLocalState = fail;
} else {
failToGetClusterState = fail;
}
}
}

View File

@ -0,0 +1,49 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.state;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.nifi.components.state.StateMap;
public class MockStateMap implements StateMap {
private final Map<String, String> stateValues;
private final long version;
public MockStateMap(final Map<String, String> stateValues, final long version) {
this.stateValues = stateValues == null ? Collections.<String, String> emptyMap() : new HashMap<>(stateValues);
this.version = version;
}
@Override
public long getVersion() {
return version;
}
@Override
public String get(final String key) {
return stateValues.get(key);
}
@Override
public Map<String, String> toMap() {
return Collections.unmodifiableMap(stateValues);
}
}

View File

@ -16,23 +16,31 @@
*/
package org.apache.nifi.util;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.controller.ControllerService;
import org.apache.nifi.controller.ControllerServiceInitializationContext;
import org.apache.nifi.controller.ControllerServiceLookup;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.state.MockStateManager;
public class MockControllerServiceInitializationContext extends MockControllerServiceLookup implements ControllerServiceInitializationContext, ControllerServiceLookup {
private final String identifier;
private final ComponentLog logger;
private final StateManager stateManager;
public MockControllerServiceInitializationContext(final ControllerService controllerService, final String identifier) {
this(controllerService, identifier, new MockProcessorLog(identifier, controllerService));
this(controllerService, identifier, new MockStateManager(controllerService));
}
public MockControllerServiceInitializationContext(final ControllerService controllerService, final String identifier, final ComponentLog logger) {
public MockControllerServiceInitializationContext(final ControllerService controllerService, final String identifier, final StateManager stateManager) {
this(controllerService, identifier, new MockProcessorLog(identifier, controllerService), stateManager);
}
public MockControllerServiceInitializationContext(final ControllerService controllerService, final String identifier, final ComponentLog logger, final StateManager stateManager) {
this.identifier = identifier;
this.logger = logger;
this.stateManager = stateManager;
addControllerService(controllerService, identifier);
}
@ -55,4 +63,9 @@ public class MockControllerServiceInitializationContext extends MockControllerSe
public ComponentLog getLogger() {
return logger;
}
@Override
public StateManager getStateManager() {
return stateManager;
}
}

View File

@ -34,17 +34,20 @@ import org.apache.nifi.components.ConfigurableComponent;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.PropertyValue;
import org.apache.nifi.components.ValidationResult;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.controller.ControllerService;
import org.apache.nifi.controller.ControllerServiceLookup;
import org.apache.nifi.processor.Processor;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.SchedulingContext;
import org.apache.nifi.state.MockStateManager;
import org.junit.Assert;
public class MockProcessContext extends MockControllerServiceLookup implements SchedulingContext, ControllerServiceLookup {
private final ConfigurableComponent component;
private final Map<PropertyDescriptor, String> properties = new HashMap<>();
private final StateManager stateManager;
private String annotationData = null;
private boolean yieldCalled = false;
@ -56,17 +59,22 @@ public class MockProcessContext extends MockControllerServiceLookup implements S
private volatile Set<Relationship> connections = new HashSet<>();
private volatile Set<Relationship> unavailableRelationships = new HashSet<>();
public MockProcessContext(final ConfigurableComponent component) {
this(component, new MockStateManager(component));
}
/**
* Creates a new MockProcessContext for the given Processor
*
* @param component being mocked
*/
public MockProcessContext(final ConfigurableComponent component) {
public MockProcessContext(final ConfigurableComponent component, final StateManager stateManager) {
this.component = Objects.requireNonNull(component);
this.stateManager = stateManager;
}
public MockProcessContext(final ControllerService component, final MockProcessContext context) {
this(component);
public MockProcessContext(final ControllerService component, final MockProcessContext context, final StateManager stateManager) {
this(component, stateManager);
try {
annotationData = context.getControllerServiceAnnotationData(component);
@ -121,7 +129,7 @@ public class MockProcessContext extends MockControllerServiceLookup implements S
requireNonNull(value, "Cannot set property to null value; if the intent is to remove the property, call removeProperty instead");
final PropertyDescriptor fullyPopulatedDescriptor = component.getPropertyDescriptor(descriptor.getName());
final ValidationResult result = fullyPopulatedDescriptor.validate(value, new MockValidationContext(this));
final ValidationResult result = fullyPopulatedDescriptor.validate(value, new MockValidationContext(this, stateManager));
String oldValue = properties.put(fullyPopulatedDescriptor, value);
if (oldValue == null) {
oldValue = fullyPopulatedDescriptor.getDefaultValue();
@ -204,7 +212,7 @@ public class MockProcessContext extends MockControllerServiceLookup implements S
* non-null
*/
public Collection<ValidationResult> validate() {
return component.validate(new MockValidationContext(this));
return component.validate(new MockValidationContext(this, stateManager));
}
public boolean isValid() {
@ -342,4 +350,9 @@ public class MockProcessContext extends MockControllerServiceLookup implements S
final List<Range> elRanges = Query.extractExpressionRanges(getProperty(property).getValue());
return (elRanges != null && !elRanges.isEmpty());
}
@Override
public StateManager getStateManager() {
return stateManager;
}
}

View File

@ -24,6 +24,7 @@ import java.util.Map;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.PropertyValue;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.controller.ControllerService;
import org.apache.nifi.controller.ControllerServiceLookup;
import org.apache.nifi.reporting.Bulletin;
@ -37,11 +38,13 @@ public class MockReportingContext extends MockControllerServiceLookup implements
private final Map<String, ControllerServiceConfiguration> controllerServices;
private final MockEventAccess eventAccess = new MockEventAccess();
private final Map<PropertyDescriptor, String> properties = new HashMap<>();
private final StateManager stateManager;
private final Map<String, List<Bulletin>> componentBulletinsCreated = new HashMap<>();
public MockReportingContext(final Map<String, ControllerService> controllerServices) {
public MockReportingContext(final Map<String, ControllerService> controllerServices, final StateManager stateManager) {
this.controllerServices = new HashMap<>();
this.stateManager = stateManager;
for (final Map.Entry<String, ControllerService> entry : controllerServices.entrySet()) {
this.controllerServices.put(entry.getKey(), new ControllerServiceConfiguration(entry.getValue()));
}
@ -112,4 +115,9 @@ public class MockReportingContext extends MockControllerServiceLookup implements
return new ArrayList<>(created);
}
@Override
public StateManager getStateManager() {
return stateManager;
}
}

View File

@ -27,6 +27,7 @@ import org.apache.nifi.attribute.expression.language.StandardExpressionLanguageC
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.PropertyValue;
import org.apache.nifi.components.ValidationContext;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.controller.ControllerService;
import org.apache.nifi.controller.ControllerServiceLookup;
import org.apache.nifi.expression.ExpressionLanguageCompiler;
@ -35,9 +36,11 @@ public class MockValidationContext implements ValidationContext, ControllerServi
private final MockProcessContext context;
private final Map<String, Boolean> expressionLanguageSupported;
private final StateManager stateManager;
public MockValidationContext(final MockProcessContext processContext) {
public MockValidationContext(final MockProcessContext processContext, final StateManager stateManager) {
this.context = processContext;
this.stateManager = stateManager;
final Map<PropertyDescriptor, String> properties = processContext.getProperties();
expressionLanguageSupported = new HashMap<>(properties.size());
@ -63,8 +66,8 @@ public class MockValidationContext implements ValidationContext, ControllerServi
@Override
public ValidationContext getControllerServiceValidationContext(final ControllerService controllerService) {
final MockProcessContext serviceProcessContext = new MockProcessContext(controllerService, context);
return new MockValidationContext(serviceProcessContext);
final MockProcessContext serviceProcessContext = new MockProcessContext(controllerService, context, stateManager);
return new MockValidationContext(serviceProcessContext, stateManager);
}
@Override
@ -118,6 +121,7 @@ public class MockValidationContext implements ValidationContext, ControllerServi
return context.isControllerServiceEnabling(serviceIdentifier);
}
@Override
public boolean isExpressionLanguagePresent(final String value) {
if (value == null) {
return false;

View File

@ -46,6 +46,7 @@ import java.util.concurrent.atomic.AtomicLong;
import org.apache.nifi.annotation.behavior.TriggerSerially;
import org.apache.nifi.annotation.lifecycle.OnAdded;
import org.apache.nifi.annotation.lifecycle.OnConfigurationRestored;
import org.apache.nifi.annotation.lifecycle.OnDisabled;
import org.apache.nifi.annotation.lifecycle.OnEnabled;
import org.apache.nifi.annotation.lifecycle.OnRemoved;
@ -57,6 +58,7 @@ import org.apache.nifi.components.AllowableValue;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.ValidationContext;
import org.apache.nifi.components.ValidationResult;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.controller.ConfigurationContext;
import org.apache.nifi.controller.ControllerService;
import org.apache.nifi.controller.queue.QueueSize;
@ -68,6 +70,7 @@ import org.apache.nifi.processor.Relationship;
import org.apache.nifi.provenance.ProvenanceEventRecord;
import org.apache.nifi.provenance.ProvenanceReporter;
import org.apache.nifi.reporting.InitializationException;
import org.apache.nifi.state.MockStateManager;
import org.junit.Assert;
public class StandardProcessorTestRunner implements TestRunner {
@ -79,6 +82,8 @@ public class StandardProcessorTestRunner implements TestRunner {
private final SharedSessionState sharedState;
private final AtomicLong idGenerator;
private final boolean triggerSerially;
private final MockStateManager processorStateManager;
private final Map<String, MockStateManager> controllerServiceStateManagers = new HashMap<>();
private int numThreads = 1;
private final AtomicInteger invocations = new AtomicInteger(0);
@ -101,7 +106,8 @@ public class StandardProcessorTestRunner implements TestRunner {
this.sharedState = new SharedSessionState(processor, idGenerator);
this.flowFileQueue = sharedState.getFlowFileQueue();
this.sessionFactory = new MockSessionFactory(sharedState, processor);
this.context = new MockProcessContext(processor);
this.processorStateManager = new MockStateManager(processor);
this.context = new MockProcessContext(processor, processorStateManager);
detectDeprecatedAnnotations(processor);
@ -116,6 +122,8 @@ public class StandardProcessorTestRunner implements TestRunner {
}
triggerSerially = null != processor.getClass().getAnnotation(TriggerSerially.class);
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnConfigurationRestored.class, processor);
}
@SuppressWarnings("deprecation")
@ -578,7 +586,9 @@ public class StandardProcessorTestRunner implements TestRunner {
final MockProcessorLog logger = new MockProcessorLog(identifier, service);
controllerServiceLoggers.put(identifier, logger);
final MockControllerServiceInitializationContext initContext = new MockControllerServiceInitializationContext(requireNonNull(service), requireNonNull(identifier), logger);
final MockStateManager serviceStateManager = new MockStateManager(service);
final MockControllerServiceInitializationContext initContext = new MockControllerServiceInitializationContext(requireNonNull(service), requireNonNull(identifier), logger, serviceStateManager);
controllerServiceStateManagers.put(identifier, serviceStateManager);
initContext.addControllerServices(context);
service.initialize(initContext);
@ -598,7 +608,12 @@ public class StandardProcessorTestRunner implements TestRunner {
@Override
public void assertNotValid(final ControllerService service) {
final ValidationContext validationContext = new MockValidationContext(context).getControllerServiceValidationContext(service);
final StateManager serviceStateManager = controllerServiceStateManagers.get(service.getIdentifier());
if (serviceStateManager == null) {
throw new IllegalStateException("Controller Service has not been added to this TestRunner via the #addControllerService method");
}
final ValidationContext validationContext = new MockValidationContext(context, serviceStateManager).getControllerServiceValidationContext(service);
final Collection<ValidationResult> results = context.getControllerService(service.getIdentifier()).validate(validationContext);
for (final ValidationResult result : results) {
@ -612,7 +627,12 @@ public class StandardProcessorTestRunner implements TestRunner {
@Override
public void assertValid(final ControllerService service) {
final ValidationContext validationContext = new MockValidationContext(context).getControllerServiceValidationContext(service);
final StateManager serviceStateManager = controllerServiceStateManagers.get(service.getIdentifier());
if (serviceStateManager == null) {
throw new IllegalStateException("Controller Service has not been added to this TestRunner via the #addControllerService method");
}
final ValidationContext validationContext = new MockValidationContext(context, serviceStateManager).getControllerServiceValidationContext(service);
final Collection<ValidationResult> results = context.getControllerService(service.getIdentifier()).validate(validationContext);
for (final ValidationResult result : results) {
@ -718,11 +738,16 @@ public class StandardProcessorTestRunner implements TestRunner {
@Override
public ValidationResult setProperty(final ControllerService service, final PropertyDescriptor property, final String value) {
final MockStateManager serviceStateManager = controllerServiceStateManagers.get(service.getIdentifier());
if (serviceStateManager == null) {
throw new IllegalStateException("Controller service " + service + " has not been added to this TestRunner via the #addControllerService method");
}
final ControllerServiceConfiguration configuration = getConfigToUpdate(service);
final Map<PropertyDescriptor, String> curProps = configuration.getProperties();
final Map<PropertyDescriptor, String> updatedProps = new HashMap<>(curProps);
final ValidationContext validationContext = new MockValidationContext(context).getControllerServiceValidationContext(service);
final ValidationContext validationContext = new MockValidationContext(context, serviceStateManager).getControllerServiceValidationContext(service);
final ValidationResult validationResult = property.validate(value, validationContext);
updatedProps.put(property, value);
@ -771,6 +796,22 @@ public class StandardProcessorTestRunner implements TestRunner {
sharedState.clearProvenanceEvents();
}
@Override
public MockStateManager getStateManager() {
return processorStateManager;
}
/**
* Returns the State Manager for the given Controller Service.
*
* @param controllerService the Controller Service whose State Manager should be returned
* @return the State Manager for the given Controller Service
*/
@Override
public MockStateManager getStateManager(final ControllerService controllerService) {
return controllerServiceStateManagers.get(controllerService.getIdentifier());
}
public MockProcessorLog getLogger() {
return logger;
}

View File

@ -35,6 +35,7 @@ import org.apache.nifi.processor.Relationship;
import org.apache.nifi.provenance.ProvenanceEventRecord;
import org.apache.nifi.provenance.ProvenanceReporter;
import org.apache.nifi.reporting.InitializationException;
import org.apache.nifi.state.MockStateManager;
public interface TestRunner {
@ -838,4 +839,15 @@ public interface TestRunner {
* @return the logger
*/
public MockProcessorLog getControllerServiceLogger(final String identifier);
/**
* @return the State Manager that is used to stored and retrieve state
*/
MockStateManager getStateManager();
/**
* @param service the controller service of interest
* @return the State Manager that is used to store and retrieve state for the given controller service
*/
MockStateManager getStateManager(ControllerService service);
}

View File

@ -123,4 +123,4 @@ public class TestMockProcessContext {
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
}
}
}
}

View File

@ -0,0 +1,89 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.web.api.dto;
import com.wordnik.swagger.annotations.ApiModelProperty;
import javax.xml.bind.annotation.XmlType;
/**
* State for a given component.
*/
@XmlType(name = "componentState")
public class ComponentStateDTO {
private String componentId;
private String stateDescription;
private StateMapDTO clusterState;
private StateMapDTO localState;
/**
* @return The component identifier
*/
@ApiModelProperty(
value = "The component identifier."
)
public String getComponentId() {
return componentId;
}
public void setComponentId(String componentId) {
this.componentId = componentId;
}
/**
* @return Description of the state this component persists.
*/
@ApiModelProperty(
value = "Description of the state this component persists."
)
public String getStateDescription() {
return stateDescription;
}
public void setStateDescription(String stateDescription) {
this.stateDescription = stateDescription;
}
/**
* @return The cluster state for this component, or null if this NiFi is a standalone instance
*/
@ApiModelProperty(
value = "The cluster state for this component, or null if this NiFi is a standalone instance."
)
public StateMapDTO getClusterState() {
return clusterState;
}
public void setClusterState(StateMapDTO clusterState) {
this.clusterState = clusterState;
}
/**
* @return The local state for this component
*/
@ApiModelProperty(
value = "The local state for this component."
)
public StateMapDTO getLocalState() {
return localState;
}
public void setLocalState(StateMapDTO localState) {
this.localState = localState;
}
}

View File

@ -33,6 +33,7 @@ public class ControllerServiceDTO extends NiFiComponentDTO {
private String comments;
private String availability;
private String state;
private Boolean persistsState;
private Map<String, String> properties;
private Map<String, PropertyDescriptorDTO> descriptors;
@ -101,6 +102,20 @@ public class ControllerServiceDTO extends NiFiComponentDTO {
this.availability = availability;
}
/**
* @return whether this controller service persists state
*/
@ApiModelProperty(
value = "Whether the controller service persists state."
)
public Boolean getPersistsState() {
return persistsState;
}
public void setPersistsState(Boolean persistsState) {
this.persistsState = persistsState;
}
/**
* @return The state of this controller service. Possible values are ENABLED, ENABLING, DISABLED, DISABLING
*/

View File

@ -37,6 +37,7 @@ public class ProcessorDTO extends NiFiComponentDTO {
private Boolean supportsParallelProcessing;
private Boolean supportsEventDriven;
private Boolean supportsBatching;
private Boolean persistsState;
private String inputRequirement;
private ProcessorConfigDTO config;
@ -122,6 +123,20 @@ public class ProcessorDTO extends NiFiComponentDTO {
this.supportsParallelProcessing = supportsParallelProcessing;
}
/**
* @return whether this processor persists state
*/
@ApiModelProperty(
value = "Whether the processor persists state."
)
public Boolean getPersistsState() {
return persistsState;
}
public void setPersistsState(Boolean persistsState) {
this.persistsState = persistsState;
}
/**
* @return the input requirement of this processor
*/

View File

@ -33,6 +33,7 @@ public class ReportingTaskDTO extends NiFiComponentDTO {
private String state;
private String availability;
private String comments;
private Boolean persistsState;
private String schedulingPeriod;
private String schedulingStrategy;
@ -105,6 +106,20 @@ public class ReportingTaskDTO extends NiFiComponentDTO {
this.schedulingPeriod = schedulingPeriod;
}
/**
* @return whether this reporting task persists state
*/
@ApiModelProperty(
value = "Whether the reporting task persists state."
)
public Boolean getPersistsState() {
return persistsState;
}
public void setPersistsState(Boolean persistsState) {
this.persistsState = persistsState;
}
/**
* @return current scheduling state of the reporting task
*/

View File

@ -0,0 +1,90 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.web.api.dto;
import com.wordnik.swagger.annotations.ApiModelProperty;
import javax.xml.bind.annotation.XmlType;
/**
* A single key/value entry of state for a given scope.
*/
@XmlType(name = "stateEntry")
public class StateEntryDTO {
private String key;
private String value;
private String clusterNodeId; // include when clustered and scope is local
private String clusterNodeAddress; // include when clustered and scope is local
/**
* @return the key for this state
*/
@ApiModelProperty(
value = "The key for this state."
)
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
/**
* @return the value for this state
*/
@ApiModelProperty(
value = "The value for this state."
)
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
/**
* @return identifier of the node where this state originated
*/
@ApiModelProperty(
value = "The identifier for the node where the state originated."
)
public String getClusterNodeId() {
return clusterNodeId;
}
public void setClusterNodeId(String clusterNodeId) {
this.clusterNodeId = clusterNodeId;
}
/**
* @return label to use to show which node this state originated from
*/
@ApiModelProperty(
value = "The label for the node where the state originated."
)
public String getClusterNodeAddress() {
return clusterNodeAddress;
}
public void setClusterNodeAddress(String clusterNodeAddress) {
this.clusterNodeAddress = clusterNodeAddress;
}
}

View File

@ -0,0 +1,61 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.web.api.dto;
import com.wordnik.swagger.annotations.ApiModelProperty;
import javax.xml.bind.annotation.XmlType;
import java.util.List;
/**
* Mapping of state for a given scope.
*/
@XmlType(name = "stateMap")
public class StateMapDTO {
private String scope;
private List<StateEntryDTO> state;
/**
* @return The scope of this StateMap
*/
@ApiModelProperty(
value = "The scope of this StateMap."
)
public String getScope() {
return scope;
}
public void setScope(String scope) {
this.scope = scope;
}
/**
* @return The state
*/
@ApiModelProperty(
value = "The state."
)
public List<StateEntryDTO> getState() {
return state;
}
public void setState(List<StateEntryDTO> state) {
this.state = state;
}
}

View File

@ -0,0 +1,44 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.web.api.entity;
import org.apache.nifi.web.api.dto.ComponentStateDTO;
import javax.xml.bind.annotation.XmlRootElement;
/**
* A serialized representation of this class can be placed in the entity body of a request or response to or from the API. This particular entity holds a reference to a ComponentStateDTO.
*/
@XmlRootElement(name = "componentStateEntity")
public class ComponentStateEntity extends Entity {
private ComponentStateDTO componentState;
/**
* The ComponentStateDTO that is being serialized.
*
* @return The ComponentStateDTO object
*/
public ComponentStateDTO getComponentState() {
return componentState;
}
public void setComponentState(ComponentStateDTO componentState) {
this.componentState = componentState;
}
}

View File

@ -16,6 +16,7 @@
*/
package org.apache.nifi.documentation.mock;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.controller.ControllerServiceInitializationContext;
import org.apache.nifi.controller.ControllerServiceLookup;
import org.apache.nifi.logging.ComponentLog;
@ -43,4 +44,9 @@ public class MockControllerServiceInitializationContext implements ControllerSer
return new MockProcessorLogger();
}
}
@Override
public StateManager getStateManager() {
return null;
}
}

View File

@ -22,6 +22,7 @@ import java.util.Set;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.PropertyValue;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.controller.ControllerServiceLookup;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.Relationship;
@ -102,4 +103,9 @@ public class MockProcessContext implements ProcessContext {
public boolean isExpressionLanguagePresent(PropertyDescriptor property) {
return false;
}
}
@Override
public StateManager getStateManager() {
return null;
}
}

View File

@ -0,0 +1,39 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.cluster.protocol;
import java.util.Collection;
import java.util.Collections;
import javax.xml.bind.annotation.XmlRootElement;
/**
* Represents a set of Node Identifiers that identify all nodes that are in a NiFi Cluster
*/
@XmlRootElement(name = "ClusterNodes")
public class ClusterNodes {
private Collection<NodeIdentifier> nodeIds;
public ClusterNodes(final Collection<NodeIdentifier> nodeIds) {
this.nodeIds = nodeIds;
}
public Collection<NodeIdentifier> getNodeIdentifiers() {
return Collections.unmodifiableCollection(nodeIds);
}
}

View File

@ -31,7 +31,7 @@ import org.apache.nifi.cluster.protocol.jaxb.message.ConnectionResponseAdapter;
@XmlJavaTypeAdapter(ConnectionResponseAdapter.class)
public class ConnectionResponse {
private final boolean blockedByFirewall;
private final String rejectionReason;
private final int tryLaterSeconds;
private final NodeIdentifier nodeIdentifier;
private final StandardDataFlow dataFlow;
@ -43,7 +43,7 @@ public class ConnectionResponse {
private volatile String clusterManagerDN;
public ConnectionResponse(final NodeIdentifier nodeIdentifier, final StandardDataFlow dataFlow, final boolean primary,
final Integer managerRemoteInputPort, final Boolean managerRemoteCommsSecure, final String instanceId) {
final Integer managerRemoteInputPort, final Boolean managerRemoteCommsSecure, final String instanceId) {
if (nodeIdentifier == null) {
throw new IllegalArgumentException("Node identifier may not be empty or null.");
} else if (dataFlow == null) {
@ -52,7 +52,7 @@ public class ConnectionResponse {
this.nodeIdentifier = nodeIdentifier;
this.dataFlow = dataFlow;
this.tryLaterSeconds = 0;
this.blockedByFirewall = false;
this.rejectionReason = null;
this.primary = primary;
this.managerRemoteInputPort = managerRemoteInputPort;
this.managerRemoteCommsSecure = managerRemoteCommsSecure;
@ -66,18 +66,18 @@ public class ConnectionResponse {
this.dataFlow = null;
this.nodeIdentifier = null;
this.tryLaterSeconds = tryLaterSeconds;
this.blockedByFirewall = false;
this.rejectionReason = null;
this.primary = false;
this.managerRemoteInputPort = null;
this.managerRemoteCommsSecure = null;
this.instanceId = null;
}
private ConnectionResponse() {
private ConnectionResponse(final String rejectionReason) {
this.dataFlow = null;
this.nodeIdentifier = null;
this.tryLaterSeconds = 0;
this.blockedByFirewall = true;
this.rejectionReason = rejectionReason;
this.primary = false;
this.managerRemoteInputPort = null;
this.managerRemoteCommsSecure = null;
@ -85,7 +85,15 @@ public class ConnectionResponse {
}
public static ConnectionResponse createBlockedByFirewallResponse() {
return new ConnectionResponse();
return new ConnectionResponse("Blocked by Firewall");
}
public static ConnectionResponse createConflictingNodeIdResponse(final String otherNode) {
return new ConnectionResponse("The Node Identifier provided already belongs to node " + otherNode);
}
public static ConnectionResponse createRejectionResponse(final String explanation) {
return new ConnectionResponse(explanation);
}
public boolean isPrimary() {
@ -96,8 +104,8 @@ public class ConnectionResponse {
return tryLaterSeconds > 0;
}
public boolean isBlockedByFirewall() {
return blockedByFirewall;
public String getRejectionReason() {
return rejectionReason;
}
public int getTryLaterSeconds() {
@ -135,5 +143,4 @@ public class ConnectionResponse {
public String getClusterManagerDN() {
return clusterManagerDN;
}
}

View File

@ -33,7 +33,6 @@ import org.apache.commons.lang3.StringUtils;
* @Threadsafe
*/
public class NodeIdentifier {
/**
* the unique identifier for the node
*/
@ -61,13 +60,30 @@ public class NodeIdentifier {
*/
private final int socketPort;
/**
* the IP or hostname that external clients should use to communicate with this node via Site-to-Site
*/
private final String siteToSiteAddress;
/**
* the port that external clients should use to communicate with this node via Site-to-Site
*/
private final Integer siteToSitePort;
/**
* whether or not site-to-site communications with this node are secure
*/
private Boolean siteToSiteSecure;
private final String nodeDn;
public NodeIdentifier(final String id, final String apiAddress, final int apiPort, final String socketAddress, final int socketPort) {
this(id, apiAddress, apiPort, socketAddress, socketPort, null);
public NodeIdentifier(final String id, final String apiAddress, final int apiPort, final String socketAddress, final int socketPort,
final String siteToSiteAddress, final Integer siteToSitePort, final boolean siteToSiteSecure) {
this(id, apiAddress, apiPort, socketAddress, socketPort, siteToSiteAddress, siteToSitePort, siteToSiteSecure, null);
}
public NodeIdentifier(final String id, final String apiAddress, final int apiPort, final String socketAddress, final int socketPort, final String dn) {
public NodeIdentifier(final String id, final String apiAddress, final int apiPort, final String socketAddress, final int socketPort,
final String siteToSiteAddress, final Integer siteToSitePort, final boolean siteToSiteSecure, final String dn) {
if (StringUtils.isBlank(id)) {
throw new IllegalArgumentException("Node ID may not be empty or null.");
@ -79,6 +95,9 @@ public class NodeIdentifier {
validatePort(apiPort);
validatePort(socketPort);
if (siteToSitePort != null) {
validatePort(siteToSitePort);
}
this.id = id;
this.apiAddress = apiAddress;
@ -86,6 +105,9 @@ public class NodeIdentifier {
this.socketAddress = socketAddress;
this.socketPort = socketPort;
this.nodeDn = dn;
this.siteToSiteAddress = siteToSiteAddress == null ? apiAddress : siteToSiteAddress;
this.siteToSitePort = siteToSitePort;
this.siteToSiteSecure = siteToSiteSecure;
}
public String getId() {
@ -118,6 +140,19 @@ public class NodeIdentifier {
}
}
public String getSiteToSiteAddress() {
return siteToSiteAddress;
}
public Integer getSiteToSitePort() {
return siteToSitePort;
}
public boolean isSiteToSiteSecure() {
return siteToSiteSecure;
}
/**
* Compares the id of two node identifiers for equality.
*
@ -165,6 +200,7 @@ public class NodeIdentifier {
if (this.socketPort != other.socketPort) {
return false;
}
return true;
}
@ -177,7 +213,9 @@ public class NodeIdentifier {
@Override
public String toString() {
return "[" + "id=" + id + ", apiAddress=" + apiAddress + ", apiPort=" + apiPort + ", socketAddress=" + socketAddress + ", socketPort=" + socketPort + ']';
return "[" + "id=" + id + ", apiAddress=" + apiAddress + ", apiPort=" + apiPort
+ ", socketAddress=" + socketAddress + ", socketPort=" + socketPort
+ ", siteToSiteAddress=" + siteToSiteAddress + ", siteToSitePort=" + siteToSitePort + ']';
}
}

View File

@ -27,7 +27,7 @@ public class AdaptedConnectionResponse {
private StandardDataFlow dataFlow;
private NodeIdentifier nodeIdentifier;
private boolean blockedByFirewall;
private String rejectionReason;
private boolean primary;
private int tryLaterSeconds;
private Integer managerRemoteInputPort;
@ -63,12 +63,12 @@ public class AdaptedConnectionResponse {
this.tryLaterSeconds = tryLaterSeconds;
}
public boolean isBlockedByFirewall() {
return blockedByFirewall;
public String getRejectionReason() {
return rejectionReason;
}
public void setBlockedByFirewall(boolean blockedByFirewall) {
this.blockedByFirewall = blockedByFirewall;
public void setRejectionReason(final String rejectionReason) {
this.rejectionReason = rejectionReason;
}
public boolean isPrimary() {

View File

@ -21,14 +21,13 @@ package org.apache.nifi.cluster.protocol.jaxb.message;
public class AdaptedNodeIdentifier {
private String id;
private String apiAddress;
private int apiPort;
private String socketAddress;
private int socketPort;
private String siteToSiteAddress;
private Integer siteToSitePort;
private boolean siteToSiteSecure;
public AdaptedNodeIdentifier() {
}
@ -73,4 +72,28 @@ public class AdaptedNodeIdentifier {
this.socketPort = socketPort;
}
public String getSiteToSiteAddress() {
return siteToSiteAddress;
}
public void setSiteToSiteAddress(String siteToSiteAddress) {
this.siteToSiteAddress = siteToSiteAddress;
}
public Integer getSiteToSitePort() {
return siteToSitePort;
}
public void setSiteToSitePort(Integer siteToSitePort) {
this.siteToSitePort = siteToSitePort;
}
public boolean isSiteToSiteSecure() {
return siteToSiteSecure;
}
public void setSiteToSiteSecure(boolean siteToSiteSecure) {
this.siteToSiteSecure = siteToSiteSecure;
}
}

View File

@ -30,7 +30,7 @@ public class ConnectionResponseAdapter extends XmlAdapter<AdaptedConnectionRespo
aCr.setDataFlow(cr.getDataFlow());
aCr.setNodeIdentifier(cr.getNodeIdentifier());
aCr.setTryLaterSeconds(cr.getTryLaterSeconds());
aCr.setBlockedByFirewall(cr.isBlockedByFirewall());
aCr.setRejectionReason(cr.getRejectionReason());
aCr.setPrimary(cr.isPrimary());
aCr.setManagerRemoteInputPort(cr.getManagerRemoteInputPort());
aCr.setManagerRemoteCommsSecure(cr.isManagerRemoteCommsSecure());
@ -43,11 +43,11 @@ public class ConnectionResponseAdapter extends XmlAdapter<AdaptedConnectionRespo
public ConnectionResponse unmarshal(final AdaptedConnectionResponse aCr) {
if (aCr.shouldTryLater()) {
return new ConnectionResponse(aCr.getTryLaterSeconds());
} else if (aCr.isBlockedByFirewall()) {
return ConnectionResponse.createBlockedByFirewallResponse();
} else if (aCr.getRejectionReason() != null) {
return ConnectionResponse.createRejectionResponse(aCr.getRejectionReason());
} else {
return new ConnectionResponse(aCr.getNodeIdentifier(), aCr.getDataFlow(), aCr.isPrimary(),
aCr.getManagerRemoteInputPort(), aCr.isManagerRemoteCommsSecure(), aCr.getInstanceId());
aCr.getManagerRemoteInputPort(), aCr.isManagerRemoteCommsSecure(), aCr.getInstanceId());
}
}

View File

@ -34,6 +34,9 @@ public class NodeIdentifierAdapter extends XmlAdapter<AdaptedNodeIdentifier, Nod
aNi.setApiPort(ni.getApiPort());
aNi.setSocketAddress(ni.getSocketAddress());
aNi.setSocketPort(ni.getSocketPort());
aNi.setSiteToSiteAddress(ni.getSiteToSiteAddress());
aNi.setSiteToSitePort(ni.getSiteToSitePort());
aNi.setSiteToSiteSecure(ni.isSiteToSiteSecure());
return aNi;
}
}
@ -43,7 +46,8 @@ public class NodeIdentifierAdapter extends XmlAdapter<AdaptedNodeIdentifier, Nod
if (aNi == null) {
return null;
} else {
return new NodeIdentifier(aNi.getId(), aNi.getApiAddress(), aNi.getApiPort(), aNi.getSocketAddress(), aNi.getSocketPort());
return new NodeIdentifier(aNi.getId(), aNi.getApiAddress(), aNi.getApiPort(), aNi.getSocketAddress(), aNi.getSocketPort(),
aNi.getSiteToSiteAddress(), aNi.getSiteToSitePort(), aNi.isSiteToSiteSecure());
}
}

View File

@ -87,7 +87,7 @@ public class ClusterManagerProtocolSenderImplTest {
when(mockHandler.canHandle(any(ProtocolMessage.class))).thenReturn(Boolean.TRUE);
when(mockHandler.handle(any(ProtocolMessage.class))).thenReturn(new FlowResponseMessage());
FlowRequestMessage request = new FlowRequestMessage();
request.setNodeId(new NodeIdentifier("id", "api-address", 1, "localhost", port));
request.setNodeId(new NodeIdentifier("id", "api-address", 1, "localhost", port, "localhost", 3821, false));
FlowResponseMessage response = sender.requestFlow(request);
assertNotNull(response);
}
@ -98,7 +98,7 @@ public class ClusterManagerProtocolSenderImplTest {
when(mockHandler.canHandle(any(ProtocolMessage.class))).thenReturn(Boolean.TRUE);
when(mockHandler.handle(any(ProtocolMessage.class))).thenReturn(new PingMessage());
FlowRequestMessage request = new FlowRequestMessage();
request.setNodeId(new NodeIdentifier("id", "api-address", 1, "localhost", port));
request.setNodeId(new NodeIdentifier("id", "api-address", 1, "localhost", port, "localhost", 3821, false));
try {
sender.requestFlow(request);
fail("failed to throw exception");
@ -122,7 +122,7 @@ public class ClusterManagerProtocolSenderImplTest {
}
});
FlowRequestMessage request = new FlowRequestMessage();
request.setNodeId(new NodeIdentifier("id", "api-address", 1, "localhost", port));
request.setNodeId(new NodeIdentifier("id", "api-address", 1, "localhost", port, "localhost", 3821, false));
try {
sender.requestFlow(request);
fail("failed to throw exception");

View File

@ -47,7 +47,6 @@ import org.apache.nifi.io.socket.ServerSocketConfiguration;
import org.apache.nifi.io.socket.SocketConfiguration;
import org.apache.nifi.io.socket.multicast.DiscoverableService;
import org.apache.nifi.io.socket.multicast.DiscoverableServiceImpl;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
@ -80,7 +79,7 @@ public class NodeProtocolSenderImplTest {
mockServiceLocator = mock(ClusterServiceLocator.class);
mockHandler = mock(ProtocolHandler.class);
nodeIdentifier = new NodeIdentifier("1", "localhost", 1234, "localhost", 5678);
nodeIdentifier = new NodeIdentifier("1", "localhost", 1234, "localhost", 5678, "localhost", 3821, false);
ProtocolContext protocolContext = new JaxbProtocolContext(JaxbProtocolUtils.JAXB_CONTEXT);
@ -110,7 +109,7 @@ public class NodeProtocolSenderImplTest {
when(mockHandler.canHandle(any(ProtocolMessage.class))).thenReturn(Boolean.TRUE);
ConnectionResponseMessage mockMessage = new ConnectionResponseMessage();
mockMessage.setConnectionResponse(new ConnectionResponse(nodeIdentifier,
new StandardDataFlow("flow".getBytes("UTF-8"), new byte[0], new byte[0]), false, null, null, UUID.randomUUID().toString()));
new StandardDataFlow("flow".getBytes("UTF-8"), new byte[0], new byte[0]), false, null, null, UUID.randomUUID().toString()));
when(mockHandler.handle(any(ProtocolMessage.class))).thenReturn(mockMessage);
ConnectionRequestMessage request = new ConnectionRequestMessage();
@ -178,7 +177,7 @@ public class NodeProtocolSenderImplTest {
when(mockHandler.handle(any(ProtocolMessage.class))).thenReturn(null);
HeartbeatMessage hb = new HeartbeatMessage();
hb.setHeartbeat(new Heartbeat(new NodeIdentifier("id", "localhost", 3, "localhost", 4), false, false, new byte[]{1, 2, 3}));
hb.setHeartbeat(new Heartbeat(new NodeIdentifier("id", "localhost", 3, "localhost", 4, "localhost", 3821, false), false, false, new byte[] {1, 2, 3}));
sender.heartbeat(hb);
}
@ -190,7 +189,7 @@ public class NodeProtocolSenderImplTest {
when(mockHandler.handle(any(ProtocolMessage.class))).thenReturn(null);
ControllerStartupFailureMessage msg = new ControllerStartupFailureMessage();
msg.setNodeId(new NodeIdentifier("some-id", "some-addr", 1, "some-addr", 1));
msg.setNodeId(new NodeIdentifier("some-id", "some-addr", 1, "some-addr", 1, "localhost", 3821, false));
msg.setExceptionMessage("some exception");
sender.notifyControllerStartupFailure(msg);
}
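The tests above now build NodeIdentifier with the extended constructor that carries site-to-site coordinates in addition to the API and cluster-protocol coordinates. A minimal sketch of the argument order as used in these tests; the values below are hypothetical:

NodeIdentifier nodeId = new NodeIdentifier(
        "node-1",        // unique node id
        "api-address",   // API (web) address
        8080,            // API port
        "localhost",     // cluster protocol (socket) address
        5678,            // cluster protocol port
        "localhost",     // site-to-site address
        3821,            // site-to-site port
        false);          // site-to-site secure flag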

View File

@ -17,6 +17,7 @@
package org.apache.nifi.cluster.event;
import java.util.Date;
import org.apache.commons.lang3.StringUtils;
/**

View File

@ -25,6 +25,7 @@ import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Queue;
import org.apache.nifi.cluster.event.Event;
import org.apache.nifi.cluster.event.EventManager;

View File

@ -24,10 +24,11 @@ import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collection;
import org.apache.commons.net.util.SubnetUtils;
import org.apache.nifi.cluster.firewall.ClusterNodeFirewall;
import org.apache.nifi.util.file.FileUtils;
import org.apache.nifi.logging.NiFiLog;
import org.apache.nifi.util.file.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@ -16,19 +16,19 @@
*/
package org.apache.nifi.cluster.manager;
import org.apache.nifi.cluster.manager.exception.DisconnectedNodeMutableRequestException;
import org.apache.nifi.cluster.manager.exception.UriConstructionException;
import org.apache.nifi.cluster.manager.exception.ConnectingNodeMutableRequestException;
import org.apache.nifi.cluster.manager.exception.NoConnectedNodesException;
import org.apache.nifi.cluster.manager.exception.NoResponseFromNodesException;
import org.apache.nifi.cluster.manager.exception.SafeModeMutableRequestException;
import org.apache.nifi.cluster.protocol.NodeIdentifier;
import java.net.URI;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.nifi.cluster.manager.exception.ConnectingNodeMutableRequestException;
import org.apache.nifi.cluster.manager.exception.DisconnectedNodeMutableRequestException;
import org.apache.nifi.cluster.manager.exception.NoConnectedNodesException;
import org.apache.nifi.cluster.manager.exception.NoResponseFromNodesException;
import org.apache.nifi.cluster.manager.exception.SafeModeMutableRequestException;
import org.apache.nifi.cluster.manager.exception.UriConstructionException;
import org.apache.nifi.cluster.protocol.NodeIdentifier;
/**
* Extends the ClusterManager interface to define how requests issued to the cluster manager are federated to the nodes. Specifically, the HTTP protocol is used for communicating requests to the
* cluster manager and to the nodes.

View File

@ -16,11 +16,12 @@
*/
package org.apache.nifi.cluster.manager;
import org.apache.nifi.cluster.manager.exception.UriConstructionException;
import java.net.URI;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.nifi.cluster.manager.exception.UriConstructionException;
import org.apache.nifi.cluster.protocol.NodeIdentifier;
/**

View File

@ -19,6 +19,7 @@ package org.apache.nifi.cluster.manager;
import java.net.URI;
import java.util.Map;
import java.util.Set;
import org.apache.nifi.cluster.node.Node.Status;
/**

View File

@ -16,7 +16,6 @@
*/
package org.apache.nifi.cluster.manager;
import com.sun.jersey.api.client.ClientResponse;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.OutputStream;
@ -31,14 +30,16 @@ import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.Response.Status;
import javax.ws.rs.core.StreamingOutput;
import org.apache.nifi.cluster.protocol.NodeIdentifier;
import org.apache.nifi.web.api.entity.Entity;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.nifi.cluster.protocol.NodeIdentifier;
import org.apache.nifi.web.api.entity.Entity;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.sun.jersey.api.client.ClientResponse;
/**
* Encapsulates a node's response regarding an external API request.
*

View File

@ -0,0 +1,46 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.cluster.manager.exception;
public class ConflictingNodeIdException extends Exception {
private static final long serialVersionUID = 1L;
private final String nodeId;
private final String conflictingNodeAddress;
private final int conflictingNodePort;
public ConflictingNodeIdException(final String nodeId, final String conflictingNodeAddress, final int conflictingNodePort) {
super("Node Identifier " + nodeId + " conflicts with existing node " + conflictingNodeAddress + ":" + conflictingNodePort);
this.nodeId = nodeId;
this.conflictingNodeAddress = conflictingNodeAddress;
this.conflictingNodePort = conflictingNodePort;
}
public String getNodeId() {
return nodeId;
}
public String getConflictingNodeAddress() {
return conflictingNodeAddress;
}
public int getConflictingNodePort() {
return conflictingNodePort;
}
}
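A brief sketch of what callers can recover from this new exception; the host and port values here are hypothetical:

final ConflictingNodeIdException e = new ConflictingNodeIdException("node-uuid", "node1.example", 8080);
// e.getMessage()               -> "Node Identifier node-uuid conflicts with existing node node1.example:8080"
// e.getNodeId()                -> "node-uuid"
// e.getConflictingNodeAddress() -> "node1.example", e.getConflictingNodePort() -> 8080

WebClusterManager catches this exception during connection handling later in this commit and returns a conflicting-node-ID response to the requesting node.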

View File

@ -16,6 +16,10 @@
*/
package org.apache.nifi.cluster.manager.impl;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.nifi.action.Action;
import org.apache.nifi.admin.service.AuditService;
import org.apache.nifi.controller.status.ProcessGroupStatus;
@ -30,10 +34,6 @@ import org.apache.nifi.provenance.search.QuerySubmission;
import org.apache.nifi.provenance.search.SearchableField;
import org.apache.nifi.reporting.EventAccess;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class ClusteredEventAccess implements EventAccess {
private final WebClusterManager clusterManager;

View File

@ -25,6 +25,7 @@ import org.apache.nifi.attribute.expression.language.Query;
import org.apache.nifi.attribute.expression.language.StandardPropertyValue;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.PropertyValue;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.controller.ControllerService;
import org.apache.nifi.controller.ControllerServiceLookup;
import org.apache.nifi.controller.service.ControllerServiceProvider;
@ -47,13 +48,15 @@ public class ClusteredReportingContext implements ReportingContext {
private final ControllerServiceProvider serviceProvider;
private final Map<PropertyDescriptor, String> properties;
private final Map<PropertyDescriptor, PreparedQuery> preparedQueries;
private final StateManager stateManager;
public ClusteredReportingContext(final EventAccess eventAccess, final BulletinRepository bulletinRepository,
final Map<PropertyDescriptor, String> properties, final ControllerServiceProvider serviceProvider) {
public ClusteredReportingContext(final EventAccess eventAccess, final BulletinRepository bulletinRepository, final Map<PropertyDescriptor, String> properties,
final ControllerServiceProvider serviceProvider, final StateManager stateManager) {
this.eventAccess = eventAccess;
this.bulletinRepository = bulletinRepository;
this.properties = Collections.unmodifiableMap(properties);
this.serviceProvider = serviceProvider;
this.stateManager = stateManager;
preparedQueries = new HashMap<>();
for (final Map.Entry<PropertyDescriptor, String> entry : properties.entrySet()) {
@ -206,4 +209,9 @@ public class ClusteredReportingContext implements ReportingContext {
return null;
}
@Override
public StateManager getStateManager() {
return stateManager;
}
}
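With the StateManager now wired into the reporting context, a reporting task running on the cluster manager can persist simple key/value state between runs. A minimal sketch, assuming the StateManager, StateMap, and Scope types from org.apache.nifi.components.state and their getState/setState/toMap methods (the state calls may throw IOException):

void recordLastRun(final ReportingContext context) throws IOException {
    final StateManager stateManager = context.getStateManager();
    // copy the current local state, update one key, and write it back
    final Map<String, String> state = new HashMap<>(stateManager.getState(Scope.LOCAL).toMap());
    state.put("last.run.timestamp", String.valueOf(System.currentTimeMillis()));
    stateManager.setState(state, Scope.LOCAL);
}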

View File

@ -16,14 +16,6 @@
*/
package org.apache.nifi.cluster.manager.impl;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.api.client.config.ClientConfig;
import com.sun.jersey.api.client.filter.GZIPContentEncodingFilter;
import com.sun.jersey.core.util.MultivaluedMapImpl;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
@ -47,17 +39,24 @@ import javax.ws.rs.HttpMethod;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import org.apache.commons.lang3.StringUtils;
import org.apache.nifi.cluster.manager.HttpRequestReplicator;
import org.apache.nifi.cluster.manager.NodeResponse;
import org.apache.nifi.cluster.manager.exception.UriConstructionException;
import org.apache.nifi.cluster.protocol.NodeIdentifier;
import org.apache.nifi.logging.NiFiLog;
import org.apache.nifi.util.FormatUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.api.client.config.ClientConfig;
import com.sun.jersey.api.client.filter.GZIPContentEncodingFilter;
import com.sun.jersey.core.util.MultivaluedMapImpl;
/**
* An implementation of the <code>HttpRequestReplicator</code> interface. This implementation parallelizes the node HTTP requests using the given <code>ExecutorService</code> instance. Individual
* requests may have connection and read timeouts, which may be set during instance construction; otherwise, the default is not to time out.

View File

@ -20,6 +20,7 @@ import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.nifi.cluster.manager.HttpResponseMapper;
import org.apache.nifi.cluster.manager.NodeResponse;
import org.apache.nifi.cluster.node.Node;

View File

@ -71,6 +71,7 @@ import javax.xml.transform.stream.StreamResult;
import org.apache.commons.lang3.StringUtils;
import org.apache.nifi.admin.service.AuditService;
import org.apache.nifi.annotation.lifecycle.OnAdded;
import org.apache.nifi.annotation.lifecycle.OnConfigurationRestored;
import org.apache.nifi.annotation.lifecycle.OnRemoved;
import org.apache.nifi.cluster.BulletinsPayload;
import org.apache.nifi.cluster.HeartbeatPayload;
@ -87,6 +88,7 @@ import org.apache.nifi.cluster.manager.HttpClusterManager;
import org.apache.nifi.cluster.manager.HttpRequestReplicator;
import org.apache.nifi.cluster.manager.HttpResponseMapper;
import org.apache.nifi.cluster.manager.NodeResponse;
import org.apache.nifi.cluster.manager.exception.ConflictingNodeIdException;
import org.apache.nifi.cluster.manager.exception.ConnectingNodeMutableRequestException;
import org.apache.nifi.cluster.manager.exception.DisconnectedNodeMutableRequestException;
import org.apache.nifi.cluster.manager.exception.IllegalClusterStateException;
@ -126,6 +128,7 @@ import org.apache.nifi.cluster.protocol.message.ProtocolMessage.MessageType;
import org.apache.nifi.cluster.protocol.message.ReconnectionFailureMessage;
import org.apache.nifi.cluster.protocol.message.ReconnectionRequestMessage;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.state.StateManagerProvider;
import org.apache.nifi.controller.ControllerService;
import org.apache.nifi.controller.Heartbeater;
import org.apache.nifi.controller.ReportingTaskNode;
@ -148,6 +151,7 @@ import org.apache.nifi.controller.service.ControllerServiceNode;
import org.apache.nifi.controller.service.ControllerServiceProvider;
import org.apache.nifi.controller.service.ControllerServiceState;
import org.apache.nifi.controller.service.StandardControllerServiceProvider;
import org.apache.nifi.controller.state.manager.StandardStateManagerProvider;
import org.apache.nifi.controller.status.ProcessGroupStatus;
import org.apache.nifi.controller.status.RemoteProcessGroupStatus;
import org.apache.nifi.controller.status.history.ComponentStatusRepository;
@ -197,6 +201,7 @@ import org.apache.nifi.util.ReflectionUtils;
import org.apache.nifi.web.OptimisticLockingManager;
import org.apache.nifi.web.Revision;
import org.apache.nifi.web.UpdateRevision;
import org.apache.nifi.web.api.dto.ComponentStateDTO;
import org.apache.nifi.web.api.dto.ControllerServiceDTO;
import org.apache.nifi.web.api.dto.ControllerServiceReferencingComponentDTO;
import org.apache.nifi.web.api.dto.DropRequestDTO;
@ -211,6 +216,8 @@ import org.apache.nifi.web.api.dto.RemoteProcessGroupContentsDTO;
import org.apache.nifi.web.api.dto.RemoteProcessGroupDTO;
import org.apache.nifi.web.api.dto.RemoteProcessGroupPortDTO;
import org.apache.nifi.web.api.dto.ReportingTaskDTO;
import org.apache.nifi.web.api.dto.StateEntryDTO;
import org.apache.nifi.web.api.dto.StateMapDTO;
import org.apache.nifi.web.api.dto.provenance.ProvenanceDTO;
import org.apache.nifi.web.api.dto.provenance.ProvenanceEventDTO;
import org.apache.nifi.web.api.dto.provenance.ProvenanceRequestDTO;
@ -219,6 +226,7 @@ import org.apache.nifi.web.api.dto.status.ClusterStatusHistoryDTO;
import org.apache.nifi.web.api.dto.status.NodeStatusHistoryDTO;
import org.apache.nifi.web.api.dto.status.StatusHistoryDTO;
import org.apache.nifi.web.api.dto.status.StatusSnapshotDTO;
import org.apache.nifi.web.api.entity.ComponentStateEntity;
import org.apache.nifi.web.api.entity.ControllerServiceEntity;
import org.apache.nifi.web.api.entity.ControllerServiceReferencingComponentsEntity;
import org.apache.nifi.web.api.entity.ControllerServicesEntity;
@ -305,6 +313,7 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
public static final Pattern PROCESSORS_URI_PATTERN = Pattern.compile("/nifi-api/controller/process-groups/(?:(?:root)|(?:[a-f0-9\\-]{36}))/processors");
public static final Pattern PROCESSOR_URI_PATTERN = Pattern.compile("/nifi-api/controller/process-groups/(?:(?:root)|(?:[a-f0-9\\-]{36}))/processors/[a-f0-9\\-]{36}");
public static final Pattern PROCESSOR_STATE_URI_PATTERN = Pattern.compile("/nifi-api/controller/process-groups/(?:(?:root)|(?:[a-f0-9\\-]{36}))/processors/[a-f0-9\\-]{36}/state");
public static final Pattern CLUSTER_PROCESSOR_URI_PATTERN = Pattern.compile("/nifi-api/cluster/processors/[a-f0-9\\-]{36}");
public static final Pattern REMOTE_PROCESS_GROUPS_URI_PATTERN = Pattern.compile("/nifi-api/controller/process-groups/(?:(?:root)|(?:[a-f0-9\\-]{36}))/remote-process-groups");
@ -321,9 +330,11 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
public static final Pattern COUNTERS_URI = Pattern.compile("/nifi-api/controller/counters/[a-f0-9\\-]{36}");
public static final String CONTROLLER_SERVICES_URI = "/nifi-api/controller/controller-services/node";
public static final Pattern CONTROLLER_SERVICE_URI_PATTERN = Pattern.compile("/nifi-api/controller/controller-services/node/[a-f0-9\\-]{36}");
public static final Pattern CONTROLLER_SERVICE_STATE_URI_PATTERN = Pattern.compile("/nifi-api/controller/controller-services/node/[a-f0-9\\-]{36}/state");
public static final Pattern CONTROLLER_SERVICE_REFERENCES_URI_PATTERN = Pattern.compile("/nifi-api/controller/controller-services/node/[a-f0-9\\-]{36}/references");
public static final String REPORTING_TASKS_URI = "/nifi-api/controller/reporting-tasks/node";
public static final Pattern REPORTING_TASK_URI_PATTERN = Pattern.compile("/nifi-api/controller/reporting-tasks/node/[a-f0-9\\-]{36}");
public static final Pattern REPORTING_TASK_STATE_URI_PATTERN = Pattern.compile("/nifi-api/controller/reporting-tasks/node/[a-f0-9\\-]{36}/state");
@Deprecated
public static final Pattern QUEUE_CONTENTS_URI = Pattern.compile("/nifi-api/controller/process-groups/(?:(?:root)|(?:[a-f0-9\\-]{36}))/connections/[a-f0-9\\-]{36}/contents");
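The new *_STATE_URI_PATTERN constants above let the cluster manager recognize per-component state endpoints so their responses can be merged across nodes. A quick sanity check of the processor pattern; the UUID below is hypothetical:

final String uri = "/nifi-api/controller/process-groups/root/processors/0123abcd-4567-89ef-0123-456789abcdef/state";
// matches, so a GET against this URI is treated as a processor state request (see isProcessorStateEndpoint below)
final boolean isStateRequest = PROCESSOR_STATE_URI_PATTERN.matcher(uri).matches();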
@ -368,8 +379,10 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
private final FlowEngine reportingTaskEngine;
private final Map<NodeIdentifier, ComponentStatusRepository> componentMetricsRepositoryMap = new HashMap<>();
private final StandardProcessScheduler processScheduler;
private final StateManagerProvider stateManagerProvider;
private final long componentStatusSnapshotMillis;
public WebClusterManager(final HttpRequestReplicator httpRequestReplicator, final HttpResponseMapper httpResponseMapper,
final DataFlowManagementService dataFlowManagementService, final ClusterManagerProtocolSenderListener senderListener,
final NiFiProperties properties, final StringEncryptor encryptor, final OptimisticLockingManager optimisticLockingManager) {
@ -408,40 +421,6 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
}
componentStatusSnapshotMillis = snapshotMillis;
Executors.newSingleThreadScheduledExecutor().scheduleWithFixedDelay(new Runnable() {
@Override
public void run() {
readLock.lock();
try {
for (final Node node : nodes) {
if (Status.CONNECTED.equals(node.getStatus())) {
ComponentStatusRepository statusRepository = componentMetricsRepositoryMap.get(node.getNodeId());
if (statusRepository == null) {
statusRepository = createComponentStatusRepository();
componentMetricsRepositoryMap.put(node.getNodeId(), statusRepository);
}
// ensure this node has a payload
if (node.getHeartbeat() != null && node.getHeartbeatPayload() != null) {
// if nothing has been captured or the current heartbeat is newer, capture it - comparing the heartbeat created timestamp
// is safe since it's marked as XmlTransient so we're assured that it's based on the same clock that created the last capture date
if (statusRepository.getLastCaptureDate() == null || node.getHeartbeat().getCreatedTimestamp() > statusRepository.getLastCaptureDate().getTime()) {
statusRepository.capture(node.getHeartbeatPayload().getProcessGroupStatus());
}
}
}
}
} catch (final Throwable t) {
logger.warn("Unable to capture component metrics from Node heartbeats: " + t);
if (logger.isDebugEnabled()) {
logger.warn("", t);
}
} finally {
readLock.unlock("capture component metrics from node heartbeats");
}
}
}, componentStatusSnapshotMillis, componentStatusSnapshotMillis, TimeUnit.MILLISECONDS);
remoteInputPort = properties.getRemoteInputPort();
if (remoteInputPort == null) {
remoteSiteListener = null;
@ -465,11 +444,17 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
reportingTaskEngine = new FlowEngine(8, "Reporting Task Thread");
try {
this.stateManagerProvider = StandardStateManagerProvider.create(properties);
} catch (final IOException e) {
throw new RuntimeException(e);
}
processScheduler = new StandardProcessScheduler(new Heartbeater() {
@Override
public void heartbeat() {
}
}, this, encryptor);
}, this, encryptor, stateManagerProvider);
// When we construct the scheduling agents, we can pass null for many of the arguments because we are only
// going to be scheduling Reporting Tasks; passing null would not be acceptable otherwise.
@ -477,14 +462,14 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
processScheduler.setSchedulingAgent(SchedulingStrategy.CRON_DRIVEN, new QuartzSchedulingAgent(null, reportingTaskEngine, null, encryptor));
processScheduler.setMaxThreadCount(SchedulingStrategy.TIMER_DRIVEN, 10);
processScheduler.setMaxThreadCount(SchedulingStrategy.CRON_DRIVEN, 10);
processScheduler.scheduleFrameworkTask(new CaptureComponentMetrics(), "Capture Component Metrics", componentStatusSnapshotMillis, componentStatusSnapshotMillis, TimeUnit.MILLISECONDS);
controllerServiceProvider = new StandardControllerServiceProvider(processScheduler, bulletinRepository);
controllerServiceProvider = new StandardControllerServiceProvider(processScheduler, bulletinRepository, stateManagerProvider);
}
public void start() throws IOException {
writeLock.lock();
try {
if (isRunning()) {
throw new IllegalStateException("Instance is already started.");
}
@ -539,6 +524,8 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
if (serializedReportingTasks != null && serializedReportingTasks.length > 0) {
loadReportingTasks(serializedReportingTasks);
}
notifyComponentsConfigurationRestored();
} catch (final IOException ioe) {
logger.warn("Failed to initialize cluster services due to: " + ioe, ioe);
stop();
@ -675,6 +662,25 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
}
}
private void notifyComponentsConfigurationRestored() {
for (final ControllerServiceNode serviceNode : getAllControllerServices()) {
final ControllerService service = serviceNode.getControllerServiceImplementation();
try (final NarCloseable nc = NarCloseable.withNarLoader()) {
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnConfigurationRestored.class, service);
}
}
for (final ReportingTaskNode taskNode : getAllReportingTasks()) {
final ReportingTask task = taskNode.getReportingTask();
try (final NarCloseable nc = NarCloseable.withNarLoader()) {
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnConfigurationRestored.class, task);
}
}
}
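The reflection call above invokes any method annotated with @OnConfigurationRestored on each controller service and reporting task once its configuration has been restored. A hedged sketch of a hypothetical reporting task opting in; it assumes AbstractReportingTask and the no-argument method form:

import org.apache.nifi.annotation.lifecycle.OnConfigurationRestored;
import org.apache.nifi.reporting.AbstractReportingTask;
import org.apache.nifi.reporting.ReportingContext;

public class ExampleReportingTask extends AbstractReportingTask {

    private volatile boolean configurationRestored = false;

    @OnConfigurationRestored
    public void onConfigurationRestored() {
        // invoked by the reflection call above once this task's configuration has been restored
        configurationRestored = true;
    }

    @Override
    public void onTrigger(final ReportingContext context) {
        // normal reporting logic would go here
    }
}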
/**
* Services connection requests. If the data flow management service is unable to provide a current copy of the data flow, then the returned connection response will indicate the node should try
* later. Otherwise, the connection response will contain the flow and the node identifier.
@ -709,7 +715,14 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
try {
// resolve the proposed node identifier to a valid node identifier
final NodeIdentifier resolvedNodeIdentifier = resolveProposedNodeIdentifier(request.getProposedNodeIdentifier());
final NodeIdentifier resolvedNodeIdentifier;
try {
resolvedNodeIdentifier = resolveProposedNodeIdentifier(request.getProposedNodeIdentifier());
} catch (final ConflictingNodeIdException e) {
logger.info("Rejecting node {} from connecting to cluster because it provided a Node ID of {} but that Node ID already belongs to {}:{}",
request.getProposedNodeIdentifier().getSocketAddress(), request.getProposedNodeIdentifier().getId(), e.getConflictingNodeAddress(), e.getConflictingNodePort());
return ConnectionResponse.createConflictingNodeIdResponse(e.getConflictingNodeAddress() + ":" + e.getConflictingNodePort());
}
if (isBlockedByFirewall(resolvedNodeIdentifier.getSocketAddress())) {
// if the socket address is not listed in the firewall, then return a null response
@ -1093,13 +1106,15 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
final ValidationContextFactory validationContextFactory = new StandardValidationContextFactory(this);
final ReportingTaskNode taskNode = new ClusteredReportingTaskNode(task, id, processScheduler,
new ClusteredEventAccess(this, auditService), bulletinRepository, controllerServiceProvider, validationContextFactory);
new ClusteredEventAccess(this, auditService), bulletinRepository, controllerServiceProvider,
validationContextFactory, stateManagerProvider.getStateManager(id));
taskNode.setName(task.getClass().getSimpleName());
reportingTasks.put(id, taskNode);
if (firstTimeAdded) {
try (final NarCloseable x = NarCloseable.withNarLoader()) {
ReflectionUtils.invokeMethodsWithAnnotation(OnAdded.class, task);
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnConfigurationRestored.class, taskNode.getReportingTask());
} catch (final Exception e) {
throw new ComponentLifeCycleException("Failed to invoke On-Added Lifecycle methods of " + task, e);
}
@ -1351,8 +1366,9 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
}
private NodeIdentifier addRequestorDn(final NodeIdentifier nodeId, final String dn) {
return new NodeIdentifier(nodeId.getId(), nodeId.getApiAddress(),
nodeId.getApiPort(), nodeId.getSocketAddress(), nodeId.getSocketPort(), dn);
return new NodeIdentifier(nodeId.getId(), nodeId.getApiAddress(), nodeId.getApiPort(),
nodeId.getSocketAddress(), nodeId.getSocketPort(),
nodeId.getSiteToSiteAddress(), nodeId.getSiteToSitePort(), nodeId.isSiteToSiteSecure(), dn);
}
private ConnectionResponseMessage handleConnectionRequest(final ConnectionRequestMessage requestMessage) {
@ -1405,9 +1421,21 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
logRepository.addObserver(StandardProcessorNode.BULLETIN_OBSERVER_ID, LogLevel.WARN,
new ControllerServiceLogObserver(getBulletinRepository(), serviceNode));
if (firstTimeAdded) {
final ControllerService service = serviceNode.getControllerServiceImplementation();
try (final NarCloseable nc = NarCloseable.withNarLoader()) {
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnConfigurationRestored.class, service);
}
}
return serviceNode;
}
public StateManagerProvider getStateManagerProvider() {
return stateManagerProvider;
}
@Override
public ControllerService getControllerService(String serviceIdentifier) {
return controllerServiceProvider.getControllerService(serviceIdentifier);
@ -1845,6 +1873,7 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
}
}
private ComponentStatusRepository createComponentStatusRepository() {
final String implementationClassName = properties.getProperty(NiFiProperties.COMPONENT_STATUS_REPOSITORY_IMPLEMENTATION, DEFAULT_COMPONENT_STATUS_REPO_IMPLEMENTATION);
if (implementationClassName == null) {
@ -2404,6 +2433,10 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
return false;
}
private static boolean isProcessorStateEndpoint(final URI uri, final String method) {
return "GET".equalsIgnoreCase(method) && PROCESSOR_STATE_URI_PATTERN.matcher(uri.getPath()).matches();
}
private static boolean isProcessGroupEndpoint(final URI uri, final String method) {
return ("GET".equalsIgnoreCase(method) || "PUT".equalsIgnoreCase(method)) && PROCESS_GROUP_URI_PATTERN.matcher(uri.getPath()).matches();
}
@ -2471,6 +2504,10 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
return false;
}
private static boolean isControllerServiceStateEndpoint(final URI uri, final String method) {
return "GET".equalsIgnoreCase(method) && CONTROLLER_SERVICE_STATE_URI_PATTERN.matcher(uri.getPath()).matches();
}
private static boolean isControllerServiceReferenceEndpoint(final URI uri, final String method) {
if (("GET".equalsIgnoreCase(method) || "PUT".equalsIgnoreCase(method)) && CONTROLLER_SERVICE_REFERENCES_URI_PATTERN.matcher(uri.getPath()).matches()) {
return true;
@ -2493,6 +2530,10 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
return false;
}
private static boolean isReportingTaskStateEndpoint(final URI uri, final String method) {
return "GET".equalsIgnoreCase(method) && REPORTING_TASK_STATE_URI_PATTERN.matcher(uri.getPath()).matches();
}
private static boolean isDropRequestEndpoint(final URI uri, final String method) {
if ("DELETE".equalsIgnoreCase(method) && QUEUE_CONTENTS_URI.matcher(uri.getPath()).matches()) {
return true;
@ -2506,13 +2547,14 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
}
static boolean isResponseInterpreted(final URI uri, final String method) {
return isProcessorsEndpoint(uri, method) || isProcessorEndpoint(uri, method)
return isProcessorsEndpoint(uri, method) || isProcessorEndpoint(uri, method) || isProcessorStateEndpoint(uri, method)
|| isRemoteProcessGroupsEndpoint(uri, method) || isRemoteProcessGroupEndpoint(uri, method)
|| isProcessGroupEndpoint(uri, method)
|| isTemplateEndpoint(uri, method) || isFlowSnippetEndpoint(uri, method)
|| isProvenanceQueryEndpoint(uri, method) || isProvenanceEventEndpoint(uri, method)
|| isControllerServicesEndpoint(uri, method) || isControllerServiceEndpoint(uri, method) || isControllerServiceReferenceEndpoint(uri, method)
|| isReportingTasksEndpoint(uri, method) || isReportingTaskEndpoint(uri, method)
|| isControllerServicesEndpoint(uri, method) || isControllerServiceEndpoint(uri, method)
|| isControllerServiceReferenceEndpoint(uri, method) || isControllerServiceStateEndpoint(uri, method)
|| isReportingTasksEndpoint(uri, method) || isReportingTaskEndpoint(uri, method) || isReportingTaskStateEndpoint(uri, method)
|| isDropRequestEndpoint(uri, method) || isListFlowFilesEndpoint(uri, method);
}
@ -2531,6 +2573,28 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
processor.setValidationErrors(normalizedMergedValidationErrors(validationErrorMap, processorMap.size()));
}
private void mergeComponentState(final ComponentStateDTO componentState, Map<NodeIdentifier, ComponentStateDTO> componentStateMap) {
final List<StateEntryDTO> localStateEntries = new ArrayList<>();
for (final Map.Entry<NodeIdentifier, ComponentStateDTO> nodeEntry : componentStateMap.entrySet()) {
final ComponentStateDTO nodeComponentState = nodeEntry.getValue();
final NodeIdentifier nodeId = nodeEntry.getKey();
final String nodeAddress = nodeId.getApiAddress() + ":" + nodeId.getApiPort();
final StateMapDTO nodeLocalStateMap = nodeComponentState.getLocalState();
if (nodeLocalStateMap.getState() != null) {
for (final StateEntryDTO nodeStateEntry : nodeLocalStateMap.getState()) {
nodeStateEntry.setClusterNodeId(nodeId.getId());
nodeStateEntry.setClusterNodeAddress(nodeAddress);
localStateEntries.add(nodeStateEntry);
}
}
}
// add all the local state entries
componentState.getLocalState().setState(localStateEntries);
}
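The merge above flattens each node's local state into a single list, tagging every entry with the node that reported it. A small illustration of the resulting entries, with hypothetical values (setKey and setValue are assumed accessors on StateEntryDTO):

final StateEntryDTO entry = new StateEntryDTO();
entry.setKey("last.run.timestamp");
entry.setValue("1454700000000");
entry.setClusterNodeId("node-1-uuid");               // added by the merge step
entry.setClusterNodeAddress("node1.example:8080");   // api-address:api-port of the reporting node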
private void mergeProvenanceQueryResults(final ProvenanceDTO provenanceDto, final Map<NodeIdentifier, ProvenanceDTO> resultMap, final Set<NodeResponse> problematicResponses) {
final ProvenanceResultsDTO results = provenanceDto.getResults();
final ProvenanceRequestDTO request = provenanceDto.getRequest();
@ -3448,6 +3512,24 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
}
mergeListingRequests(listingRequest, resultsMap);
clientResponse = new NodeResponse(clientResponse, responseEntity);
} else if (hasSuccessfulClientResponse && (isProcessorStateEndpoint(uri, method) || isControllerServiceStateEndpoint(uri, method) || isReportingTaskStateEndpoint(uri, method))) {
final ComponentStateEntity responseEntity = clientResponse.getClientResponse().getEntity(ComponentStateEntity.class);
final ComponentStateDTO componentState = responseEntity.getComponentState();
final Map<NodeIdentifier, ComponentStateDTO> resultsMap = new HashMap<>();
for (final NodeResponse nodeResponse : updatedNodesMap.values()) {
if (problematicNodeResponses.contains(nodeResponse)) {
continue;
}
final ComponentStateEntity nodeResponseEntity = nodeResponse == clientResponse ? responseEntity : nodeResponse.getClientResponse().getEntity(ComponentStateEntity.class);
final ComponentStateDTO nodeComponentState = nodeResponseEntity.getComponentState();
resultsMap.put(nodeResponse.getNodeId(), nodeComponentState);
}
mergeComponentState(componentState, resultsMap);
clientResponse = new NodeResponse(clientResponse, responseEntity);
} else {
if (!nodeResponsesToDrain.isEmpty()) {
@ -3672,7 +3754,7 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
*
* @return the node identifier that should be used
*/
private NodeIdentifier resolveProposedNodeIdentifier(final NodeIdentifier proposedNodeId) {
private NodeIdentifier resolveProposedNodeIdentifier(final NodeIdentifier proposedNodeId) throws ConflictingNodeIdException {
readLock.lock();
try {
for (final Node node : nodes) {
@ -3688,32 +3770,32 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
// we know about this node and it has the same ID, so the proposal is fine
return proposedNodeId;
} else if (sameId && !sameServiceCoordinates) {
// proposed ID conflicts with existing node ID, so assign a new ID
final NodeIdentifier resolvedIdentifier = new NodeIdentifier(
UUID.randomUUID().toString(),
proposedNodeId.getApiAddress(),
proposedNodeId.getApiPort(),
proposedNodeId.getSocketAddress(),
proposedNodeId.getSocketPort());
logger.info(String.format("Using Node Identifier %s because proposed node identifier %s conflicts existing node identifiers",
resolvedIdentifier, proposedNodeId));
return resolvedIdentifier;
throw new ConflictingNodeIdException(nodeId.getId(), node.getNodeId().getApiAddress(), node.getNodeId().getApiPort());
} else if (!sameId && sameServiceCoordinates) {
// we know about this node, so we'll use the existing ID
logger.debug(String.format("Using Node Identifier %s because proposed node identifier %s matches the service coordinates",
nodeId, proposedNodeId));
return nodeId;
logger.debug(String.format("Using Node Identifier %s because proposed node identifier %s matches the service coordinates", nodeId, proposedNodeId));
// return a new Node Identifier that keeps the existing Node UUID (because it was assigned by the NCM), but uses the other
// parameters from the proposed identifier, since those elements are determined by the node rather than the NCM.
return new NodeIdentifier(nodeId.getId(),
proposedNodeId.getApiAddress(), proposedNodeId.getApiPort(),
proposedNodeId.getSocketAddress(), proposedNodeId.getSocketPort(),
proposedNodeId.getSiteToSiteAddress(), proposedNodeId.getSiteToSitePort(), proposedNodeId.isSiteToSiteSecure());
}
}
// proposal does not conflict with existing nodes
return proposedNodeId;
// proposal does not conflict with existing nodes - this is a new node, so use the proposed identifier's values
return new NodeIdentifier(proposedNodeId.getId(), proposedNodeId.getApiAddress(), proposedNodeId.getApiPort(),
proposedNodeId.getSocketAddress(), proposedNodeId.getSocketPort(),
proposedNodeId.getSiteToSiteAddress(), proposedNodeId.getSiteToSitePort(), proposedNodeId.isSiteToSiteSecure());
} finally {
readLock.unlock("resolveProposedNodeIdentifier");
}
}
private boolean isHeartbeatMonitorRunning() {
readLock.lock();
try {
@ -3907,13 +3989,13 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
continue;
}
final Integer siteToSitePort = heartbeat.getSiteToSitePort();
final Integer siteToSitePort = id.getSiteToSitePort();
if (siteToSitePort == null) {
continue;
}
final int flowFileCount = (int) heartbeat.getTotalFlowFileCount();
final NodeInformation nodeInfo = new NodeInformation(id.getApiAddress(), siteToSitePort, id.getApiPort(),
heartbeat.isSiteToSiteSecure(), flowFileCount);
final NodeInformation nodeInfo = new NodeInformation(id.getSiteToSiteAddress(), siteToSitePort, id.getApiPort(),
id.isSiteToSiteSecure(), flowFileCount);
nodeInfos.add(nodeInfo);
}
@ -4487,4 +4569,41 @@ public class WebClusterManager implements HttpClusterManager, ProtocolHandler, C
public Set<String> getControllerServiceIdentifiers(final Class<? extends ControllerService> serviceType) {
return controllerServiceProvider.getControllerServiceIdentifiers(serviceType);
}
/**
* Captures snapshots of components' metrics
*/
private class CaptureComponentMetrics implements Runnable {
@Override
public void run() {
readLock.lock();
try {
for (final Node node : nodes) {
if (Status.CONNECTED.equals(node.getStatus())) {
ComponentStatusRepository statusRepository = componentMetricsRepositoryMap.get(node.getNodeId());
if (statusRepository == null) {
statusRepository = createComponentStatusRepository();
componentMetricsRepositoryMap.put(node.getNodeId(), statusRepository);
}
// ensure this node has a payload
if (node.getHeartbeat() != null && node.getHeartbeatPayload() != null) {
// if nothing has been captured or the current heartbeat is newer, capture it - comparing the heartbeat created timestamp
// is safe since it's marked as XmlTransient so we're assured that it's based on the same clock that created the last capture date
if (statusRepository.getLastCaptureDate() == null || node.getHeartbeat().getCreatedTimestamp() > statusRepository.getLastCaptureDate().getTime()) {
statusRepository.capture(node.getHeartbeatPayload().getProcessGroupStatus());
}
}
}
}
} catch (final Throwable t) {
logger.warn("Unable to capture component metrics from Node heartbeats: " + t);
if (logger.isDebugEnabled()) {
logger.warn("", t);
}
} finally {
readLock.unlock("capture component metrics from node heartbeats");
}
}
}
}

View File

@ -22,7 +22,6 @@ import org.apache.nifi.cluster.HeartbeatPayload;
import org.apache.nifi.cluster.protocol.Heartbeat;
import org.apache.nifi.cluster.protocol.NodeIdentifier;
import org.apache.nifi.cluster.protocol.ProtocolException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@ -25,7 +25,6 @@ import org.apache.nifi.io.socket.multicast.DiscoverableService;
import org.apache.nifi.io.socket.multicast.DiscoverableServiceImpl;
import org.apache.nifi.util.FormatUtils;
import org.apache.nifi.util.NiFiProperties;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.FactoryBean;

View File

@ -17,6 +17,7 @@
package org.apache.nifi.cluster.spring;
import java.io.File;
import org.apache.nifi.cluster.firewall.impl.FileBasedClusterNodeFirewall;
import org.apache.nifi.util.NiFiProperties;
import org.springframework.beans.factory.FactoryBean;

View File

@ -17,6 +17,7 @@
package org.apache.nifi.controller.reporting;
import org.apache.nifi.cluster.manager.impl.ClusteredReportingContext;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.controller.ProcessScheduler;
import org.apache.nifi.controller.ValidationContextFactory;
import org.apache.nifi.controller.service.ControllerServiceProvider;
@ -30,20 +31,22 @@ public class ClusteredReportingTaskNode extends AbstractReportingTaskNode {
private final EventAccess eventAccess;
private final BulletinRepository bulletinRepository;
private final ControllerServiceProvider serviceProvider;
private final StateManager stateManager;
public ClusteredReportingTaskNode(final ReportingTask reportingTask, final String id, final ProcessScheduler scheduler,
final EventAccess eventAccess, final BulletinRepository bulletinRepository, final ControllerServiceProvider serviceProvider,
final ValidationContextFactory validationContextFactory) {
final ValidationContextFactory validationContextFactory, final StateManager stateManager) {
super(reportingTask, id, serviceProvider, scheduler, validationContextFactory);
this.eventAccess = eventAccess;
this.bulletinRepository = bulletinRepository;
this.serviceProvider = serviceProvider;
this.stateManager = stateManager;
}
@Override
public ReportingContext getReportingContext() {
return new ClusteredReportingContext(eventAccess, bulletinRepository, getProperties(), serviceProvider);
return new ClusteredReportingContext(eventAccess, bulletinRepository, getProperties(), serviceProvider, stateManager);
}
}

View File

@ -16,15 +16,17 @@
*/
package org.apache.nifi.cluster.event.impl;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.nifi.cluster.event.Event;
import org.apache.nifi.cluster.event.Event.Category;
import org.apache.nifi.cluster.event.EventManager;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.nifi.cluster.event.Event;
import org.apache.nifi.cluster.event.Event.Category;
import org.apache.nifi.cluster.event.EventManager;
import org.junit.Test;
/**

View File

@ -16,13 +16,15 @@
*/
package org.apache.nifi.cluster.firewall.impl;
import java.io.File;
import java.net.InetAddress;
import java.net.UnknownHostException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import java.net.InetAddress;
import java.net.UnknownHostException;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;

View File

@ -30,6 +30,7 @@ import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.apache.commons.io.FileUtils;
import org.apache.nifi.cluster.flow.DataFlowDao;
import org.apache.nifi.cluster.flow.PersistedFlowState;
import org.apache.nifi.cluster.protocol.ClusterManagerProtocolSender;
@ -46,8 +47,6 @@ import org.apache.nifi.cluster.protocol.message.FlowResponseMessage;
import org.apache.nifi.cluster.protocol.message.ProtocolMessage;
import org.apache.nifi.io.socket.ServerSocketConfiguration;
import org.apache.nifi.io.socket.SocketConfiguration;
import org.apache.commons.io.FileUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@ -148,7 +147,7 @@ public class DataFlowManagementServiceImplTest {
byte[] flowBytes = flowStr.getBytes();
listener.addHandler(new FlowRequestProtocolHandler(new StandardDataFlow(flowStr.getBytes("UTF-8"), new byte[0], new byte[0])));
NodeIdentifier nodeId = new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort);
NodeIdentifier nodeId = new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort, "localhost", 1234, false);
service.setNodeIds(new HashSet<>(Arrays.asList(nodeId)));
service.setPersistedFlowState(PersistedFlowState.STALE);
@ -168,8 +167,8 @@ public class DataFlowManagementServiceImplTest {
String flowStr = "<rootGroup />";
listener.addHandler(new FlowRequestProtocolHandler(new StandardDataFlow(flowStr.getBytes("UTF-8"), new byte[0], new byte[0])));
NodeIdentifier nodeId1 = new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort);
NodeIdentifier nodeId2 = new NodeIdentifier("2", "localhost", apiDummyPort, "localhost", socketPort);
NodeIdentifier nodeId1 = new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort, "localhost", 1234, false);
NodeIdentifier nodeId2 = new NodeIdentifier("2", "localhost", apiDummyPort, "localhost", socketPort, "localhost", 1234, false);
service.setNodeIds(new HashSet<>(Arrays.asList(nodeId1, nodeId2)));
service.setPersistedFlowState(PersistedFlowState.STALE);
@ -196,8 +195,8 @@ public class DataFlowManagementServiceImplTest {
byte[] flowBytes = flowStr.getBytes();
listener.addHandler(new FlowRequestProtocolHandler(new StandardDataFlow(flowStr.getBytes("UTF-8"), new byte[0], new byte[0])));
NodeIdentifier nodeId1 = new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort + 1);
NodeIdentifier nodeId2 = new NodeIdentifier("2", "localhost", apiDummyPort, "localhost", socketPort);
NodeIdentifier nodeId1 = new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort + 1, "localhost", 1234, false);
NodeIdentifier nodeId2 = new NodeIdentifier("2", "localhost", apiDummyPort, "localhost", socketPort, "localhost", 1234, false);
service.setNodeIds(new HashSet<>(Arrays.asList(nodeId1, nodeId2)));
service.setPersistedFlowState(PersistedFlowState.STALE);
@ -217,8 +216,8 @@ public class DataFlowManagementServiceImplTest {
byte[] flowBytes = flowStr.getBytes();
listener.addHandler(new FlowRequestProtocolHandler(new StandardDataFlow(flowStr.getBytes("UTF-8"), new byte[0], new byte[0])));
NodeIdentifier nodeId1 = new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort + 1);
NodeIdentifier nodeId2 = new NodeIdentifier("2", "localhost", apiDummyPort, "localhost", socketPort);
NodeIdentifier nodeId1 = new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort + 1, "localhost", 1234, false);
NodeIdentifier nodeId2 = new NodeIdentifier("2", "localhost", apiDummyPort, "localhost", socketPort, "localhost", 1234, false);
for (int i = 0; i < 1000; i++) {
service.setNodeIds(new HashSet<>(Arrays.asList(nodeId1, nodeId2)));
@ -239,8 +238,8 @@ public class DataFlowManagementServiceImplTest {
String flowStr = "<rootGroup />";
listener.addHandler(new FlowRequestProtocolHandler(new StandardDataFlow(flowStr.getBytes("UTF-8"), new byte[0], new byte[0])));
NodeIdentifier nodeId1 = new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort + 1);
NodeIdentifier nodeId2 = new NodeIdentifier("2", "localhost", apiDummyPort, "localhost", socketPort);
NodeIdentifier nodeId1 = new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort + 1, "localhost", 1234, false);
NodeIdentifier nodeId2 = new NodeIdentifier("2", "localhost", apiDummyPort, "localhost", socketPort, "localhost", 1234, false);
service.setRetrievalDelay("5 sec");
for (int i = 0; i < 1000; i++) {
@ -263,9 +262,9 @@ public class DataFlowManagementServiceImplTest {
listener.addHandler(new FlowRequestProtocolHandler(new StandardDataFlow(flowStr.getBytes("UTF-8"), new byte[0], new byte[0])));
Set<NodeIdentifier> nodeIds = new HashSet<>();
for (int i = 0; i < 1000; i++) {
nodeIds.add(new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort + 1));
nodeIds.add(new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort + 1, "localhost", 1234, false));
}
nodeIds.add(new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort));
nodeIds.add(new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort, "localhost", 1234, false));
long lastRetrievalTime = service.getLastRetrievalTime();
@ -291,7 +290,7 @@ public class DataFlowManagementServiceImplTest {
String flowStr = "<rootGroup />";
byte[] flowBytes = flowStr.getBytes();
listener.addHandler(new FlowRequestProtocolHandler(new StandardDataFlow(flowStr.getBytes("UTF-8"), new byte[0], new byte[0])));
NodeIdentifier nodeId = new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort);
NodeIdentifier nodeId = new NodeIdentifier("1", "localhost", apiDummyPort, "localhost", socketPort, "localhost", 1234, false);
service.setNodeIds(new HashSet<>(Arrays.asList(nodeId)));
service.setPersistedFlowState(PersistedFlowState.STALE);

View File

@ -16,36 +16,40 @@
*/
package org.apache.nifi.cluster.manager.impl;
import javax.ws.rs.core.Response;
import javax.xml.bind.annotation.XmlRootElement;
import javax.ws.rs.HttpMethod;
import javax.ws.rs.core.MultivaluedMap;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.Iterator;
import javax.ws.rs.core.StreamingOutput;
import org.apache.nifi.cluster.manager.testutils.HttpResponse;
import org.apache.nifi.cluster.manager.testutils.HttpServer;
import com.sun.jersey.api.client.Client;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.net.URI;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.ws.rs.core.Response.Status;
import org.apache.nifi.cluster.manager.NodeResponse;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.apache.nifi.cluster.manager.testutils.HttpResponseAction;
import org.apache.nifi.cluster.protocol.NodeIdentifier;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.ws.rs.HttpMethod;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;
import javax.ws.rs.core.StreamingOutput;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.nifi.cluster.manager.NodeResponse;
import org.apache.nifi.cluster.manager.testutils.HttpResponse;
import org.apache.nifi.cluster.manager.testutils.HttpResponseAction;
import org.apache.nifi.cluster.manager.testutils.HttpServer;
import org.apache.nifi.cluster.protocol.NodeIdentifier;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.sun.jersey.api.client.Client;
/**
*/
public class HttpRequestReplicatorImplTest {
@ -341,7 +345,7 @@ public class HttpRequestReplicatorImplTest {
private Set<NodeIdentifier> createNodes(int num, String host, int apiPort) {
Set<NodeIdentifier> result = new HashSet<>();
for (int i = 0; i < num; i++) {
result.add(new NodeIdentifier(String.valueOf(i), host, apiPort, host, 1));
result.add(new NodeIdentifier(String.valueOf(i), host, apiPort, host, 1, "localhost", 1234, false));
}
return result;
}

View File

@ -16,23 +16,26 @@
*/
package org.apache.nifi.cluster.manager.impl;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.core.util.MultivaluedMapImpl;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.ByteArrayInputStream;
import java.util.Map;
import java.util.HashSet;
import java.util.Set;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.nifi.cluster.manager.NodeResponse;
import org.apache.nifi.cluster.node.Node;
import org.apache.nifi.cluster.node.Node.Status;
import org.apache.nifi.cluster.protocol.NodeIdentifier;
import static org.junit.Assert.assertTrue;
import org.junit.Before;
import org.junit.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.core.util.MultivaluedMapImpl;
/**
*/
@ -119,7 +122,7 @@ public class HttpResponseMapperImplTest {
when(clientResponse.getHeaders()).thenReturn(new MultivaluedMapImpl());
when(clientResponse.getEntityInputStream()).thenReturn(new ByteArrayInputStream(new byte[0]));
NodeIdentifier nodeIdentifier = new NodeIdentifier(nodeId, "localhost", 1, "localhost", 1);
NodeIdentifier nodeIdentifier = new NodeIdentifier(nodeId, "localhost", 1, "localhost", 1, "localhost", 1234, false);
return new NodeResponse(nodeIdentifier, "GET", dummyUri, clientResponse, 1L, "111");
}
}

View File

@ -16,8 +16,6 @@
*/
package org.apache.nifi.cluster.manager.impl;
import org.apache.nifi.cluster.manager.impl.WebClusterManager;
import static org.junit.Assert.assertEquals;
import java.text.DateFormat;

View File

@ -25,8 +25,10 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.ws.rs.HttpMethod;
import javax.ws.rs.core.MediaType;
import org.apache.commons.lang3.StringUtils;
/**

View File

@ -19,6 +19,7 @@ package org.apache.nifi.cluster.manager.testutils;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import javax.ws.rs.core.Response.Status;
/**

View File

@ -32,6 +32,7 @@ import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.nifi.cluster.manager.testutils.HttpRequest.HttpRequestBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@ -0,0 +1,60 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.components.state;
/**
* <p>
* Interface that provides a mechanism for obtaining the {@link StateManager} for a particular component
* </p>
*/
public interface StateManagerProvider {
/**
* Returns the StateManager for the component with the given ID, or <code>null</code> if no State Manager
* exists for the component with the given ID
*
* @param componentId the id of the component for which the StateManager should be returned
*
* @return the StateManager for the component with the given ID, or <code>null</code> if no State Manager
* exists for the component with the given ID
*/
StateManager getStateManager(String componentId);
/**
* Notifies the State Manager Provider that the component with the given ID has been removed from the NiFi instance
* and will no longer be needed, so the appropriate resource cleanup can take place.
*
* @param componentId the ID of the component that has been removed
*/
void onComponentRemoved(String componentId);
/**
* Shuts down the state managers, cleaning up any resources that they occupy
*/
void shutdown();
/**
* Initializes the Cluster State Provider and enables it for use
*/
void enableClusterProvider();
/**
* Disables the Cluster State Provider and begins using the Local State Provider to persist and retrieve
* state, even when components request a clustered provider
*/
void disableClusterProvider();
}
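A hedged sketch of how a caller might use the provider, mirroring the wiring shown earlier in this commit; the component ID is hypothetical and StateManager's clear(Scope) is assumed to throw IOException like the other state calls:

void clearAndRelease(final StateManagerProvider provider, final String componentId) throws IOException {
    final StateManager stateManager = provider.getStateManager(componentId);
    if (stateManager != null) {
        stateManager.clear(Scope.LOCAL);       // e.g., servicing a user's "clear state" request
    }
    provider.onComponentRemoved(componentId);  // release resources once the component is deleted
}

When a node joins or leaves a cluster, the framework can switch between the clustered and local state providers via enableClusterProvider() and disableClusterProvider().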

View File

@ -270,5 +270,7 @@ public interface Connectable extends Triggerable {
void verifyCanDisable() throws IllegalStateException;
void verifyCanClearState() throws IllegalStateException;
SchedulingStrategy getSchedulingStrategy();
}

View File

@ -117,7 +117,7 @@ public abstract class AbstractConfiguredComponent implements ConfigurableCompone
try {
component.onPropertyModified(descriptor, oldValue, value);
} catch (final Throwable t) {
} catch (final Exception e) {
// nothing really to do here...
}
}
@ -161,7 +161,12 @@ public abstract class AbstractConfiguredComponent implements ConfigurableCompone
}
}
component.onPropertyModified(descriptor, value, null);
try {
component.onPropertyModified(descriptor, value, null);
} catch (final Exception e) {
// nothing really to do here...
}
return true;
}
}

View File

@ -605,4 +605,8 @@ public abstract class AbstractPort implements Port {
throw new IllegalStateException(this + " has " + threadCount + " threads still active");
}
}
@Override
public void verifyCanClearState() {
}
}

View File

@ -34,6 +34,12 @@ public interface ConfiguredComponent {
public void setAnnotationData(String data);
/**
* Sets the property with the given name to the given value
*
* @param name the name of the property to update
* @param value the value to update the property to
*/
public void setProperty(String name, String value);
/**

View File

@ -98,4 +98,6 @@ public interface ReportingTaskNode extends ConfiguredComponent {
void verifyCanDelete();
void verifyCanUpdate();
void verifyCanClearState();
}

View File

@ -531,6 +531,10 @@ public class StandardFunnel implements Funnel {
public void verifyCanDisable() {
}
@Override
public void verifyCanClearState() {
}
@Override
public SchedulingStrategy getSchedulingStrategy() {
return SchedulingStrategy.TIMER_DRIVEN;

View File

@ -134,6 +134,8 @@ public interface ControllerServiceNode extends ConfiguredComponent {
void verifyCanUpdate();
void verifyCanClearState();
/**
* Returns 'true' if this service is active. The service is considered to be
* active if and only if it's

View File

@ -64,4 +64,16 @@ public interface LogRepository {
* Removes all LogObservers from this Repository
*/
void removeAllObservers();
/**
* Sets the current logger for the component
*
* @param logger the logger to use
*/
void setLogger(ComponentLog logger);
/**
* @return the current logger for the component
*/
ComponentLog getLogger();
}

View File

@ -119,6 +119,27 @@
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-write-ahead-log</artifactId>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-processor-utils</artifactId>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.nifi</groupId>
<artifactId>nifi-mock</artifactId>

View File

@ -56,8 +56,6 @@ public class HeartbeatPayload {
private long totalFlowFileCount;
private long totalFlowFileBytes;
private SystemDiagnostics systemDiagnostics;
private Integer siteToSitePort;
private boolean siteToSiteSecure;
private long systemStartTime;
@XmlJavaTypeAdapter(CounterAdapter.class)
@ -109,22 +107,6 @@ public class HeartbeatPayload {
this.systemDiagnostics = systemDiagnostics;
}
public boolean isSiteToSiteSecure() {
return siteToSiteSecure;
}
public void setSiteToSiteSecure(final boolean secure) {
this.siteToSiteSecure = secure;
}
public Integer getSiteToSitePort() {
return siteToSitePort;
}
public void setSiteToSitePort(final Integer port) {
this.siteToSitePort = port;
}
public long getSystemStartTime() {
return systemStartTime;
}

View File

@ -22,6 +22,7 @@ import org.apache.nifi.action.Action;
import org.apache.nifi.admin.service.AuditService;
import org.apache.nifi.admin.service.UserService;
import org.apache.nifi.annotation.lifecycle.OnAdded;
import org.apache.nifi.annotation.lifecycle.OnConfigurationRestored;
import org.apache.nifi.annotation.lifecycle.OnRemoved;
import org.apache.nifi.annotation.lifecycle.OnShutdown;
import org.apache.nifi.annotation.notification.OnPrimaryNodeStateChange;
@ -37,6 +38,7 @@ import org.apache.nifi.cluster.protocol.UnknownServiceAddressException;
import org.apache.nifi.cluster.protocol.message.HeartbeatMessage;
import org.apache.nifi.cluster.protocol.message.NodeBulletinsMessage;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.state.StateManagerProvider;
import org.apache.nifi.connectable.Connectable;
import org.apache.nifi.connectable.ConnectableType;
import org.apache.nifi.connectable.Connection;
@ -88,6 +90,8 @@ import org.apache.nifi.controller.service.ControllerServiceNode;
import org.apache.nifi.controller.service.ControllerServiceProvider;
import org.apache.nifi.controller.service.StandardConfigurationContext;
import org.apache.nifi.controller.service.StandardControllerServiceProvider;
import org.apache.nifi.controller.state.manager.StandardStateManagerProvider;
import org.apache.nifi.controller.state.server.ZooKeeperStateServer;
import org.apache.nifi.controller.status.ConnectionStatus;
import org.apache.nifi.controller.status.PortStatus;
import org.apache.nifi.controller.status.ProcessGroupStatus;
@ -173,6 +177,7 @@ import org.apache.nifi.web.api.dto.RemoteProcessGroupDTO;
import org.apache.nifi.web.api.dto.RemoteProcessGroupPortDTO;
import org.apache.nifi.web.api.dto.TemplateDTO;
import org.apache.nifi.web.api.dto.status.StatusHistoryDTO;
import org.apache.zookeeper.server.quorum.QuorumPeerConfig.ConfigException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -205,6 +210,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.LockSupport;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import static java.util.Objects.requireNonNull;
@ -251,9 +257,12 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
private final AuditService auditService;
private final EventDrivenWorkerQueue eventDrivenWorkerQueue;
private final ComponentStatusRepository componentStatusRepository;
private final StateManagerProvider stateManagerProvider;
private final long systemStartTime = System.currentTimeMillis(); // time at which the node was started
private final ConcurrentMap<String, ReportingTaskNode> reportingTasks = new ConcurrentHashMap<>();
private volatile ZooKeeperStateServer zooKeeperStateServer;
// The Heartbeat Bean is used to provide an Atomic Reference to data that is used in heartbeats that may
// change while the instance is running. We do this because we want to generate heartbeats even if we
// are unable to obtain a read lock on the entire FlowController.
@ -419,13 +428,19 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
throw new RuntimeException("Unable to create Provenance Repository", e);
}
processScheduler = new StandardProcessScheduler(this, this, encryptor);
try {
this.stateManagerProvider = StandardStateManagerProvider.create(properties);
} catch (final IOException e) {
throw new RuntimeException(e);
}
processScheduler = new StandardProcessScheduler(this, this, encryptor, stateManagerProvider);
eventDrivenWorkerQueue = new EventDrivenWorkerQueue(false, false, processScheduler);
controllerServiceProvider = new StandardControllerServiceProvider(processScheduler, bulletinRepository);
controllerServiceProvider = new StandardControllerServiceProvider(processScheduler, bulletinRepository, stateManagerProvider);
final ProcessContextFactory contextFactory = new ProcessContextFactory(contentRepository, flowFileRepository, flowFileEventRepository, counterRepositoryRef.get(), provenanceEventRepository);
processScheduler.setSchedulingAgent(SchedulingStrategy.EVENT_DRIVEN, new EventDrivenSchedulingAgent(
eventDrivenEngineRef.get(), this, eventDrivenWorkerQueue, contextFactory, maxEventDrivenThreads.get(), encryptor));
eventDrivenEngineRef.get(), this, stateManagerProvider, eventDrivenWorkerQueue, contextFactory, maxEventDrivenThreads.get(), encryptor));
final QuartzSchedulingAgent quartzSchedulingAgent = new QuartzSchedulingAgent(this, timerDrivenEngineRef.get(), contextFactory, encryptor);
final TimerDrivenSchedulingAgent timerDrivenAgent = new TimerDrivenSchedulingAgent(this, timerDrivenEngineRef.get(), contextFactory, encryptor);
@ -469,7 +484,7 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
this.snippetManager = new SnippetManager();
rootGroup = new StandardProcessGroup(UUID.randomUUID().toString(), this, processScheduler, properties, encryptor);
rootGroup = new StandardProcessGroup(UUID.randomUUID().toString(), this, processScheduler, properties, encryptor, this);
rootGroup.setName(DEFAULT_ROOT_GROUP_NAME);
instanceId = UUID.randomUUID().toString();
@ -496,6 +511,17 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
snapshotMillis = FormatUtils.getTimeDuration(NiFiProperties.DEFAULT_COMPONENT_STATUS_SNAPSHOT_FREQUENCY, TimeUnit.MILLISECONDS);
}
// Initialize the Embedded ZooKeeper server, if applicable
if (properties.isStartEmbeddedZooKeeper()) {
try {
zooKeeperStateServer = ZooKeeperStateServer.create(properties);
} catch (final IOException | ConfigException e) {
throw new IllegalStateException("Unable to initailize Flow because NiFi was configured to start an Embedded Zookeeper server but failed to do so", e);
}
} else {
zooKeeperStateServer = null;
}
componentStatusRepository = createComponentStatusRepository();
timerDrivenEngineRef.get().scheduleWithFixedDelay(new Runnable() {
@Override
@ -582,6 +608,8 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
externalSiteListener.start();
}
notifyComponentsConfigurationRestored();
timerDrivenEngineRef.get().scheduleWithFixedDelay(new Runnable() {
@Override
public void run() {
@ -602,6 +630,31 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
}
}
private void notifyComponentsConfigurationRestored() {
for (final ProcessorNode procNode : getGroup(getRootGroupId()).findAllProcessors()) {
final Processor processor = procNode.getProcessor();
try (final NarCloseable nc = NarCloseable.withNarLoader()) {
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnConfigurationRestored.class, processor);
}
}
for (final ControllerServiceNode serviceNode : getAllControllerServices()) {
final ControllerService service = serviceNode.getControllerServiceImplementation();
try (final NarCloseable nc = NarCloseable.withNarLoader()) {
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnConfigurationRestored.class, service);
}
}
for (final ReportingTaskNode taskNode : getAllReportingTasks()) {
final ReportingTask task = taskNode.getReportingTask();
try (final NarCloseable nc = NarCloseable.withNarLoader()) {
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnConfigurationRestored.class, task);
}
}
}
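The loop above fires the @OnConfigurationRestored lifecycle hook on every processor, controller service, and reporting task once the flow's configuration has been loaded. A hypothetical component-side method showing what such a hook might look like; the body is illustrative only:
// Hypothetical processor method invoked by notifyComponentsConfigurationRestored() above.
@OnConfigurationRestored
public void onConfigurationRestored() {
    // all property values have been restored from the persisted flow at this point,
    // so any derived in-memory structures can safely be rebuilt here
}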
/**
* <p>
* Causes any processors that were added to the flow with a 'delayStart' flag of true to now start
@ -836,7 +889,7 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
* @throws NullPointerException if the argument is null
*/
public ProcessGroup createProcessGroup(final String id) {
return new StandardProcessGroup(requireNonNull(id).intern(), this, processScheduler, properties, encryptor);
return new StandardProcessGroup(requireNonNull(id).intern(), this, processScheduler, properties, encryptor, this);
}
/**
@ -883,6 +936,12 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
logRepository.removeObserver(StandardProcessorNode.BULLETIN_OBSERVER_ID);
throw new ComponentLifeCycleException("Failed to invoke @OnAdded methods of " + procNode.getProcessor(), e);
}
if (firstTimeAdded) {
try (final NarCloseable nc = NarCloseable.withNarLoader()) {
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnConfigurationRestored.class, procNode.getProcessor());
}
}
}
return procNode;
@ -909,6 +968,8 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
final ProcessorLog processorLogger = new SimpleProcessLogger(identifier, processor);
final ProcessorInitializationContext ctx = new StandardProcessorInitializationContext(identifier, processorLogger, this);
processor.initialize(ctx);
LogRepositoryFactory.getRepository(identifier).setLogger(processorLogger);
return processor;
} catch (final Throwable t) {
throw new ProcessorInstantiationException(type, t);
@ -946,6 +1007,10 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
return snippetManager;
}
public StateManagerProvider getStateManagerProvider() {
return stateManagerProvider;
}
/**
* Creates a Port to use as an Input Port for the root Process Group, which is used for Site-to-Site communications
*
@ -1099,24 +1164,25 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
} else {
this.timerDrivenEngineRef.get().shutdown();
this.eventDrivenEngineRef.get().shutdown();
LOG.info("Initiated graceful shutdown of flow controller...waiting up to " + gracefulShutdownSeconds
+ " seconds");
LOG.info("Initiated graceful shutdown of flow controller...waiting up to " + gracefulShutdownSeconds + " seconds");
}
clusterTaskExecutor.shutdown();
// Trigger any processors' methods marked with @OnShutdown to be
// called
if (zooKeeperStateServer != null) {
zooKeeperStateServer.shutdown();
}
// Trigger any processors' methods marked with @OnShutdown to be called
rootGroup.shutdown();
// invoke any methods annotated with @OnShutdown on Controller
// Services
stateManagerProvider.shutdown();
// invoke any methods annotated with @OnShutdown on Controller Services
for (final ControllerServiceNode serviceNode : getAllControllerServices()) {
try (final NarCloseable narCloseable = NarCloseable.withNarLoader()) {
final ConfigurationContext configContext = new StandardConfigurationContext(serviceNode,
controllerServiceProvider, null);
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnShutdown.class,
serviceNode.getControllerServiceImplementation(), configContext);
final ConfigurationContext configContext = new StandardConfigurationContext(serviceNode, controllerServiceProvider, null);
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnShutdown.class, serviceNode.getControllerServiceImplementation(), configContext);
}
}
@ -1124,8 +1190,7 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
for (final ReportingTaskNode taskNode : getAllReportingTasks()) {
final ConfigurationContext configContext = taskNode.getConfigurationContext();
try (final NarCloseable narCloseable = NarCloseable.withNarLoader()) {
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnShutdown.class, taskNode.getReportingTask(),
configContext);
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnShutdown.class, taskNode.getReportingTask(), configContext);
}
}
@ -1139,14 +1204,14 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
try {
flowFileRepository.close();
} catch (final Throwable t) {
LOG.warn("Unable to shut down FlowFileRepository due to {}", new Object[] { t });
LOG.warn("Unable to shut down FlowFileRepository due to {}", new Object[] {t});
}
if (this.timerDrivenEngineRef.get().isTerminated() && eventDrivenEngineRef.get().isTerminated()) {
LOG.info("Controller has been terminated successfully.");
} else {
LOG.warn("Controller hasn't terminated properly. There exists an uninterruptable thread that "
+ "will take an indeterminate amount of time to stop. Might need to kill the program manually.");
+ "will take an indeterminate amount of time to stop. Might need to kill the program manually.");
}
if (externalSiteListener != null) {
@ -1174,7 +1239,6 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
} finally {
readLock.unlock();
}
}
/**
@ -2605,6 +2669,7 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
try (final NarCloseable x = NarCloseable.withNarLoader()) {
ReflectionUtils.invokeMethodsWithAnnotation(OnAdded.class, task);
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnConfigurationRestored.class, taskNode.getReportingTask());
} catch (final Exception e) {
throw new ComponentLifeCycleException("Failed to invoke On-Added Lifecycle methods of " + task, e);
}
@ -2688,6 +2753,14 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
logRepository.addObserver(StandardProcessorNode.BULLETIN_OBSERVER_ID, LogLevel.WARN,
new ControllerServiceLogObserver(getBulletinRepository(), serviceNode));
if (firstTimeAdded) {
final ControllerService service = serviceNode.getControllerServiceImplementation();
try (final NarCloseable nc = NarCloseable.withNarLoader()) {
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnConfigurationRestored.class, service);
}
}
return serviceNode;
}
@ -3054,8 +3127,49 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
if (clustered) {
nodeBulletinSubscriber.set(new NodeBulletinProcessingStrategy());
bulletinRepository.overrideDefaultBulletinProcessing(nodeBulletinSubscriber.get());
stateManagerProvider.enableClusterProvider();
if (zooKeeperStateServer != null) {
processScheduler.submitFrameworkTask(new Runnable() {
@Override
public void run() {
try {
zooKeeperStateServer.start();
} catch (final Exception e) {
LOG.error("NiFi was connected to the cluster but failed to start embedded ZooKeeper Server", e);
final Bulletin bulletin = BulletinFactory.createBulletin("Embedded ZooKeeper Server", Severity.ERROR.name(),
"Unable to started embedded ZooKeeper Server. See logs for more details. Will continue trying to start embedded server.");
getBulletinRepository().addBulletin(bulletin);
// We failed to start the server. Wait a bit and try again.
try {
Thread.sleep(TimeUnit.SECONDS.toMillis(5));
} catch (final InterruptedException ie) {
// If we are interrupted, stop trying.
Thread.currentThread().interrupt();
return;
}
processScheduler.submitFrameworkTask(this);
}
}
});
// Give the server just a bit to start up, so that we don't get connection
// failures on startup if we are using the embedded ZooKeeper server. We need to launch
// the ZooKeeper Server in the background because ZooKeeper blocks indefinitely when we start
// the server. Unfortunately, we have no way to know when it's up & ready. So we wait 1 second.
// We could still get connection failures if we are on a slow machine but this at least makes it far
// less likely. If we do get connection failures, we will still reconnect, but we will get bulletins
// showing failures. This 1-second sleep is an attempt to at least make that occurrence rare.
LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(1L));
}
} else {
bulletinRepository.restoreDefaultBulletinProcessing();
if (zooKeeperStateServer != null) {
zooKeeperStateServer.shutdown();
}
stateManagerProvider.disableClusterProvider();
}
final List<RemoteProcessGroup> remoteGroups = getGroup(getRootGroupId()).findAllRemoteProcessGroups();
@ -3698,8 +3812,6 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R
hbPayload.setCounters(getCounters());
hbPayload.setSystemDiagnostics(getSystemDiagnostics());
hbPayload.setProcessGroupStatus(procGroupStatus);
hbPayload.setSiteToSitePort(remoteInputSocketPort);
hbPayload.setSiteToSiteSecure(isSiteToSiteSecure);
// create heartbeat message
final Heartbeat heartbeat = new Heartbeat(getNodeId(), bean.isPrimary(), bean.isConnected(), hbPayload.marshal());

View File

@ -26,7 +26,9 @@ import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
@ -37,6 +39,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import org.apache.commons.lang3.StringUtils;
import org.apache.nifi.cluster.ConnectionException;
import org.apache.nifi.cluster.protocol.ConnectionRequest;
import org.apache.nifi.cluster.protocol.ConnectionResponse;
@ -57,9 +60,11 @@ import org.apache.nifi.cluster.protocol.message.ProtocolMessage;
import org.apache.nifi.cluster.protocol.message.ReconnectionFailureMessage;
import org.apache.nifi.cluster.protocol.message.ReconnectionRequestMessage;
import org.apache.nifi.cluster.protocol.message.ReconnectionResponseMessage;
import org.apache.nifi.components.state.Scope;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.encrypt.StringEncryptor;
import org.apache.nifi.engine.FlowEngine;
import org.apache.nifi.events.BulletinFactory;
import org.apache.nifi.util.file.FileUtils;
import org.apache.nifi.groups.ProcessGroup;
import org.apache.nifi.lifecycle.LifeCycleStartException;
import org.apache.nifi.logging.LogLevel;
@ -69,15 +74,17 @@ import org.apache.nifi.reporting.Bulletin;
import org.apache.nifi.services.FlowService;
import org.apache.nifi.util.FormatUtils;
import org.apache.nifi.util.NiFiProperties;
import org.apache.commons.lang3.StringUtils;
import org.apache.nifi.encrypt.StringEncryptor;
import org.apache.nifi.util.file.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class StandardFlowService implements FlowService, ProtocolHandler {
private static final String EVENT_CATEGORY = "Controller";
private static final String CLUSTER_NODE_CONFIG = "Cluster Node Configuration";
// state keys
private static final String NODE_UUID = "Node UUID";
private final FlowController controller;
private final Path flowXml;
@ -169,8 +176,21 @@ public class StandardFlowService implements FlowService, ProtocolHandler {
final InetSocketAddress nodeApiAddress = properties.getNodeApiAddress();
final InetSocketAddress nodeSocketAddress = properties.getClusterNodeProtocolAddress();
String nodeUuid = null;
final StateManager stateManager = controller.getStateManagerProvider().getStateManager(CLUSTER_NODE_CONFIG);
if (stateManager != null) {
nodeUuid = stateManager.getState(Scope.LOCAL).get(NODE_UUID);
}
if (nodeUuid == null) {
nodeUuid = UUID.randomUUID().toString();
}
// use the persisted node UUID if one was found, otherwise the newly generated one, as the proposed node identifier
this.nodeId = new NodeIdentifier(UUID.randomUUID().toString(), nodeApiAddress.getHostName(), nodeApiAddress.getPort(), nodeSocketAddress.getHostName(), nodeSocketAddress.getPort());
this.nodeId = new NodeIdentifier(nodeUuid,
nodeApiAddress.getHostName(), nodeApiAddress.getPort(),
nodeSocketAddress.getHostName(), nodeSocketAddress.getPort(),
properties.getRemoteInputHost(), properties.getRemoteInputPort(), properties.isSiteToSiteSecure());
} else {
this.configuredForClustering = false;
@ -179,6 +199,7 @@ public class StandardFlowService implements FlowService, ProtocolHandler {
}
@Override
public void saveFlowChanges() throws IOException {
writeLock.lock();
@ -507,7 +528,7 @@ public class StandardFlowService implements FlowService, ProtocolHandler {
// reconnect
final ConnectionResponse connectionResponse = new ConnectionResponse(nodeId, request.getDataFlow(), request.isPrimary(),
request.getManagerRemoteSiteListeningPort(), request.isManagerRemoteSiteCommsSecure(), request.getInstanceId());
request.getManagerRemoteSiteListeningPort(), request.isManagerRemoteSiteCommsSecure(), request.getInstanceId());
connectionResponse.setClusterManagerDN(request.getRequestorDN());
loadFromConnectionResponse(connectionResponse);
@ -616,8 +637,6 @@ public class StandardFlowService implements FlowService, ProtocolHandler {
if (firstControllerInitialization) {
logger.debug("First controller initialization. Loading reporting tasks and initializing controller.");
// load the controller tasks
// dao.loadReportingTasks(controller);
// initialize the flow
controller.initializeFlow();
@ -650,8 +669,8 @@ public class StandardFlowService implements FlowService, ProtocolHandler {
for (int i = 0; i < maxAttempts || retryIndefinitely; i++) {
try {
response = senderListener.requestConnection(requestMsg).getConnectionResponse();
if (response.isBlockedByFirewall()) {
logger.warn("Connection request was blocked by cluster manager's firewall.");
if (response.getRejectionReason() != null) {
logger.warn("Connection request was blocked by cluster manager with the explanation: " + response.getRejectionReason());
// set response to null and treat a firewall blockage the same as getting no response from manager
response = null;
break;
@ -667,7 +686,6 @@ public class StandardFlowService implements FlowService, ProtocolHandler {
// we received a successful connection response from manager
break;
}
} catch (final Exception pe) {
// could not create a socket and communicate with manager
logger.warn("Failed to connect to cluster due to: " + pe, pe);
@ -691,6 +709,16 @@ public class StandardFlowService implements FlowService, ProtocolHandler {
return null;
} else {
// cluster manager provided a successful response with a current dataflow
// persist the node UUID returned by the NCM and return the response to the caller
try {
// Ensure that we have registered our 'cluster node configuration' state key
final Map<String, String> map = Collections.singletonMap(NODE_UUID, response.getNodeIdentifier().getId());
controller.getStateManagerProvider().getStateManager(CLUSTER_NODE_CONFIG).setState(map, Scope.LOCAL);
} catch (final IOException ioe) {
logger.warn("Received successful response from Cluster Manager but failed to persist state about the Node's Unique Identifier and the Node's Index. "
+ "This node may be assigned a different UUID when the node is restarted.", ioe);
}
return response;
}
} finally {

View File

@ -171,7 +171,7 @@ public class StandardFlowSynchronizer implements FlowSynchronizer {
controller.setMaxEventDrivenThreadCount(maxThreadCount / 3);
}
final Element reportingTasksElement = (Element) DomUtils.getChild(rootElement, "reportingTasks");
final Element reportingTasksElement = DomUtils.getChild(rootElement, "reportingTasks");
final List<Element> taskElements;
if (reportingTasksElement == null) {
taskElements = Collections.emptyList();
@ -179,7 +179,7 @@ public class StandardFlowSynchronizer implements FlowSynchronizer {
taskElements = DomUtils.getChildElementsByTagName(reportingTasksElement, "reportingTask");
}
final Element controllerServicesElement = (Element) DomUtils.getChild(rootElement, "controllerServices");
final Element controllerServicesElement = DomUtils.getChild(rootElement, "controllerServices");
final List<Element> controllerServiceElements;
if (controllerServicesElement == null) {
controllerServiceElements = Collections.emptyList();
@ -252,7 +252,7 @@ public class StandardFlowSynchronizer implements FlowSynchronizer {
// get the root group XML element
final Element rootGroupElement = (Element) rootElement.getElementsByTagName("rootGroup").item(0);
final Element controllerServicesElement = (Element) DomUtils.getChild(rootElement, "controllerServices");
final Element controllerServicesElement = DomUtils.getChild(rootElement, "controllerServices");
if (controllerServicesElement != null) {
final List<Element> serviceElements = DomUtils.getChildElementsByTagName(controllerServicesElement, "controllerService");
@ -274,7 +274,7 @@ public class StandardFlowSynchronizer implements FlowSynchronizer {
updateProcessGroup(controller, /* parent group */ null, rootGroupElement, encryptor);
}
final Element reportingTasksElement = (Element) DomUtils.getChild(rootElement, "reportingTasks");
final Element reportingTasksElement = DomUtils.getChild(rootElement, "reportingTasks");
if (reportingTasksElement != null) {
final List<Element> taskElements = DomUtils.getChildElementsByTagName(reportingTasksElement, "reportingTask");
for (final Element taskElement : taskElements) {

View File

@ -1303,6 +1303,11 @@ public class StandardProcessorNode extends ProcessorNode implements Connectable
}
}
@Override
public void verifyCanClearState() throws IllegalStateException {
verifyCanUpdate();
}
private void verifyNoActiveThreads() throws IllegalStateException {
final int threadCount = processScheduler.getActiveThreadCount(this);
if (threadCount > 0) {

View File

@ -217,6 +217,11 @@ public abstract class AbstractReportingTaskNode extends AbstractConfiguredCompon
}
}
@Override
public void verifyCanClearState() {
verifyCanUpdate();
}
@Override
public void verifyCanStart(final Set<ControllerServiceNode> ignoredReferences) {
switch (getScheduledState()) {

View File

@ -26,6 +26,7 @@ import org.apache.nifi.attribute.expression.language.Query;
import org.apache.nifi.attribute.expression.language.StandardPropertyValue;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.PropertyValue;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.connectable.Connectable;
import org.apache.nifi.controller.ControllerService;
import org.apache.nifi.controller.ControllerServiceLookup;
@ -37,24 +38,27 @@ import org.apache.nifi.reporting.Bulletin;
import org.apache.nifi.reporting.BulletinRepository;
import org.apache.nifi.reporting.EventAccess;
import org.apache.nifi.reporting.ReportingContext;
import org.apache.nifi.reporting.ReportingTask;
import org.apache.nifi.reporting.Severity;
public class StandardReportingContext implements ReportingContext, ControllerServiceLookup {
private final FlowController flowController;
private final EventAccess eventAccess;
private final ReportingTask reportingTask;
private final BulletinRepository bulletinRepository;
private final ControllerServiceProvider serviceProvider;
private final Map<PropertyDescriptor, String> properties;
private final Map<PropertyDescriptor, PreparedQuery> preparedQueries;
public StandardReportingContext(final FlowController flowController, final BulletinRepository bulletinRepository,
final Map<PropertyDescriptor, String> properties, final ControllerServiceProvider serviceProvider) {
final Map<PropertyDescriptor, String> properties, final ControllerServiceProvider serviceProvider, final ReportingTask reportingTask) {
this.flowController = flowController;
this.eventAccess = flowController;
this.bulletinRepository = bulletinRepository;
this.properties = Collections.unmodifiableMap(properties);
this.serviceProvider = serviceProvider;
this.reportingTask = reportingTask;
preparedQueries = new HashMap<>();
for (final Map.Entry<PropertyDescriptor, String> entry : properties.entrySet()) {
@ -140,4 +144,8 @@ public class StandardReportingContext implements ReportingContext, ControllerSer
return serviceProvider.getControllerServiceName(serviceIdentifier);
}
@Override
public StateManager getStateManager() {
return flowController.getStateManagerProvider().getStateManager(reportingTask.getIdentifier());
}
}
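With the ReportingTask now passed into the context, getStateManager() can hand each task its own state. A hedged sketch of a reporting task using it from onTrigger; the key name, scope choice, and getLogger() call are assumptions for illustration:
// Hypothetical ReportingTask fragment using the getStateManager() accessor added above.
@Override
public void onTrigger(final ReportingContext context) {
    final StateManager stateManager = context.getStateManager();
    try {
        final String lastId = stateManager.getState(Scope.CLUSTER).get("last.event.id");
        long newestReportedId = lastId == null ? -1L : Long.parseLong(lastId);
        // ... report only events with an id greater than newestReportedId, updating it as events are sent ...
        stateManager.setState(Collections.singletonMap("last.event.id", String.valueOf(newestReportedId)), Scope.CLUSTER);
    } catch (final IOException ioe) {
        getLogger().warn("Failed to read or update state; some events may be reported again on the next run", ioe);
    }
}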

View File

@ -35,6 +35,6 @@ public class StandardReportingTaskNode extends AbstractReportingTaskNode impleme
@Override
public ReportingContext getReportingContext() {
return new StandardReportingContext(flowController, flowController.getBulletinRepository(), getProperties(), flowController);
return new StandardReportingContext(flowController, flowController.getBulletinRepository(), getProperties(), flowController, getReportingTask());
}
}

View File

@ -49,8 +49,8 @@ public class BatchingSessionFactory implements ProcessSessionFactory {
return highThroughputSession;
}
private class HighThroughputSession implements ProcessSession {
private class HighThroughputSession implements ProcessSession {
private final StandardProcessSession session;
public HighThroughputSession(final StandardProcessSession session) {
@ -241,7 +241,5 @@ public class BatchingSessionFactory implements ProcessSessionFactory {
public ProvenanceReporter getProvenanceReporter() {
return session.getProvenanceReporter();
}
}
}

View File

@ -26,6 +26,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.PropertyValue;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.connectable.Connectable;
import org.apache.nifi.connectable.Connection;
import org.apache.nifi.controller.ControllerService;
@ -46,10 +47,12 @@ public class ConnectableProcessContext implements ProcessContext {
private final Connectable connectable;
private final StringEncryptor encryptor;
private final StateManager stateManager;
public ConnectableProcessContext(final Connectable connectable, final StringEncryptor encryptor) {
public ConnectableProcessContext(final Connectable connectable, final StringEncryptor encryptor, final StateManager stateManager) {
this.connectable = connectable;
this.encryptor = encryptor;
this.stateManager = stateManager;
}
@Override
@ -235,4 +238,9 @@ public class ConnectableProcessContext implements ProcessContext {
public boolean isExpressionLanguagePresent(PropertyDescriptor property) {
return false;
}
@Override
public StateManager getStateManager() {
return stateManager;
}
}
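ProcessContext implementations now expose the component's StateManager as well. A hypothetical processor fragment, with the key name and error handling assumed, showing state carried across onTrigger invocations:
// Hypothetical processor fragment using ProcessContext.getStateManager() from this commit.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    try {
        final StateManager stateManager = context.getStateManager();
        final String lastRun = stateManager.getState(Scope.CLUSTER).get("last.run.timestamp");
        // ... pull only work that is newer than lastRun and emit FlowFiles for it ...
        stateManager.setState(Collections.singletonMap("last.run.timestamp", String.valueOf(System.currentTimeMillis())), Scope.CLUSTER);
    } catch (final IOException ioe) {
        throw new ProcessException("Failed to retrieve or persist component state", ioe);
    }
}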

View File

@ -24,6 +24,8 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.nifi.annotation.lifecycle.OnStopped;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.components.state.StateManagerProvider;
import org.apache.nifi.connectable.Connectable;
import org.apache.nifi.controller.EventBasedWorker;
import org.apache.nifi.controller.EventDrivenWorkerQueue;
@ -54,7 +56,8 @@ public class EventDrivenSchedulingAgent implements SchedulingAgent {
private static final Logger logger = LoggerFactory.getLogger(EventDrivenSchedulingAgent.class);
private final FlowEngine flowEngine;
private final ControllerServiceProvider controllerServiceProvider;
private final ControllerServiceProvider serviceProvider;
private final StateManagerProvider stateManagerProvider;
private final EventDrivenWorkerQueue workerQueue;
private final ProcessContextFactory contextFactory;
private final AtomicInteger maxThreadCount;
@ -65,10 +68,11 @@ public class EventDrivenSchedulingAgent implements SchedulingAgent {
private final ConcurrentMap<Connectable, AtomicLong> connectionIndexMap = new ConcurrentHashMap<>();
private final ConcurrentMap<Connectable, ScheduleState> scheduleStates = new ConcurrentHashMap<>();
public EventDrivenSchedulingAgent(final FlowEngine flowEngine, final ControllerServiceProvider flowController,
final EventDrivenWorkerQueue workerQueue, final ProcessContextFactory contextFactory, final int maxThreadCount, final StringEncryptor encryptor) {
public EventDrivenSchedulingAgent(final FlowEngine flowEngine, final ControllerServiceProvider serviceProvider, final StateManagerProvider stateManagerProvider,
final EventDrivenWorkerQueue workerQueue, final ProcessContextFactory contextFactory, final int maxThreadCount, final StringEncryptor encryptor) {
this.flowEngine = flowEngine;
this.controllerServiceProvider = flowController;
this.serviceProvider = serviceProvider;
this.stateManagerProvider = stateManagerProvider;
this.workerQueue = workerQueue;
this.contextFactory = contextFactory;
this.maxThreadCount = new AtomicInteger(maxThreadCount);
@ -80,6 +84,10 @@ public class EventDrivenSchedulingAgent implements SchedulingAgent {
}
}
private StateManager getStateManager(final String componentId) {
return stateManagerProvider.getStateManager(componentId);
}
@Override
public void shutdown() {
flowEngine.shutdown();
@ -177,7 +185,8 @@ public class EventDrivenSchedulingAgent implements SchedulingAgent {
if (connectable instanceof ProcessorNode) {
final ProcessorNode procNode = (ProcessorNode) connectable;
final StandardProcessContext standardProcessContext = new StandardProcessContext(procNode, controllerServiceProvider, encryptor);
final StandardProcessContext standardProcessContext = new StandardProcessContext(procNode, serviceProvider,
encryptor, getStateManager(connectable.getIdentifier()));
final long runNanos = procNode.getRunDuration(TimeUnit.NANOSECONDS);
final ProcessSessionFactory sessionFactory;
@ -251,7 +260,7 @@ public class EventDrivenSchedulingAgent implements SchedulingAgent {
}
} else {
final ProcessSessionFactory sessionFactory = new StandardProcessSessionFactory(context);
final ConnectableProcessContext connectableProcessContext = new ConnectableProcessContext(connectable, encryptor);
final ConnectableProcessContext connectableProcessContext = new ConnectableProcessContext(connectable, encryptor, getStateManager(connectable.getIdentifier()));
trigger(connectable, scheduleState, connectableProcessContext, sessionFactory);
// See explanation above for the ProcessorNode as to why we do this.

View File

@ -25,6 +25,7 @@ import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.connectable.Connectable;
import org.apache.nifi.connectable.ConnectableType;
import org.apache.nifi.controller.FlowController;
@ -61,6 +62,10 @@ public class QuartzSchedulingAgent implements SchedulingAgent {
this.encryptor = enryptor;
}
private StateManager getStateManager(final String componentId) {
return flowController.getStateManagerProvider().getStateManager(componentId);
}
@Override
public void shutdown() {
}
@ -133,14 +138,15 @@ public class QuartzSchedulingAgent implements SchedulingAgent {
final List<AtomicBoolean> triggers = new ArrayList<>();
for (int i = 0; i < connectable.getMaxConcurrentTasks(); i++) {
final Callable<Boolean> continuallyRunTask;
if (connectable.getConnectableType() == ConnectableType.PROCESSOR) {
final ProcessorNode procNode = (ProcessorNode) connectable;
final StandardProcessContext standardProcContext = new StandardProcessContext(procNode, flowController, encryptor);
final StandardProcessContext standardProcContext = new StandardProcessContext(procNode, flowController, encryptor, getStateManager(connectable.getIdentifier()));
ContinuallyRunProcessorTask runnableTask = new ContinuallyRunProcessorTask(this, procNode, flowController, contextFactory, scheduleState, standardProcContext);
continuallyRunTask = runnableTask;
} else {
final ConnectableProcessContext connProcContext = new ConnectableProcessContext(connectable, encryptor);
final ConnectableProcessContext connProcContext = new ConnectableProcessContext(connectable, encryptor, getStateManager(connectable.getIdentifier()));
continuallyRunTask = new ContinuallyRunConnectableTask(contextFactory, connectable, scheduleState, connProcContext);
}

View File

@ -24,6 +24,8 @@ import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.nifi.annotation.lifecycle.OnStopped;
public class ScheduleState {
private final AtomicInteger activeThreadCount = new AtomicInteger(0);
@ -62,12 +64,12 @@ public class ScheduleState {
}
/**
* Maintains an AtomicBoolean so that the first thread to call this method after a Processor is no longer scheduled to run will receive a <code>true</code> and MUST call the methods annotated with
*
* @OnStopped
* Maintains an AtomicBoolean so that the first thread to call this method after a Processor is no longer
* scheduled to run will receive a <code>true</code> and MUST call the methods annotated with
* {@link OnStopped @OnStopped}
*
* @return <code>true</code> if the caller is required to call Processor methods annotated with
* @OnStopped, <code>false</code> otherwise
* @OnStopped, <code>false</code> otherwise
*/
public boolean mustCallOnStoppedMethods() {
return mustCallOnStoppedMethods.getAndSet(false);

View File

@ -32,6 +32,8 @@ import org.apache.nifi.annotation.lifecycle.OnScheduled;
import org.apache.nifi.annotation.lifecycle.OnStopped;
import org.apache.nifi.annotation.lifecycle.OnUnscheduled;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.components.state.StateManagerProvider;
import org.apache.nifi.connectable.Connectable;
import org.apache.nifi.connectable.Funnel;
import org.apache.nifi.connectable.Port;
@ -74,6 +76,7 @@ public final class StandardProcessScheduler implements ProcessScheduler {
private final Heartbeater heartbeater;
private final long administrativeYieldMillis;
private final String administrativeYieldDuration;
private final StateManagerProvider stateManagerProvider;
private final ConcurrentMap<Object, ScheduleState> scheduleStates = new ConcurrentHashMap<>();
private final ScheduledExecutorService frameworkTaskExecutor;
@ -84,10 +87,12 @@ public final class StandardProcessScheduler implements ProcessScheduler {
private final StringEncryptor encryptor;
public StandardProcessScheduler(final Heartbeater heartbeater, final ControllerServiceProvider controllerServiceProvider, final StringEncryptor encryptor) {
public StandardProcessScheduler(final Heartbeater heartbeater, final ControllerServiceProvider controllerServiceProvider, final StringEncryptor encryptor,
final StateManagerProvider stateManagerProvider) {
this.heartbeater = heartbeater;
this.controllerServiceProvider = controllerServiceProvider;
this.encryptor = encryptor;
this.stateManagerProvider = stateManagerProvider;
administrativeYieldDuration = NiFiProperties.getInstance().getAdministrativeYieldDuration();
administrativeYieldMillis = FormatUtils.getTimeDuration(administrativeYieldDuration, TimeUnit.MILLISECONDS);
@ -95,6 +100,10 @@ public final class StandardProcessScheduler implements ProcessScheduler {
frameworkTaskExecutor = new FlowEngine(4, "Framework Task Thread");
}
private StateManager getStateManager(final String componentId) {
return stateManagerProvider.getStateManager(componentId);
}
public void scheduleFrameworkTask(final Runnable command, final String taskName, final long initialDelay, final long delay, final TimeUnit timeUnit) {
frameworkTaskExecutor.scheduleWithFixedDelay(new Runnable() {
@Override
@ -102,7 +111,7 @@ public final class StandardProcessScheduler implements ProcessScheduler {
try {
command.run();
} catch (final Throwable t) {
LOG.error("Failed to run Framework Task {} due to {}", command, t.toString());
LOG.error("Failed to run Framework Task {} due to {}", taskName, t.toString());
if (LOG.isDebugEnabled()) {
LOG.error("", t);
}
@ -111,6 +120,15 @@ public final class StandardProcessScheduler implements ProcessScheduler {
}, initialDelay, delay, timeUnit);
}
/**
* Submits the given task to be executed exactly once in a background thread
*
* @param task the task to perform
*/
public void submitFrameworkTask(final Runnable task) {
frameworkTaskExecutor.submit(task);
}
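A short usage sketch of the one-shot submission described above, mirroring how FlowController hands the embedded ZooKeeper start-up to a background thread; the task body is a placeholder:
// Illustrative one-shot background task, in contrast to the repeating scheduleFrameworkTask above.
processScheduler.submitFrameworkTask(new Runnable() {
    @Override
    public void run() {
        // perform the work exactly once on a framework thread, e.g. a blocking start-up call
    }
});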
@Override
public void setMaxThreadCount(final SchedulingStrategy schedulingStrategy, final int maxThreadCount) {
final SchedulingAgent agent = getSchedulingAgent(schedulingStrategy);
@ -299,7 +317,7 @@ public final class StandardProcessScheduler implements ProcessScheduler {
public void run() {
try (final NarCloseable x = NarCloseable.withNarLoader()) {
final long lastStopTime = scheduleState.getLastStopTime();
final StandardProcessContext processContext = new StandardProcessContext(procNode, controllerServiceProvider, encryptor);
final StandardProcessContext processContext = new StandardProcessContext(procNode, controllerServiceProvider, encryptor, getStateManager(procNode.getIdentifier()));
final Set<String> serviceIds = new HashSet<>();
for (final PropertyDescriptor descriptor : processContext.getProperties().keySet()) {
@ -343,7 +361,8 @@ public final class StandardProcessScheduler implements ProcessScheduler {
return;
}
final SchedulingContext schedulingContext = new StandardSchedulingContext(processContext, controllerServiceProvider, procNode);
final SchedulingContext schedulingContext = new StandardSchedulingContext(processContext, controllerServiceProvider,
procNode, getStateManager(procNode.getIdentifier()));
ReflectionUtils.invokeMethodsWithAnnotations(OnScheduled.class, org.apache.nifi.processor.annotation.OnScheduled.class, procNode.getProcessor(), schedulingContext);
getSchedulingAgent(procNode).schedule(procNode, scheduleState);
@ -420,7 +439,7 @@ public final class StandardProcessScheduler implements ProcessScheduler {
@Override
public void run() {
try (final NarCloseable x = NarCloseable.withNarLoader()) {
final StandardProcessContext processContext = new StandardProcessContext(procNode, controllerServiceProvider, encryptor);
final StandardProcessContext processContext = new StandardProcessContext(procNode, controllerServiceProvider, encryptor, getStateManager(procNode.getIdentifier()));
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnUnscheduled.class, procNode.getProcessor(), processContext);
@ -503,7 +522,7 @@ public final class StandardProcessScheduler implements ProcessScheduler {
getSchedulingAgent(connectable).unschedule(connectable, state);
if (!state.isScheduled() && state.getActiveThreadCount() == 0 && state.mustCallOnStoppedMethods()) {
final ConnectableProcessContext processContext = new ConnectableProcessContext(connectable, encryptor);
final ConnectableProcessContext processContext = new ConnectableProcessContext(connectable, encryptor, getStateManager(connectable.getIdentifier()));
try (final NarCloseable x = NarCloseable.withNarLoader()) {
ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnStopped.class, connectable, processContext);
heartbeater.heartbeat();

View File

@ -23,6 +23,7 @@ import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.connectable.Connectable;
import org.apache.nifi.connectable.ConnectableType;
import org.apache.nifi.controller.FlowController;
@ -67,6 +68,10 @@ public class TimerDrivenSchedulingAgent implements SchedulingAgent {
}
}
private StateManager getStateManager(final String componentId) {
return flowController.getStateManagerProvider().getStateManager(componentId);
}
@Override
public void shutdown() {
flowEngine.shutdown();
@ -96,14 +101,14 @@ public class TimerDrivenSchedulingAgent implements SchedulingAgent {
// Determine the task to run and create it.
if (connectable.getConnectableType() == ConnectableType.PROCESSOR) {
final ProcessorNode procNode = (ProcessorNode) connectable;
final StandardProcessContext standardProcContext = new StandardProcessContext(procNode, flowController, encryptor);
final StandardProcessContext standardProcContext = new StandardProcessContext(procNode, flowController, encryptor, getStateManager(connectable.getIdentifier()));
final ContinuallyRunProcessorTask runnableTask = new ContinuallyRunProcessorTask(this, procNode, flowController,
contextFactory, scheduleState, standardProcContext);
continuallyRunTask = runnableTask;
processContext = standardProcContext;
} else {
processContext = new ConnectableProcessContext(connectable, encryptor);
processContext = new ConnectableProcessContext(connectable, encryptor, getStateManager(connectable.getIdentifier()));
continuallyRunTask = new ContinuallyRunConnectableTask(contextFactory, connectable, scheduleState, processContext);
}

View File

@ -18,6 +18,7 @@ package org.apache.nifi.controller.service;
import java.util.Set;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.controller.ControllerService;
import org.apache.nifi.controller.ControllerServiceInitializationContext;
import org.apache.nifi.controller.ControllerServiceLookup;
@ -28,11 +29,13 @@ public class StandardControllerServiceInitializationContext implements Controlle
private final String id;
private final ControllerServiceProvider serviceProvider;
private final ComponentLog logger;
private final StateManager stateManager;
public StandardControllerServiceInitializationContext(final String identifier, final ComponentLog logger, final ControllerServiceProvider serviceProvider) {
public StandardControllerServiceInitializationContext(final String identifier, final ComponentLog logger, final ControllerServiceProvider serviceProvider, final StateManager stateManager) {
this.id = identifier;
this.logger = logger;
this.serviceProvider = serviceProvider;
this.stateManager = stateManager;
}
@Override
@ -79,4 +82,9 @@ public class StandardControllerServiceInitializationContext implements Controlle
public ComponentLog getLogger() {
return logger;
}
@Override
public StateManager getStateManager() {
return stateManager;
}
}
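Controller Service initialization contexts now carry the StateManager too. A hedged sketch of a service capturing it at init time and persisting a value across restarts; the class, method, and key names are assumed for illustration:
// Hypothetical Controller Service using the getStateManager() accessor added above.
public class ExampleStateKeepingService extends AbstractControllerService {
    private volatile StateManager stateManager;
    @Override
    protected void init(final ControllerServiceInitializationContext context) {
        stateManager = context.getStateManager();
    }
    public void recordLastRun(final long timestamp) throws IOException {
        // LOCAL scope survives a restart of this node but is not shared across the cluster
        stateManager.setState(Collections.singletonMap("last.run.timestamp", String.valueOf(timestamp)), Scope.LOCAL);
    }
}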

Some files were not shown because too many files have changed in this diff.