mirror of https://github.com/apache/nifi.git
parent 1b88552b46
commit 446cd44702
@@ -22,7 +22,7 @@ import org.apache.nifi.registry.VariableRegistry;
  /**
  * Defines a type of expression language statement that can be applied
- * parameterized by various attributes and properties as specified in each of
+ * parametrized by various attributes and properties as specified in each of
  * the method calls. AttributeExpression evaluations may be also backed by a
  * {@link VariableRegistry} used to substitute attributes and variables found in
  * the expression for which the registry has a value.
@@ -37,7 +37,7 @@ import org.apache.nifi.components.ConfigurableComponent;
  public interface NotificationService extends ConfigurableComponent {

  /**
- * Provides the NotificatoinService with access to objects that may be of use
+ * Provides the NotificationService with access to objects that may be of use
  * throughout the life of the service
  *
  * @param context of initialization
@@ -532,7 +532,7 @@ public final class CertificateUtils {
  * @param signingAlgorithm the signing algorithm to use
  * @param days the number of days it should be valid for
  * @return an issued {@link X509Certificate} from the given issuer certificate and {@link KeyPair}
- * @throws CertificateException if there is an error issueing the certificate
+ * @throws CertificateException if there is an error issuing the certificate
  */
  public static X509Certificate generateIssuedCertificate(String dn, PublicKey publicKey, X509Certificate issuer, KeyPair issuerKeyPair, String signingAlgorithm, int days)
          throws CertificateException {
@@ -115,7 +115,7 @@ public abstract class AbstractChannelReader implements Runnable {
  buffer.flip();
  if (buffer.remaining() > 0) {
      consumer.addFilledBuffer(buffer);
-     buffer = null; //clear the reference - is now the consumer's responsiblity
+     buffer = null; //clear the reference - is now the consumer's responsibility
  } else {
      buffer.clear();
      bufferPool.returnBuffer(buffer, 0);
@@ -257,7 +257,7 @@ The `nifi.login.identity.provider.configuration.file` property specifies the con
  The `nifi.security.user.login.identity.provider` property indicates which of the configured Login Identity Provider should be
  used. If this property is not configured, NiFi will not support username/password authentication and will require client
  certificates for authenticating users over HTTPS. By default, this property is not configured meaning that username/password must be
- explicity enabled.
+ explicitly enabled.

  NiFi does not perform user authentication over HTTP. Using HTTP all users will be granted all roles.
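Note: as a minimal sketch of what enabling username/password authentication looks like (the provider identifier `ldap-provider` and the file path are illustrative values, not taken from this commit), the relevant `nifi.properties` entries would be:

    # illustrative values only
    nifi.login.identity.provider.configuration.file=./conf/login-identity-providers.xml
    nifi.security.user.login.identity.provider=ldap-provider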
@@ -638,7 +638,7 @@ or by calling the `getLogger` method of `AbstractProcessor`.

  Additionally, Processors should use the `ProvenanceReporter`
  interface, obtained via the ProcessSession's
- `getProvenanceReporter` method. The ProvenanceReoprter should be used
+ `getProvenanceReporter` method. The ProvenanceReporter should be used
  to indicate any time that content is
  received from an external source or sent to an external location. The
  ProvenanceReporter also has methods for
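Note: a minimal sketch of the reporting pattern this guide passage describes, assuming the usual onTrigger plumbing and a hypothetical transit URI (nothing here is code from the commit):

    import org.apache.nifi.flowfile.FlowFile;
    import org.apache.nifi.processor.ProcessSession;

    // Sketch only: report provenance when content enters the flow
    // from an external system. The transit URI is a made-up example.
    void fetchAndReport(final ProcessSession session) {
        FlowFile flowFile = session.create();
        // ... write the fetched bytes into flowFile here ...
        session.getProvenanceReporter().receive(flowFile, "https://example.com/source");
    }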
@@ -1387,7 +1387,7 @@ If at any point in the InputStreamCallback, a condition is reached in
  which processing cannot continue
  (for example, the input is malformed), a `ProcessException` should be
  thrown. The call to the
- ProcesssSession's `read` method is wrapped in a `try/catch` block
+ ProcessSession's `read` method is wrapped in a `try/catch` block
  where `ProcessException` is
  caught. If an Exception is caught, a log message is generated
  explaining the error. The List of
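Note: the try/catch arrangement described here might look like the following sketch (processor plumbing is assumed, not shown in the commit):

    import java.io.IOException;
    import java.io.InputStream;
    import org.apache.nifi.flowfile.FlowFile;
    import org.apache.nifi.processor.ProcessSession;
    import org.apache.nifi.processor.exception.ProcessException;
    import org.apache.nifi.processor.io.InputStreamCallback;

    // Sketch only: throw ProcessException from the callback and catch it
    // around ProcessSession.read(), as the guide describes.
    void readSafely(final ProcessSession session, final FlowFile flowFile) {
        try {
            session.read(flowFile, new InputStreamCallback() {
                @Override
                public void process(final InputStream in) throws IOException {
                    // parse the stream; on malformed input:
                    throw new ProcessException("Input is malformed");
                }
            });
        } catch (final ProcessException pe) {
            // log the error and route the FlowFile to failure in a real processor
        }
    }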
@@ -109,7 +109,7 @@ public interface FlowFileRepository extends Closeable {
  void swapFlowFilesOut(List<FlowFileRecord> swappedOut, FlowFileQueue flowFileQueue, String swapLocation) throws IOException;

  /**
- * Updates the Repository to indicate that the given FlowFileRecpords were
+ * Updates the Repository to indicate that the given FlowFileRecords were
  * Swapped In to memory
  *
  * @param swapLocation the location (e.g., a filename) from which FlowFiles
@@ -164,7 +164,7 @@ public class MockProcessSession implements ProcessSession {
  * {@link org.apache.nifi.processor.Processor#onTrigger} commits or rolls back the
  * session
  */
- public void clearCommited() {
+ public void clearCommitted() {
      committed = false;
  }
@@ -97,7 +97,7 @@ final class AMQPPublisher extends AMQPWorker {
  }
  } else {
      throw new IllegalStateException("This instance of AMQPPublisher is invalid since "
-             + "its publishigChannel is closed");
+             + "its publishingChannel is closed");
  }
  }
@@ -99,11 +99,11 @@ public abstract class AbstractAWSCredentialsProviderProcessor<ClientType extends
  }

  /**
- * Abstract method to create aws client using credetials provider. This is the preferred method
+ * Abstract method to create aws client using credentials provider. This is the preferred method
  * for creating aws clients
  * @param context process context
  * @param credentialsProvider aws credentials provider
- * @param config aws client configuraiton
+ * @param config aws client configuration
  * @return ClientType the client
  */
  protected abstract ClientType createClient(final ProcessContext context, final AWSCredentialsProvider credentialsProvider, final ClientConfiguration config);
@@ -47,7 +47,7 @@ import com.amazonaws.services.dynamodbv2.document.DynamoDB;
  import com.amazonaws.services.dynamodbv2.model.AttributeValue;

  /**
- * Base class for Nifi dynamo db related processors
+ * Base class for NiFi dynamo db related processors
  *
  * @see DeleteDynamoDB
  * @see PutDynamoDB
@@ -68,7 +68,7 @@ public abstract class AbstractKinesisFirehoseProcessor extends AbstractAWSCreden
  }

  /**
- * Create client using AWSCredentails
+ * Create client using AWSCredentials
  *
  * @deprecated use {@link #createClient(ProcessContext, AWSCredentialsProvider, ClientConfiguration)} instead
  */
@@ -59,7 +59,7 @@ public abstract class AbstractAWSLambdaProcessor extends AbstractAWSCredentialsP
  }

  /**
- * Create client using AWSCredentails
+ * Create client using AWSCredentials
  *
  * @deprecated use {@link #createClient(ProcessContext, AWSCredentialsProvider, ClientConfiguration)} instead
  */
@@ -62,7 +62,7 @@ public abstract class AbstractSNSProcessor extends AbstractAWSCredentialsProvide
  }

  /**
- * Create client using AWSCredentails
+ * Create client using AWSCredentials
  *
  * @deprecated use {@link #createClient(ProcessContext, AWSCredentialsProvider, ClientConfiguration)} instead
  */
@@ -55,7 +55,7 @@ public abstract class AbstractSQSProcessor extends AbstractAWSCredentialsProvide
  }

  /**
- * Create client using AWSCredentails
+ * Create client using AWSCredentials
  *
  * @deprecated use {@link #createClient(ProcessContext, AWSCredentialsProvider, ClientConfiguration)} instead
  */
@@ -55,7 +55,7 @@ public abstract class AbstractS3IT {
  protected final static String REGION = System.getProperty("it.aws.region", "us-west-1");
  // Adding REGION to bucket prevents errors of
  //    "A conflicting conditional operation is currently in progress against this resource."
- // when bucket is rapidly added/deleted and consistency propogation causes this error.
+ // when bucket is rapidly added/deleted and consistency propagation causes this error.
  // (Should not be necessary if REGION remains static, but added to prevent future frustration.)
  // [see http://stackoverflow.com/questions/13898057/aws-error-message-a-conflicting-conditional-operation-is-currently-in-progress]
  protected final static String BUCKET_NAME = "test-bucket-00000000-0000-0000-0000-123456789021-" + REGION;
@@ -847,7 +847,7 @@ public class ITPutS3Object extends AbstractS3IT {
  // sleep() delays long enough to satisfy interval and age intervals.
  Thread.sleep(2000L);

- // System millis are used for timing, but it is incrememtned on each
+ // System millis are used for timing, but it is incremented on each
  // call to circumvent what appears to be caching in the AWS library.
  // The increments are 1000 millis because AWS returns upload
  // initiation times in whole seconds.
@@ -43,7 +43,7 @@ import com.couchbase.client.java.CouchbaseCluster;
  @CapabilityDescription("Provides a centralized Couchbase connection and bucket passwords management."
      + " Bucket passwords can be specified via dynamic properties.")
  @Tags({ "nosql", "couchbase", "database", "connection" })
- @DynamicProperty(name = "Bucket Password for BUCKET_NAME", value = "bucket password", description = "Specify bucket password if neseccery.")
+ @DynamicProperty(name = "Bucket Password for BUCKET_NAME", value = "bucket password", description = "Specify bucket password if necessary.")
  public class CouchbaseClusterService extends AbstractControllerService implements CouchbaseClusterControllerService {

  public static final PropertyDescriptor CONNECTION_STRING = new PropertyDescriptor
@@ -118,7 +118,7 @@ public abstract class AbstractElasticsearchTransportClientProcessor extends Abst
  protected String authToken;

  /**
- * Instantiate ElasticSearch Client. This chould be called by subclasses' @OnScheduled method to create a client
+ * Instantiate ElasticSearch Client. This should be called by subclasses' @OnScheduled method to create a client
  * if one does not yet exist. If called when scheduled, closeClient() should be called by the subclasses' @OnStopped
  * method so the client will be destroyed when the processor is stopped.
  *

@@ -252,7 +252,7 @@ public abstract class AbstractElasticsearchTransportClientProcessor extends Abst
  }

  /**
- * Get the ElasticSearch hosts from a Nifi attribute, e.g.
+ * Get the ElasticSearch hosts from a NiFi attribute, e.g.
  *
  * @param hosts A comma-separated list of ElasticSearch hosts (host:port,host2:port2, etc.)
  * @return List of InetSocketAddresses for the ES hosts
@@ -222,7 +222,7 @@ abstract class AbstractEmailProcessor<T extends AbstractMailReceiver> extends Ab
  protected abstract T buildMessageReceiver(ProcessContext context);

  /**
- * Return the target receivere's mail protocol (e.g., imap, pop etc.)
+ * Return the target receiver's mail protocol (e.g., imap, pop etc.)
  */
  protected abstract String getProtocol(ProcessContext processContext);

@@ -368,7 +368,7 @@ abstract class AbstractEmailProcessor<T extends AbstractMailReceiver> extends Ab
      fromAddressesString = Arrays.asList(fromAddresses).toString();
  }
  } catch (MessagingException e) {
-     this.logger.warn("Faild to retrieve 'From' attribute from Message.");
+     this.logger.warn("Failed to retrieve 'From' attribute from Message.");
  }

  processSession.getProvenanceReporter().receive(flowFile, this.displayUrl, "Received message from " + fromAddressesString, executionDuration);
@@ -86,9 +86,9 @@ public class BinaryReader {
  }

  /**
- * Returns the byte that would be read without changing the posiiton
+ * Returns the byte that would be read without changing the position
  *
- * @return the byte that would be read without changing the posiiton
+ * @return the byte that would be read without changing the position
  */
  public int peek() {
      return bytes[position];
@@ -85,7 +85,7 @@ public class BulletinDTO {
  * @return category of this message
  */
  @ApiModelProperty(
-     value = "The catagory of this bulletin."
+     value = "The category of this bulletin."
  )
  public String getCategory() {
      return category;
@@ -33,7 +33,7 @@ public class ControllerConfigurationDTO {
  * @return maximum number of timer driven threads this NiFi has available
  */
  @ApiModelProperty(
-     value = "The maimum number of timer driven threads the NiFi has available."
+     value = "The maximum number of timer driven threads the NiFi has available."
  )
  public Integer getMaxTimerDrivenThreadCount() {
      return maxTimerDrivenThreadCount;

@@ -47,7 +47,7 @@ public class ControllerConfigurationDTO {
  * @return maximum number of event driven thread this NiFi has available
  */
  @ApiModelProperty(
-     value = "The maximum number of event driven threads the NiFi has avaiable."
+     value = "The maximum number of event driven threads the NiFi has available."
  )
  public Integer getMaxEventDrivenThreadCount() {
      return maxEventDrivenThreadCount;
@@ -227,7 +227,7 @@ public class ControllerDTO {
  * @return number of inactive remote ports contained in this process group
  */
  @ApiModelProperty(
-     value = "The number of inactive remote porst contained in the NiFi."
+     value = "The number of inactive remote ports contained in the NiFi."
  )
  public Integer getInactiveRemotePortCount() {
      return inactiveRemotePortCount;
@@ -165,7 +165,7 @@ public class ControllerServiceDTO extends ComponentDTO {
  * @return annotation data for this controller service
  */
  @ApiModelProperty(
-     value = "The annontation for the controller service. This is how the custom UI relays configuration to the controller service."
+     value = "The annotation for the controller service. This is how the custom UI relays configuration to the controller service."
  )
  public String getAnnotationData() {
      return annotationData;
@@ -152,7 +152,7 @@ public class ControllerServiceReferencingComponentDTO {
  * @return descriptors for the components properties
  */
  @ApiModelProperty(
-     value = "The descriptors for the componet properties."
+     value = "The descriptors for the component properties."
  )
  public Map<String, PropertyDescriptorDTO> getDescriptors() {
      return descriptors;

@@ -194,7 +194,7 @@ public class ControllerServiceReferencingComponentDTO {
  * @return If this referencing component represents a ControllerService, these are the components that reference it
  */
  @ApiModelProperty(
-     value = "If the referencing component represents a controller service, these are the components that referenc it."
+     value = "If the referencing component represents a controller service, these are the components that reference it."
  )
  public Set<ControllerServiceReferencingComponentEntity> getReferencingComponents() {
      return referencingComponents;
@@ -48,7 +48,7 @@ public class DocumentedTypeDTO {
  * @return The type is the fully-qualified name of a Java class
  */
  @ApiModelProperty(
-     value = "The fulley qualified name of the type."
+     value = "The fully qualified name of the type."
  )
  public String getType() {
      return type;
@@ -134,7 +134,7 @@ public class ProcessGroupDTO extends ComponentDTO {
  * @return number of running component in this process group
  */
  @ApiModelProperty(
-     value = "The number of running componetns in this process group."
+     value = "The number of running components in this process group."
  )
  public Integer getRunningCount() {
      return runningCount;
@@ -202,7 +202,7 @@ public class ProcessorDTO extends ComponentDTO {
  * @return The processor configuration details
  */
  @ApiModelProperty(
-     value = "The configuration details for the processor. These details will be included in a resopnse if the verbose flag is included in a request."
+     value = "The configuration details for the processor. These details will be included in a response if the verbose flag is included in a request."
  )
  public ProcessorConfigDTO getConfig() {
      return config;
@@ -71,7 +71,7 @@ public class PropertyDescriptorDTO {
  * @return An explanation of the meaning of the given property. This description is meant to be displayed to a user or simply provide a mechanism of documenting intent
  */
  @ApiModelProperty(
-     value = "The descriptoin for the property. Used to relay additional details to a user or provide a mechanism of documenting intent."
+     value = "The description for the property. Used to relay additional details to a user or provide a mechanism of documenting intent."
  )
  public String getDescription() {
      return description;
@@ -55,7 +55,7 @@ public class ControllerStatusDTO implements Cloneable {
  /**
  * @return queue for the controller
  */
- @ApiModelProperty("The number of flowfilew queued in the NiFi.")
+ @ApiModelProperty("The number of flowfiles queued in the NiFi.")
  public String getQueued() {
      return queued;
  }
@@ -20,7 +20,7 @@ import javax.xml.bind.annotation.XmlRootElement;
  import java.util.Set;

  /**
- * A serialized representation of this class can be placed in the entity body of a response to the API. This particular entity holds a reference to a list of ConnectionEntitys.
+ * A serialized representation of this class can be placed in the entity body of a response to the API. This particular entity holds a reference to a list of ConnectionEntity.
  */
  @XmlRootElement(name = "connectionsEntity")
  public class ConnectionsEntity extends Entity {

@@ -28,7 +28,7 @@ public class ConnectionsEntity extends Entity {
  private Set<ConnectionEntity> connections;

  /**
- * @return list of ConnectionEntitys that are being serialized
+ * @return list of ConnectionEntity that are being serialized
  */
  public Set<ConnectionEntity> getConnections() {
      return connections;
@@ -114,7 +114,7 @@ public class ProcessGroupEntity extends ComponentEntity implements Permissible<P
  * @return number of running component in this process group
  */
  @ApiModelProperty(
-     value = "The number of running componetns in this process group."
+     value = "The number of running components in this process group."
  )
  public Integer getRunningCount() {
      return runningCount;
@@ -105,12 +105,12 @@ public class DocGenerator {
  final File directory = new File(docsDir, componentClass.getCanonicalName());
  directory.mkdirs();

- final File baseDocumenationFile = new File(directory, "index.html");
- if (baseDocumenationFile.exists()) {
-     logger.warn(baseDocumenationFile + " already exists, overwriting!");
+ final File baseDocumentationFile = new File(directory, "index.html");
+ if (baseDocumentationFile.exists()) {
+     logger.warn(baseDocumentationFile + " already exists, overwriting!");
  }

- try (final OutputStream output = new BufferedOutputStream(new FileOutputStream(baseDocumenationFile))) {
+ try (final OutputStream output = new BufferedOutputStream(new FileOutputStream(baseDocumentationFile))) {
      writer.write(component, output, hasAdditionalInfo(directory));
  }
@@ -1009,7 +1009,7 @@ public class FileAuthorizerTest {
  authorizer.onConfigured(configurationContext);
  assertEquals(2, authorizer.getUsers().size());

- // retrieve user-1 and verify it exsits
+ // retrieve user-1 and verify it exists
  final User user = authorizer.getUser("user-1");
  assertEquals("user-1", user.getIdentifier());
@@ -19,7 +19,7 @@ package org.apache.nifi.cluster.coordination.http;
  import org.apache.nifi.cluster.coordination.http.endpoints.BulletinBoardEndpointMerger;
  import org.apache.nifi.cluster.coordination.http.endpoints.ComponentStateEndpointMerger;
  import org.apache.nifi.cluster.coordination.http.endpoints.ConnectionEndpointMerger;
- import org.apache.nifi.cluster.coordination.http.endpoints.ConnectionStatusEndpiontMerger;
+ import org.apache.nifi.cluster.coordination.http.endpoints.ConnectionStatusEndpointMerger;
  import org.apache.nifi.cluster.coordination.http.endpoints.ConnectionsEndpointMerger;
  import org.apache.nifi.cluster.coordination.http.endpoints.ControllerBulletinsEndpointMerger;
  import org.apache.nifi.cluster.coordination.http.endpoints.ControllerConfigurationEndpointMerger;

@@ -30,7 +30,7 @@ import org.apache.nifi.cluster.coordination.http.endpoints.ControllerServicesEnd
  import org.apache.nifi.cluster.coordination.http.endpoints.ControllerStatusEndpointMerger;
  import org.apache.nifi.cluster.coordination.http.endpoints.CountersEndpointMerger;
  import org.apache.nifi.cluster.coordination.http.endpoints.CurrentUserEndpointMerger;
- import org.apache.nifi.cluster.coordination.http.endpoints.DropRequestEndpiontMerger;
+ import org.apache.nifi.cluster.coordination.http.endpoints.DropRequestEndpointMerger;
  import org.apache.nifi.cluster.coordination.http.endpoints.FlowConfigurationEndpointMerger;
  import org.apache.nifi.cluster.coordination.http.endpoints.FlowMerger;
  import org.apache.nifi.cluster.coordination.http.endpoints.FlowSnippetEndpointMerger;

@@ -94,7 +94,7 @@ public class StandardHttpResponseMerger implements HttpResponseMerger {
  endpointMergers.add(new ControllerBulletinsEndpointMerger());
  endpointMergers.add(new GroupStatusEndpointMerger());
  endpointMergers.add(new ProcessorStatusEndpointMerger());
- endpointMergers.add(new ConnectionStatusEndpiontMerger());
+ endpointMergers.add(new ConnectionStatusEndpointMerger());
  endpointMergers.add(new PortStatusEndpointMerger());
  endpointMergers.add(new RemoteProcessGroupStatusEndpointMerger());
  endpointMergers.add(new ProcessorEndpointMerger());

@@ -116,7 +116,7 @@ public class StandardHttpResponseMerger implements HttpResponseMerger {
  endpointMergers.add(new ControllerServiceReferenceEndpointMerger());
  endpointMergers.add(new ReportingTaskEndpointMerger());
  endpointMergers.add(new ReportingTasksEndpointMerger());
- endpointMergers.add(new DropRequestEndpiontMerger());
+ endpointMergers.add(new DropRequestEndpointMerger());
  endpointMergers.add(new ListFlowFilesEndpointMerger());
  endpointMergers.add(new ComponentStateEndpointMerger());
  endpointMergers.add(new BulletinBoardEndpointMerger());
@@ -31,7 +31,7 @@ import java.util.Map;
  import java.util.Set;
  import java.util.regex.Pattern;

- public class ConnectionStatusEndpiontMerger extends AbstractSingleEntityEndpoint<ConnectionStatusEntity> implements ComponentEntityStatusMerger<ConnectionStatusDTO> {
+ public class ConnectionStatusEndpointMerger extends AbstractSingleEntityEndpoint<ConnectionStatusEntity> implements ComponentEntityStatusMerger<ConnectionStatusDTO> {
  public static final Pattern CONNECTION_STATUS_URI_PATTERN = Pattern.compile("/nifi-api/flow/connections/[a-f0-9\\-]{36}/status");

  @Override
@@ -29,7 +29,7 @@ import org.apache.nifi.util.FormatUtils;
  import org.apache.nifi.web.api.dto.DropRequestDTO;
  import org.apache.nifi.web.api.entity.DropRequestEntity;

- public class DropRequestEndpiontMerger extends AbstractSingleDTOEndpoint<DropRequestEntity, DropRequestDTO> {
+ public class DropRequestEndpointMerger extends AbstractSingleDTOEndpoint<DropRequestEntity, DropRequestDTO> {
  public static final Pattern DROP_REQUESTS_URI = Pattern.compile("/nifi-api/flowfile-queues/[a-f0-9\\-]{36}/drop-requests");
  public static final Pattern DROP_REQUEST_URI = Pattern.compile("/nifi-api/flowfile-queues/[a-f0-9\\-]{36}/drop-requests/[a-f0-9\\-]{36}");
@@ -43,7 +43,7 @@ public interface ReportingTaskNode extends ConfiguredComponent {
  *
  * @param schedulingPeriod new period
  */
- void setScheduldingPeriod(String schedulingPeriod);
+ void setSchedulingPeriod(String schedulingPeriod);

  ReportingTask getReportingTask();
@@ -37,7 +37,7 @@ public enum ControllerServiceState {
  ENABLING,
  /**
  * Controller Service has been enabled and has finished its lifecycle
- * methods. The Controller SErvice is ready to be used.
+ * methods. The Controller Service is ready to be used.
  */
  ENABLED;
  }
@@ -510,7 +510,7 @@ public class StandardFlowSynchronizer implements FlowSynchronizer {
  final ReportingTaskNode reportingTask = controller.createReportingTask(dto.getType(), dto.getId(), false);
  reportingTask.setName(dto.getName());
  reportingTask.setComments(dto.getComments());
- reportingTask.setScheduldingPeriod(dto.getSchedulingPeriod());
+ reportingTask.setSchedulingPeriod(dto.getSchedulingPeriod());
  reportingTask.setSchedulingStrategy(SchedulingStrategy.valueOf(dto.getSchedulingStrategy()));

  reportingTask.setAnnotationData(dto.getAnnotationData());
@@ -94,7 +94,7 @@ public abstract class AbstractReportingTaskNode extends AbstractConfiguredCompon
  }

  @Override
- public void setScheduldingPeriod(final String schedulingPeriod) {
+ public void setSchedulingPeriod(final String schedulingPeriod) {
      this.schedulingPeriod.set(schedulingPeriod);
  }
@@ -120,7 +120,7 @@ public class FileSystemRepository implements ContentRepository {

  private ResourceClaimManager resourceClaimManager; // effectively final

- // Map of contianer to archived files that should be deleted next.
+ // Map of container to archived files that should be deleted next.
  private final Map<String, BlockingQueue<ArchiveInfo>> archivedFiles = new HashMap<>();

  // guarded by synchronizing on this
@@ -80,7 +80,7 @@ public final class FlowEngine extends ScheduledThreadPoolExecutor {
  }

  /**
- * Hook method called by the thread that executed the given runnable after execution of the runnable completed. Logs the fact of completion and any errors that might have occured.
+ * Hook method called by the thread that executed the given runnable after execution of the runnable completed. Logs the fact of completion and any errors that might have occurred.
  *
  * @param runnable runnable
  * @param throwable throwable
@@ -171,11 +171,11 @@ public interface AuthorizableLookup {
  /**
  * Get the authorizable referencing component.
  *
- * @param controllerSeriveId controller service id
+ * @param controllerServiceId controller service id
  * @param id component id
  * @return authorizable
  */
- Authorizable getControllerServiceReferencingComponent(String controllerSeriveId, String id);
+ Authorizable getControllerServiceReferencingComponent(String controllerServiceId, String id);

  /**
  * Get the authorizable ReportingTask.
@@ -392,8 +392,8 @@ class StandardAuthorizableLookup implements AuthorizableLookup {
  }

  @Override
- public Authorizable getControllerServiceReferencingComponent(String controllerSeriveId, String id) {
-     final ControllerServiceNode controllerService = controllerServiceDAO.getControllerService(controllerSeriveId);
+ public Authorizable getControllerServiceReferencingComponent(String controllerServiceId, String id) {
+     final ControllerServiceNode controllerService = controllerServiceDAO.getControllerService(controllerServiceId);
  final ControllerServiceReference referencingComponents = controllerService.getReferences();
  final ConfiguredComponent reference = findControllerServiceReferencingComponent(referencingComponents, id);
@@ -276,7 +276,7 @@ public class StandardReportingTaskDAO extends ComponentDAO implements ReportingT
      reportingTask.setName(name);
  }
  if (isNotNull(schedulingPeriod)) {
-     reportingTask.setScheduldingPeriod(schedulingPeriod);
+     reportingTask.setSchedulingPeriod(schedulingPeriod);
  }
  if (isNotNull(annotationData)) {
      reportingTask.setAnnotationData(annotationData);
@@ -41,7 +41,7 @@ public class NiFiTestLoginIdentityProvider implements LoginIdentityProvider {
  */
  public NiFiTestLoginIdentityProvider() {
      users = new HashMap<>();
-     users.put("user@nifi", "whateve");
+     users.put("user@nifi", "whatever");
      users.put("unregistered-user@nifi", "password");
  }
@@ -54,7 +54,7 @@ public final class DownloadableContent {
  /**
  * The content stream.
  *
- * @return the intput stream of the content
+ * @return the input stream of the content
  */
  public InputStream getContent() {
      return content;
@@ -117,7 +117,7 @@ public class ContentViewerController extends HttpServlet {
      return;
  } catch (final Exception e) {
      request.setAttribute("title", "Error");
-     request.setAttribute("messages", "An unexcepted error has occurred: " + e.getMessage());
+     request.setAttribute("messages", "An unexpected error has occurred: " + e.getMessage());

  // forward to the error page
  final ServletContext viewerContext = servletContext.getContext("/nifi");

@@ -147,7 +147,7 @@ public class ContentViewerController extends HttpServlet {
      return;
  }

- // buffer the content to support reseting in case we need to detect the content type or char encoding
+ // buffer the content to support resetting in case we need to detect the content type or char encoding
  try (final BufferedInputStream bis = new BufferedInputStream(downloadableContent.getContent());) {
      final String mimeType;
      final String normalizedMimeType;
@@ -130,9 +130,9 @@ public class ByteFilteringOutputStream extends FilterOutputStream {
  }

  public boolean matches(final byte[] candidate, final int offset, final int length) {
-     final boolean finsihedReplacing = (numMatches >= maxMatches && maxMatches > -1);
+     final boolean finishedReplacing = (numMatches >= maxMatches && maxMatches > -1);

-     if (finsihedReplacing || (length != toReplace.length)) {
+     if (finishedReplacing || (length != toReplace.length)) {
          return false;
      }
@@ -16,9 +16,6 @@
  */
  package org.apache.nifi.util.hive;

- /**
- * Created by mburgess on 5/4/16.
- */
  public class AuthenticationFailedException extends Exception {
      public AuthenticationFailedException(String reason, Exception cause) {
          super(reason, cause);
@@ -64,7 +64,7 @@ public abstract class AbstractIgniteProcessor extends AbstractProcessor {

  /**
  * Get ignite instance
- * @return iginte instance
+ * @return ignite instance
  */
  protected Ignite getIgnite() {
      return ignite;
@@ -30,9 +30,9 @@
  Once message is constructed it is sent to a pre-configured JMS Destination.
  Standard <a href="http://docs.spring.io/spring-integration/api/org/springframework/integration/jms/JmsHeaders.html">JMS Headers</a>
  will be extracted from the FlowFile and set on <i>javax.jms.Message</i> as JMS headers while other
- FlowFile attributes will be set as properties of <i>javax.jms.Message</i>. Upon success the incoming FlowFile is transfered
+ FlowFile attributes will be set as properties of <i>javax.jms.Message</i>. Upon success the incoming FlowFile is transferred
  to the <i>success</i> Relationship and upon failure FlowFile is
- penalized and transfered to the <i>failure</i> Relationship.
+ penalized and transferred to the <i>failure</i> Relationship.
  </p>
  <h2>Configuration Details</h2>
  <p>
@@ -38,7 +38,7 @@
  If the property is not set, the entire contents of the FlowFile
  will be sent as a single message. When using the demarcator, if some messages are
  successfully sent but other messages fail to send, the resulting FlowFile will be
- considered a failed FlowFuile and will have additional attributes to that effect.
+ considered a failed FlowFile and will have additional attributes to that effect.
  One of such attributes is 'failed.last.idx' which indicates the index of the last message
  that was successfully ACKed by Kafka. (if no demarcator is used the value of this index will be -1).
  This will allow PublishKafka to only re-send un-ACKed messages on the next re-try.
@@ -121,7 +121,7 @@ public class EmbeddedKafka {
  logger.info("Starting Kafka server");
  this.kafkaServer.startup();

- logger.info("Embeded Kafka is started at localhost:" + this.kafkaServer.serverConfig().port()
+ logger.info("Embedded Kafka is started at localhost:" + this.kafkaServer.serverConfig().port()
      + ". Zookeeper connection string: " + this.kafkaConfig.getProperty("zookeeper.connect"));
  this.started = true;
  }

@@ -137,7 +137,7 @@ public class EmbeddedKafka {
  this.kafkaServer.awaitShutdown();
  logger.info("Shutting down Zookeeper server");
  this.shutdownZookeeper();
- logger.info("Embeded Kafka is shut down.");
+ logger.info("Embedded Kafka is shut down.");
  this.cleanupKafkaWorkDir();
  this.started = false;
  }
@@ -121,7 +121,7 @@ public class EmbeddedKafka {
  logger.info("Starting Kafka server");
  this.kafkaServer.startup();

- logger.info("Embeded Kafka is started at localhost:" + this.kafkaServer.serverConfig().port()
+ logger.info("Embedded Kafka is started at localhost:" + this.kafkaServer.serverConfig().port()
      + ". Zookeeper connection string: " + this.kafkaConfig.getProperty("zookeeper.connect"));
  this.started = true;
  }

@@ -137,7 +137,7 @@ public class EmbeddedKafka {
  this.kafkaServer.awaitShutdown();
  logger.info("Shutting down Zookeeper server");
  this.shutdownZookeeper();
- logger.info("Embeded Kafka is shut down.");
+ logger.info("Embedded Kafka is shut down.");
  this.cleanupKafkaWorkDir();
  this.started = false;
  }
@@ -227,7 +227,7 @@ public abstract class ConsumerLease implements Closeable, ConsumerRebalanceListe
  if (bundleMap.size() > 200) { //a magic number - the number of simultaneous bundles to track
      return false;
  } else {
-     return totalFlowFiles < 15000;//admittedlly a magic number - good candidate for processor property
+     return totalFlowFiles < 15000;//admittedly a magic number - good candidate for processor property
  }
  }
@@ -38,7 +38,7 @@
  If the property is not set, the entire contents of the FlowFile
  will be sent as a single message. When using the demarcator, if some messages are
  successfully sent but other messages fail to send, the resulting FlowFile will be
- considered a failed FlowFuile and will have additional attributes to that effect.
+ considered a failed FlowFile and will have additional attributes to that effect.
  One of such attributes is 'failed.last.idx' which indicates the index of the last message
  that was successfully ACKed by Kafka. (if no demarcator is used the value of this index will be -1).
  This will allow PublishKafka to only re-send un-ACKed messages on the next re-try.
@@ -121,7 +121,7 @@ public class EmbeddedKafka {
  logger.info("Starting Kafka server");
  this.kafkaServer.startup();

- logger.info("Embeded Kafka is started at localhost:" + this.kafkaServer.serverConfig().port()
+ logger.info("Embedded Kafka is started at localhost:" + this.kafkaServer.serverConfig().port()
      + ". Zookeeper connection string: " + this.kafkaConfig.getProperty("zookeeper.connect"));
  this.started = true;
  }

@@ -137,7 +137,7 @@ public class EmbeddedKafka {
  this.kafkaServer.awaitShutdown();
  logger.info("Shutting down Zookeeper server");
  this.shutdownZookeeper();
- logger.info("Embeded Kafka is shut down.");
+ logger.info("Embedded Kafka is shut down.");
  this.cleanupKafkaWorkDir();
  this.started = false;
  }
@@ -29,7 +29,7 @@
  <p>
  When the processor is triggered, it sends the SNMP request and gets the information associated to request OID(s). Once response is received
  from the SNMP agent, a FlowFile is constructed. The FlowFile content is empty, all the information is written in the FlowFile attributes.
- In case of a single GET request, the properties associated to the received PDU are transfered into the FlowFile as attributes. In case of a
+ In case of a single GET request, the properties associated to the received PDU are transferred into the FlowFile as attributes. In case of a
  WALK request, only the couples "OID/value" are transferred into the FlowFile as attributes. SNMP attributes names are prefixed with
  <i>snmp$</i> prefix.
  </p>
@@ -28,7 +28,7 @@
  </p>
  <p>
  The processor constructs SNMP Set requests by extracting information from FlowFile attributes. The processor is looking for attributes
- prefixed with <i>snmp$</i>. If such an attribute is found, the attribute name is splitted using the $ character. The second element must
+ prefixed with <i>snmp$</i>. If such an attribute is found, the attribute name is split using the $ character. The second element must
  respect the OID format to be considered as a valid OID. If there is a third element, it must represents the SMI Syntax integer value of
  the type of data associated to the given OID to allow a correct conversion. If there is no third element, the value is considered as a String
  and the value will be sent as an OctetString object.
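Note: for illustration (not part of this commit), an attribute following this convention might be:

    snmp$1.3.6.1.2.1.1.5.0$4 = nifi-host-01

where 1.3.6.1.2.1.1.5.0 is the standard sysName OID and 4 is assumed to be the SMI syntax value for OCTET STRING (as in SNMP4J's syntax constants), so the value would be sent as an OctetString.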
@@ -42,7 +42,7 @@ public class LocationUtil {
  static final Pattern LOCATION_PATTERN = Pattern.compile(LOCATION);

  /**
- * One or more locations separated by a comma, exampple: lon,lat,lon,lat,lon,lat,lon,lat
+ * One or more locations separated by a comma, example: lon,lat,lon,lat,lon,lat,lon,lat
  */
  static final Pattern LOCATIONS_PATTERN = Pattern.compile("(?:" + LOCATION + ")(?:,\\s*" + LOCATION + ")*");
@@ -35,7 +35,7 @@
  In this case NiFi simply plays the role of the runtime host.</li>
  <li><i>One way (NiFi -> Spring or Spring -> NiFi). </i> - This depends on existence of pre-defined message channel in Spring
  Application Context. The name of the channel should be “fromNiFi” and the type <i>org.springframework.messaging.MessageChannel.</i></li>
- <li><i>By-directional (NiFi -> Spring -> Nifi or Spring -> NiFi -> Spring)</i> - This depends on existence of two channels
+ <li><i>By-directional (NiFi -> Spring -> NiFi or Spring -> NiFi -> Spring)</i> - This depends on existence of two channels
  in Spring Application Context. One channel receives messages from NiFi with name “fromNiFi” and type <i>org.springframework.messaging.MessageChannel</i>i>
  and another is to receive messages from Spring with name “toNiFi” and type <i>org.springframework.messaging.PollableChannel.</i></li>
  </ul>

@@ -87,7 +87,7 @@
  Defaults to 0 (don't wait). FlowFiles that were successfully sent to Spring will be removed from session while FlowFiles that could not be sent to
  Spring will be routed to <i>failure</i> relationship.</li>
  <li><i>Receive Timeout</i> - [OPTIONAL] - the wait time for receiving messages from Spring Application Context. Only required if NiFi plans to receive data from Spring.
- Defaults to 0 (don't wait). FlowFile is created if and only if a message is successfully received from Spring. It is then transfered to <i>success</i> relationship.</li>
+ Defaults to 0 (don't wait). FlowFile is created if and only if a message is successfully received from Spring. It is then transferred to <i>success</i> relationship.</li>
  </ul>
  </p>
  </body>
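Note: a minimal sketch of the two channels this documentation describes, using Spring Integration's channel classes (the bean names "fromNiFi" and "toNiFi" come from the documentation; the configuration class itself is hypothetical):

    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;
    import org.springframework.integration.channel.DirectChannel;
    import org.springframework.integration.channel.QueueChannel;
    import org.springframework.messaging.MessageChannel;
    import org.springframework.messaging.PollableChannel;

    @Configuration
    public class NiFiChannelConfig {
        @Bean(name = "fromNiFi")
        public MessageChannel fromNiFi() {
            return new DirectChannel(); // MessageChannel that NiFi sends into
        }

        @Bean(name = "toNiFi")
        public PollableChannel toNiFi() {
            return new QueueChannel(100); // PollableChannel that NiFi polls
        }
    }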
@@ -58,7 +58,7 @@ import java.util.concurrent.TimeUnit;

  /**
  * <p>
- * This processor reads files in as text according to the specified character set and it outputs another text file according to the given characeter set. The character sets supported depend on the
+ * This processor reads files in as text according to the specified character set and it outputs another text file according to the given character set. The character sets supported depend on the
  * version of the JRE and is platform specific. In addition, the JVM can be expanded with additional character sets to support. More information on which character sets are supported can be found in
  * the JDK documentation under the docs directory in the following path: ....\technotes\guides\intl\encoding.doc.html</p>
  *
@@ -86,10 +86,10 @@ import org.codehaus.jackson.node.JsonNodeFactory;
  @WritesAttribute(attribute="fragment.index", description="The position of this FlowFile in the list of outgoing FlowFiles that were all derived from the same incoming FlowFile. This can be "
      + "used in conjunction with the fragment.identifier and fragment.count attributes to know which FlowFiles originated from the same incoming FlowFile and in what order the SQL "
      + "FlowFiles were produced"),
- @WritesAttribute(attribute="sql.args.N.type", description="The output SQL statements are parameterized in order to avoid SQL Injection Attacks. The types of the Parameters "
+ @WritesAttribute(attribute="sql.args.N.type", description="The output SQL statements are parametrized in order to avoid SQL Injection Attacks. The types of the Parameters "
      + "to use are stored in attributes named sql.args.1.type, sql.args.2.type, sql.args.3.type, and so on. The type is a number representing a JDBC Type constant. "
      + "Generally, this is useful only for software to read and interpret but is added so that a processor such as PutSQL can understand how to interpret the values."),
- @WritesAttribute(attribute="sql.args.N.value", description="The output SQL statements are parameterized in order to avoid SQL Injection Attacks. The values of the Parameters "
+ @WritesAttribute(attribute="sql.args.N.value", description="The output SQL statements are parametrized in order to avoid SQL Injection Attacks. The values of the Parameters "
      + "to use are stored in the attributes named sql.args.1.value, sql.args.2.value, sql.args.3.value, and so on. Each of these attributes has a corresponding "
      + "sql.args.N.type attribute that indicates how the value should be interpreted when inserting it into the database.")
  })
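Note: to illustrate the attribute convention these annotations describe (the values below are invented), a generated SQL FlowFile might carry pairs such as:

    sql.args.1.type  = 12    (java.sql.Types.VARCHAR)
    sql.args.1.value = Mark
    sql.args.2.type  = 4     (java.sql.Types.INTEGER)
    sql.args.2.value = 42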
@@ -119,7 +119,7 @@ public class ExtractText extends AbstractProcessor {

  public static final PropertyDescriptor CASE_INSENSITIVE = new PropertyDescriptor.Builder()
      .name("Enable Case-insensitive Matching")
-     .description("Indicates that two characters match even if they are in a different case. Can also be specified via the embeded flag (?i).")
+     .description("Indicates that two characters match even if they are in a different case. Can also be specified via the embedded flag (?i).")
      .required(true)
      .allowableValues("true", "false")
      .defaultValue("false")

@@ -127,7 +127,7 @@ public class ExtractText extends AbstractProcessor {

  public static final PropertyDescriptor COMMENTS = new PropertyDescriptor.Builder()
      .name("Permit Whitespace and Comments in Pattern")
-     .description("In this mode, whitespace is ignored, and embedded comments starting with # are ignored until the end of a line. Can also be specified via the embeded flag (?x).")
+     .description("In this mode, whitespace is ignored, and embedded comments starting with # are ignored until the end of a line. Can also be specified via the embedded flag (?x).")
      .required(true)
      .allowableValues("true", "false")
      .defaultValue("false")

@@ -135,7 +135,7 @@ public class ExtractText extends AbstractProcessor {

  public static final PropertyDescriptor DOTALL = new PropertyDescriptor.Builder()
      .name("Enable DOTALL Mode")
-     .description("Indicates that the expression '.' should match any character, including a line terminator. Can also be specified via the embeded flag (?s).")
+     .description("Indicates that the expression '.' should match any character, including a line terminator. Can also be specified via the embedded flag (?s).")
      .required(true)
      .allowableValues("true", "false")
      .defaultValue("false")

@@ -152,7 +152,7 @@ public class ExtractText extends AbstractProcessor {
  public static final PropertyDescriptor MULTILINE = new PropertyDescriptor.Builder()
      .name("Enable Multiline Mode")
      .description("Indicates that '^' and '$' should match just after and just before a line terminator or end of sequence, instead of "
-         + "only the begining or end of the entire input. Can also be specified via the embeded flag (?m).")
+         + "only the beginning or end of the entire input. Can also be specified via the embeded flag (?m).")
      .required(true)
      .allowableValues("true", "false")
      .defaultValue("false")

@@ -161,7 +161,7 @@ public class ExtractText extends AbstractProcessor {
  public static final PropertyDescriptor UNICODE_CASE = new PropertyDescriptor.Builder()
      .name("Enable Unicode-aware Case Folding")
      .description("When used with 'Enable Case-insensitive Matching', matches in a manner consistent with the Unicode Standard. Can also "
-         + "be specified via the embeded flag (?u).")
+         + "be specified via the embedded flag (?u).")
      .required(true)
      .allowableValues("true", "false")
      .defaultValue("false")

@@ -170,7 +170,7 @@ public class ExtractText extends AbstractProcessor {
  public static final PropertyDescriptor UNICODE_CHARACTER_CLASS = new PropertyDescriptor.Builder()
      .name("Enable Unicode Predefined Character Classes")
      .description("Specifies conformance with the Unicode Technical Standard #18: Unicode Regular Expression Annex C: Compatibility "
-         + "Properties. Can also be specified via the embeded flag (?U).")
+         + "Properties. Can also be specified via the embedded flag (?U).")
      .required(true)
      .allowableValues("true", "false")
      .defaultValue("false")

@@ -179,7 +179,7 @@ public class ExtractText extends AbstractProcessor {
  public static final PropertyDescriptor UNIX_LINES = new PropertyDescriptor.Builder()
      .name("Enable Unix Lines Mode")
      .description("Indicates that only the '\n' line terminator is recognized in the behavior of '.', '^', and '$'. Can also be specified "
-         + "via the embeded flag (?d).")
+         + "via the embedded flag (?d).")
      .required(true)
      .allowableValues("true", "false")
      .defaultValue("false")
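Note: the embedded flags referenced in these property descriptions are standard java.util.regex behavior; a small self-contained demonstration (not code from this commit):

    import java.util.regex.Pattern;

    public class EmbeddedFlagDemo {
        public static void main(String[] args) {
            // (?i) is the in-pattern equivalent of Pattern.CASE_INSENSITIVE
            System.out.println(Pattern.matches("(?i)nifi", "NiFi"));               // true
            // (?s) lets '.' match line terminators (Pattern.DOTALL)
            System.out.println(Pattern.matches("(?s)a.b", "a\nb"));                // true
            // (?m) makes '^' and '$' match at line boundaries (Pattern.MULTILINE)
            System.out.println(Pattern.compile("(?m)^b$").matcher("a\nb").find()); // true
        }
    }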
@@ -103,7 +103,7 @@ public class FetchFile extends AbstractProcessor {
      .build();
  static final PropertyDescriptor FILE_NOT_FOUND_LOG_LEVEL = new PropertyDescriptor.Builder()
      .name("Log level when file not found")
-     .description("Log level to use in case the file does not exist when the processor is trigerred")
+     .description("Log level to use in case the file does not exist when the processor is triggered")
      .allowableValues(LogLevel.values())
      .defaultValue(LogLevel.ERROR.toString())
      .required(true)
@@ -117,7 +117,7 @@ public abstract class GetFileTransfer extends AbstractProcessor {
  BlockingQueue<FileInfo> fileQueue = fileQueueRef.get();
  final ComponentLog logger = getLogger();

- // dont do the listing if there are already 100 or more items in our queue
+ // do not do the listing if there are already 100 or more items in our queue
  // 100 is really just a magic number that seems to work out well in practice
  FileTransfer transfer = null;
  if (System.currentTimeMillis() >= nextPollTime && (fileQueue == null || fileQueue.size() < 100) && listingLock.tryLock()) {
@@ -184,7 +184,7 @@ public class FTPUtils {
  * @param client - the ftp client with an already active connection
  * @param dirPath - the path to change or create directories to
  * @param createDirs - if true will attempt to create any missing directories
- * @param processor - used solely for targetting logging output.
+ * @param processor - used solely for targeting logging output.
  * @throws IOException if any access problem occurs
  */
  public static void changeWorkingDirectory(final FTPClient client, final String dirPath, final boolean createDirs, final Processor processor) throws IOException {

@@ -204,7 +204,7 @@ public class FTPUtils {
  } catch (final IOException ioe) {
      logger.debug(processor + " could not change directory to '" + forwardPaths + "' from '" + currentWorkingDirectory + "' so trying the hard way.");
  }
- if (!dirExists) { //coulnd't navigate directly...begin hard work
+ if (!dirExists) { //couldn't navigate directly...begin hard work
  final Deque<String> stack = new LinkedList<>();
  File fakeFile = new File(dir.getPath());
  do {
@@ -25,7 +25,7 @@ import java.security.SecureRandom;
  * This password hashing system tries to thwart off-line password
  * cracking using a computationally-intensive hashing algorithm,
  * based on Bruce Schneier's Blowfish cipher. The work factor of
- * the algorithm is parameterised, so it can be increased as
+ * the algorithm is parametrized, so it can be increased as
  * computers get faster.
  * <p/>
  * Usage is really simple. To hash a password for the first time,
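Note: the javadoc's usage example continues beyond this hunk; as a hedged sketch of the conventional API this class documents (method names follow the widely used jBCrypt implementation it appears to derive from, and are an assumption here):

    // Hash a password; gensalt(12) selects a work factor of 2^12 rounds,
    // the knob the javadoc above says can be increased as computers get faster.
    String hashed = BCrypt.hashpw("secret", BCrypt.gensalt(12));

    // Later, verify a candidate password against the stored hash.
    boolean ok = BCrypt.checkpw("secret", hashed);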
@@ -90,7 +90,7 @@ public class MonitorMemoryTest {
  public void doValidate(String threshold) throws Exception {
      CapturingLogger capturingLogger = this.wrapAndReturnCapturingLogger();
      ReportingTaskNode reportingTask = fc.createReportingTask(MonitorMemory.class.getName());
-     reportingTask.setScheduldingPeriod("1 sec");
+     reportingTask.setSchedulingPeriod("1 sec");
      reportingTask.setProperty(MonitorMemory.MEMORY_POOL_PROPERTY.getName(), "PS Old Gen");
      reportingTask.setProperty(MonitorMemory.REPORTING_INTERVAL.getName(), "100 millis");
      reportingTask.setProperty(MonitorMemory.THRESHOLD_PROPERTY.getName(), threshold);
@@ -102,7 +102,7 @@ public class Criteria {
      throw new IllegalArgumentException("New rule order does not account for all known rules or contains unknown rules.");
  }

- // create the new rule lookup - using a LinkedHashMap to perserve insertion order
+ // create the new rule lookup - using a LinkedHashMap to preserve insertion order
  final Map<String, Rule> newRuleLookup = new LinkedHashMap<>();
  for (final String ruleId : newRuleOrder) {
      newRuleLookup.put(ruleId, rules.get(ruleId));
@@ -194,7 +194,7 @@

  <p>
  Deleting attributes is a simple as providing a regular expression for attribute names to be deleted. This can be a simple regular expression that will
- match a single attribute or more complex regular expression to match a group of similarly named attributes or even seveal individual attribute names.
+ match a single attribute or more complex regular expression to match a group of similarly named attributes or even several individual attribute names.
  </p>
  <ul>
  <li><strong>lastUser</strong> - will delete an attribute with the name "lastUser".
@@ -96,7 +96,7 @@ public class EventSubscribeXmlRenderingCallbackTest {
  }

  @Test
- public void testUnuccessfulRender() {
+ public void testUnsuccessfulRender() {
  String large = "abcde";
  handle = ConsumeWindowsEventLogTest.mockEventHandles(wEvtApi, kernel32, Arrays.asList(large)).get(0);
  eventSubscribeXmlRenderingCallback.onEvent(WEvtApi.EvtSubscribeNotifyAction.DELIVER, null, handle);