NIFI-14108 Removed the instantiation of extraneous Object arrays in varargs methods. (#9594)

Signed-off-by: Lucas Ottersbach <ottersbach@apache.org>
Author: dan-s1 (committed by GitHub)
Date: 2024-12-25 03:48:29 -05:00
parent 7051aca832
commit 8c2ceac6a4
19 changed files with 45 additions and 45 deletions
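
Every hunk below applies the same mechanical change: the logging methods involved (SLF4J-style APIs such as NiFi's ComponentLog) declare their message arguments as an Object... varargs parameter, so wrapping the arguments in an explicit Object[] is redundant; the compiler assembles the array either way, and an Object[] passed directly binds to the varargs parameter unchanged. A minimal before/after sketch using plain SLF4J, with a hypothetical FileListingReporter class for illustration:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class FileListingReporter {

    private static final Logger logger = LoggerFactory.getLogger(FileListingReporter.class);

    public void report(long millis, int totalItems, int newItems) {
        // Before: the caller allocates the varargs array explicitly.
        // The Object[] binds directly to the Object... parameter of
        // info(String format, Object... arguments), so this call is
        // equivalent to the one below, just noisier.
        logger.info("Obtained file listing in {} milliseconds; listing had {} items, {} of which were new",
                new Object[]{millis, totalItems, newItems});

        // After: the compiler builds the varargs array implicitly.
        logger.info("Obtained file listing in {} milliseconds; listing had {} items, {} of which were new",
                millis, totalItems, newItems);
    }
}

Because both calls resolve to the same varargs method, the diffs below change no behavior; they only drop the explicit array at each call site.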


@@ -312,6 +312,6 @@ public abstract class GetFileTransfer extends AbstractProcessor {
         }
         getLogger().info("Obtained file listing in {} milliseconds; listing had {} items, {} of which were new",
-                new Object[]{millis, listing.size(), newItems});
+                millis, listing.size(), newItems);
     }
 }


@@ -371,7 +371,7 @@ public abstract class AbstractHadoopProcessor extends AbstractProcessor implemen
         final Path workingDir = fs.getWorkingDirectory();
         getLogger().info("Initialized a new HDFS File System with working dir: {} default block size: {} default replication: {} config: {}",
-                new Object[]{workingDir, fs.getDefaultBlockSize(workingDir), fs.getDefaultReplication(workingDir), config.toString()});
+                workingDir, fs.getDefaultBlockSize(workingDir), fs.getDefaultReplication(workingDir), config.toString());
         return new HdfsResources(config, fs, ugi, kerberosUser);
     }


@@ -132,7 +132,7 @@ public class RollbackOnFailure {
         if (adjusted != null) {
             if (logger.isDebugEnabled()) {
                 logger.debug("Adjusted {} to {} based on context rollbackOnFailure={}, processedCount={}, transactional={}",
-                        new Object[]{t, adjusted, c.isRollbackOnFailure(), c.getProcessedCount(), c.isTransactional()});
+                        t, adjusted, c.isRollbackOnFailure(), c.getProcessedCount(), c.isTransactional());
             }
             return adjusted;
         }


@@ -377,20 +377,20 @@ public class MoveHDFS extends AbstractHadoopProcessor {
                     // Remove destination file (newFile) to replace
                     if (hdfs.delete(newFile, false)) {
                         getLogger().info("deleted {} in order to replace with the contents of {}",
-                                new Object[]{newFile, flowFile});
+                                newFile, flowFile);
                     }
                     break;
                 case IGNORE_RESOLUTION:
                     session.transfer(flowFile, REL_SUCCESS);
                     getLogger().info(
                             "transferring {} to success because file with same name already exists",
-                            new Object[]{flowFile});
+                            flowFile);
                     return null;
                 case FAIL_RESOLUTION:
                     session.transfer(session.penalize(flowFile), REL_FAILURE);
                     getLogger().warn(
                             "penalizing {} and routing to failure because file with same name already exists",
-                            new Object[]{flowFile});
+                            flowFile);
                     return null;
                 default:
                     break;


@@ -107,7 +107,7 @@ public class SequenceFileWriterImpl implements SequenceFileWriter {
             }
         });
         logger.debug("Wrote Sequence File {} ({}).",
-                new Object[]{sequenceFilename, watch.calculateDataRate(flowFile.getSize())});
+                sequenceFilename, watch.calculateDataRate(flowFile.getSize()));
         return sfFlowFile;
     }


@@ -325,7 +325,7 @@ public class SiteToSiteProvenanceReportingTask extends AbstractSiteToSiteReporti
                 final long transferMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
                 getLogger().info("Successfully sent {} Provenance Events to destination in {} ms; Transaction ID = {}; First Event ID = {}",
-                        new Object[] {events.size(), transferMillis, transactionId, events.get(0).getEventId()});
+                        events.size(), transferMillis, transactionId, events.get(0).getEventId());
             } catch (final Exception e) {
                 if (transaction != null) {
                     transaction.error();


@@ -188,7 +188,7 @@ public class SiteToSiteStatusReportingTask extends AbstractSiteToSiteReportingTa
                 final long transferMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
                 getLogger().info("Successfully sent {} Status Records to destination in {} ms; Transaction ID = {}",
-                        new Object[]{jsonArray.size(), transferMillis, transactionId});
+                        jsonArray.size(), transferMillis, transactionId);
                 fromIndex = toIndex;
                 toIndex = Math.min(fromIndex + batchSize, jsonArray.size());


@@ -188,7 +188,7 @@ public class PutSplunkHTTP extends SplunkAPICall {
                     // fall-through
                 default:
                     getLogger().error("Putting data into Splunk was not successful. Response with header {} was: {}",
-                            new Object[] {responseMessage.getStatus(), IOUtils.toString(responseMessage.getContent(), "UTF-8")});
+                            responseMessage.getStatus(), IOUtils.toString(responseMessage.getContent(), "UTF-8"));
             }
         } catch (final Exception e) {
             getLogger().error("Error during communication with Splunk: {}", e.getMessage(), e);


@@ -170,7 +170,7 @@ public class QuerySplunkIndexingStatus extends SplunkAPICall {
                 if (!sentAt.isPresent() || !ackId.isPresent()) {
                     getLogger().error("Flow file ({}) attributes {} and {} are expected to be set using 64-bit integer values!",
-                            new Object[]{flowFile.getId(), SplunkAPICall.RESPONDED_AT_ATTRIBUTE, SplunkAPICall.ACKNOWLEDGEMENT_ID_ATTRIBUTE});
+                            flowFile.getId(), SplunkAPICall.RESPONDED_AT_ATTRIBUTE, SplunkAPICall.ACKNOWLEDGEMENT_ID_ATTRIBUTE);
                     session.transfer(flowFile, RELATIONSHIP_FAILURE);
                 } else {
                     undetermined.put(ackId.get(), flowFile);


@@ -436,7 +436,7 @@ public abstract class AbstractQueryDatabaseTable extends AbstractDatabaseFetchPr
                         sqlWriter.updateCounters(session);
                         logger.debug("{} contains {} records; transferring to 'success'",
-                                new Object[]{fileToProcess, nrOfRows.get()});
+                                fileToProcess, nrOfRows.get());
                         session.getProvenanceReporter().receive(fileToProcess, jdbcURL, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                         resultSetFlowFiles.add(fileToProcess);


@@ -158,7 +158,7 @@ public class ConvertCharacterSet extends AbstractProcessor {
             session.getProvenanceReporter().modifyContent(flowFile, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
             logger.info("successfully converted characters from {} to {} for {}",
-                    new Object[]{inputCharset, outputCharset, flowFile});
+                    inputCharset, outputCharset, flowFile);
             session.transfer(flowFile, REL_SUCCESS);
         } catch (final Exception e) {
             throw new ProcessException(e);


@@ -417,7 +417,7 @@ public class DebugFlow extends AbstractProcessor {
                 } catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException e) {
                     if (logger.isErrorEnabled()) {
                         logger.error("{} unexpected exception throwing DebugFlow exception: {}",
-                                new Object[] {this, e});
+                                this, e);
                     }
                 }
             } else {
@@ -461,8 +461,8 @@ public class DebugFlow extends AbstractProcessor {
                     if (flowFileCurrSuccess < flowFileMaxSuccess) {
                         flowFileCurrSuccess += 1;
                         logger.info("DebugFlow transferring to success file={} UUID={}",
-                                new Object[] {ff.getAttribute(CoreAttributes.FILENAME.key()),
-                                        ff.getAttribute(CoreAttributes.UUID.key())});
+                                ff.getAttribute(CoreAttributes.FILENAME.key()),
+                                ff.getAttribute(CoreAttributes.UUID.key()));
                         session.transfer(ff, REL_SUCCESS);
                         break;
                     } else {
@@ -475,8 +475,8 @@ public class DebugFlow extends AbstractProcessor {
                     if (flowFileCurrFailure < flowFileMaxFailure) {
                         flowFileCurrFailure += 1;
                         logger.info("DebugFlow transferring to failure file={} UUID={}",
-                                new Object[] {ff.getAttribute(CoreAttributes.FILENAME.key()),
-                                        ff.getAttribute(CoreAttributes.UUID.key())});
+                                ff.getAttribute(CoreAttributes.FILENAME.key()),
+                                ff.getAttribute(CoreAttributes.UUID.key()));
                         session.transfer(ff, REL_FAILURE);
                         break;
                     } else {
@@ -489,8 +489,8 @@ public class DebugFlow extends AbstractProcessor {
                     if (flowFileCurrRollback < flowFileMaxRollback) {
                         flowFileCurrRollback += 1;
                         logger.info("DebugFlow rolling back (no penalty) file={} UUID={}",
-                                new Object[] {ff.getAttribute(CoreAttributes.FILENAME.key()),
-                                        ff.getAttribute(CoreAttributes.UUID.key())});
+                                ff.getAttribute(CoreAttributes.FILENAME.key()),
+                                ff.getAttribute(CoreAttributes.UUID.key()));
                         session.rollback();
                         break;
                     } else {
@@ -503,8 +503,8 @@ public class DebugFlow extends AbstractProcessor {
                    if (flowFileCurrYield < flowFileMaxYield) {
                        flowFileCurrYield += 1;
                        logger.info("DebugFlow yielding file={} UUID={}",
-                                new Object[] {ff.getAttribute(CoreAttributes.FILENAME.key()),
-                                        ff.getAttribute(CoreAttributes.UUID.key())});
+                                ff.getAttribute(CoreAttributes.FILENAME.key()),
+                                ff.getAttribute(CoreAttributes.UUID.key()));
                        session.rollback();
                        context.yield();
                        return;
@@ -518,8 +518,8 @@ public class DebugFlow extends AbstractProcessor {
                     if (flowFileCurrPenalty < flowFileMaxPenalty) {
                         flowFileCurrPenalty += 1;
                         logger.info("DebugFlow rolling back (with penalty) file={} UUID={}",
-                                new Object[] {ff.getAttribute(CoreAttributes.FILENAME.key()),
-                                        ff.getAttribute(CoreAttributes.UUID.key())});
+                                ff.getAttribute(CoreAttributes.FILENAME.key()),
+                                ff.getAttribute(CoreAttributes.UUID.key()));
                         session.rollback(true);
                         break;
                     } else {
@@ -533,8 +533,8 @@ public class DebugFlow extends AbstractProcessor {
                         flowFileCurrException += 1;
                         String message = "forced by " + this.getClass().getName();
                         logger.info("DebugFlow throwing NPE file={} UUID={}",
-                                new Object[] {ff.getAttribute(CoreAttributes.FILENAME.key()),
-                                        ff.getAttribute(CoreAttributes.UUID.key())});
+                                ff.getAttribute(CoreAttributes.FILENAME.key()),
+                                ff.getAttribute(CoreAttributes.UUID.key()));
                         RuntimeException rte;
                         try {
                             rte = flowFileExceptionClass.getConstructor(String.class).newInstance(message);
@@ -542,7 +542,7 @@ public class DebugFlow extends AbstractProcessor {
                         } catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException e) {
                             if (logger.isErrorEnabled()) {
                                 logger.error("{} unexpected exception throwing DebugFlow exception: {}",
-                                        new Object[] {this, e});
+                                        this, e);
                             }
                         }
                     } else {


@@ -251,7 +251,7 @@ public class FetchFile extends AbstractProcessor {
         Path filePath = file.toPath();
         if (!Files.exists(filePath) && !Files.notExists(filePath)) { // see https://docs.oracle.com/javase/tutorial/essential/io/check.html for more details
             getLogger().log(levelFileNotFound, "Could not fetch file {} from file system for {} because the existence of the file cannot be verified; routing to failure",
-                    new Object[] {file, flowFile});
+                    file, flowFile);
             session.transfer(session.penalize(flowFile), REL_FAILURE);
             return;
         } else if (!Files.exists(filePath)) {
@@ -265,7 +265,7 @@ public class FetchFile extends AbstractProcessor {
         final String user = System.getProperty("user.name");
         if (!isReadable(file)) {
             getLogger().log(levelPermDenied, "Could not fetch file {} from file system for {} due to user {} not having sufficient permissions to read the file; routing to permission.denied",
-                    new Object[] {file, flowFile, user});
+                    file, flowFile, user);
             session.getProvenanceReporter().route(flowFile, REL_PERMISSION_DENIED);
             session.transfer(session.penalize(flowFile), REL_PERMISSION_DENIED);
             return;
@@ -281,7 +281,7 @@ public class FetchFile extends AbstractProcessor {
             if (targetDir.exists() && (!isWritable(targetDir) || !isDirectory(targetDir))) {
                 getLogger().error("Could not fetch file {} from file system for {} because Completion Strategy is configured to move the original file to {}, "
                         + "but that is not a directory or user {} does not have permissions to write to that directory",
-                        new Object[] {file, flowFile, targetDir, user});
+                        file, flowFile, targetDir, user);
                 session.transfer(flowFile, REL_FAILURE);
                 return;
             }
@@ -305,7 +305,7 @@ public class FetchFile extends AbstractProcessor {
                 if (targetFile.exists()) {
                     getLogger().error("Could not fetch file {} from file system for {} because Completion Strategy is configured to move the original file to {}, "
                             + "but a file with name {} already exists in that directory and the Move Conflict Strategy is configured for failure",
-                            new Object[] {file, flowFile, targetDir, file.getName()});
+                            file, flowFile, targetDir, file.getName());
                     session.transfer(flowFile, REL_FAILURE);
                     return;
                 }


@@ -148,7 +148,7 @@ public class HandleHttpResponse extends AbstractProcessor {
         final HttpServletResponse response = contextMap.getResponse(contextIdentifier);
         if (response == null) {
             getLogger().error("Failed to respond to HTTP request for {} because FlowFile had an '{}' attribute of {} but could not find an HTTP Response Object for this identifier",
-                    new Object[]{flowFile, HTTPUtils.HTTP_CONTEXT_ID, contextIdentifier});
+                    flowFile, HTTPUtils.HTTP_CONTEXT_ID, contextIdentifier);
             session.transfer(flowFile, REL_FAILURE);
             return;
         }


@@ -1058,7 +1058,7 @@ public class ListFile extends AbstractListProcessor<FileInfo> {
             if (duration > maxDiskOperationMillis) {
                 final String fullPath = getFullPath();
                 logger.warn("This Processor completed action {} on {} in {} milliseconds, which exceeds the configured threshold of {} milliseconds",
-                        new Object[] {operation, fullPath, duration, maxDiskOperationMillis});
+                        operation, fullPath, duration, maxDiskOperationMillis);
             }
             if (logger.isTraceEnabled()) {
@@ -1332,7 +1332,7 @@ public class ListFile extends AbstractListProcessor<FileInfo> {
                 }
                 logger.warn("This Processor has currently spent {} milliseconds performing the {} action on {}, which exceeds the configured threshold of {} milliseconds",
-                        new Object[] {activeTime, activeOperation.getOperation(), fullPath, maxDiskOperationMillis});
+                        activeTime, activeOperation.getOperation(), fullPath, maxDiskOperationMillis);
             }
         }
@@ -1347,7 +1347,7 @@ public class ListFile extends AbstractListProcessor<FileInfo> {
             if (activeMillis > maxListingMillis) {
                 final String fullPath = activeDirectory.isEmpty() ? "the base directory" : activeDirectory;
                 logger.warn("This processor has currently spent {} milliseconds performing the listing of {}, which exceeds the configured threshold of {} milliseconds",
-                        new Object[] {activeMillis, fullPath, maxListingMillis});
+                        activeMillis, fullPath, maxListingMillis);
             }
         }
     }


@@ -818,7 +818,7 @@ public class MergeContent extends BinFiles {
                     tarEntry.setMode(Integer.parseInt(permissionsVal));
                 } catch (final Exception e) {
                     getLogger().debug("Attribute {} of {} is set to {}; expected 3 digits between 0-7, so ignoring",
-                            new Object[] {TAR_PERMISSIONS_ATTRIBUTE, flowFile, permissionsVal});
+                            TAR_PERMISSIONS_ATTRIBUTE, flowFile, permissionsVal);
                 }
             }
@@ -829,7 +829,7 @@ public class MergeContent extends BinFiles {
                     tarEntry.setModTime(Instant.parse(modTime).toEpochMilli());
                 } catch (final Exception e) {
                     getLogger().debug("Attribute {} of {} is set to {}; expected ISO8601 format, so ignoring",
-                            new Object[] {TAR_MODIFIED_TIME, flowFile, modTime});
+                            TAR_MODIFIED_TIME, flowFile, modTime);
                 }
             }
@@ -1037,7 +1037,7 @@ public class MergeContent extends BinFiles {
                         // check that we're appending to the same schema
                         if (!schema.get().equals(reader.getSchema())) {
                             getLogger().debug("Input file {} has different schema - {}, not merging",
-                                    new Object[] {flowFile.getId(), reader.getSchema().getName()});
+                                    flowFile.getId(), reader.getSchema().getName());
                             canMerge = false;
                             unmerged.add(flowFile);
                         }
@@ -1053,7 +1053,7 @@ public class MergeContent extends BinFiles {
                             // Ignore additional metadata if ALL_COMMON is the strategy, otherwise don't merge
                             if (!METADATA_STRATEGY_ALL_COMMON.getValue().equals(metadataStrategy) || writersMetadatum != null) {
                                 getLogger().debug("Input file {} has different non-reserved metadata, not merging",
-                                        new Object[] {flowFile.getId()});
+                                        flowFile.getId());
                                 canMerge = false;
                                 unmerged.add(flowFile);
                             }
@@ -1069,7 +1069,7 @@ public class MergeContent extends BinFiles {
                         }
                         if (!inputCodec.get().equals(thisCodec)) {
                             getLogger().debug("Input file {} has different codec, not merging",
-                                    new Object[] {flowFile.getId()});
+                                    flowFile.getId());
                             canMerge = false;
                             unmerged.add(flowFile);
                         }


@@ -115,7 +115,7 @@ public class ContentAcknowledgmentServlet extends HttpServlet {
                 logger.info("received {} files/{} bytes from Remote Host: [{}] Port [{}] SubjectDN [{}] in {} milliseconds at a rate of {}; "
                         + "transferring to 'success': {}",
-                        new Object[]{flowFiles.size(), totalFlowFileSize, request.getRemoteHost(), request.getRemotePort(), foundSubject, transferTime, transferRate, flowFiles});
+                        flowFiles.size(), totalFlowFileSize, request.getRemoteHost(), request.getRemotePort(), foundSubject, transferTime, transferRate, flowFiles);
                 final String sendingSubject = foundSubject;
                 final ProcessSession session = timeWrapper.getSession();
@@ -126,13 +126,13 @@ public class ContentAcknowledgmentServlet extends HttpServlet {
                         response.flushBuffer();
                     } catch (final Exception e) {
                         logger.error("Received DELETE for HOLD with ID {} from Remote Host: [{}] Port [{}] SubjectDN [{}]. FlowFiles were released but failed to acknowledge them.",
-                                new Object[]{uuid, request.getRemoteHost(), request.getRemotePort(), sendingSubject, e.toString()});
+                                uuid, request.getRemoteHost(), request.getRemotePort(), sendingSubject, e.toString());
                     }
                 });
             } catch (final Throwable t) {
                 timeWrapper.getSession().rollback();
                 logger.error("Received DELETE for HOLD with ID {} from Remote Host: [{}] Port [{}] SubjectDN [{}], but failed to process the request due to {}",
-                        new Object[]{uuid, request.getRemoteHost(), request.getRemotePort(), foundSubject, t.toString()});
+                        uuid, request.getRemoteHost(), request.getRemotePort(), foundSubject, t.toString());
                 if (logger.isDebugEnabled()) {
                     logger.error("", t);
                 }


@@ -189,7 +189,7 @@ public class StandardHttpContextMap extends AbstractControllerService implements
                 final AsyncContext async = entry.getValue().getAsync();
                 getLogger().warn("Request from {} timed out; responding with SERVICE_UNAVAILABLE",
-                        new Object[]{async.getRequest().getRemoteAddr()});
+                        async.getRequest().getRemoteAddr());
                 ((HttpServletResponse) async.getResponse()).sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE, "Timeout occurred");
                 async.complete();


@@ -372,7 +372,7 @@ public class RestLookupService extends AbstractControllerService implements Reco
             if (getLogger().isDebugEnabled()) {
                 getLogger().debug("Response code {} was returned for coordinate {}",
-                        new Object[]{response.code(), coordinates});
+                        response.code(), coordinates);
             }
             if (!response.isSuccessful(