NIFI-10: Updated FetchHDFS, FetchFileTransfer to use new FETCH provenance event

This commit is contained in:
Mark Payne 2015-10-26 17:14:29 -04:00
parent 17006335e5
commit 1c1738670c
2 changed files with 2 additions and 3 deletions

View File

@@ -111,7 +111,7 @@ public class FetchHDFS extends AbstractHadoopProcessor {
flowFile = session.importFrom(inStream, flowFile);
stopWatch.stop();
getLogger().info("Successfully received content from {} for {} in {}", new Object[] {uri, flowFile, stopWatch.getDuration()});
session.getProvenanceReporter().modifyContent(flowFile, "Fetched content from " + uri, stopWatch.getDuration(TimeUnit.MILLISECONDS));
session.getProvenanceReporter().fetch(flowFile, uri.toString(), stopWatch.getDuration(TimeUnit.MILLISECONDS));
session.transfer(flowFile, REL_SUCCESS);
} catch (final FileNotFoundException | AccessControlException e) {
getLogger().error("Failed to retrieve content from {} for {} due to {}; routing to failure", new Object[] {uri, flowFile, e});

View File

@@ -278,8 +278,7 @@ public abstract class FetchFileTransfer extends AbstractProcessor {
flowFile = session.putAllAttributes(flowFile, attributes);
// emit provenance event and transfer FlowFile
session.getProvenanceReporter().modifyContent(flowFile, "Content replaced with content from " + protocolName + "://" + host + ":" + port + "/" + filename,
stopWatch.getElapsed(TimeUnit.MILLISECONDS));
session.getProvenanceReporter().fetch(flowFile, protocolName + "://" + host + ":" + port + "/" + filename, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
session.transfer(flowFile, REL_SUCCESS);
// it is critical that we commit the session before moving/deleting the remote file. Otherwise, we could have a situation where