NIFI-4387 Adding record tag to Parquet processors

Signed-off-by: Pierre Villard <pierre.villard.fr@gmail.com>

This closes #2156.
Bryan Bende 2017-09-15 14:17:43 -04:00 committed by Pierre Villard
parent eac47e90cb
commit 66479464be
2 changed files with 2 additions and 2 deletions

@@ -39,7 +39,7 @@ import java.io.IOException;
 @SupportsBatching
 @InputRequirement(InputRequirement.Requirement.INPUT_REQUIRED)
-@Tags({"parquet", "hadoop", "HDFS", "get", "ingest", "fetch", "source", "restricted"})
+@Tags({"parquet", "hadoop", "HDFS", "get", "ingest", "fetch", "source", "restricted", "record"})
 @CapabilityDescription("Reads from a given Parquet file and writes records to the content of the flow file using " +
 "the selected record writer. The original Parquet file will remain unchanged, and the content of the flow file " +
 "will be replaced with records of the selected type. This processor can be used with ListHDFS or ListFile to obtain " +

@@ -53,7 +53,7 @@ import java.util.Collections;
 import java.util.List;
 @InputRequirement(InputRequirement.Requirement.INPUT_REQUIRED)
-@Tags({"put", "parquet", "hadoop", "HDFS", "filesystem", "restricted"})
+@Tags({"put", "parquet", "hadoop", "HDFS", "filesystem", "restricted", "record"})
 @CapabilityDescription("Reads records from an incoming FlowFile using the provided Record Reader, and writes those records " +
 "to a Parquet file. The schema for the Parquet file must be provided in the processor properties. This processor will " +
 "first write a temporary dot file and upon successfully writing every record to the dot file, it will rename the " +