mirror of https://github.com/apache/nifi.git
NIFI-3366 This closes #2332. Added parent/child flowfile relationship between the incoming flowfile and the files that are moved from the input directory to the output directory.
Updated to allow tests to check for evaluation of properties that support expression language.
Fixed a bug where changeOwner attempted to operate on the original file rather than the moved/copied file.
Added license header to MoveHDFSTest.java.
Added a test for moving a directory of files that contains a subdirectory, ensuring non-recursive behavior.
Noted in the processor description that the operation is non-recursive when a directory is used as input.
Added RAT exclude for the test resource .dotfile to pom.xml.

Signed-off-by: joewitt <joewitt@apache.org>
parent 3731fbee88
commit 600586d6be
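For background on the parent/child change described in the commit message: a NiFi processor makes newly created FlowFiles children of an incoming FlowFile by passing that FlowFile to ProcessSession.create(). A minimal sketch of the pattern this commit applies in MoveHDFS; the listFilesToMove() helper is hypothetical, and REL_SUCCESS stands for the processor's success relationship:

    // Each file moved out of the input directory is emitted as a child of the
    // incoming FlowFile, so provenance records the parent/child relationship.
    public void onTrigger(final ProcessContext context, final ProcessSession session) {
        final FlowFile parentFlowFile = session.get();
        if (parentFlowFile == null) {
            return;
        }
        for (final String filename : listFilesToMove()) {           // hypothetical helper
            FlowFile child = session.create(parentFlowFile);        // child inherits lineage from the parent
            child = session.putAttribute(child, "filename", filename);
            session.transfer(child, REL_SUCCESS);
        }
        session.remove(parentFlowFile);                             // drop the parent once its children are emitted
    }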
pom.xml

@@ -66,4 +66,19 @@
            <artifactId>nifi-properties</artifactId>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.rat</groupId>
                <artifactId>apache-rat-plugin</artifactId>
                <configuration>
                    <excludes combine.children="append">
                        <exclude>src/test/resources/testdata/.dotfile</exclude>
                    </excludes>
                </configuration>
            </plugin>
        </plugins>
    </build>

</project>
MoveHDFS.java

@@ -32,6 +32,7 @@ import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.regex.Pattern;

import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -61,7 +62,7 @@ import org.apache.nifi.util.StopWatch;
 * This processor renames files on HDFS.
 */
@Tags({"hadoop", "HDFS", "put", "move", "filesystem", "restricted", "moveHDFS"})
@CapabilityDescription("Rename existing files on Hadoop Distributed File System (HDFS)")
@CapabilityDescription("Rename existing files or a directory of files (non-recursive) on Hadoop Distributed File System (HDFS).")
@ReadsAttribute(attribute = "filename", description = "The name of the file written to HDFS comes from the value of this attribute.")
@WritesAttributes({
        @WritesAttribute(attribute = "filename", description = "The name of the file written to HDFS is stored in this attribute."),
@@ -70,7 +71,6 @@ import org.apache.nifi.util.StopWatch;
public class MoveHDFS extends AbstractHadoopProcessor {

    // static global
    public static final int MAX_WORKING_QUEUE_SIZE = 25000;
    public static final String REPLACE_RESOLUTION = "replace";
    public static final String IGNORE_RESOLUTION = "ignore";
    public static final String FAIL_RESOLUTION = "fail";
@@ -84,9 +84,6 @@ public class MoveHDFS extends AbstractHadoopProcessor {
    public static final AllowableValue FAIL_RESOLUTION_AV = new AllowableValue(FAIL_RESOLUTION, FAIL_RESOLUTION,
            "Failing to rename a file routes to failure.");

    public static final String BUFFER_SIZE_KEY = "io.file.buffer.size";
    public static final int BUFFER_SIZE_DEFAULT = 4096;

    public static final String ABSOLUTE_HDFS_PATH_ATTRIBUTE = "absolute.hdfs.path";

    // relationships
@@ -119,7 +116,7 @@ public class MoveHDFS extends AbstractHadoopProcessor {

    public static final PropertyDescriptor INPUT_DIRECTORY_OR_FILE = new PropertyDescriptor.Builder()
            .name("Input Directory or File")
            .description("The HDFS directory from which files should be read, or a single file to read")
            .description("The HDFS directory from which files should be read, or a single file to read.")
            .defaultValue("${path}").addValidator(StandardValidators.ATTRIBUTE_EXPRESSION_LANGUAGE_VALIDATOR)
            .expressionLanguageSupported(true).build();

@@ -219,7 +216,6 @@ public class MoveHDFS extends AbstractHadoopProcessor {
            session.transfer(parentFlowFile, REL_FAILURE);
            return;
        }
        session.remove(parentFlowFile);

        List<Path> files = new ArrayList<Path>();

@@ -243,7 +239,7 @@ public class MoveHDFS extends AbstractHadoopProcessor {
                }
            }
        } catch (Exception e) {
            getLogger().warn("Could not add to processing queue due to {}", new Object[] { e });
            getLogger().warn("Could not add to processing queue due to {}", new Object[]{e.getMessage()}, e);
        } finally {
            queueLock.unlock();
        }
@@ -268,6 +264,7 @@ public class MoveHDFS extends AbstractHadoopProcessor {
            filePathQueue.drainTo(files);
            if (files.isEmpty()) {
                // nothing to do!
                session.remove(parentFlowFile);
                context.yield();
                return;
            }
@@ -275,7 +272,7 @@ public class MoveHDFS extends AbstractHadoopProcessor {
            queueLock.unlock();
        }

        processBatchOfFiles(files, context, session);
        processBatchOfFiles(files, context, session, parentFlowFile);

        queueLock.lock();
        try {
@@ -283,10 +280,14 @@ public class MoveHDFS extends AbstractHadoopProcessor {
        } finally {
            queueLock.unlock();
        }

        session.remove(parentFlowFile);
    }

    protected void processBatchOfFiles(final List<Path> files, final ProcessContext context,
            final ProcessSession session) {
            final ProcessSession session, FlowFile parentFlowFile) {
        Preconditions.checkState(parentFlowFile != null, "No parent flowfile for this batch was provided");

        // process the batch of files
        final Configuration conf = getConfiguration();
        final FileSystem hdfs = getFileSystem();
@@ -294,7 +295,7 @@ public class MoveHDFS extends AbstractHadoopProcessor {

        if (conf == null || ugi == null) {
            getLogger().error("Configuration or UserGroupInformation not configured properly");
            session.transfer(session.get(), REL_FAILURE);
            session.transfer(parentFlowFile, REL_FAILURE);
            context.yield();
            return;
        }
@@ -304,7 +305,7 @@ public class MoveHDFS extends AbstractHadoopProcessor {
        ugi.doAs(new PrivilegedAction<Object>() {
            @Override
            public Object run() {
                FlowFile flowFile = session.create();
                FlowFile flowFile = session.create(parentFlowFile);
                try {
                    final String originalFilename = file.getName();
                    final Path configuredRootOutputDirPath = processorConfig.getOutputDirectory();
@@ -364,15 +365,13 @@ public class MoveHDFS extends AbstractHadoopProcessor {
                                break;// copy was successful
                            }
                        }
                        Thread.sleep(200L);// try waiting to let whatever
                                           // might cause rename failure to
                                           // resolve
                        Thread.sleep(200L);// try waiting to let whatever might cause rename failure to resolve
                    }
                    if (!moved) {
                        throw new ProcessException("Could not move file " + file + " to its final filename");
                    }

                    changeOwner(context, hdfs, file);
                    changeOwner(context, hdfs, newFile);
                    final String outputPath = newFile.toString();
                    final String newFilename = newFile.getName();
                    final String hdfsPath = newFile.getParent().toString();
@@ -419,7 +418,7 @@ public class MoveHDFS extends AbstractHadoopProcessor {
                hdfs.setOwner(name, owner, group);
            }
        } catch (Exception e) {
            getLogger().warn("Could not change owner or group of {} on HDFS due to {}", new Object[] { name, e });
            getLogger().warn("Could not change owner or group of {} on HDFS due to {}", new Object[]{name, e.getMessage()}, e);
        }
    }

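The changeOwner fix above comes down to applying ownership to the destination of the rename rather than to the original path, which no longer exists once the move succeeds. A minimal sketch against the Hadoop FileSystem API, with illustrative method and parameter names that are not taken from this commit:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch: after a successful rename, chown the file at its new location.
    static void moveAndChown(final FileSystem hdfs, final Path source, final Path target,
                             final String owner, final String group) throws IOException {
        if (hdfs.rename(source, target)) {
            // Calling setOwner on `source` here would target a path that no longer exists;
            // `target` is the moved/copied file that should receive the new owner and group.
            hdfs.setOwner(target, owner, group);
        }
    }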
@@ -441,10 +440,7 @@ public class MoveHDFS extends AbstractHadoopProcessor {
        for (final FileStatus file : hdfs.listStatus(inputPath)) {
            final Path canonicalFile = file.getPath();

            if (!filesVisited.add(canonicalFile)) { // skip files we've
                                                    // already
                                                    // seen (may be looping
                                                    // directory links)
            if (!filesVisited.add(canonicalFile)) { // skip files we've already seen (may be looping directory links)
                continue;
            }

@@ -474,9 +470,9 @@ public class MoveHDFS extends AbstractHadoopProcessor {
        ProcessorConfiguration(final ProcessContext context) {
            conflictResolution = context.getProperty(CONFLICT_RESOLUTION).getValue();
            operation = context.getProperty(OPERATION).getValue();
            final String inputDirValue = context.getProperty(INPUT_DIRECTORY_OR_FILE).getValue();
            final String inputDirValue = context.getProperty(INPUT_DIRECTORY_OR_FILE).evaluateAttributeExpressions().getValue();
            inputRootDirPath = new Path(inputDirValue);
            final String outputDirValue = context.getProperty(OUTPUT_DIRECTORY).getValue();
            final String outputDirValue = context.getProperty(OUTPUT_DIRECTORY).evaluateAttributeExpressions().getValue();
            outputRootDirPath = new Path(outputDirValue);
            final String fileFilterRegex = context.getProperty(FILE_FILTER_REGEX).getValue();
            fileFilterPattern = (fileFilterRegex == null) ? null : Pattern.compile(fileFilterRegex);
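The Input Directory or File and Output Directory properties support NiFi Expression Language, and the change above is what makes the processor actually evaluate it before use. A minimal sketch of the read-side pattern, assuming a processor's ProcessContext is in scope:

    // Resolve Expression Language before using the property value. Reading the property
    // without evaluateAttributeExpressions() returns the raw "${path}" string; evaluating
    // it first resolves the expression (optionally against a FlowFile's attributes when
    // evaluateAttributeExpressions(flowFile) is used instead).
    String inputDir = context.getProperty(INPUT_DIRECTORY_OR_FILE)
            .evaluateAttributeExpressions()
            .getValue();
    Path inputPath = new Path(inputDir);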
MoveHDFSTest.java

@@ -1,15 +1,22 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.nifi.processors.hadoop;

import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;

import org.apache.commons.io.FileUtils;
import org.apache.nifi.components.ValidationResult;
import org.apache.nifi.hadoop.KerberosProperties;
import org.apache.nifi.processor.ProcessContext;
@@ -23,11 +30,23 @@ import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;

import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class MoveHDFSTest {

    private static final String OUTPUT_DIRECTORY = "src/test/resources/testdataoutput";
    private static final String INPUT_DIRECTORY = "src/test/resources/testdata";
    private static final String DOT_FILE_PATH = "src/test/resources/testdata/.testfordotfiles";
    private static final String OUTPUT_DIRECTORY = "target/test-data-output";
    private static final String TEST_DATA_DIRECTORY = "src/test/resources/testdata";
    private static final String INPUT_DIRECTORY = "target/test-data-input";
    private NiFiProperties mockNiFiProperties;
    private KerberosProperties kerberosProperties;

@@ -40,38 +59,13 @@ public class MoveHDFSTest {

    @After
    public void teardown() {
        File inputDirectory = new File(INPUT_DIRECTORY);
        File outputDirectory = new File(OUTPUT_DIRECTORY);
        if (inputDirectory.exists()) {
            Assert.assertTrue("Could not delete input directory: " + inputDirectory, FileUtils.deleteQuietly(inputDirectory));
        }
        if (outputDirectory.exists()) {
            if (outputDirectory.isDirectory()) {
                moveFilesFromOutputDirectoryToInput();
            }
            outputDirectory.delete();
        }
        removeDotFile();
    }

    private void removeDotFile() {
        File dotFile = new File(DOT_FILE_PATH);
        if (dotFile.exists()) {
            dotFile.delete();
        }
    }

    private void moveFilesFromOutputDirectoryToInput() {
        File folder = new File(OUTPUT_DIRECTORY);
        for (File file : folder.listFiles()) {
            if (file.isFile()) {
                String path = file.getAbsolutePath();
                if(!path.endsWith(".crc")) {
                    String newPath = path.replaceAll("testdataoutput", "testdata");
                    File newFile = new File(newPath);
                    if (!newFile.exists()) {
                        file.renameTo(newFile);
                    }
                } else {
                    file.delete();
                }
            }
            Assert.assertTrue("Could not delete output directory: " + outputDirectory, FileUtils.deleteQuietly(outputDirectory));
        }
    }

@@ -114,13 +108,13 @@ public class MoveHDFSTest {
    }

    @Test
    public void testOnScheduledShouldRunCleanly() {
    public void testOnScheduledShouldRunCleanly() throws IOException {
        FileUtils.copyDirectory(new File(TEST_DATA_DIRECTORY), new File(INPUT_DIRECTORY));
        MoveHDFS proc = new TestableMoveHDFS(kerberosProperties);
        TestRunner runner = TestRunners.newTestRunner(proc);
        runner.setProperty(MoveHDFS.INPUT_DIRECTORY_OR_FILE, INPUT_DIRECTORY);
        runner.setProperty(MoveHDFS.OUTPUT_DIRECTORY, OUTPUT_DIRECTORY);
        runner.enqueue(new byte[0]);
        runner.setValidateExpressionUsage(false);
        runner.run();
        List<MockFlowFile> flowFiles = runner.getFlowFilesForRelationship(MoveHDFS.REL_SUCCESS);
        runner.assertAllFlowFilesTransferred(MoveHDFS.REL_SUCCESS);
@@ -128,15 +122,30 @@ public class MoveHDFSTest {
    }

    @Test
    public void testDotFileFilter() throws IOException {
        createDotFile();
    public void testDotFileFilterIgnore() throws IOException {
        FileUtils.copyDirectory(new File(TEST_DATA_DIRECTORY), new File(INPUT_DIRECTORY));
        MoveHDFS proc = new TestableMoveHDFS(kerberosProperties);
        TestRunner runner = TestRunners.newTestRunner(proc);
        runner.setProperty(MoveHDFS.INPUT_DIRECTORY_OR_FILE, INPUT_DIRECTORY);
        runner.setProperty(MoveHDFS.OUTPUT_DIRECTORY, OUTPUT_DIRECTORY);
        runner.setProperty(MoveHDFS.IGNORE_DOTTED_FILES, "true");
        runner.enqueue(new byte[0]);
        runner.run();
        List<MockFlowFile> flowFiles = runner.getFlowFilesForRelationship(MoveHDFS.REL_SUCCESS);
        runner.assertAllFlowFilesTransferred(MoveHDFS.REL_SUCCESS);
        Assert.assertEquals(7, flowFiles.size());
        Assert.assertTrue(new File(INPUT_DIRECTORY, ".dotfile").exists());
    }

    @Test
    public void testDotFileFilterInclude() throws IOException {
        FileUtils.copyDirectory(new File(TEST_DATA_DIRECTORY), new File(INPUT_DIRECTORY));
        MoveHDFS proc = new TestableMoveHDFS(kerberosProperties);
        TestRunner runner = TestRunners.newTestRunner(proc);
        runner.setProperty(MoveHDFS.INPUT_DIRECTORY_OR_FILE, INPUT_DIRECTORY);
        runner.setProperty(MoveHDFS.OUTPUT_DIRECTORY, OUTPUT_DIRECTORY);
        runner.setProperty(MoveHDFS.IGNORE_DOTTED_FILES, "false");
        runner.enqueue(new byte[0]);
        runner.setValidateExpressionUsage(false);
        runner.run();
        List<MockFlowFile> flowFiles = runner.getFlowFilesForRelationship(MoveHDFS.REL_SUCCESS);
        runner.assertAllFlowFilesTransferred(MoveHDFS.REL_SUCCESS);
@@ -144,14 +153,14 @@ public class MoveHDFSTest {
    }

    @Test
    public void testFileFilterRegex() {
    public void testFileFilterRegex() throws IOException {
        FileUtils.copyDirectory(new File(TEST_DATA_DIRECTORY), new File(INPUT_DIRECTORY));
        MoveHDFS proc = new TestableMoveHDFS(kerberosProperties);
        TestRunner runner = TestRunners.newTestRunner(proc);
        runner.setProperty(MoveHDFS.INPUT_DIRECTORY_OR_FILE, INPUT_DIRECTORY);
        runner.setProperty(MoveHDFS.OUTPUT_DIRECTORY, OUTPUT_DIRECTORY);
        runner.setProperty(MoveHDFS.FILE_FILTER_REGEX, ".*\\.gz");
        runner.enqueue(new byte[0]);
        runner.setValidateExpressionUsage(false);
        runner.run();
        List<MockFlowFile> flowFiles = runner.getFlowFilesForRelationship(MoveHDFS.REL_SUCCESS);
        runner.assertAllFlowFilesTransferred(MoveHDFS.REL_SUCCESS);
@@ -159,22 +168,69 @@ public class MoveHDFSTest {
    }

    @Test
    public void testSingleFileAsInput() {
    public void testSingleFileAsInputCopy() throws IOException {
        FileUtils.copyDirectory(new File(TEST_DATA_DIRECTORY), new File(INPUT_DIRECTORY));
        MoveHDFS proc = new TestableMoveHDFS(kerberosProperties);
        TestRunner runner = TestRunners.newTestRunner(proc);
        runner.setProperty(MoveHDFS.INPUT_DIRECTORY_OR_FILE, INPUT_DIRECTORY + "/randombytes-1");
        runner.setProperty(MoveHDFS.OUTPUT_DIRECTORY, OUTPUT_DIRECTORY);
        runner.setProperty(MoveHDFS.OPERATION, "copy");
        runner.enqueue(new byte[0]);
        runner.run();
        List<MockFlowFile> flowFiles = runner.getFlowFilesForRelationship(MoveHDFS.REL_SUCCESS);
        runner.assertAllFlowFilesTransferred(MoveHDFS.REL_SUCCESS);
        Assert.assertEquals(1, flowFiles.size());
        Assert.assertTrue(new File(INPUT_DIRECTORY, "randombytes-1").exists());
        Assert.assertTrue(new File(OUTPUT_DIRECTORY, "randombytes-1").exists());
    }

    @Test
    public void testSingleFileAsInputMove() throws IOException {
        FileUtils.copyDirectory(new File(TEST_DATA_DIRECTORY), new File(INPUT_DIRECTORY));
        MoveHDFS proc = new TestableMoveHDFS(kerberosProperties);
        TestRunner runner = TestRunners.newTestRunner(proc);
        runner.setProperty(MoveHDFS.INPUT_DIRECTORY_OR_FILE, INPUT_DIRECTORY + "/randombytes-1");
        runner.setProperty(MoveHDFS.OUTPUT_DIRECTORY, OUTPUT_DIRECTORY);
        runner.enqueue(new byte[0]);
        runner.setValidateExpressionUsage(false);
        runner.run();
        List<MockFlowFile> flowFiles = runner.getFlowFilesForRelationship(MoveHDFS.REL_SUCCESS);
        runner.assertAllFlowFilesTransferred(MoveHDFS.REL_SUCCESS);
        Assert.assertEquals(1, flowFiles.size());
        Assert.assertFalse(new File(INPUT_DIRECTORY, "randombytes-1").exists());
        Assert.assertTrue(new File(OUTPUT_DIRECTORY, "randombytes-1").exists());
    }

    private void createDotFile() throws IOException {
        File dotFile = new File(DOT_FILE_PATH);
        dotFile.createNewFile();
    @Test
    public void testDirectoryWithSubDirectoryAsInputMove() throws IOException {
        FileUtils.copyDirectory(new File(TEST_DATA_DIRECTORY), new File(INPUT_DIRECTORY));
        File subdir = new File(INPUT_DIRECTORY, "subdir");
        FileUtils.copyDirectory(new File(TEST_DATA_DIRECTORY), subdir);
        MoveHDFS proc = new TestableMoveHDFS(kerberosProperties);
        TestRunner runner = TestRunners.newTestRunner(proc);
        runner.setProperty(MoveHDFS.INPUT_DIRECTORY_OR_FILE, INPUT_DIRECTORY);
        runner.setProperty(MoveHDFS.OUTPUT_DIRECTORY, OUTPUT_DIRECTORY);
        runner.enqueue(new byte[0]);
        runner.run();
        List<MockFlowFile> flowFiles = runner.getFlowFilesForRelationship(MoveHDFS.REL_SUCCESS);
        runner.assertAllFlowFilesTransferred(MoveHDFS.REL_SUCCESS);
        Assert.assertEquals(7, flowFiles.size());
        Assert.assertTrue(new File(INPUT_DIRECTORY).exists());
        Assert.assertTrue(subdir.exists());
    }

    @Test
    public void testEmptyInputDirectory() throws IOException {
        MoveHDFS proc = new TestableMoveHDFS(kerberosProperties);
        TestRunner runner = TestRunners.newTestRunner(proc);
        Files.createDirectories(Paths.get(INPUT_DIRECTORY));
        runner.setProperty(MoveHDFS.INPUT_DIRECTORY_OR_FILE, INPUT_DIRECTORY);
        runner.setProperty(MoveHDFS.OUTPUT_DIRECTORY, OUTPUT_DIRECTORY);
        runner.enqueue(new byte[0]);
        Assert.assertEquals(0, Files.list(Paths.get(INPUT_DIRECTORY)).count());
        runner.run();
        List<MockFlowFile> flowFiles = runner.getFlowFilesForRelationship(MoveHDFS.REL_SUCCESS);
        runner.assertAllFlowFilesTransferred(MoveHDFS.REL_SUCCESS);
        Assert.assertEquals(0, flowFiles.size());
    }

    private static class TestableMoveHDFS extends MoveHDFS {
