mirror of https://github.com/apache/nifi.git
NIFI-9360 Update PutHDFS to handle filesystems which do not support getAclStatus()
This closes #5505

Signed-off-by: David Handermann <exceptionfactory@apache.org>
parent f01fb17555
commit 362a243e0f
@@ -21,6 +21,7 @@ import com.github.benmanes.caffeine.cache.Caffeine;
 import com.google.common.base.Throwables;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntryScope;
@@ -280,7 +281,6 @@ public class PutHDFS extends AbstractHadoopProcessor {
         FlowFile putFlowFile = flowFile;
         try {
             final Path dirPath = getNormalizedPath(context, DIRECTORY, putFlowFile);
-            checkAclStatus(getAclStatus(dirPath));
             final String conflictResponse = context.getProperty(CONFLICT_RESOLUTION).getValue();
             final long blockSize = getBlockSize(context, session, putFlowFile, dirPath);
             final int bufferSize = getBufferSize(context, session, putFlowFile);
@@ -298,14 +298,22 @@ public class PutHDFS extends AbstractHadoopProcessor {
                 // Create destination directory if it does not exist
                 boolean targetDirCreated = false;
                 try {
-                    if (!hdfs.getFileStatus(dirPath).isDirectory()) {
+                    final FileStatus fileStatus = hdfs.getFileStatus(dirPath);
+                    if (!fileStatus.isDirectory()) {
                         throw new IOException(dirPath.toString() + " already exists and is not a directory");
                     }
+                    if (fileStatus.hasAcl()) {
+                        checkAclStatus(getAclStatus(dirPath));
+                    }
                 } catch (FileNotFoundException fe) {
                     targetDirCreated = hdfs.mkdirs(dirPath);
                     if (!targetDirCreated) {
                         throw new IOException(dirPath.toString() + " could not be created");
                     }
+                    final FileStatus fileStatus = hdfs.getFileStatus(dirPath);
+                    if (fileStatus.hasAcl()) {
+                        checkAclStatus(getAclStatus(dirPath));
+                    }
                     changeOwner(context, hdfs, dirPath, flowFile);
                 }

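The net effect of the two hunks above is that PutHDFS no longer calls getAclStatus() unconditionally on the target directory; it first checks the directory's FileStatus and only queries ACLs when one is actually present, so filesystems that do not support getAclStatus() are never asked for it. Below is a minimal, self-contained sketch of that guard pattern, assuming Hadoop 3.x where FileStatus.hasAcl() is available; the class AclGuard and the helper name aclStatusIfPresent are illustrative, not part of PutHDFS.

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclStatus;

// Illustrative helper: only query ACLs when the directory's FileStatus reports
// that an ACL is present, so filesystems without getAclStatus() support are
// never asked for one.
final class AclGuard {
    private AclGuard() {
    }

    static Optional<AclStatus> aclStatusIfPresent(final FileSystem hdfs, final Path dirPath) throws IOException {
        final FileStatus fileStatus = hdfs.getFileStatus(dirPath);
        if (fileStatus.hasAcl()) {
            // hasAcl() is true only when the filesystem reported ACL bits, so this call is expected to succeed
            return Optional.of(hdfs.getAclStatus(dirPath));
        }
        return Optional.empty();
    }
}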
@@ -463,7 +471,7 @@ public class PutHDFS extends AbstractHadoopProcessor {
         return aclCache.get(dirPath, fn -> {
             try {
                 return hdfs.getAclStatus(dirPath);
-            } catch (IOException e) {
+            } catch (final IOException e) {
                 throw new UncheckedIOException(String.format("Unable to query ACL for directory [%s]", dirPath), e);
             }
         });
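For context, the catch clause changed above sits inside a Caffeine cache loader, so the checked IOException has to be rethrown unchecked; the hunk itself only adds the final modifier to the catch parameter. A minimal sketch of that cached-lookup pattern follows, assuming a Cache<Path, AclStatus> similar to PutHDFS's aclCache; the class name CachedAclLookup and the cache sizing are assumptions made for the example, not copied from the processor.

import java.io.IOException;
import java.io.UncheckedIOException;

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclStatus;

// Illustrative sketch of caching AclStatus per directory with Caffeine.
final class CachedAclLookup {
    private final Cache<Path, AclStatus> aclCache = Caffeine.newBuilder()
            .maximumSize(20)   // assumed bound for this example
            .build();

    AclStatus getAclStatus(final FileSystem hdfs, final Path dirPath) {
        return aclCache.get(dirPath, fn -> {
            try {
                return hdfs.getAclStatus(dirPath);
            } catch (final IOException e) {
                // Cache loaders cannot throw checked exceptions, so wrap and rethrow unchecked
                throw new UncheckedIOException(String.format("Unable to query ACL for directory [%s]", dirPath), e);
            }
        });
    }
}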
@@ -730,7 +730,7 @@ public class PutHDFSTest {
     }

     private FileStatus newDir(Path p) {
-        return new FileStatus(1L, true, 3, 128 * 1024 * 1024, 1523456000000L, 1523457000000L, perms((short) 0755), "owner", "group", p);
+        return new FileStatus(1L, true, 3, 128 * 1024 * 1024, 1523456000000L, 1523457000000L, perms((short) 0755), "owner", "group", (Path)null, p, true, false, false);
     }

     @Override
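The test hunk switches the stubbed directory to the FileStatus constructor variant that carries the hasAcl, isEncrypted, and isErasureCoded flags, which lets the mocked filesystem report an ACL and exercise the new hasAcl() branch. A small standalone sketch of that constructor is shown below; the class, method name, and the FsPermission literal here are illustrative stand-ins for the test's perms((short) 0755) helper, not code from PutHDFSTest.

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

// Illustrative sketch: the extended FileStatus constructor carries attribute flags,
// so a stubbed directory can report hasAcl() = true in a test.
final class FileStatusWithAclSketch {
    static FileStatus newDirWithAcl(final Path p) {
        return new FileStatus(1L, true, 3, 128 * 1024 * 1024, 1523456000000L, 1523457000000L,
                new FsPermission((short) 0755), "owner", "group",
                (Path) null,  // symlink
                p,            // path
                true,         // hasAcl
                false,        // isEncrypted
                false);       // isErasureCoded
    }

    public static void main(final String[] args) {
        System.out.println(newDirWithAcl(new Path("/target")).hasAcl());  // prints true
    }
}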