diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java index 6e82543ca85..0453ca14537 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java @@ -60,6 +60,8 @@ import com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; + /** * This class provides an interface for implementors of a Hadoop file system * (analogous to the VFS of Unix). Applications do not access this class; @@ -72,7 +74,7 @@ import org.slf4j.LoggerFactory; */ @InterfaceAudience.Public @InterfaceStability.Stable -public abstract class AbstractFileSystem { +public abstract class AbstractFileSystem implements PathCapabilities { static final Logger LOG = LoggerFactory.getLogger(AbstractFileSystem.class); /** Recording statistics per a file system class. */ @@ -1371,4 +1373,16 @@ public abstract class AbstractFileSystem { new CompletableFuture<>(), () -> open(path, bufferSize)); } + public boolean hasPathCapability(final Path path, + final String capability) + throws IOException { + switch (validatePathCapabilityArgs(makeQualified(path), capability)) { + case CommonPathCapabilities.FS_SYMLINKS: + // delegate to the existing supportsSymlinks() call. + return supportsSymlinks(); + default: + // the feature is not implemented. 
+ return false; + } + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java index 99aa5d22bab..5e5d29a28bf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java @@ -27,6 +27,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; import java.util.List; +import java.util.Locale; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -42,6 +43,8 @@ import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.LambdaUtils; import org.apache.hadoop.util.Progressable; +import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; + /**************************************************************** * Abstract Checksumed FileSystem. * It provide a basic implementation of a Checksumed FileSystem, @@ -872,4 +875,23 @@ public abstract class ChecksumFileSystem extends FilterFileSystem { public FSDataOutputStreamBuilder appendFile(Path path) { return createDataOutputStreamBuilder(this, path).append(); } + + /** + * Disable those operations which the checksummed FS blocks. + * {@inheritDoc} + */ + @Override + public boolean hasPathCapability(final Path path, final String capability) + throws IOException { + // query the superclass, which triggers argument validation. 
+ final Path p = makeQualified(path); + switch (validatePathCapabilityArgs(p, capability)) { + case CommonPathCapabilities.FS_APPEND: + case CommonPathCapabilities.FS_CONCAT: + return false; + default: + return super.hasPathCapability(p, capability); + } + } + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java new file mode 100644 index 00000000000..31e6bac0cce --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +/** + * Common path capabilities. + */ +public final class CommonPathCapabilities { + + private CommonPathCapabilities() { + } + + /** + * Does the store support + * {@code FileSystem.setAcl(Path, List)}, + * {@code FileSystem.getAclStatus(Path)} + * and related methods? + * Value: {@value}. + */ + public static final String FS_ACLS = "fs.capability.paths.acls"; + + /** + * Does the store support {@code FileSystem.append(Path)}? + * Value: {@value}. 
+ */ + public static final String FS_APPEND = "fs.capability.paths.append"; + + /** + * Does the store support {@code FileSystem.getFileChecksum(Path)}? + * Value: {@value}. + */ + public static final String FS_CHECKSUMS = "fs.capability.paths.checksums"; + + /** + * Does the store support {@code FileSystem.concat(Path, Path[])}? + * Value: {@value}. + */ + public static final String FS_CONCAT = "fs.capability.paths.concat"; + + /** + * Does the store support {@code FileSystem.listCorruptFileBlocks(Path)} ()}? + * Value: {@value}. + */ + public static final String FS_LIST_CORRUPT_FILE_BLOCKS = + "fs.capability.paths.list-corrupt-file-blocks"; + + /** + * Does the store support + * {@code FileSystem.createPathHandle(FileStatus, Options.HandleOpt...)} + * and related methods? + * Value: {@value}. + */ + public static final String FS_PATHHANDLES = "fs.capability.paths.pathhandles"; + + /** + * Does the store support {@code FileSystem.setPermission(Path, FsPermission)} + * and related methods? + * Value: {@value}. + */ + public static final String FS_PERMISSIONS = "fs.capability.paths.permissions"; + + /** + * Does this filesystem connector only support filesystem read operations? + * For example, the {@code HttpFileSystem} is always read-only. + * This is different from "is the specific instance and path read only?", + * which must be determined by checking permissions (where supported), or + * attempting write operations under a path. + * Value: {@value}. + */ + public static final String FS_READ_ONLY_CONNECTOR = + "fs.capability.paths.read-only-connector"; + + /** + * Does the store support snapshots through + * {@code FileSystem.createSnapshot(Path)} and related methods?? + * Value: {@value}. + */ + public static final String FS_SNAPSHOTS = "fs.capability.paths.snapshots"; + + /** + * Does the store support {@code FileSystem.setStoragePolicy(Path, String)} + * and related methods? + * Value: {@value}. 
+ */ + public static final String FS_STORAGEPOLICY = + "fs.capability.paths.storagepolicy"; + + /** + * Does the store support symlinks through + * {@code FileSystem.createSymlink(Path, Path, boolean)} and related methods? + * Value: {@value}. + */ + public static final String FS_SYMLINKS = + "fs.capability.paths.symlinks"; + + /** + * Does the store support {@code FileSystem#truncate(Path, long)} ? + * Value: {@value}. + */ + public static final String FS_TRUNCATE = + "fs.capability.paths.truncate"; + + /** + * Does the store support XAttributes through + * {@code FileSystem#.setXAttr()} and related methods? + * Value: {@value}. + */ + public static final String FS_XATTRS = "fs.capability.paths.xattrs"; + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java index 165c56c3d5c..a8f294f3791 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java @@ -281,4 +281,11 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem { int bufferSize) throws IOException { return fsImpl.openFileWithOptions(path, mandatoryKeys, options, bufferSize); } + + @Override + public boolean hasPathCapability(final Path path, + final String capability) + throws IOException { + return fsImpl.hasPathCapability(path, capability); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index f65074856bf..b2c1369a9c1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -46,6 +46,8 @@ import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.fs.impl.FutureDataInputStreamBuilderImpl; +import org.apache.hadoop.fs.impl.FsLinkResolution; +import org.apache.hadoop.fs.impl.PathCapabilitiesSupport; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; @@ -68,6 +70,8 @@ import org.apache.htrace.core.Tracer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; + /** * The FileContext class provides an interface for users of the Hadoop * file system. It exposes a number of file system operations, e.g. create, @@ -171,7 +175,7 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.Public @InterfaceStability.Stable -public class FileContext { +public class FileContext implements PathCapabilities { public static final Logger LOG = LoggerFactory.getLogger(FileContext.class); /** @@ -2934,4 +2938,21 @@ public class FileContext { }.resolve(FileContext.this, absF); } } + + /** + * Return the path capabilities of the bonded {@code AbstractFileSystem}. + * @param path path to query the capability of. + * @param capability string to query the stream support for. + * @return true iff the capability is supported under that FS. 
+ * @throws IOException path resolution or other IO failure + * @throws IllegalArgumentException invalid arguments + */ + public boolean hasPathCapability(Path path, String capability) + throws IOException { + validatePathCapabilityArgs(path, capability); + return FsLinkResolution.resolve(this, + fixRelativePart(path), + (fs, p) -> fs.hasPathCapability(p, capability)); + } + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index c3be2f21a57..4e9f172a4c7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -88,6 +88,7 @@ import org.slf4j.LoggerFactory; import static com.google.common.base.Preconditions.checkArgument; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*; +import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; /**************************************************************** * An abstract base class for a fairly generic filesystem. 
It @@ -134,7 +135,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*; @InterfaceAudience.Public @InterfaceStability.Stable public abstract class FileSystem extends Configured - implements Closeable, DelegationTokenIssuer { + implements Closeable, DelegationTokenIssuer, PathCapabilities { public static final String FS_DEFAULT_NAME_KEY = CommonConfigurationKeys.FS_DEFAULT_NAME_KEY; public static final String DEFAULT_FS = @@ -720,6 +721,7 @@ public abstract class FileSystem extends Configured * */ protected void checkPath(Path path) { + Preconditions.checkArgument(path != null, "null path"); URI uri = path.toUri(); String thatScheme = uri.getScheme(); if (thatScheme == null) // fs is relative @@ -3259,6 +3261,25 @@ public abstract class FileSystem extends Configured return ret; } + /** + * The base FileSystem implementation generally has no knowledge + * of the capabilities of actual implementations. + * Unless it has a way to explicitly determine the capabilities, + * this method returns false. + * {@inheritDoc} + */ + public boolean hasPathCapability(final Path path, final String capability) + throws IOException { + switch (validatePathCapabilityArgs(makeQualified(path), capability)) { + case CommonPathCapabilities.FS_SYMLINKS: + // delegate to the existing supportsSymlinks() call. + return supportsSymlinks() && areSymlinksEnabled(); + default: + // the feature is not implemented. 
+ return false; + } + } + // making it volatile to be able to do a double checked locking private volatile static boolean FILE_SYSTEMS_LOADED = false; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java index e05c574063f..3bc3cb2e9b0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java @@ -729,4 +729,11 @@ public class FilterFileSystem extends FileSystem { return fs.openFileWithOptions(pathHandle, mandatoryKeys, options, bufferSize); } + + @Override + public boolean hasPathCapability(final Path path, final String capability) + throws IOException { + return fs.hasPathCapability(path, capability); + } + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java index f5430d60261..731a52a7b41 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java @@ -446,4 +446,9 @@ public abstract class FilterFs extends AbstractFileSystem { return myFs.openFileWithOptions(path, mandatoryKeys, options, bufferSize); } + public boolean hasPathCapability(final Path path, + final String capability) + throws IOException { + return myFs.hasPathCapability(path, capability); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java index f7da819ed6c..5f4c4a236e9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java @@ -36,6 +36,8 @@ import java.net.URISyntaxException; import java.net.URLDecoder; import java.util.*; +import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; + /** * This is an implementation of the Hadoop Archive * Filesystem. This archive Filesystem has index files @@ -899,7 +901,22 @@ public class HarFileSystem extends FileSystem { throws IOException { throw new IOException("Har: setPermission not allowed"); } - + + /** + * Declare that this filesystem connector is always read only. + * {@inheritDoc} + */ + @Override + public boolean hasPathCapability(final Path path, final String capability) + throws IOException { + switch (validatePathCapabilityArgs(path, capability)) { + case CommonPathCapabilities.FS_READ_ONLY_CONNECTOR: + return true; + default: + return false; + } + } + /** * Hadoop archives input stream. This input stream fakes EOF * since archive files are part of bigger part files. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathCapabilities.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathCapabilities.java new file mode 100644 index 00000000000..d3492568f46 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathCapabilities.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import java.io.IOException; + +/** + * The Path counterpoint to {@link StreamCapabilities}; a query to see if, + * a FileSystem/FileContext instance has a specific capability under the given + * path. + * Other classes may also implement the interface, as desired. + * + * See {@link CommonPathCapabilities} for the well-known capabilities. + */ +public interface PathCapabilities { + + /** + * Probe for a specific capability under the given path. + * If the function returns {@code true}, this instance is explicitly + * declaring that the capability is available. + * If the function returns {@code false}, it can mean one of: + *
+ * Implementors: {@link org.apache.hadoop.fs.impl.PathCapabilitiesSupport}
+ * can be used to help implement this method.
+ * @param path path to query the capability of.
+ * @param capability non-null, non-empty string to query the path for support.
+ * @return true if the capability is supported under that part of the FS.
+ * @throws IOException this should not be raised, except on problems
+ * resolving paths or relaying the call.
+ * @throws IllegalArgumentException invalid arguments
+ */
+ boolean hasPathCapability(Path path, String capability)
+ throws IOException;
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index bd003ae90ab..cf2210575da 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -53,6 +53,8 @@ import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+
/****************************************************************
* Implement the FileSystem API for the raw local filesystem.
*
@@ -1060,4 +1062,21 @@ public class RawLocalFileSystem extends FileSystem {
// return an unqualified symlink target
return fi.getSymlink();
}
+
+ @Override
+ public boolean hasPathCapability(final Path path, final String capability)
+ throws IOException {
+ switch (validatePathCapabilityArgs(makeQualified(path), capability)) {
+ case CommonPathCapabilities.FS_APPEND:
+ case CommonPathCapabilities.FS_CONCAT:
+ case CommonPathCapabilities.FS_PATHHANDLES:
+ case CommonPathCapabilities.FS_PERMISSIONS:
+ case CommonPathCapabilities.FS_TRUNCATE:
+ return true;
+ case CommonPathCapabilities.FS_SYMLINKS:
+ return FileSystem.areSymlinksEnabled();
+ default:
+ return super.hasPathCapability(path, capability);
+ }
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
index fa0b2cf6c31..baf0a8187ef 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.http;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -36,6 +37,8 @@ import java.io.InputStream;
import java.net.URI;
import java.net.URLConnection;
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+
abstract class AbstractHttpFileSystem extends FileSystem {
private static final long DEFAULT_BLOCK_SIZE = 4096;
private static final Path WORKING_DIR = new Path("/");
@@ -111,6 +114,21 @@ abstract class AbstractHttpFileSystem extends FileSystem {
return new FileStatus(-1, false, 1, DEFAULT_BLOCK_SIZE, 0, path);
}
+ /**
+ * Declare that this filesystem connector is always read only.
+ * {@inheritDoc}
+ */
+ @Override
+ public boolean hasPathCapability(final Path path, final String capability)
+ throws IOException {
+ switch (validatePathCapabilityArgs(path, capability)) {
+ case CommonPathCapabilities.FS_READ_ONLY_CONNECTOR:
+ return true;
+ default:
+ return super.hasPathCapability(path, capability);
+ }
+ }
+
private static class HttpDataInputStream extends FilterInputStream
implements Seekable, PositionedReadable {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FsLinkResolution.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FsLinkResolution.java
new file mode 100644
index 00000000000..f5ef8c49233
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FsLinkResolution.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.impl;
+
+import java.io.IOException;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.FSLinkResolver;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+
+/**
+ * Class to allow Lambda expressions to be used in {@link FileContext}
+ * link resolution.
+ * @param
@@ -1561,4 +1565,30 @@ public class HttpFSFileSystem extends FileSystem
return JsonUtilClient.toSnapshottableDirectoryList(json);
}
+ /**
+ * This filesystem's capabilities must be in sync with that of
+ * {@code DistributedFileSystem.hasPathCapability()} except
+ * where the feature is not exposed (e.g. symlinks).
+ * {@inheritDoc}
+ */
+ @Override
+ public boolean hasPathCapability(final Path path, final String capability)
+ throws IOException {
+ // query the superclass, which triggers argument validation.
+ final Path p = makeQualified(path);
+ switch (validatePathCapabilityArgs(p, capability)) {
+ case CommonPathCapabilities.FS_ACLS:
+ case CommonPathCapabilities.FS_APPEND:
+ case CommonPathCapabilities.FS_CONCAT:
+ case CommonPathCapabilities.FS_PERMISSIONS:
+ case CommonPathCapabilities.FS_SNAPSHOTS:
+ case CommonPathCapabilities.FS_STORAGEPOLICY:
+ case CommonPathCapabilities.FS_XATTRS:
+ return true;
+ case CommonPathCapabilities.FS_SYMLINKS:
+ return false;
+ default:
+ return super.hasPathCapability(p, capability);
+ }
+ }
}
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
index bdffed4b254..a60f9af2a93 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
@@ -497,17 +497,19 @@ class S3ABlockOutputStream extends OutputStream implements
* @param capability string to query the stream support for.
* @return true if the capability is supported by this instance.
*/
+ @SuppressWarnings("deprecation")
@Override
public boolean hasCapability(String capability) {
switch (capability.toLowerCase(Locale.ENGLISH)) {
// does the output stream have delayed visibility
case CommitConstants.STREAM_CAPABILITY_MAGIC_OUTPUT:
+ case CommitConstants.STREAM_CAPABILITY_MAGIC_OUTPUT_OLD:
return !putTracker.outputImmediatelyVisible();
// The flush/sync options are absolutely not supported
- case "hflush":
- case "hsync":
+ case StreamCapabilities.HFLUSH:
+ case StreamCapabilities.HSYNC:
return false;
default:
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 0747be2d7e4..159505b055b 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -36,7 +36,6 @@ import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.List;
-import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
@@ -91,6 +90,7 @@ import org.apache.commons.lang3.tuple.Triple;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -152,6 +152,7 @@ import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.SemaphoredDelegatingExecutor;
import static org.apache.hadoop.fs.impl.AbstractFSBuilderImpl.rejectUnknownMandatoryKeys;
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
import static org.apache.hadoop.fs.s3a.Constants.*;
import static org.apache.hadoop.fs.s3a.Invoker.*;
import static org.apache.hadoop.fs.s3a.S3AUtils.*;
@@ -4084,17 +4085,15 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
return instrumentation.newCommitterStatistics();
}
- /**
- * Return the capabilities of this filesystem instance.
- * @param capability string to query the stream support for.
- * @return whether the FS instance has the capability.
- */
+ @SuppressWarnings("deprecation")
@Override
- public boolean hasCapability(String capability) {
-
- switch (capability.toLowerCase(Locale.ENGLISH)) {
+ public boolean hasPathCapability(final Path path, final String capability)
+ throws IOException {
+ final Path p = makeQualified(path);
+ switch (validatePathCapabilityArgs(p, capability)) {
case CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER:
+ case CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER_OLD:
// capability depends on FS configuration
return isMagicCommitEnabled();
@@ -4102,7 +4101,31 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
// select is only supported if enabled
return selectBinding.isEnabled();
+ case CommonPathCapabilities.FS_CHECKSUMS:
+ // capability depends on FS configuration
+ return getConf().getBoolean(ETAG_CHECKSUM_ENABLED,
+ ETAG_CHECKSUM_ENABLED_DEFAULT);
+
default:
+ return super.hasPathCapability(p, capability);
+ }
+ }
+
+ /**
+ * Return the capabilities of this filesystem instance.
+ *
+ * This has been supplanted by {@link #hasPathCapability(Path, String)}.
+ * @param capability string to query the stream support for.
+ * @return whether the FS instance has the capability.
+ */
+ @Deprecated
+ @Override
+ public boolean hasCapability(String capability) {
+ try {
+ return hasPathCapability(workingDir, capability);
+ } catch (IOException ex) {
+ // should never happen, so log and downgrade.
+ LOG.debug("Ignoring exception on hasCapability({})", capability, ex);
return false;
}
}
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java
index 877433bab2a..c9b0337bcb2 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java
@@ -78,14 +78,32 @@ public final class CommitConstants {
* Value: {@value}.
*/
public static final String STREAM_CAPABILITY_MAGIC_OUTPUT
+ = "fs.s3a.capability.magic.output.stream";
+
+ /**
+ * Flag to indicate that a store supports magic committers.
+ * returned in {@code PathCapabilities}
+ * Value: {@value}.
+ */
+ public static final String STORE_CAPABILITY_MAGIC_COMMITTER
+ = "fs.s3a.capability.magic.committer";
+
+ /**
+ * Flag to indicate whether a stream is a magic output stream;
+ * returned in {@code StreamCapabilities}
+ * Value: {@value}.
+ */
+ @Deprecated
+ public static final String STREAM_CAPABILITY_MAGIC_OUTPUT_OLD
= "s3a:magic.output.stream";
/**
* Flag to indicate that a store supports magic committers.
- * returned in {@code StreamCapabilities}
+ * returned in {@code PathCapabilities}
* Value: {@value}.
*/
- public static final String STORE_CAPABILITY_MAGIC_COMMITTER
+ @Deprecated
+ public static final String STORE_CAPABILITY_MAGIC_COMMITTER_OLD
= "s3a:magic.committer";
/**
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
index 9cb1efe380f..bd834e0f2cb 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
@@ -1227,7 +1227,8 @@ public abstract class S3GuardTool extends Configured implements Tool {
} else {
println(out, "Filesystem %s is not using S3Guard", fsUri);
}
- boolean magic = fs.hasCapability(
+ boolean magic = fs.hasPathCapability(
+ new Path(s3Path),
CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER);
println(out, "The \"magic\" committer %s supported",
magic ? "is" : "is not");
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectConstants.java
index d74411d2f92..0e2bf914f83 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectConstants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectConstants.java
@@ -50,7 +50,7 @@ public final class SelectConstants {
* Does the FS Support S3 Select?
* Value: {@value}.
*/
- public static final String S3_SELECT_CAPABILITY = "s3a:fs.s3a.select.sql";
+ public static final String S3_SELECT_CAPABILITY = "fs.s3a.capability.select.sql";
/**
* Flag: is S3 select enabled?
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectTool.java
index 61409f8ea12..4b362c667ec 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectTool.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectTool.java
@@ -234,7 +234,7 @@ public class SelectTool extends S3GuardTool {
}
setFilesystem((S3AFileSystem) fs);
- if (!getFilesystem().hasCapability(S3_SELECT_CAPABILITY)) {
+ if (!getFilesystem().hasPathCapability(path, S3_SELECT_CAPABILITY)) {
// capability disabled
throw new ExitUtil.ExitException(EXIT_SERVICE_UNAVAILABLE,
SELECT_IS_DISABLED + " for " + file);
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
index fc8d872463c..8f7f1beb444 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
@@ -28,12 +28,15 @@ import com.amazonaws.services.s3.model.PutObjectRequest;
import org.junit.Assume;
import org.junit.Test;
+import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.store.EtagChecksum;
import org.apache.hadoop.test.LambdaTestUtils;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertHasPathCapabilities;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertLacksPathCapabilities;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
@@ -142,6 +145,8 @@ public class ITestS3AMiscOperations extends AbstractS3ATestBase {
Path file1 = touchFile("file1");
EtagChecksum checksum1 = fs.getFileChecksum(file1, 0);
LOG.info("Checksum for {}: {}", file1, checksum1);
+ assertHasPathCapabilities(fs, file1,
+ CommonPathCapabilities.FS_CHECKSUMS);
assertNotNull("Null file 1 checksum", checksum1);
assertNotEquals("file 1 checksum", 0, checksum1.getLength());
assertEquals("checksums", checksum1,
@@ -159,6 +164,8 @@ public class ITestS3AMiscOperations extends AbstractS3ATestBase {
final S3AFileSystem fs = getFileSystem();
Path file1 = touchFile("file1");
EtagChecksum checksum1 = fs.getFileChecksum(file1, 0);
+ assertLacksPathCapabilities(fs, file1,
+ CommonPathCapabilities.FS_CHECKSUMS);
assertNull("Checksums are being generated", checksum1);
}
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
index b9743858b21..1889c054310 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
@@ -1236,9 +1236,12 @@ public final class S3ATestUtils {
* Skip a test if the FS isn't marked as supporting magic commits.
* @param fs filesystem
*/
- public static void assumeMagicCommitEnabled(S3AFileSystem fs) {
+ public static void assumeMagicCommitEnabled(S3AFileSystem fs)
+ throws IOException {
assume("Magic commit option disabled on " + fs,
- fs.hasCapability(CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER));
+ fs.hasPathCapability(
+ fs.getWorkingDirectory(),
+ CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER));
}
/**
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java
index b0e2b8e4bca..455a8a3ebd1 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java
@@ -550,10 +550,7 @@ public class ITestCommitOperations extends AbstractCommitITest {
@Test
public void testWriteNormalStream() throws Throwable {
S3AFileSystem fs = getFileSystem();
- Assume.assumeTrue(
- "Filesystem does not have magic support enabled: " + fs,
- fs.hasCapability(STORE_CAPABILITY_MAGIC_COMMITTER));
-
+ assumeMagicCommitEnabled(fs);
Path destFile = path("normal");
try (FSDataOutputStream out = fs.create(destFile, true)) {
out.writeChars("data");
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
index 2d17ca54249..97fcdc5e20c 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
@@ -517,7 +517,7 @@ public abstract class AbstractS3GuardToolTestBase extends AbstractS3ATestBase {
String name = fs.getUri().toString();
S3GuardTool.BucketInfo cmd = new S3GuardTool.BucketInfo(
getConfiguration());
- if (fs.hasCapability(
+ if (fs.hasPathCapability(fs.getWorkingDirectory(),
CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER)) {
// if the FS is magic, expect this to work
exec(cmd, S3GuardTool.BucketInfo.MAGIC_FLAG, name);
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3Select.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3Select.java
index 1f2faa209a9..d6058d19521 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3Select.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3Select.java
@@ -102,9 +102,9 @@ public class ITestS3Select extends AbstractS3SelectTest {
@Override
public void setup() throws Exception {
super.setup();
- Assume.assumeTrue("S3 Select is not enabled",
- getFileSystem().hasCapability(S3_SELECT_CAPABILITY));
csvPath = path(getMethodName() + ".csv");
+ Assume.assumeTrue("S3 Select is not enabled",
+ getFileSystem().hasPathCapability(csvPath, S3_SELECT_CAPABILITY));
selectConf = new Configuration(false);
selectConf.setBoolean(SELECT_ERRORS_INCLUDE_SQL, true);
createStandardCsvFile(getFileSystem(), csvPath, ALL_QUOTES);
diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 3955721a765..278b815782a 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -46,6 +46,7 @@ import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.ContentSummary.Builder;
import org.apache.hadoop.fs.CreateFlag;
@@ -70,6 +71,7 @@ import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.VersionInfo;
import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
/**
* A FileSystem to access Azure Data Lake Store.
@@ -1033,4 +1035,20 @@ public class AdlFileSystem extends FileSystem {
}
return dest;
}
+
+ @Override
+ public boolean hasPathCapability(final Path path, final String capability)
+ throws IOException {
+
+ switch (validatePathCapabilityArgs(makeQualified(path), capability)) {
+
+ case CommonPathCapabilities.FS_ACLS:
+ case CommonPathCapabilities.FS_APPEND:
+ case CommonPathCapabilities.FS_CONCAT:
+ case CommonPathCapabilities.FS_PERMISSIONS:
+ return true;
+ default:
+ return super.hasPathCapability(path, capability);
+ }
+ }
}
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index f8962d9b170..a990b60b1c9 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BufferedFSInputStream;
+import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -84,6 +85,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.azure.NativeAzureFileSystemHelper.*;
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
@@ -3866,4 +3868,19 @@ public class NativeAzureFileSystem extends FileSystem {
void updateDaemonUsers(List