From 6a9ceedfb3ee7c2f66a44083fb8e68cca508e207 Mon Sep 17 00:00:00 2001
From: Akira Ajisaka
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
index f13d701803d..84ca94e6428 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
@@ -166,11 +166,11 @@ public final class FutureIOSupport {
* Propagate options to any builder, converting everything with the
* prefix to an option where, if there were 2+ dot-separated elements,
* it is converted to a schema.
- * This class is extended from the ViewFileSystem for the overloaded
+ * This class is extended from the ViewFileSystem for the overloaded
+ * scheme file system. Mount link configurations and in-memory mount table
+ * building behaviors are inherited from ViewFileSystem. Unlike the
+ * ViewFileSystem scheme (viewfs://), users can use any scheme.
+ * To use this class, the following configurations need to be added to the
+ * core-site.xml file.
+ *
+ * Use Case 1:
+ * fs.viewfs.mounttable.Cluster.link./user = hdfs://NN1/user
 * Op1: Create file hdfs://Cluster/user/fileA will go to hdfs://NN1/user/fileA
+ *
+ * Use Case 2:
+ * fs.viewfs.mounttable.bucketA.link./user = hdfs://NN1/user
 * Op1: Create file s3a://bucketA/user/fileA will go to hdfs://NN1/user/fileA
+ *
+ * Note:
+ * This will check that every method on the combined interface is
* implemented by at least one of the supplied proxy objects.
*
* @param combinedProxyInterface The interface of the combined proxy.
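For readers following the overload-scheme javadoc above, here is a minimal, hedged sketch of Use Case 1 wired up programmatically. The key patterns (fs.<scheme>.impl, fs.viewfs.overload.scheme.target.<scheme>.impl, fs.viewfs.mounttable.<table>.link.<path>) come from the javadoc; "Cluster" and "NN1" are its placeholder names, and this is an illustration rather than the project's own example.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class OverloadSchemeUseCase1 {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Serve hdfs:// through the overload-scheme view (key patterns from the javadoc).
        conf.set("fs.hdfs.impl",
            "org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme");
        conf.set("fs.viewfs.overload.scheme.target.hdfs.impl",
            "org.apache.hadoop.hdfs.DistributedFileSystem");
        // Use Case 1: mount /user of hdfs://Cluster onto hdfs://NN1/user.
        conf.set("fs.viewfs.mounttable.Cluster.link./user", "hdfs://NN1/user");

        try (FileSystem fs = FileSystem.get(URI.create("hdfs://Cluster"), conf)) {
          // Op1: this create resolves through the mount table to hdfs://NN1/user/fileA.
          fs.create(new Path("/user/fileA")).close();
        }
      }
    }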
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedTimeCostProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedTimeCostProvider.java
index 4304b24299f..1ecd19b74c4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedTimeCostProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedTimeCostProvider.java
@@ -29,8 +29,8 @@ import static org.apache.hadoop.ipc.ProcessingDetails.Timing;
* {@link ProcessingDetails}). This can be used by specifying the
* {@link org.apache.hadoop.fs.CommonConfigurationKeys#IPC_COST_PROVIDER_KEY}
* configuration key.
- *
- *
+ * This allows for configuration of how heavily each of the operations
* within {@link ProcessingDetails} is weighted. By default,
* {@link ProcessingDetails.Timing#LOCKFREE},
* {@link ProcessingDetails.Timing#RESPONSE}, and
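As a rough sketch of how the provider described above might be enabled for a single RPC server port: the per-port key names below (ipc.<port>.cost-provider.impl and ipc.<port>.weighted-cost.*) follow the FairCallQueue convention and are assumptions here, not taken from this patch.

    import org.apache.hadoop.conf.Configuration;

    public class WeightedCostSetupSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        int port = 8020; // illustrative RPC port
        // Assumed key layout: ipc.<port>.cost-provider.impl selects the CostProvider.
        conf.set("ipc." + port + ".cost-provider.impl",
            "org.apache.hadoop.ipc.WeightedTimeCostProvider");
        // Assumed weight keys; stages left unset keep the defaults referenced
        // in the javadoc above.
        conf.setInt("ipc." + port + ".weighted-cost.lockexclusive", 100);
        conf.setInt("ipc." + port + ".weighted-cost.lockshared", 10);
        System.out.println(conf.get("ipc." + port + ".cost-provider.impl"));
      }
    }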
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DomainNameResolverFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DomainNameResolverFactory.java
index a0b0380c189..fdb45dd85d9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DomainNameResolverFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DomainNameResolverFactory.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
-import java.io.IOException;
import java.net.URI;
/**
@@ -49,7 +48,7 @@ public final class DomainNameResolverFactory {
* @return Domain name resolver.
*/
public static DomainNameResolver newInstance(
- Configuration conf, URI uri, String configKey) throws IOException {
+ Configuration conf, URI uri, String configKey) {
String host = uri.getHost();
String confKeyWithHost = configKey + "." + host;
return newInstance(conf, confKeyWithHost);
@@ -61,7 +60,6 @@ public final class DomainNameResolverFactory {
* @param conf Configuration
* @param configKey config key name.
* @return Domain name resolver.
- * @throws IOException when the class cannot be found or initiated.
*/
public static DomainNameResolver newInstance(
Configuration conf, String configKey) {
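A small usage sketch of the factory after this change (newInstance no longer declares IOException). The config key and hostname below are hypothetical, and getAllByDomainName is assumed from the DomainNameResolver interface.

    import java.net.InetAddress;
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.DomainNameResolver;
    import org.apache.hadoop.net.DomainNameResolverFactory;

    public class ResolverSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical config key naming the resolver implementation class.
        String configKey = "dfs.client.resolver.impl";
        DomainNameResolver resolver = DomainNameResolverFactory.newInstance(
            conf, URI.create("hdfs://nameservice1"), configKey);
        // Assumed DomainNameResolver method: resolve every address behind the host.
        InetAddress[] addresses = resolver.getAllByDomainName("nameservice1");
        for (InetAddress address : addresses) {
          System.out.println(address.getHostAddress());
        }
      }
    }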
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
index 406d0d0e150..47dca6cfe94 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
@@ -201,10 +201,10 @@ public class Groups {
/**
* Get the group memberships of a given user.
* If the user's group is not cached, this method may block.
- * Note this method can be expensive as it involves Set->List conversion.
- * For user with large group membership (i.e., > 1000 groups), we recommend
- * using getGroupSet to avoid the conversion and fast membership look up via
- * contains().
+ * Note this method can be expensive as it involves Set {@literal ->} List
+ * conversion. For users with large group membership
+ * (i.e., {@literal >} 1000 groups), we recommend using getGroupsSet
+ * to avoid the conversion and to get fast membership lookup via contains().
* @param user User's name
* @return the group memberships of the user as list
* @throws IOException if user does not exist
@@ -220,7 +220,9 @@ public class Groups {
* Get the group memberships of a given user.
* If the user's group is not cached, this method may block.
* This provide better performance when user has large group membership via
- * 1) avoid set->list->set conversion for the caller UGI/PermissionCheck
+ *
+ * 1) avoid {@literal set->list->set} conversion for the caller
+ * UGI/PermissionCheck
 * 2) fast lookup using contains() via Set instead of List
 * @param user User's name
 * @return the group memberships of the user as set
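To make the recommendation above concrete, a short sketch that prefers the Set-based lookup; it assumes the method is getGroupsSet as referenced in the new javadoc text, and "alice"/"admins" are placeholder names.

    import java.util.Set;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.Groups;

    public class GroupLookupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Groups groups = Groups.getUserToGroupsMappingService(conf);
        // Set-based API: no Set->List conversion, and membership checks use
        // contains() instead of scanning a List.
        Set<String> memberships = groups.getGroupsSet("alice");
        System.out.println("admin? " + memberships.contains("admins"));
      }
    }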
@@ -196,11 +196,11 @@ public final class FutureIOSupport {
* Propagate options to any builder, converting everything with the
* prefix to an option where, if there were 2+ dot-separated elements,
* it is converted to a schema.
 * {@code
 * fs.example.s3a.option => s3a:option
 * fs.example.fs.io.policy => s3a.io.policy
 * fs.example.something => something
- *
+ * }
* @param builder builder to modify
* @param conf configuration to read
* @param prefix prefix to scan/strip
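The hunk above documents how propagateOptions copies configuration entries under a prefix onto a filesystem builder. As a hedged illustration of what "options on a builder" means, the sketch below sets an option directly on a create-file builder via opt(); the option key and local path are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BuilderOptionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.getLocal(conf)) {
          // propagateOptions() would copy conf keys under a prefix (e.g. fs.example.*)
          // onto a builder like this one; here a single option is set directly.
          FSDataOutputStream out = fs.createFile(new Path("/tmp/builder-example.txt"))
              .opt("s3a.option", "value") // optional hint: unknown keys are ignored
              .build();
          out.close();
        }
      }
    }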
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
index bf9f7db7223..5c27692eb53 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
@@ -89,8 +89,8 @@ public interface Constants {
/**
* Config variable for specifying a regex link which uses regular expressions
* as source and target could use group captured in src.
- * E.g. (^/(?{@code
+ * 1) fs.{@literal <scheme>}.impl
+ * 2) fs.viewfs.overload.scheme.target.{@literal <scheme>}.impl
+ * ===========
* If users want some of their existing cluster (hdfs://Cluster)
* data to mount with other hdfs and object store clusters(hdfs://NN1,
- * o3fs://bucket1.volume1/, s3a://bucket1/)
+ * o3fs://bucket1.volume1/, s3a://bucket1/)
+ * fs.viewfs.mounttable.Cluster.link./data = o3fs://bucket1.volume1/data
* fs.viewfs.mounttable.Cluster.link./backup = s3a://bucket1/backup/
+ *
* Op2: Create file hdfs://Cluster/data/datafile will go to
- * o3fs://bucket1.volume1/data/datafile
+ * o3fs://bucket1.volume1/data/datafile
* Op3: Create file hdfs://Cluster/backup/data.zip will go to
* s3a://bucket1/backup/data.zip
+ *
+ * ===========
* If users want some of their existing cluster (s3a://bucketA/)
* data to mount with other hdfs and object store clusters
- * (hdfs://NN1, o3fs://bucket1.volume1/)
+ * (hdfs://NN1, o3fs://bucket1.volume1/)
+ * fs.viewfs.mounttable.bucketA.link./data = o3fs://bucket1.volume1/data
* fs.viewfs.mounttable.bucketA.link./salesDB = s3a://bucketA/salesDB/
+ *
* Op2: Create file s3a://bucketA/data/datafile will go to
- * o3fs://bucket1.volume1/data/datafile
+ * o3fs://bucket1.volume1/data/datafile
* Op3: Create file s3a://bucketA/salesDB/dbfile will go to
* s3a://bucketA/salesDB/dbfile
+ *
* (1) In ViewFileSystemOverloadScheme, by default the mount links will be
* represented as non-symlinks. If you want to change this behavior, please see
- * {@link ViewFileSystem#listStatus(Path)}
+ * {@link ViewFileSystem#listStatus(Path)}
* (2) In ViewFileSystemOverloadScheme, only the initialized uri's hostname will
* be considered as the mount table name. When the passed uri has hostname:port,
* it will simply ignore the port number and only hostname will be considered as
- * the mount table name.
+ * the mount table name.
* (3) If there are no mount links configured with the initializing uri's
* hostname as the mount table name, then it will automatically consider the
- * current uri as fallback( ex: fs.viewfs.mounttable.
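And a matching hedged sketch for the s3a use case above: the overload-scheme target key follows the fs.viewfs.overload.scheme.target.<scheme>.impl pattern listed earlier, the mount table name is taken from the URI authority (bucketA, port ignored) as note (2) describes, and all cluster/bucket names are the javadoc's placeholders.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class OverloadSchemeUseCase2 {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Route s3a:// through the overload-scheme view, keeping S3A as the target.
        conf.set("fs.s3a.impl",
            "org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme");
        conf.set("fs.viewfs.overload.scheme.target.s3a.impl",
            "org.apache.hadoop.fs.s3a.S3AFileSystem");
        // Mount links for the "bucketA" mount table (the URI authority).
        conf.set("fs.viewfs.mounttable.bucketA.link./user", "hdfs://NN1/user");
        conf.set("fs.viewfs.mounttable.bucketA.link./data",
            "o3fs://bucket1.volume1/data");

        try (FileSystem fs = FileSystem.get(URI.create("s3a://bucketA/"), conf)) {
          // Op2: resolves through the mount table to o3fs://bucket1.volume1/data/datafile.
          fs.create(new Path("/data/datafile")).close();
        }
      }
    }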
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java
index 9d7afa933b6..5644234a57a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java
@@ -43,19 +43,20 @@ import org.slf4j.LoggerFactory;
*
*
- *
 *   The factory has several different modes of operation:
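The final hunk is cut off mid-javadoc, but for orientation, a rough usage sketch of the factory it documents; the method and enum names (initializeDefaultFactory, getDefaultFactory, SSLChannelMode.Default_JSSE) are assumed from the class's public API rather than taken from this patch.

    import javax.net.ssl.SSLSocketFactory;

    import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory;

    public class SslFactorySketch {
      public static void main(String[] args) throws Exception {
        // Assumed API: initialize the shared factory once with a preferred mode;
        // OpenSSL-backed modes additionally need wildfly-openssl on the classpath.
        DelegatingSSLSocketFactory.initializeDefaultFactory(
            DelegatingSSLSocketFactory.SSLChannelMode.Default_JSSE);
        SSLSocketFactory factory = DelegatingSSLSocketFactory.getDefaultFactory();
        System.out.println("Delegating to: " + factory.getClass().getName());
      }
    }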