HDFS-15507. [JDK 11] Fix javadoc errors in hadoop-hdfs-client module. Contributed by Xieming Li.

This commit is contained in:
Akira Ajisaka 2020-08-11 13:49:56 +09:00
parent 7938ebfb9d
commit 32895f4f7e
No known key found for this signature in database
GPG Key ID: C1EDBB9CA400FD50
4 changed files with 4 additions and 4 deletions

View File

@@ -31,7 +31,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<properties> <properties>
<hadoop.component>hdfs</hadoop.component> <hadoop.component>hdfs</hadoop.component>
<javadoc.skip.jdk11>true</javadoc.skip.jdk11>
</properties> </properties>
<dependencies> <dependencies>

View File

@@ -29,7 +29,7 @@ import java.util.concurrent.atomic.LongAccumulator;
/** /**
* Global State Id context for the client. * Global State Id context for the client.
* <p/> * <p>
* This is the client side implementation responsible for receiving * This is the client side implementation responsible for receiving
* state alignment info from server(s). * state alignment info from server(s).
*/ */

View File

@@ -1241,7 +1241,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
/** /**
* Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long, * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
* addition of Progressable, int, ChecksumOpt, InetSocketAddress[], String)} * Progressable, int, ChecksumOpt, InetSocketAddress[], String)}
* with the storagePolicy that is used to specify a specific storage policy * with the storagePolicy that is used to specify a specific storage policy
* instead of inheriting any policy from this new file's parent directory. * instead of inheriting any policy from this new file's parent directory.
* This policy will be persisted in HDFS. A value of null means inheriting * This policy will be persisted in HDFS. A value of null means inheriting

View File

@@ -22,6 +22,7 @@ import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
@InterfaceAudience.Private @InterfaceAudience.Private
@@ -158,7 +159,7 @@ public final class HdfsConstants {
* period, no other client can write to the file. The writing client can * period, no other client can write to the file. The writing client can
* periodically renew the lease. When the file is closed, the lease is * periodically renew the lease. When the file is closed, the lease is
* revoked. The lease duration is bound by this soft limit and a * revoked. The lease duration is bound by this soft limit and a
* {@link HdfsConstants#LEASE_HARDLIMIT_PERIOD hard limit}. Until the * {@link HdfsClientConfigKeys#DFS_LEASE_HARDLIMIT_KEY }. Until the
* soft limit expires, the writer has sole write access to the file. If the * soft limit expires, the writer has sole write access to the file. If the
* soft limit expires and the client fails to close the file or renew the * soft limit expires and the client fails to close the file or renew the
* lease, another client can preempt the lease. * lease, another client can preempt the lease.