HADOOP-15785. [JDK10] Javadoc build fails on JDK 10 in hadoop-common. Contributed by Dinesh Chitlangia.

Takanobu Asanuma 2018-10-11 13:51:51 +09:00
parent f261c31937
commit 7b57f2f71f
146 changed files with 518 additions and 483 deletions

View File

@ -26,9 +26,15 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceStability.Stable
public interface Configurable {
/** Set the configuration to be used by this object. */
/**
* Set the configuration to be used by this object.
* @param conf configuration to be used
*/
void setConf(Configuration conf);
/** Return the configuration used by this object. */
/**
* Return the configuration used by this object.
* @return Configuration
*/
Configuration getConf();
}

View File

@ -115,7 +115,7 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
/**
* Provides access to configuration parameters.
*
* <h4 id="Resources">Resources</h4>
* <h3 id="Resources">Resources</h3>
*
* <p>Configurations are specified by resources. A resource contains a set of
* name/value pairs as XML data. Each resource is named by either a
@ -141,12 +141,12 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
* Once a resource declares a value final, no subsequently-loaded
* resource can alter that value.
* For example, one might define a final parameter with:
* <tt><pre>
* <pre><code>
* &lt;property&gt;
* &lt;name&gt;dfs.hosts.include&lt;/name&gt;
* &lt;value&gt;/etc/hadoop/conf/hosts.include&lt;/value&gt;
* <b>&lt;final&gt;true&lt;/final&gt;</b>
* &lt;/property&gt;</pre></tt>
* &lt;/property&gt;</code></pre>
*
* Administrators typically define parameters as final in
* <tt>core-site.xml</tt> for values that user applications may not alter.
@ -164,7 +164,7 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
*
* <p>For example, if a configuration resource contains the following property
* definitions:
* <tt><pre>
* <pre><code>
* &lt;property&gt;
* &lt;name&gt;basedir&lt;/name&gt;
* &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt;
@ -179,7 +179,7 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
* &lt;name&gt;otherdir&lt;/name&gt;
* &lt;value&gt;${<i>env.BASE_DIR</i>}/other&lt;/value&gt;
* &lt;/property&gt;
* </pre></tt>
* </code></pre>
*
* <p>When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt>
* will be resolved to another property in this Configuration, while
@ -203,7 +203,7 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
* can define their own custom tags in the hadoop.tags.custom property.
*
* <p>For example, we can tag existing property as:
* <tt><pre>
* <pre><code>
* &lt;property&gt;
* &lt;name&gt;dfs.replication&lt;/name&gt;
* &lt;value&gt;3&lt;/value&gt;
@ -215,7 +215,7 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
* &lt;value&gt;3&lt;/value&gt;
* &lt;tag&gt;HDFS,SECURITY&lt;/tag&gt;
* &lt;/property&gt;
* </pre></tt>
* </code></pre>
* <p> Properties marked with tags can be retrieved with <tt>conf
* .getAllPropertiesByTag("HDFS")</tt> or <tt>conf.getAllPropertiesByTags
* (Arrays.asList("YARN","SECURITY"))</tt>.</p>
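
A minimal usage sketch of the expansion and tag lookup described above (key names and values are illustrative, not part of this commit):

    Configuration conf = new Configuration();
    conf.set("basedir", "/user/${user.name}");   // ${user.name}: Java system property
    conf.set("tempdir", "${basedir}/tmp");       // ${basedir}: resolved in this config
    String tempdir = conf.get("tempdir");        // e.g. "/user/alice/tmp"
    // Tag lookup as described above; returns java.util.Properties.
    java.util.Properties hdfsProps = conf.getAllPropertiesByTag("HDFS");
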
@ -581,9 +581,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* If you have multiple deprecation entries to add, it is more efficient to
* use #addDeprecations(DeprecationDelta[] deltas) instead.
*
* @param key
* @param newKeys
* @param customMessage
* @param key to be deprecated
* @param newKeys list of keys that take up the values of deprecated key
* @param customMessage deprecation message
* @deprecated use {@link #addDeprecation(String key, String newKey,
String customMessage)} instead
*/
@ -605,9 +605,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* If you have multiple deprecation entries to add, it is more efficient to
* use #addDeprecations(DeprecationDelta[] deltas) instead.
*
* @param key
* @param newKey
* @param customMessage
* @param key to be deprecated
* @param newKey key that takes up the values of the deprecated key
* @param customMessage deprecation message
*/
public static void addDeprecation(String key, String newKey,
String customMessage) {
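
A hedged usage sketch of this three-argument overload (key names and message are hypothetical):

    // Reads of the old key resolve through the new key and log the
    // supplied deprecation message.
    Configuration.addDeprecation("old.key.name", "new.key.name",
        "old.key.name is deprecated; use new.key.name instead");
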
@ -1428,6 +1428,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
/**
* Unset a previously set property.
* @param name the property name
*/
public synchronized void unset(String name) {
String[] names = null;
@ -1717,6 +1718,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* is equivalent to <code>set(&lt;name&gt;, value.toString())</code>.
* @param name property name
* @param value new value
* @param <T> enumeration type
*/
public <T extends Enum<T>> void setEnum(String name, T value) {
set(name, value.toString());
@ -1727,8 +1729,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* Note that the returned value is trimmed by this method.
* @param name Property name
* @param defaultValue Value returned if no mapping exists
* @param <T> enumeration type
* @throws IllegalArgumentException If mapping is illegal for the type
* provided
* @return enumeration type
*/
public <T extends Enum<T>> T getEnum(String name, T defaultValue) {
final String val = getTrimmed(name);
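
A short sketch of the enum round trip documented above (the enum and key are illustrative; declare the enum at class scope):

    enum Codec { NONE, GZIP, SNAPPY }

    conf.setEnum("example.codec", Codec.GZIP);            // stores "GZIP"
    Codec c = conf.getEnum("example.codec", Codec.NONE);  // trimmed, parsed back
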
@ -1807,6 +1811,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* @param unit Unit to convert the stored property, if it exists.
* @throws NumberFormatException If the property stripped of its unit is not
* a number
* @return time duration in given time unit
*/
public long getTimeDuration(String name, long defaultValue, TimeUnit unit) {
String vStr = get(name);
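
A hedged example of the unit handling described above (key and value are illustrative):

    conf.set("example.timeout", "30s");                  // "s" = seconds suffix
    long ms = conf.getTimeDuration("example.timeout",
        10000L, TimeUnit.MILLISECONDS);                  // ms == 30000
    // A value with no unit suffix is interpreted in the requested TimeUnit.
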
@ -2299,6 +2304,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* the CredentialProvider API and conditionally falls back to config.
* @param name property name
* @return password
* @throws IOException when error in fetching password
*/
public char[] getPassword(String name) throws IOException {
char[] pass = null;
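
A hedged usage sketch (the alias is hypothetical; getPassword throws IOException, handling elided):

    char[] pw = conf.getPassword("example.keystore.password");
    try {
      // ... use pw ...
    } finally {
      if (pw != null) java.util.Arrays.fill(pw, '\0');  // clear sensitive data
    }
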
@ -2358,7 +2364,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* alias.
* @param name alias of the provisioned credential
* @return password or null if not found
* @throws IOException
* @throws IOException when error in fetching password
*/
public char[] getPasswordFromCredentialProviders(String name)
throws IOException {
@ -3425,25 +3431,23 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
/**
* Write out the non-default properties in this configuration to the
* given {@link Writer}.
*
* <ul>
* <li>
* When property name is not empty and the property exists in the
* configuration, this method writes the property and its attributes
* to the {@link Writer}.
* </li>
* <p>
*
* <li>
* When property name is null or empty, this method writes all the
* configuration properties and their attributes to the {@link Writer}.
* </li>
* <p>
*
* <li>
* When property name is not empty but the property doesn't exist in
* the configuration, this method throws an {@link IllegalArgumentException}.
* </li>
* <p>
* </ul>
* @param out the writer to write to.
*/
public void writeXml(String propertyName, Writer out)
@ -3553,7 +3557,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
/**
* Writes properties and their attributes (final and resource)
* to the given {@link Writer}.
*
* <ul>
* <li>
* When propertyName is not empty, and the property exists
* in the configuration, the format of the output would be,
@ -3593,6 +3597,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* found in the configuration, this method will throw an
* {@link IllegalArgumentException}.
* </li>
* </ul>
* <p>
* @param config the configuration
* @param propertyName property name
@ -3791,7 +3796,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
/**
* Get keys matching the regex.
* @param regex the regex to match configuration keys against
* @return Map<String,String> with matching keys
* @return {@literal Map<String,String>} with matching keys
*/
public Map<String,String> getValByRegex(String regex) {
Pattern p = Pattern.compile(regex);
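
A brief usage sketch (the pattern is illustrative):

    // Collect every configuration key starting with "dfs.namenode." and its value.
    Map<String, String> matches = conf.getValByRegex("^dfs\\.namenode\\..*");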

View File

@ -41,7 +41,7 @@ public class ConfigurationWithLogging extends Configuration {
}
/**
* @see Configuration#get(String).
* See {@link Configuration#get(String)}.
*/
@Override
public String get(String name) {
@ -51,7 +51,7 @@ public class ConfigurationWithLogging extends Configuration {
}
/**
* @see Configuration#get(String, String).
* See {@link Configuration#get(String, String)}.
*/
@Override
public String get(String name, String defaultValue) {
@ -62,7 +62,7 @@ public class ConfigurationWithLogging extends Configuration {
}
/**
* @see Configuration#getBoolean(String, boolean).
* See {@link Configuration#getBoolean(String, boolean)}.
*/
@Override
public boolean getBoolean(String name, boolean defaultValue) {
@ -72,7 +72,7 @@ public class ConfigurationWithLogging extends Configuration {
}
/**
* @see Configuration#getFloat(String, float).
* See {@link Configuration#getFloat(String, float)}.
*/
@Override
public float getFloat(String name, float defaultValue) {
@ -82,7 +82,7 @@ public class ConfigurationWithLogging extends Configuration {
}
/**
* @see Configuration#getInt(String, int).
* See {@link Configuration#getInt(String, int)}.
*/
@Override
public int getInt(String name, int defaultValue) {
@ -92,7 +92,7 @@ public class ConfigurationWithLogging extends Configuration {
}
/**
* @see Configuration#getLong(String, long).
* See {@link Configuration#getLong(String, long)}.
*/
@Override
public long getLong(String name, long defaultValue) {
@ -102,7 +102,7 @@ public class ConfigurationWithLogging extends Configuration {
}
/**
* @see Configuration#set(String, String, String).
* See {@link Configuration#set(String, String, String)}.
*/
@Override
public void set(String name, String value, String source) {

View File

@ -158,14 +158,15 @@ public abstract class CryptoCodec implements Configurable, Closeable {
* For example a {@link javax.crypto.Cipher} will maintain its encryption
* context internally when we do encryption/decryption using the
* Cipher#update interface.
* <p/>
* <p>
* Encryption/Decryption is not always on the entire file. For example,
* in Hadoop, a node may only decrypt a portion of a file (i.e. a split).
* In these situations, the counter is derived from the file position.
* <p/>
* <p>
* The IV can be calculated by combining the initial IV and the counter with
* a lossless operation (concatenation, addition, or XOR).
* @see http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_.28CTR.29
* See http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_
* .28CTR.29
*
* @param initIV initial IV
* @param counter counter for input stream position

View File

@ -53,10 +53,10 @@ import org.apache.hadoop.util.StringUtils;
* required in order to ensure that the plain text and cipher text have a 1:1
* mapping. The decryption is buffer based. The key points of the decryption
* are (1) calculating the counter and (2) padding through stream position:
* <p/>
* <p>
* counter = base + pos/(algorithm blocksize);
* padding = pos%(algorithm blocksize);
* <p/>
* <p>
* The underlying stream offset is maintained as state.
*/
@InterfaceAudience.Private
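
A worked instance of the two formulas above, assuming the 16-byte AES block size:

    long pos = 100;              // illustrative stream position
    long counter = pos / 16;     // 6, added to the base counter
    long padding = pos % 16;     // 4 bytes into the current block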

View File

@ -36,10 +36,10 @@ import com.google.common.base.Preconditions;
* required in order to ensure that the plain text and cipher text have a 1:1
* mapping. The encryption is buffer based. The key points of the encryption are
* (1) calculating counter and (2) padding through stream position.
* <p/>
* <p>
* counter = base + pos/(algorithm blocksize);
* padding = pos%(algorithm blocksize);
* <p/>
* <p>
* The underlying stream offset is maintained as state.
*
* Note that while some of this class' methods are synchronized, this is just to

View File

@ -38,7 +38,7 @@ public interface Decryptor {
/**
* Indicate whether the decryption context is reset.
* <p/>
* <p>
* Certain modes, like CTR, require a different IV depending on the
* position in the stream. Generally, the decryptor maintains any necessary
* context for calculating the IV and counter so that no reinit is necessary
@ -49,22 +49,22 @@ public interface Decryptor {
/**
* This presents a direct interface decrypting with direct ByteBuffers.
* <p/>
* <p>
* This function does not always decrypt the entire buffer and may potentially
* need to be called multiple times to process an entire buffer. The object
* may hold the decryption context internally.
* <p/>
* <p>
* Some implementations may require sufficient space in the destination
* buffer to decrypt the entire input buffer.
* <p/>
* <p>
* Upon return, inBuffer.position() will be advanced by the number of bytes
* read and outBuffer.position() by bytes written. Implementations should
* not modify inBuffer.limit() and outBuffer.limit().
* <p/>
* <p>
* @param inBuffer a direct {@link ByteBuffer} to read from. inBuffer may
* not be null and inBuffer.remaining() must be > 0
* not be null and inBuffer.remaining() must be {@literal >} 0
* @param outBuffer a direct {@link ByteBuffer} to write to. outBuffer may
* not be null and outBuffer.remaining() must be > 0
* not be null and outBuffer.remaining() must be {@literal >} 0
* @throws IOException if decryption fails
*/
public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)
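
A hedged sketch of that contract (the decryptor and buffer contents come from elsewhere; sizes are illustrative):

    ByteBuffer in = ByteBuffer.allocateDirect(8192);
    ByteBuffer out = ByteBuffer.allocateDirect(8192);
    // ... put cipher text into 'in', then in.flip() ...
    while (in.remaining() > 0) {
      decryptor.decrypt(in, out);  // advances positions; limits stay unchanged
    }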

View File

@ -37,7 +37,7 @@ public interface Encryptor {
/**
* Indicate whether the encryption context is reset.
* <p/>
* <p>
* Certain modes, like CTR, require a different IV depending on the
* position in the stream. Generally, the encryptor maintains any necessary
* context for calculating the IV and counter so that no reinit is necessary
@ -48,22 +48,22 @@ public interface Encryptor {
/**
* This presents a direct interface encrypting with direct ByteBuffers.
* <p/>
* <p>
* This function does not always encrypt the entire buffer and may potentially
* need to be called multiple times to process an entire buffer. The object
* may hold the encryption context internally.
* <p/>
* <p>
* Some implementations may require sufficient space in the destination
* buffer to encrypt the entire input buffer.
* <p/>
* <p>
* Upon return, inBuffer.position() will be advanced by the number of bytes
* read and outBuffer.position() by bytes written. Implementations should
* not modify inBuffer.limit() and outBuffer.limit().
* <p/>
* <p>
* @param inBuffer a direct {@link ByteBuffer} to read from. inBuffer may
* not be null and inBuffer.remaining() must be > 0
* not be null and inBuffer.remaining() must be &gt; 0
* @param outBuffer a direct {@link ByteBuffer} to write to. outBuffer may
* not be null and outBuffer.remaining() must be > 0
* not be null and outBuffer.remaining() must be &gt; 0
* @throws IOException if encryption fails
*/
public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)

View File

@ -107,12 +107,12 @@ public final class OpensslCipher {
}
/**
* Return an <code>OpensslCipher<code> object that implements the specified
* Return an <code>OpensslCipher</code> object that implements the specified
* transformation.
*
* @param transformation the name of the transformation, e.g.,
* AES/CTR/NoPadding.
* @return OpensslCipher an <code>OpensslCipher<code> object
* @return OpensslCipher an <code>OpensslCipher</code> object
* @throws NoSuchAlgorithmException if <code>transformation</code> is null,
* empty, in an invalid format, or if Openssl doesn't implement the
* specified algorithm.
@ -181,18 +181,18 @@ public final class OpensslCipher {
/**
* Continues a multiple-part encryption or decryption operation. The data
* is encrypted or decrypted, depending on how this cipher was initialized.
* <p/>
* <p>
*
* All <code>input.remaining()</code> bytes starting at
* <code>input.position()</code> are processed. The result is stored in
* the output buffer.
* <p/>
* <p>
*
* Upon return, the input buffer's position will be equal to its limit;
* its limit will not have changed. The output buffer's position will have
* advanced by n, when n is the value returned by this method; the output
* buffer's limit will not have changed.
* <p/>
* <p>
*
* If <code>output.remaining()</code> bytes are insufficient to hold the
* result, a <code>ShortBufferException</code> is thrown.
@ -218,21 +218,21 @@ public final class OpensslCipher {
/**
* Finishes a multiple-part operation. The data is encrypted or decrypted,
* depending on how this cipher was initialized.
* <p/>
* <p>
*
* The result is stored in the output buffer. Upon return, the output buffer's
* position will have advanced by n, where n is the value returned by this
* method; the output buffer's limit will not have changed.
* <p/>
* <p>
*
* If <code>output.remaining()</code> bytes are insufficient to hold the result,
* a <code>ShortBufferException</code> is thrown.
* <p/>
* <p>
*
* Upon finishing, this method resets this cipher object to the state it was
* in when previously initialized. That is, the object is available to encrypt
* or decrypt more data.
* <p/>
* <p>
*
* If any exception is thrown, this cipher object needs to be reset before it
* can be used again.
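
A hedged sketch of the update/doFinal flow above (key/IV setup and exception handling elided; ENCRYPT_MODE and the exact signatures are assumptions about this internal class):

    OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
    cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv);   // byte[] key, byte[] iv
    int n = cipher.update(inBuffer, outBuffer);         // direct ByteBuffers
    n += cipher.doFinal(outBuffer);                     // finishes and resets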

View File

@ -62,23 +62,24 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* KeyProvider based on Java's KeyStore file format. The file may be stored in
* any Hadoop FileSystem using the following name mangling:
* jks://hdfs@nn1.example.com/my/keys.jks -> hdfs://nn1.example.com/my/keys.jks
* jks://file/home/owen/keys.jks -> file:///home/owen/keys.jks
* <p/>
* jks://hdfs@nn1.example.com/my/keys.jks {@literal ->}
* hdfs://nn1.example.com/my/keys.jks
* jks://file/home/owen/keys.jks {@literal ->} file:///home/owen/keys.jks
* <p>
* If the <code>HADOOP_KEYSTORE_PASSWORD</code> environment variable is set,
* its value is used as the password for the keystore.
* <p/>
* <p>
* If the <code>HADOOP_KEYSTORE_PASSWORD</code> environment variable is not set,
* the password for the keystore is read from file specified in the
* {@link #KEYSTORE_PASSWORD_FILE_KEY} configuration property. The password file
* is looked up in Hadoop's configuration directory via the classpath.
* <p/>
* <p>
* <b>NOTE:</b> Make sure the password in the password file does not have an
* ENTER at the end, else it won't be valid for the Java KeyStore.
* <p/>
* <p>
* If neither the environment variable nor the property is set, the password
* used is 'none'.
* <p/>
* <p>
* It is expected for encrypted InputFormats and OutputFormats to copy the keys
* from the original provider into the job's Credentials object, which is
* accessed via the UserProvider. Therefore, this provider won't be used by

View File

@ -49,7 +49,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY
* abstraction to separate key storage from users of encryption. It
* is intended to support getting or storing keys in a variety of ways,
* including third party bindings.
* <P/>
* <p>
* <code>KeyProvider</code> implementations must be thread safe.
*/
@InterfaceAudience.Public
@ -549,7 +549,7 @@ public abstract class KeyProvider {
/**
* Create a new key generating the material for it.
* The given key must not already exist.
* <p/>
* <p>
* This implementation generates the key material and calls the
* {@link #createKey(String, byte[], Options)} method.
*
@ -593,7 +593,7 @@ public abstract class KeyProvider {
/**
* Roll a new version of the given key generating the material for it.
* <p/>
* <p>
* This implementation generates the key material and calls the
* {@link #rollNewVersion(String, byte[])} method.
*

View File

@ -149,7 +149,7 @@ public class KeyProviderCryptoExtension extends
* Derive the initialization vector (IV) for the encryption key from the IV
* of the encrypted key. This derived IV is used with the encryption key to
* decrypt the encrypted key.
* <p/>
* <p>
* The alternative to this is using the same IV for both the encryption key
* and the encrypted key. Even a simple symmetric transformation like this
* improves security by avoiding IV re-use. IVs will also be fairly unique
@ -195,7 +195,7 @@ public class KeyProviderCryptoExtension extends
* The generated key material is of the same
* length as the <code>KeyVersion</code> material of the latest key version
* of the key and is encrypted using the same cipher.
* <p/>
* <p>
* NOTE: The generated key is not stored by the <code>KeyProvider</code>
*
* @param encryptionKeyName
@ -498,7 +498,7 @@ public class KeyProviderCryptoExtension extends
* and initialization vector. The generated key material is of the same
* length as the <code>KeyVersion</code> material and is encrypted using the
* same cipher.
* <p/>
* <p>
* NOTE: The generated key is not stored by the <code>KeyProvider</code>
*
* @param encryptionKeyName The latest KeyVersion of this key's material will
@ -576,7 +576,6 @@ public class KeyProviderCryptoExtension extends
* NOTE: The generated key is not stored by the <code>KeyProvider</code>
*
* @param ekvs List containing the EncryptedKeyVersion's
* @return The re-encrypted EncryptedKeyVersion's, in the same order.
* @throws IOException If any EncryptedKeyVersion could not be re-encrypted
* @throws GeneralSecurityException If any EncryptedKeyVersion could not be
* re-encrypted because of a cryptographic issue.
@ -589,7 +588,7 @@ public class KeyProviderCryptoExtension extends
/**
* Creates a <code>KeyProviderCryptoExtension</code> using a given
* {@link KeyProvider}.
* <p/>
* <p>
* If the given <code>KeyProvider</code> implements the
* {@link CryptoExtension} interface the <code>KeyProvider</code> itself
* will provide the extension functionality.

View File

@ -113,7 +113,7 @@ public class KeyProviderDelegationTokenExtension extends
/**
* Creates a <code>KeyProviderDelegationTokenExtension</code> using a given
* {@link KeyProvider}.
* <p/>
* <p>
* If the given <code>KeyProvider</code> implements the
* {@link DelegationTokenExtension} interface the <code>KeyProvider</code>
* itself will provide the extension functionality, otherwise a default

View File

@ -246,12 +246,12 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
/**
* This provider expects URIs in the following form :
* kms://<PROTO>@<AUTHORITY>/<PATH>
* {@literal kms://<PROTO>@<AUTHORITY>/<PATH>}
*
* where :
* - PROTO = http or https
* - AUTHORITY = <HOSTS>[:<PORT>]
* - HOSTS = <HOSTNAME>[;<HOSTS>]
* - AUTHORITY = {@literal <HOSTS>[:<PORT>]}
* - HOSTS = {@literal <HOSTNAME>[;<HOSTS>]}
* - HOSTNAME = string
* - PORT = integer
*
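
A hedged example of that grammar, resolved through the provider factory (host and port are illustrative):

    // PROTO = https, AUTHORITY = kms.example.com:9600, PATH = /kms
    KeyProvider provider = KeyProviderFactory.get(
        new URI("kms://https@kms.example.com:9600/kms"), conf);  // may be null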

View File

@ -344,7 +344,7 @@ public class ValueQueue <E> {
* <code>SyncGenerationPolicy</code> specified by the user.
* @param keyName String key name
* @param num Minimum number of values to return.
* @return List<E> values returned
* @return {@literal List<E>} values returned
* @throws IOException
* @throws ExecutionException
*/

View File

@ -30,16 +30,16 @@ import org.slf4j.LoggerFactory;
/**
* OpenSSL secure random using JNI.
* This implementation is thread-safe.
* <p/>
* <p>
*
* If using an Intel chipset with RDRAND, the high-performance hardware
* random number generator will be used and it's much faster than
* {@link java.security.SecureRandom}. If RDRAND is unavailable, default
* OpenSSL secure random generator will be used. It's still faster
* and can generate strong random bytes.
* <p/>
* @see https://wiki.openssl.org/index.php/Random_Numbers
* @see http://en.wikipedia.org/wiki/RdRand
* <p>
* See https://wiki.openssl.org/index.php/Random_Numbers
* See http://en.wikipedia.org/wiki/RdRand
*/
@InterfaceAudience.Private
public class OpensslSecureRandom extends Random {
@ -97,7 +97,7 @@ public class OpensslSecureRandom extends Random {
* random bits (right justified, with leading zeros).
*
* @param numBits number of random bits to be generated, where
* 0 <= <code>numBits</code> <= 32.
* 0 {@literal <=} <code>numBits</code> {@literal <=} 32.
*
* @return int an <code>int</code> containing the user-specified number
* of random bits (right justified, with leading zeros).

View File

@ -336,7 +336,7 @@ public abstract class AbstractFileSystem {
* The default port of this file system.
*
* @return default port of this file system's Uri scheme
* A uri with a port of -1 => default port;
* A uri with a port of -1 =&gt; default port;
*/
public abstract int getUriDefaultPort();
@ -478,9 +478,11 @@ public abstract class AbstractFileSystem {
* through any internal symlinks or mount point
* @param p path to be resolved
* @return fully qualified path
* @throws FileNotFoundException, AccessControlException, IOException
* UnresolvedLinkException if symbolic link on path cannot be resolved
* internally
* @throws FileNotFoundException
* @throws AccessControlException
* @throws IOException
* @throws UnresolvedLinkException if symbolic link on path cannot be
* resolved internally
*/
public Path resolvePath(final Path p) throws FileNotFoundException,
UnresolvedLinkException, AccessControlException, IOException {
@ -1021,7 +1023,7 @@ public abstract class AbstractFileSystem {
* changes. (Modifications are merged into the current ACL.)
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications
* @param aclSpec List{@literal <AclEntry>} describing modifications
* @throws IOException if an ACL could not be modified
*/
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
@ -1035,7 +1037,7 @@ public abstract class AbstractFileSystem {
* retained.
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing entries to remove
* @param aclSpec List{@literal <AclEntry>} describing entries to remove
* @throws IOException if an ACL could not be modified
*/
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
@ -1075,8 +1077,9 @@ public abstract class AbstractFileSystem {
* entries.
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications, must include entries
* for user, group, and others for compatibility with permission bits.
* @param aclSpec List{@literal <AclEntry>} describing modifications, must
* include entries for user, group, and others for compatibility with
* permission bits.
* @throws IOException if an ACL could not be modified
*/
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
@ -1088,7 +1091,7 @@ public abstract class AbstractFileSystem {
* Gets the ACLs of files and directories.
*
* @param path Path to get
* @return RemoteIterator<AclStatus> which returns each AclStatus
* @return RemoteIterator{@literal <AclStatus>} which returns each AclStatus
* @throws IOException if an ACL could not be read
*/
public AclStatus getAclStatus(Path path) throws IOException {
@ -1100,7 +1103,7 @@ public abstract class AbstractFileSystem {
* Set an xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
@ -1118,7 +1121,7 @@ public abstract class AbstractFileSystem {
* Set an xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
@ -1137,7 +1140,7 @@ public abstract class AbstractFileSystem {
* Get an xattr for a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attribute
@ -1154,11 +1157,13 @@ public abstract class AbstractFileSystem {
* Get all of the xattrs for a file or directory.
* Only those xattrs for which the logged-in user has permissions to view
* are returned.
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return Map<String, byte[]> describing the XAttrs of the file or directory
*
* @return {@literal Map<String, byte[]>} describing the XAttrs of the file
* or directory
* @throws IOException
*/
public Map<String, byte[]> getXAttrs(Path path) throws IOException {
@ -1170,12 +1175,13 @@ public abstract class AbstractFileSystem {
* Get all of the xattrs for a file or directory.
* Only those xattrs for which the logged-in user has permissions to view
* are returned.
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @param names XAttr names.
* @return Map<String, byte[]> describing the XAttrs of the file or directory
* @return {@literal Map<String, byte[]>} describing the XAttrs of the file
* or directory
* @throws IOException
*/
public Map<String, byte[]> getXAttrs(Path path, List<String> names)
@ -1188,11 +1194,12 @@ public abstract class AbstractFileSystem {
* Get all of the xattr names for a file or directory.
* Only the xattr names for which the logged-in user has permissions to view
* are returned.
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return Map<String, byte[]> describing the XAttrs of the file or directory
* @return {@literal Map<String, byte[]>} describing the XAttrs of the file
* or directory
* @throws IOException
*/
public List<String> listXAttrs(Path path)
@ -1205,7 +1212,7 @@ public abstract class AbstractFileSystem {
* Remove an xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to remove extended attribute

View File

@ -27,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability;
/**
* A class that optimizes reading from FSInputStream by buffering
* A class that optimizes reading from FSInputStream by buffering.
*/
@InterfaceAudience.Private
@ -44,7 +44,7 @@ implements Seekable, PositionedReadable, HasFileDescriptor {
*
* @param in the underlying input stream.
* @param size the buffer size.
* @exception IllegalArgumentException if size <= 0.
* @exception IllegalArgumentException if size {@literal <=} 0.
*/
public BufferedFSInputStream(FSInputStream in, int size) {
super(in, size);

View File

@ -32,18 +32,18 @@ public interface ByteBufferReadable {
/**
* Reads up to buf.remaining() bytes into buf. Callers should use
* buf.limit(..) to control the size of the desired read.
* <p/>
* <p>
* After a successful call, buf.position() will be advanced by the number
* of bytes read and buf.limit() should be unchanged.
* <p/>
* <p>
* In the case of an exception, the values of buf.position() and buf.limit()
* are undefined, and callers should be prepared to recover from this
* eventuality.
* <p/>
* <p>
* Many implementations will throw {@link UnsupportedOperationException}, so
* callers that are not confident in support for this method from the
* underlying filesystem should be prepared to handle that exception.
* <p/>
* <p>
* Implementations should treat 0-length requests as legitimate, and must not
* signal an error upon their receipt.
*

View File

@ -39,7 +39,7 @@ import org.apache.hadoop.util.Progressable;
* Abstract Checksumed FileSystem.
* It provides a basic implementation of a Checksumed FileSystem,
* which creates a checksum file for each raw file.
* It generates & verifies checksums at the client side.
* It generates &amp; verifies checksums at the client side.
*
*****************************************************************/
@InterfaceAudience.Public

View File

@ -42,7 +42,7 @@ import org.slf4j.LoggerFactory;
* Abstract Checksumed Fs.
* It provides a basic implementation of a Checksumed Fs,
* which creates a checksum file for each raw file.
* It generates & verifies checksums at the client side.
* It generates &amp; verifies checksums at the client side.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */

View File

@ -309,7 +309,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
"dr.who";
/**
* User->groups static mapping to override the groups lookup
* User{@literal ->}groups static mapping to override the groups lookup
*/
public static final String HADOOP_USER_GROUP_STATIC_OVERRIDES =
"hadoop.user.group.static.mapping.overrides";

View File

@ -29,7 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability;
* CreateFlag specifies the file create semantic. Users can combine flags like: <br>
* <code>
* EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND)
* <code>
* </code>
* <p>
*
* Use the CreateFlag as follows:

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.fs;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.IOException;

View File

@ -101,7 +101,7 @@ abstract public class FSInputChecker extends FSInputStream {
* Implementors should simply pass through to the underlying data stream.
* or
* (b) needChecksum() will return true:
* - len >= maxChunkSize
* - len {@literal >=} maxChunkSize
* - checksum.length is a multiple of CHECKSUM_SIZE
* Implementors should read an integer number of data chunks into
* buf. The amount read should be bounded by len or by

View File

@ -765,7 +765,7 @@ public class FileContext {
* Make(create) a directory and all the non-existent parents.
*
* @param dir - the dir to make
* @param permission - permissions is set permission&~umask
* @param permission - permissions is set permission{@literal &~}umask
* @param createParent - if true then missing parent dirs are created if false
* then parent must exist
*
@ -979,7 +979,6 @@ public class FileContext {
/**
* Renames Path src to Path dst
* <ul>
* <li
* <li>Fails if src is a file and dst is a directory.
* <li>Fails if src is a directory and dst is a file.
* <li>Fails if the parent of dst does not exist or is a file.
@ -1001,7 +1000,7 @@ public class FileContext {
*
* @throws AccessControlException If access is denied
* @throws FileAlreadyExistsException If <code>dst</code> already exists and
* <code>options</options> has {@link Options.Rename#OVERWRITE}
* <code>options</code> has {@link Options.Rename#OVERWRITE}
* option false.
* @throws FileNotFoundException If <code>src</code> does not exist
* @throws ParentNotDirectoryException If parent of <code>dst</code> is not a
@ -1250,7 +1249,7 @@ public class FileContext {
* checks to perform. If the requested permissions are granted, then the
* method returns normally. If access is denied, then the method throws an
* {@link AccessControlException}.
* <p/>
* <p>
* The default implementation of this method calls {@link #getFileStatus(Path)}
* and checks the returned permissions against the requested permissions.
* Note that the getFileStatus call will be subject to authorization checks.
@ -1497,9 +1496,9 @@ public class FileContext {
* <pre>
* Given a path referring to a symlink of form:
*
* <---X--->
* {@literal <---}X{@literal --->}
* fs://host/A/B/link
* <-----Y----->
* {@literal <-----}Y{@literal ----->}
*
* In this path X is the scheme and authority that identify the file system,
* and Y is the path leading up to the final path component "link". If Y is
@ -1536,7 +1535,7 @@ public class FileContext {
*
*
* @throws AccessControlException If access is denied
* @throws FileAlreadyExistsException If file <code>linkcode> already exists
* @throws FileAlreadyExistsException If file <code>link</code> already exists
* @throws FileNotFoundException If <code>target</code> does not exist
* @throws ParentNotDirectoryException If parent of <code>link</code> is not a
* directory.
@ -2038,7 +2037,6 @@ public class FileContext {
* <dl>
* <dd>
* <dl>
* <p>
* <dt> <tt> ? </tt>
* <dd> Matches any single character.
*
@ -2400,7 +2398,8 @@ public class FileContext {
* changes. (Modifications are merged into the current ACL.)
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications
* @param aclSpec List{@literal <}AclEntry{@literal >} describing
* modifications
* @throws IOException if an ACL could not be modified
*/
public void modifyAclEntries(final Path path, final List<AclEntry> aclSpec)
@ -2421,7 +2420,8 @@ public class FileContext {
* retained.
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing entries to remove
* @param aclSpec List{@literal <}AclEntry{@literal >} describing entries
* to remove
* @throws IOException if an ACL could not be modified
*/
public void removeAclEntries(final Path path, final List<AclEntry> aclSpec)
@ -2481,8 +2481,9 @@ public class FileContext {
* entries.
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications, must include entries
* for user, group, and others for compatibility with permission bits.
* @param aclSpec List{@literal <}AclEntry{@literal >} describing
* modifications, must include entries for user, group, and others for
* compatibility with permission bits.
* @throws IOException if an ACL could not be modified
*/
public void setAcl(Path path, final List<AclEntry> aclSpec)
@ -2502,7 +2503,8 @@ public class FileContext {
* Gets the ACLs of files and directories.
*
* @param path Path to get
* @return RemoteIterator<AclStatus> which returns each AclStatus
* @return RemoteIterator{@literal <}AclStatus{@literal >} which returns
* each AclStatus
* @throws IOException if an ACL could not be read
*/
public AclStatus getAclStatus(Path path) throws IOException {
@ -2520,7 +2522,7 @@ public class FileContext {
* Set an xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
@ -2538,7 +2540,7 @@ public class FileContext {
* Set an xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
@ -2564,7 +2566,7 @@ public class FileContext {
* Get an xattr for a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attribute
@ -2587,11 +2589,12 @@ public class FileContext {
* Get all of the xattrs for a file or directory.
* Only those xattrs for which the logged-in user has permissions to view
* are returned.
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return Map<String, byte[]> describing the XAttrs of the file or directory
* @return Map{@literal <}String, byte[]{@literal >} describing the XAttrs
* of the file or directory
* @throws IOException
*/
public Map<String, byte[]> getXAttrs(Path path) throws IOException {
@ -2609,12 +2612,13 @@ public class FileContext {
* Get all of the xattrs for a file or directory.
* Only those xattrs for which the logged-in user has permissions to view
* are returned.
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @param names XAttr names.
* @return Map<String, byte[]> describing the XAttrs of the file or directory
* @return Map{@literal <}String, byte[]{@literal >} describing the XAttrs
* of the file or directory
* @throws IOException
*/
public Map<String, byte[]> getXAttrs(Path path, final List<String> names)
@ -2633,7 +2637,7 @@ public class FileContext {
* Remove an xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to remove extended attribute
@ -2656,11 +2660,12 @@ public class FileContext {
* Get all of the xattr names for a file or directory.
* Only those xattr names which the logged-in user has permissions to view
* are returned.
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return List<String> of the XAttr names of the file or directory
* @return List{@literal <}String{@literal >} of the XAttr names of the
* file or directory
* @throws IOException
*/
public List<String> listXAttrs(Path path) throws IOException {
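
A hedged sketch of the namespace-prefixed xattr calls described above (path and attribute are illustrative; imports and exception handling elided):

    FileContext fc = FileContext.getFileContext();
    Path p = new Path("/tmp/example");
    fc.setXAttr(p, "user.team", "data-eng".getBytes(StandardCharsets.UTF_8));
    byte[] v = fc.getXAttr(p, "user.team");      // "user." prefix is required
    List<String> names = fc.listXAttrs(p);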

View File

@ -684,7 +684,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* Create a file with the provided permission.
*
* The permission of the file is set to be the provided permission as in
* setPermission, not permission&~umask
* setPermission, not permission{@literal &~}umask
*
* The HDFS implementation is implemented using two RPCs.
* It is understood that it is inefficient,
@ -709,7 +709,7 @@ public abstract class FileSystem extends Configured implements Closeable {
/**
* Create a directory with the provided permission.
* The permission of the directory is set to be the provided permission as in
* setPermission, not permission&~umask
* setPermission, not permission{@literal &~}umask
*
* @see #create(FileSystem, Path, FsPermission)
*
@ -789,7 +789,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* <pre>
* if f == null :
* result = null
* elif f.getLen() <= start:
* elif f.getLen() {@literal <=} start:
* result = []
* else result = [ locations(FS, b) for b in blocks(FS, p, s, s+l)]
* </pre>
@ -2017,7 +2017,6 @@ public abstract class FileSystem extends Configured implements Closeable {
* <dl>
* <dd>
* <dl>
* <p>
* <dt> <tt> ? </tt>
* <dd> Matches any single character.
*
@ -2916,7 +2915,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* changes. (Modifications are merged into the current ACL.)
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications
* @param aclSpec List&lt;AclEntry&gt; describing modifications
* @throws IOException if an ACL could not be modified
* @throws UnsupportedOperationException if the operation is unsupported
* (default outcome).
@ -3109,7 +3108,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return List<String> of the XAttr names of the file or directory
* @return List{@literal <String>} of the XAttr names of the file or directory
* @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported
* (default outcome).

View File

@ -1474,8 +1474,8 @@ public class FileUtil {
* @param inputClassPath String input classpath to bundle into the jar manifest
* @param pwd Path to working directory to save jar
* @param targetDir path to where the jar execution will have its working dir
* @param callerEnv Map<String, String> caller's environment variables to use
* for expansion
* @param callerEnv Map {@literal <}String, String{@literal >} caller's
* environment variables to use for expansion
* @return String[] with absolute path to new jar in position 0 and
* unexpanded wild card entry path in position 1
* @throws IOException if there is an I/O error while writing the jar file

View File

@ -83,7 +83,7 @@ public class HarFileSystem extends FileSystem {
/**
* Return the protocol scheme for the FileSystem.
* <p/>
* <p>
*
* @return <code>har</code>
*/

View File

@ -52,18 +52,19 @@ public interface HasEnhancedByteBufferAccess {
* @return
* We will always return an empty buffer if maxLength was 0,
* whether or not we are at EOF.
* If maxLength > 0, we will return null if the stream has
* reached EOF.
* If maxLength &gt; 0, we will return null if the stream
* has reached EOF.
* Otherwise, we will return a ByteBuffer containing at least one
* byte. You must free this ByteBuffer when you are done with it
* by calling releaseBuffer on it. The buffer will continue to be
* readable until it is released in this manner. However, the
* input stream's close method may warn about unclosed buffers.
* @throws
* IOException: if there was an error reading.
* UnsupportedOperationException: if factory was null, and we
* needed an external byte buffer. UnsupportedOperationException
* will never be thrown unless the factory argument is null.
* @throws IOException if there was an error reading.
* @throws UnsupportedOperationException if factory was null,
* and we needed an external byte buffer.
* @throws UnsupportedOperationException will never be thrown
* unless the factory argument is null.
*
*/
public ByteBuffer read(ByteBufferPool factory, int maxLength,
EnumSet<ReadOption> opts)
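
A hedged zero-copy read loop following that contract ('stream' is an FSDataInputStream opened elsewhere):

    ByteBuffer buf = stream.read(new ElasticByteBufferPool(), 1 << 20,
        EnumSet.noneOf(ReadOption.class));
    if (buf != null) {                 // null means EOF when maxLength > 0
      try {
        // ... consume buf ...
      } finally {
        stream.releaseBuffer(buf);     // buffer stays readable until released
      }
    }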

View File

@ -241,9 +241,8 @@ public class LocalDirAllocator {
* @param pathStr the requested file (this will be searched)
* @param conf the Configuration object
* @return true if the file exists, false otherwise
* @throws IOException
*/
public boolean ifExists(String pathStr,Configuration conf) {
public boolean ifExists(String pathStr, Configuration conf) {
AllocatorPerContext context = obtainContext(contextCfgItemName);
return context.ifExists(pathStr, conf);
}

View File

@ -54,7 +54,7 @@ public class LocalFileSystem extends ChecksumFileSystem {
/**
* Return the protocol scheme for the FileSystem.
* <p/>
* <p>
*
* @return <code>file</code>
*/

View File

@ -290,7 +290,7 @@ public final class Options {
* @param defaultOpt Default checksum option
* @param userOpt User-specified checksum option. Ignored if null.
* @param userBytesPerChecksum User-specified bytesPerChecksum
* Ignored if < 0.
* Ignored if {@literal <} 0.
*/
public static ChecksumOpt processChecksumOpt(ChecksumOpt defaultOpt,
ChecksumOpt userOpt, int userBytesPerChecksum) {

View File

@ -229,8 +229,8 @@ public class QuotaUsage {
/**
* Output format:
* <----12----> <----15----> <----15----> <----15----> <-------18------->
* QUOTA REMAINING_QUATA SPACE_QUOTA SPACE_QUOTA_REM FILE_NAME
* |----12----| |----15----| |----15----| |----15----| |-------18-------|
* QUOTA REMAINING_QUOTA SPACE_QUOTA SPACE_QUOTA_REM FILE_NAME
*/
protected static final String QUOTA_STRING_FORMAT = "%12s %15s ";
protected static final String SPACE_QUOTA_STRING_FORMAT = "%15s %15s ";
@ -244,9 +244,9 @@ public class QuotaUsage {
/**
* Output format:
* <----12----> <------15-----> <------15-----> <------15----->
* |----12----| |------15-----| |------15-----| |------15-----|
* QUOTA REM_QUOTA SPACE_QUOTA REM_SPACE_QUOTA
* <----12----> <----12----> <-------18------->
* |----12----| |----12----| |-------18-------|
* DIR_COUNT FILE_COUNT CONTENT_SIZE
*/
private static final String STORAGE_TYPE_SUMMARY_FORMAT = "%13s %17s ";

View File

@ -76,7 +76,7 @@ public class FTPFileSystem extends FileSystem {
/**
* Return the protocol scheme for the FileSystem.
* <p/>
* <p>
*
* @return <code>ftp</code>
*/
@ -162,7 +162,7 @@ public class FTPFileSystem extends FileSystem {
/**
* Set FTP's transfer mode based on configuration. Valid values are
* STREAM_TRANSFER_MODE, BLOCK_TRANSFER_MODE and COMPRESSED_TRANSFER_MODE.
* <p/>
* <p>
* Defaults to BLOCK_TRANSFER_MODE.
*
* @param conf
@ -195,7 +195,7 @@ public class FTPFileSystem extends FileSystem {
* Set the FTPClient's data connection mode based on configuration. Valid
* values are ACTIVE_LOCAL_DATA_CONNECTION_MODE,
* PASSIVE_LOCAL_DATA_CONNECTION_MODE and PASSIVE_REMOTE_DATA_CONNECTION_MODE.
* <p/>
* <p>
* Defaults to ACTIVE_LOCAL_DATA_CONNECTION_MODE.
*
* @param client

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.fs.ftp;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.util.DataChecksum;

View File

@ -22,6 +22,7 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.util.DataChecksum;

View File

@ -69,7 +69,7 @@ public class AclStatus {
/**
* Returns the list of all ACL entries, ordered by their natural ordering.
*
* @return List<AclEntry> unmodifiable ordered list of all ACL entries
* @return List&lt;AclEntry&gt; unmodifiable ordered list of all ACL entries
*/
public List<AclEntry> getEntries() {
return entries;

View File

@ -36,8 +36,8 @@ public final class AclUtil {
* Given permissions and extended ACL entries, returns the full logical ACL.
*
* @param perm FsPermission containing permissions
* @param entries List<AclEntry> containing extended ACL entries
* @return List<AclEntry> containing full logical ACL
* @param entries List&lt;AclEntry&gt; containing extended ACL entries
* @return List&lt;AclEntry&gt; containing full logical ACL
*/
public static List<AclEntry> getAclFromPermAndEntries(FsPermission perm,
List<AclEntry> entries) {
@ -93,8 +93,8 @@ public final class AclUtil {
* Translates the given permission bits to the equivalent minimal ACL.
*
* @param perm FsPermission to translate
* @return List<AclEntry> containing exactly 3 entries representing the owner,
* group and other permissions
* @return List&lt;AclEntry&gt; containing exactly 3 entries representing the
* owner, group and other permissions
*/
public static List<AclEntry> getMinimalAcl(FsPermission perm) {
return Lists.newArrayList(
@ -119,7 +119,7 @@ public final class AclUtil {
* Checks if the given entries represent a minimal ACL (contains exactly 3
* entries).
*
* @param entries List<AclEntry> entries to check
* @param entries List&lt;AclEntry&gt; entries to check
* @return boolean true if the entries represent a minimal ACL
*/
public static boolean isMinimalAcl(List<AclEntry> entries) {

View File

@ -42,7 +42,7 @@ public final class ScopedAclEntries {
* list is already sorted such that all access entries precede all default
* entries.
*
* @param aclEntries List<AclEntry> to separate
* @param aclEntries List&lt;AclEntry&gt; to separate
*/
public ScopedAclEntries(List<AclEntry> aclEntries) {
int pivot = calculatePivotOnDefaultEntries(aclEntries);
@ -59,8 +59,8 @@ public final class ScopedAclEntries {
/**
* Returns access entries.
*
* @return List<AclEntry> containing just access entries, or an empty list if
* there are no access entries
* @return List&lt;AclEntry&gt; containing just access entries, or an empty
* list if there are no access entries
*/
public List<AclEntry> getAccessEntries() {
return accessEntries;
@ -69,8 +69,8 @@ public final class ScopedAclEntries {
/**
* Returns default entries.
*
* @return List<AclEntry> containing just default entries, or an empty list if
* there are no default entries
* @return List&lt;AclEntry&gt; containing just default entries, or an empty
* list if there are no default entries
*/
public List<AclEntry> getDefaultEntries() {
return defaultEntries;
@ -78,8 +78,8 @@ public final class ScopedAclEntries {
/**
* Returns the pivot point in the list between the access entries and the
* default entries. This is the index of the first element in the list that is
* a default entry.
* default entries. This is the index of the first element in the list that
* is a default entry.
*
* @param aclBuilder ArrayList<AclEntry> containing entries to build
* @return int pivot point, or -1 if list contains no default entries

View File

@ -148,16 +148,16 @@ abstract public class Command extends Configured {
* expand arguments, and then process each argument.
* <pre>
* run
* |-> {@link #processOptions(LinkedList)}
* \-> {@link #processRawArguments(LinkedList)}
* |-> {@link #expandArguments(LinkedList)}
* | \-> {@link #expandArgument(String)}*
* \-> {@link #processArguments(LinkedList)}
* |-> {@link #processArgument(PathData)}*
* | |-> {@link #processPathArgument(PathData)}
* | \-> {@link #processPaths(PathData, PathData...)}
* | \-> {@link #processPath(PathData)}*
* \-> {@link #processNonexistentPath(PathData)}
* |{@literal ->} {@link #processOptions(LinkedList)}
* \{@literal ->} {@link #processRawArguments(LinkedList)}
* |{@literal ->} {@link #expandArguments(LinkedList)}
* | \{@literal ->} {@link #expandArgument(String)}*
* \{@literal ->} {@link #processArguments(LinkedList)}
* |{@literal ->} {@link #processArgument(PathData)}*
* | |{@literal ->} {@link #processPathArgument(PathData)}
* | \{@literal ->} {@link #processPaths(PathData, PathData...)}
* | \{@literal ->} {@link #processPath(PathData)}*
* \{@literal ->} {@link #processNonexistentPath(PathData)}
* </pre>
* Most commands will choose to implement just
* {@link #processOptions(LinkedList)} and {@link #processPath(PathData)}
@ -292,8 +292,8 @@ abstract public class Command extends Configured {
/**
* This is the last chance to modify an argument before going into the
* (possibly) recursive {@link #processPaths(PathData, PathData...)}
* -> {@link #processPath(PathData)} loop. Ex. ls and du use this to
* expand out directories.
* {@literal ->} {@link #processPath(PathData)} loop. Ex. ls and du use
* this to expand out directories.
* @param item a {@link PathData} representing a path which exists
* @throws IOException if anything goes wrong...
*/

View File

@ -162,7 +162,7 @@ public class CommandFormat {
/** Returns all the options that are set
*
* @return Set<String> of the enabled options
* @return Set{@literal <}String{@literal >} of the enabled options
*/
public Set<String> getOpts() {
Set<String> optSet = new HashSet<String>();

View File

@ -75,16 +75,16 @@ import org.apache.hadoop.util.Time;
* one or more individual file systems (a localFs or Hdfs, S3fs, etc).
* For example one could have a mount table that provides links such as
* <ul>
* <li> /user -> hdfs://nnContainingUserDir/user
* <li> /project/foo -> hdfs://nnProject1/projects/foo
* <li> /project/bar -> hdfs://nnProject2/projects/bar
* <li> /tmp -> hdfs://nnTmp/privateTmpForUserXXX
* <li> /user {@literal ->} hdfs://nnContainingUserDir/user
* <li> /project/foo {@literal ->} hdfs://nnProject1/projects/foo
* <li> /project/bar {@literal ->} hdfs://nnProject2/projects/bar
* <li> /tmp {@literal ->} hdfs://nnTmp/privateTmpForUserXXX
* </ul>
*
* ViewFs is specified with the following URI: <b>viewfs:///</b>
* <p>
* To use viewfs one would typically set the default file system in the
* config (i.e. fs.defaultFS < = viewfs:///) along with the
* config (i.e. fs.defaultFS {@literal <} = viewfs:///) along with the
* mount table config variables as described below.
*
* <p>
@ -132,7 +132,7 @@ import org.apache.hadoop.util.Time;
* (because they do not fit on one) then one could specify a mount
* entry such as the following, which merges two dirs:
* <ul>
* <li> /user -> hdfs://nnUser1/user,hdfs://nnUser2/user
* <li> /user {@literal ->} hdfs://nnUser1/user,hdfs://nnUser2/user
* </ul>
* Such a mergeLink can be specified with the following config var where ","
* is used as the separator for each of links to be merged:
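
The hunk ends before the merge-link variable itself; as a hedged illustration of the simple (non-merge) link configuration described earlier (mount table name "default"; paths and URIs illustrative):

    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "viewfs:///");
    conf.set("fs.viewfs.mounttable.default.link./user",
        "hdfs://nnContainingUserDir/user");
    conf.set("fs.viewfs.mounttable.default.link./project/foo",
        "hdfs://nnProject1/projects/foo");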

View File

@ -54,10 +54,10 @@ import org.slf4j.LoggerFactory;
* Apache Zookeeper. Using Zookeeper as a coordination service, leader election
* can be performed by atomically creating an ephemeral lock file (znode) on
* Zookeeper. The service instance that successfully creates the znode becomes
* active and the rest become standbys. <br/>
* active and the rest become standbys. <br>
* This election mechanism is only efficient for small number of election
* candidates (order of 10's) because contention on single znode by a large
* number of candidates can result in Zookeeper overload. <br/>
* number of candidates can result in Zookeeper overload. <br>
* The elector does not guarantee fencing (protection of shared resources) among
* service instances. After it has notified an instance about becoming a leader,
* then that instance must ensure that it meets the service consistency
@ -70,10 +70,10 @@ import org.slf4j.LoggerFactory;
public class ActiveStandbyElector implements StatCallback, StringCallback {
/**
* Callback interface to interact with the ActiveStandbyElector object. <br/>
* Callback interface to interact with the ActiveStandbyElector object. <br>
* The application will be notified with a callback only on state changes
* (i.e. there will never be successive calls to becomeActive without an
* intermediate call to enterNeutralMode). <br/>
* intermediate call to enterNeutralMode). <br>
* The callbacks will be running on Zookeeper client library threads. The
* application should return from these callbacks quickly so as not to impede
* Zookeeper client library performance and notifications. The app will
@ -105,7 +105,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
* interface. The service may choose to ignore this or stop doing state
* changing operations. Upon reconnection, the elector verifies the leader
* status and calls back on the becomeActive and becomeStandby app
* interfaces. <br/>
* interfaces. <br>
* Zookeeper disconnects can happen due to network issues or loss of
* Zookeeper quorum. Thus enterNeutralMode can be used to guard against
* split-brain issues. In such situations it might be prudent to call
@@ -178,12 +178,12 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
private ZooKeeper monitorLockNodeClient;
/**
* Create a new ActiveStandbyElector object <br/>
* Create a new ActiveStandbyElector object <br>
* The elector is created by providing to it the Zookeeper configuration, the
* parent znode under which to create the znode and a reference to the
* callback interface. <br/>
* callback interface. <br>
* The parent znode name must be the same for all service instances and
* different across services. <br/>
* different across services. <br>
* After the leader has been lost, a new leader will be elected after the
* session timeout expires. Hence, the app must set this parameter based on
* its needs for failure response time. The session timeout must be greater
@@ -217,12 +217,12 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
}
/**
* Create a new ActiveStandbyElector object <br/>
* Create a new ActiveStandbyElector object <br>
* The elector is created by providing to it the Zookeeper configuration, the
* parent znode under which to create the znode and a reference to the
* callback interface. <br/>
* callback interface. <br>
* The parent znode name must be the same for all service instances and
* different across services. <br/>
* different across services. <br>
* After the leader has been lost, a new leader will be elected after the
* session timeout expires. Hence, the app must set this parameter based on
* its needs for failure response time. The session timeout must be greater
@@ -278,9 +278,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
/**
* To participate in election, the app will call joinElection. The result will
* be notified by a callback on either the becomeActive or becomeStandby app
* interfaces. <br/>
* interfaces. <br>
* After this the elector will automatically monitor the leader status and
* perform re-election if necessary<br/>
* perform re-election if necessary<br>
* The app could potentially start off in standby mode and ignore the
* becomeStandby call.
*
@@ -397,11 +397,11 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
/**
* Any service instance can drop out of the election by calling quitElection.
* <br/>
* <br>
* This will lose any leader status, if held, and stop monitoring of the lock
* node. <br/>
* node. <br>
* If the instance wants to participate in election again, then it needs to
* call joinElection(). <br/>
* call joinElection(). <br>
* This allows service instances to take themselves out of rotation for known
* impending unavailable states (e.g. long GC pause or software upgrade).
*

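The callback contract documented above can be sketched as a skeleton implementation. This is a hypothetical service, written on the assumption that the nested ActiveStandbyElectorCallback interface exposes exactly the five methods discussed here.

import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
import org.apache.hadoop.ha.ServiceFailedException;

public class SketchHaService implements ActiveStandbyElectorCallback {
  @Override
  public void becomeActive() throws ServiceFailedException {
    // Start serving as leader; fencing of the old active has already run.
  }

  @Override
  public void becomeStandby() {
    // Another instance holds the lock znode; stop active duties.
  }

  @Override
  public void enterNeutralMode() {
    // ZooKeeper connection lost: neither role is guaranteed, so guard
    // state-changing operations against split-brain.
  }

  @Override
  public void notifyFatalError(String errorMessage) {
    // Unrecoverable elector error; typically shut the service down.
  }

  @Override
  public void fenceOldActive(byte[] oldActiveData) {
    // Ensure the previous leader can no longer mutate shared state.
  }
}
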
View File

@@ -372,7 +372,7 @@ public abstract class HAAdmin extends Configured implements Tool {
/**
* Return the serviceId as is; we assume it was
* given as a service address of form <host:ipcport>.
* given as a service address of form {@literal <}host:ipcport{@literal >}.
*/
protected String getServiceAddr(String serviceId) {
return serviceId;

View File

@@ -44,7 +44,7 @@ import org.slf4j.LoggerFactory;
* <code>com.example.foo.MyMethod</code>
* The class provided must implement the {@link FenceMethod} interface.
* The fencing methods that ship with Hadoop may also be referred to
* by shortened names:<p>
* by shortened names:<br>
* <ul>
* <li><code>shell(/path/to/some/script.sh args...)</code></li>
* <li><code>sshfence(...)</code> (see {@link SshFenceByTcpPort})

View File

@@ -52,7 +52,7 @@ import org.slf4j.LoggerFactory;
* with ssh.
* <p>
* In order to achieve passwordless SSH, the operator must also configure
* <code>dfs.ha.fencing.ssh.private-key-files<code> to point to an
* <code>dfs.ha.fencing.ssh.private-key-files</code> to point to an
* SSH key that has passphrase-less access to the given username and host.
*/
public class SshFenceByTcpPort extends Configured

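Putting the two fencing classes above together, a hedged configuration sketch might look as follows. The key names are the standard dfs.ha.fencing.* properties mentioned in these docs, while the username, port, script path and key path are illustrative placeholders.

import org.apache.hadoop.conf.Configuration;

public class FencingConfSketch {
  public static Configuration fencingConf() {
    Configuration conf = new Configuration();
    // Methods are tried in order, one per line: ssh fencing first, then a
    // site-specific shell script.
    conf.set("dfs.ha.fencing.methods",
        "sshfence(hadoop:22)\nshell(/path/to/some/script.sh arg1 arg2)");
    // Passphrase-less key required by sshfence, as described above.
    conf.set("dfs.ha.fencing.ssh.private-key-files",
        "/home/hadoop/.ssh/id_rsa");
    return conf;
  }
}
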
View File

@@ -107,9 +107,9 @@ import org.slf4j.LoggerFactory;
/**
* Create a Jetty embedded server to answer http requests. The primary goal is
* to serve up status information for the server. There are three contexts:
* "/logs/" -> points to the log directory "/static/" -> points to common static
* files (src/webapps/static) "/" -> the jsp server code from
* (src/webapps/<name>)
* "/logs/" {@literal ->} points to the log directory "/static/" {@literal ->}
* points to common static files (src/webapps/static) "/" {@literal ->} the
* jsp server code from (src/webapps/{@literal <}name{@literal >})
*
* This class is a fork of the old HttpServer. HttpServer exists for
* compatibility reasons. See HBASE-10336 for more details.
@@ -1364,10 +1364,10 @@ public final class HttpServer2 implements FilterContainer {
/**
* Checks the user has privileges to access to instrumentation servlets.
* <p/>
* <p>
* If <code>hadoop.security.instrumentation.requires.admin</code> is set to FALSE
* (default value) it always returns TRUE.
* <p/>
* <p>
* If <code>hadoop.security.instrumentation.requires.admin</code> is set to TRUE
* it will check that if the current user is in the admin ACLS. If the user is
* in the admin ACLs it returns TRUE, otherwise it returns FALSE.

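A minimal sketch of flipping the switch described above, using the property name given in the javadoc (the servlet named in the comment is an illustrative assumption):

import org.apache.hadoop.conf.Configuration;

public class InstrumentationAclSketch {
  public static Configuration adminOnlyInstrumentation() {
    Configuration conf = new Configuration();
    // Restrict instrumentation servlets (e.g. /jmx) to users in the admin ACL.
    conf.setBoolean("hadoop.security.instrumentation.requires.admin", true);
    return conf;
  }
}
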
View File

@@ -83,7 +83,7 @@ public class EnumSetWritable<E extends Enum<E>> extends AbstractCollection<E>
/**
* reset the EnumSetWritable with specified
* <tt>value</value> and <tt>elementType</tt>. If the <tt>value</tt> argument
* <tt>value</tt> and <tt>elementType</tt>. If the <tt>value</tt> argument
* is null or its size is zero, the <tt>elementType</tt> argument must not be
* null. If the argument <tt>value</tt>'s size is bigger than zero, the
* argument <tt>elementType</tt> is not used.

View File

@@ -370,7 +370,7 @@ public class IOUtils {
}
/**
* Return the complete list of files in a directory as strings.<p/>
* Return the complete list of files in a directory as strings.<p>
*
* This is better than File#listDir because it does not ignore IOExceptions.
*

View File

@@ -80,7 +80,7 @@ public class ReadaheadPool {
* @param readaheadLength the configured length to read ahead
* @param maxOffsetToRead the maximum offset that will be readahead
* (useful if, for example, only some segment of the file is
* requested by the user). Pass {@link Long.MAX_VALUE} to allow
* requested by the user). Pass {@link Long#MAX_VALUE} to allow
* readahead to the end of the file.
* @param lastReadahead the result returned by the previous invocation
* of this function on this file descriptor, or null if this is

View File

@@ -37,7 +37,7 @@ import com.google.common.annotations.VisibleForTesting;
/**
* This class provides secure APIs for opening and creating files on the local
* disk. The main issue this class tries to handle is that of symlink traversal.
* <br/>
* <br>
* An example of such an attack is:
* <ol>
* <li> Malicious user removes his task's syslog file, and puts a link to the
@@ -50,7 +50,7 @@ import com.google.common.annotations.VisibleForTesting;
* </ol>
* A similar attack is possible involving task log truncation, but in that case
* due to an insecure write to a file.
* <br/>
* <br>
*/
public class SecureIOUtils {

View File

@@ -79,7 +79,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
* values.
* </li>
* <li>
* <code>BlockCompressWriter</code> : Block-compressed files, both keys &
* <code>BlockCompressWriter</code> : Block-compressed files, both keys &amp;
* values are collected in 'blocks'
* separately and compressed. The size of
* the 'block' is configurable.
@@ -94,13 +94,13 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
* <p>The {@link SequenceFile.Reader} acts as the bridge and can read any of the
* above <code>SequenceFile</code> formats.</p>
*
* <h4 id="Formats">SequenceFile Formats</h4>
* <h3 id="Formats">SequenceFile Formats</h3>
*
* <p>Essentially there are 3 different formats for <code>SequenceFile</code>s
* depending on the <code>CompressionType</code> specified. All of them share a
* <a href="#Header">common header</a> described below.
*
* <h5 id="Header">SequenceFile Header</h5>
* <h4 id="Header">SequenceFile Header</h4>
* <ul>
* <li>
* version - 3 bytes of magic header <b>SEQ</b>, followed by 1 byte of actual
@@ -133,7 +133,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
* </li>
* </ul>
*
* <h5 id="#UncompressedFormat">Uncompressed SequenceFile Format</h5>
* <h5>Uncompressed SequenceFile Format</h5>
* <ul>
* <li>
* <a href="#Header">Header</a>
@@ -152,7 +152,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
* </li>
* </ul>
*
* <h5 id="#RecordCompressedFormat">Record-Compressed SequenceFile Format</h5>
* <h5>Record-Compressed SequenceFile Format</h5>
* <ul>
* <li>
* <a href="#Header">Header</a>
@@ -171,7 +171,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
* </li>
* </ul>
*
* <h5 id="#BlockCompressedFormat">Block-Compressed SequenceFile Format</h5>
* <h5>Block-Compressed SequenceFile Format</h5>
* <ul>
* <li>
* <a href="#Header">Header</a>
@@ -1935,8 +1935,8 @@ public class SequenceFile {
* @param fs The file system used to open the file.
* @param file The file being read.
* @param bufferSize The buffer size used to read the file.
* @param length The length being read if it is >= 0. Otherwise,
* the length is not available.
* @param length The length being read if it is {@literal >=} 0.
* Otherwise, the length is not available.
* @return The opened stream.
* @throws IOException
*/

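For orientation, here is a hedged sketch of writing and then scanning a block-compressed SequenceFile through the option-based Writer/Reader factories; the path and key/value types are arbitrary choices, not anything this commit prescribes.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class SequenceFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path("/tmp/example.seq");
    // Write a block-compressed file (keys and values compressed in blocks).
    try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(file),
        SequenceFile.Writer.keyClass(Text.class),
        SequenceFile.Writer.valueClass(IntWritable.class),
        SequenceFile.Writer.compression(SequenceFile.CompressionType.BLOCK))) {
      writer.append(new Text("key"), new IntWritable(1));
    }
    // The Reader detects the format from the common header.
    try (SequenceFile.Reader reader =
        new SequenceFile.Reader(conf, SequenceFile.Reader.file(file))) {
      Text key = new Text();
      IntWritable value = new IntWritable();
      while (reader.next(key, value)) {
        System.out.println(key + "\t" + value);
      }
    }
  }
}
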
View File

@@ -37,7 +37,7 @@ import org.apache.hadoop.classification.InterfaceStability;
* and returns the instance.</p>
*
* <p>Example:</p>
* <p><blockquote><pre>
* <blockquote><pre>
* public class MyWritable implements Writable {
* // Some data
* private int counter;
@@ -62,7 +62,7 @@ import org.apache.hadoop.classification.InterfaceStability;
* return w;
* }
* }
* </pre></blockquote></p>
* </pre></blockquote>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable

View File

@@ -36,8 +36,9 @@ import org.apache.hadoop.classification.InterfaceStability;
* satisfy this property.</p>
*
* <p>Example:</p>
* <p><blockquote><pre>
* public class MyWritableComparable implements WritableComparable<MyWritableComparable> {
* <blockquote><pre>
* public class MyWritableComparable implements
* WritableComparable{@literal <MyWritableComparable>} {
* // Some data
* private int counter;
* private long timestamp;
@@ -66,7 +67,7 @@ import org.apache.hadoop.classification.InterfaceStability;
* return result;
* }
* }
* </pre></blockquote></p>
* </pre></blockquote>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable

View File

@@ -236,7 +236,8 @@ public final class WritableUtils {
/**
* Serializes an integer to a binary stream with zero-compressed encoding.
* For -112 <= i <= 127, only one byte is used with the actual value.
* For -112 {@literal <=} i {@literal <=} 127, only one byte is used with the
* actual value.
* For other values of i, the first byte value indicates whether the
* integer is positive or negative, and the number of bytes that follow.
* If the first byte value v is between -113 and -116, the following integer
@@ -255,7 +256,8 @@ public final class WritableUtils {
/**
* Serializes a long to a binary stream with zero-compressed encoding.
* For -112 <= i <= 127, only one byte is used with the actual value.
* For -112 {@literal <=} i {@literal <=} 127, only one byte is used with the
* actual value.
* For other values of i, the first byte value indicates whether the
* long is positive or negative, and the number of bytes that follow.
* If the first byte value v is between -113 and -120, the following long

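A small round-trip sketch of the zero-compressed encoding described above: a value in [-112, 127] costs one byte, while larger magnitudes spill into the multi-byte form. The buffer plumbing exists only for the example.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;

public class VIntSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    WritableUtils.writeVInt(out, 127);       // in [-112, 127]: one byte
    WritableUtils.writeVLong(out, 1L << 40); // larger magnitude: multi-byte
    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray()));
    System.out.println(WritableUtils.readVInt(in));  // 127
    System.out.println(WritableUtils.readVLong(in)); // 1099511627776
  }
}
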
View File

@@ -227,9 +227,9 @@ public class CompressionCodecFactory {
/**
* Find the relevant compression codec for the codec's canonical class name
* or by codec alias.
* <p/>
* <p>
* Codec aliases are case insensitive.
* <p/>
* <p>
* The codec alias is the short class name (without the package name).
* If the short class name ends with 'Codec', then there are two aliases for
* the codec, the complete short class name and the short class name without
@@ -255,9 +255,9 @@ public class CompressionCodecFactory {
/**
* Find the relevant compression codec for the codec's canonical class name
* or by codec alias and returns its implemetation class.
* <p/>
* <p>
* Codec aliases are case insensitive.
* <p/>
* <p>
* The codec alias is the short class name (without the package name).
* If the short class name ends with 'Codec', then there are two aliases for
* the codec, the complete short class name and the short class name without

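The alias rules above suggest the following hedged lookup sketch; it assumes GzipCodec is registered (it is part of the default codec list), so that the short alias and the canonical class name resolve to the same codec.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;

public class CodecAliasSketch {
  public static void main(String[] args) {
    CompressionCodecFactory factory =
        new CompressionCodecFactory(new Configuration());
    // Case-insensitive short alias ("gzip") and the canonical class name
    // should resolve to the same codec class.
    CompressionCodec byAlias = factory.getCodecByName("gzip");
    CompressionCodec byClassName =
        factory.getCodecByName("org.apache.hadoop.io.compress.GzipCodec");
    System.out.println(byAlias.getClass() == byClassName.getClass()); // true
  }
}
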
View File

@@ -61,9 +61,9 @@ public class Lz4Codec implements Configurable, CompressionCodec {
}
/**
* Are the native lz4 libraries loaded & initialized?
* Are the native lz4 libraries loaded &amp; initialized?
*
* @return true if loaded & initialized, otherwise false
* @return true if loaded &amp; initialized, otherwise false
*/
public static boolean isNativeCodeLoaded() {
return NativeCodeLoader.isNativeCodeLoaded();

View File

@@ -57,7 +57,7 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
}
/**
* Are the native snappy libraries loaded & initialized?
* Are the native snappy libraries loaded &amp; initialized?
*/
public static void checkNativeCodeLoaded() {
if (!NativeCodeLoader.buildSupportsSnappy()) {

View File

@@ -247,7 +247,7 @@ public class Bzip2Compressor implements Compressor {
}
/**
* Returns the total number of uncompressed bytes input so far.</p>
* Returns the total number of uncompressed bytes input so far.
*
* @return the total (non-negative) number of uncompressed bytes input so far
*/

View File

@@ -183,7 +183,7 @@ public class Bzip2Decompressor implements Decompressor {
}
/**
* Returns the total number of compressed bytes input so far.</p>
* Returns the total number of compressed bytes input so far.
*
* @return the total (non-negative) number of compressed bytes input so far
*/
@@ -195,7 +195,7 @@ public class Bzip2Decompressor implements Decompressor {
/**
* Returns the number of bytes remaining in the input buffers; normally
* called when finished() is true to determine amount of post-gzip-stream
* data.</p>
* data.
*
* @return the total (non-negative) number of unprocessed bytes in input
*/
@@ -206,7 +206,7 @@ public class Bzip2Decompressor implements Decompressor {
}
/**
* Resets everything including the input buffers (user and direct).</p>
* Resets everything including the input buffers (user and direct).
*/
@Override
public synchronized void reset() {

View File

@@ -37,11 +37,11 @@ public class Bzip2Factory {
private static boolean nativeBzip2Loaded;
/**
* Check if native-bzip2 code is loaded & initialized correctly and
* Check if native-bzip2 code is loaded &amp; initialized correctly and
* can be loaded for this job.
*
* @param conf configuration
* @return <code>true</code> if native-bzip2 is loaded & initialized
* @return <code>true</code> if native-bzip2 is loaded &amp; initialized
* and can be loaded for this job, else <code>false</code>
*/
public static synchronized boolean isNativeBzip2Loaded(Configuration conf) {

View File

@@ -200,20 +200,18 @@ public class CBZip2InputStream extends InputStream implements BZip2Constants {
}
/**
* This method tries to find the marker (passed to it as the first parameter)
* in the stream. It can find bit patterns of length <= 63 bits. Specifically
* this method is used in CBZip2InputStream to find the end of block (EOB)
* delimiter in the stream, starting from the current position of the stream.
* If marker is found, the stream position will be at the byte containing
* the starting bit of the marker.
*
* @param marker The bit pattern to be found in the stream
* @param markerBitLength No of bits in the marker
* @return true if the marker was found otherwise false
*
* @throws IOException
* @throws IllegalArgumentException if marketBitLength is greater than 63
*/
* This method tries to find the marker (passed to it as the first parameter)
* in the stream. It can find bit patterns of length &lt;= 63 bits.
* Specifically this method is used in CBZip2InputStream to find the end of
* block (EOB) delimiter in the stream, starting from the current position
* of the stream. If marker is found, the stream position will be at the
* byte containing the starting bit of the marker.
* @param marker The bit pattern to be found in the stream
* @param markerBitLength Number of bits in the marker
* @return true if the marker was found otherwise false
* @throws IOException
* @throws IllegalArgumentException if markerBitLength is greater than 63
*/
public boolean skipToNextMarker(long marker, int markerBitLength)
throws IOException, IllegalArgumentException {
try {

View File

@@ -64,7 +64,8 @@ import org.apache.hadoop.io.IOUtils;
* </pre>
*
* <table width="100%" border="1">
* <colgroup> <col width="33%" /> <col width="33%" /> <col width="33%" />
* <caption></caption>
* <colgroup> <col width="33%" > <col width="33%" > <col width="33%" >
* </colgroup>
* <tr>
* <th colspan="3">Memory usage by blocksize</th>
@@ -614,9 +615,9 @@ public class CBZip2OutputStream extends OutputStream implements BZip2Constants {
* @throws IOException
* if an I/O error occurs in the specified stream.
* @throws IllegalArgumentException
* if <code>(blockSize < 1) || (blockSize > 9)</code>.
* if {@code (blockSize < 1) || (blockSize > 9)}
* @throws NullPointerException
* if <code>out == null</code>.
* if {@code out == null}.
*
* @see #MIN_BLOCKSIZE
* @see #MAX_BLOCKSIZE

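A brief sketch of the blockSize contract documented above; the payload is arbitrary, and the 100k-unit interpretation of the block size follows the usual bzip2 convention.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.io.compress.bzip2.CBZip2OutputStream;

public class Bzip2BlockSizeSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    // 9 selects the largest block size; anything outside [1, 9] would
    // throw IllegalArgumentException per the contract above.
    try (CBZip2OutputStream out = new CBZip2OutputStream(sink, 9)) {
      out.write("example payload".getBytes(StandardCharsets.UTF_8));
    }
  }
}
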
View File

@@ -404,7 +404,7 @@ public class BuiltInGzipDecompressor implements Decompressor {
/**
* Returns the total number of compressed bytes input so far, including
* gzip header/trailer bytes.</p>
* gzip header/trailer bytes.
*
* @return the total (non-negative) number of compressed bytes read so far
*/
@@ -420,7 +420,7 @@ public class BuiltInGzipDecompressor implements Decompressor {
* non-zero value unless called after {@link #setInput(byte[] b, int off,
* int len)} and before {@link #decompress(byte[] b, int off, int len)}.
* (That is, after {@link #decompress(byte[] b, int off, int len)} it
* always returns zero, except in finished state with concatenated data.)</p>
* always returns zero, except in finished state with concatenated data.)
*
* @return the total (non-negative) number of unprocessed bytes in input
*/
@@ -441,7 +441,7 @@ public class BuiltInGzipDecompressor implements Decompressor {
/**
* Returns true if the end of the gzip substream (single "member") has been
* reached.</p>
* reached.
*/
@Override
public synchronized boolean finished() {
@@ -450,7 +450,7 @@ public class BuiltInGzipDecompressor implements Decompressor {
/**
* Resets everything, including the input buffer, regardless of whether the
* current gzip substream is finished.</p>
* current gzip substream is finished.
*/
@Override
public synchronized void reset() {

View File

@@ -435,7 +435,7 @@ public class ZlibCompressor implements Compressor {
}
/**
* Returns the total number of uncompressed bytes input so far.</p>
* Returns the total number of uncompressed bytes input so far.
*
* @return the total (non-negative) number of uncompressed bytes input so far
*/

View File

@@ -243,7 +243,7 @@ public class ZlibDecompressor implements Decompressor {
}
/**
* Returns the total number of compressed bytes input so far.</p>
* Returns the total number of compressed bytes input so far.
*
* @return the total (non-negative) number of compressed bytes input so far
*/
@@ -255,7 +255,7 @@ public class ZlibDecompressor implements Decompressor {
/**
* Returns the number of bytes remaining in the input buffers; normally
* called when finished() is true to determine amount of post-gzip-stream
* data.</p>
* data.
*
* @return the total (non-negative) number of unprocessed bytes in input
*/
@@ -266,7 +266,7 @@ public class ZlibDecompressor implements Decompressor {
}
/**
* Resets everything including the input buffers (user and direct).</p>
* Resets everything including the input buffers (user and direct).
*/
@Override
public void reset() {

View File

@@ -73,11 +73,11 @@ public class ZlibFactory {
ZlibFactory.nativeZlibLoaded = isLoaded;
}
/**
* Check if native-zlib code is loaded & initialized correctly and
* Check if native-zlib code is loaded &amp; initialized correctly and
* can be loaded for this job.
*
* @param conf configuration
* @return <code>true</code> if native-zlib is loaded & initialized
* @return <code>true</code> if native-zlib is loaded &amp; initialized
* and can be loaded for this job, else <code>false</code>
*/
public static boolean isNativeZlibLoaded(Configuration conf) {

View File

@@ -36,7 +36,7 @@ import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
/**
* A codec & coder utility to help create coders conveniently.
* A codec &amp; coder utility to help create coders conveniently.
*
* {@link CodecUtil} includes erasure coder configurations key and default
* values such as coder class name and erasure codec option values included

View File

@@ -518,7 +518,7 @@ public class GaloisField {
/**
* Perform Gaussian elimination on the given matrix. This matrix has to be a
* fat matrix (number of rows > number of columns).
* fat matrix (number of rows &gt; number of columns).
*/
public void gaussianElimination(int[][] matrix) {
assert(matrix != null && matrix.length > 0 && matrix[0].length > 0

View File

@@ -1308,11 +1308,11 @@ public class TFile {
* @param reader
* The TFile reader object.
* @param beginKey
* Begin key of the scan. If null, scan from the first <K,V>
* entry of the TFile.
* Begin key of the scan. If null, scan from the first
* &lt;K, V&gt; entry of the TFile.
* @param endKey
* End key of the scan. If null, scan up to the last <K, V> entry
* of the TFile.
* End key of the scan. If null, scan up to the last &lt;K, V&gt;
* entry of the TFile.
* @throws IOException
*/
protected Scanner(Reader reader, RawComparable beginKey,

View File

@@ -62,27 +62,33 @@ public final class Utils {
* <li>if n in [-32, 127): encode in one byte with the actual value.
* Otherwise,
* <li>if n in [-20*2^8, 20*2^8): encode in two bytes: byte[0] = n/256 - 52;
* byte[1]=n&0xff. Otherwise,
* byte[1]=n&amp;0xff. Otherwise,
* <li>if n IN [-16*2^16, 16*2^16): encode in three bytes: byte[0]=n/2^16 -
* 88; byte[1]=(n>>8)&0xff; byte[2]=n&0xff. Otherwise,
* 88; byte[1]=(n&gt;&gt;8)&amp;0xff; byte[2]=n&amp;0xff. Otherwise,
* <li>if n in [-8*2^24, 8*2^24): encode in four bytes: byte[0]=n/2^24 - 112;
* byte[1] = (n>>16)&0xff; byte[2] = (n>>8)&0xff; byte[3]=n&0xff. Otherwise:
* byte[1] = (n&gt;&gt;16)&amp;0xff; byte[2] = (n&gt;&gt;8)&amp;0xff;
* byte[3]=n&amp;0xff.
* Otherwise:
* <li>if n in [-2^31, 2^31): encode in five bytes: byte[0]=-125; byte[1] =
* (n>>24)&0xff; byte[2]=(n>>16)&0xff; byte[3]=(n>>8)&0xff; byte[4]=n&0xff;
* (n&gt;&gt;24)&amp;0xff; byte[2]=(n&gt;&gt;16)&amp;0xff;
* byte[3]=(n&gt;&gt;8)&amp;0xff; byte[4]=n&amp;0xff;
* <li>if n in [-2^39, 2^39): encode in six bytes: byte[0]=-124; byte[1] =
* (n>>32)&0xff; byte[2]=(n>>24)&0xff; byte[3]=(n>>16)&0xff;
* byte[4]=(n>>8)&0xff; byte[5]=n&0xff
* (n&gt;&gt;32)&amp;0xff; byte[2]=(n&gt;&gt;24)&amp;0xff;
* byte[3]=(n&gt;&gt;16)&amp;0xff; byte[4]=(n&gt;&gt;8)&amp;0xff;
* byte[5]=n&amp;0xff
* <li>if n in [-2^47, 2^47): encode in seven bytes: byte[0]=-123; byte[1] =
* (n>>40)&0xff; byte[2]=(n>>32)&0xff; byte[3]=(n>>24)&0xff;
* byte[4]=(n>>16)&0xff; byte[5]=(n>>8)&0xff; byte[6]=n&0xff;
* (n&gt;&gt;40)&amp;0xff; byte[2]=(n&gt;&gt;32)&amp;0xff;
* byte[3]=(n&gt;&gt;24)&amp;0xff; byte[4]=(n&gt;&gt;16)&amp;0xff;
* byte[5]=(n&gt;&gt;8)&amp;0xff; byte[6]=n&amp;0xff;
* <li>if n in [-2^55, 2^55): encode in eight bytes: byte[0]=-122; byte[1] =
* (n>>48)&0xff; byte[2] = (n>>40)&0xff; byte[3]=(n>>32)&0xff;
* byte[4]=(n>>24)&0xff; byte[5]=(n>>16)&0xff; byte[6]=(n>>8)&0xff;
* byte[7]=n&0xff;
* (n&gt;&gt;48)&amp;0xff; byte[2] = (n&gt;&gt;40)&amp;0xff;
* byte[3]=(n&gt;&gt;32)&amp;0xff; byte[4]=(n&gt;&gt;24)&amp;0xff; byte[5]=
* (n&gt;&gt;16)&amp;0xff; byte[6]=(n&gt;&gt;8)&amp;0xff; byte[7]=n&amp;0xff;
* <li>if n in [-2^63, 2^63): encode in nine bytes: byte[0]=-121; byte[1] =
* (n>>54)&0xff; byte[2] = (n>>48)&0xff; byte[3] = (n>>40)&0xff;
* byte[4]=(n>>32)&0xff; byte[5]=(n>>24)&0xff; byte[6]=(n>>16)&0xff;
* byte[7]=(n>>8)&0xff; byte[8]=n&0xff;
* (n&gt;&gt;54)&amp;0xff; byte[2] = (n&gt;&gt;48)&amp;0xff;
* byte[3] = (n&gt;&gt;40)&amp;0xff; byte[4]=(n&gt;&gt;32)&amp;0xff;
* byte[5]=(n&gt;&gt;24)&amp;0xff; byte[6]=(n&gt;&gt;16)&amp;0xff; byte[7]=
* (n&gt;&gt;8)&amp;0xff; byte[8]=n&amp;0xff;
* </ul>
*
* @param out
@@ -181,15 +187,15 @@ public final class Utils {
* Decoding the variable-length integer. Suppose the value of the first byte
* is FB, and the following bytes are NB[*].
* <ul>
* <li>if (FB >= -32), return (long)FB;
* <li>if (FB in [-72, -33]), return (FB+52)<<8 + NB[0]&0xff;
* <li>if (FB in [-104, -73]), return (FB+88)<<16 + (NB[0]&0xff)<<8 +
* NB[1]&0xff;
* <li>if (FB in [-120, -105]), return (FB+112)<<24 + (NB[0]&0xff)<<16 +
* (NB[1]&0xff)<<8 + NB[2]&0xff;
* <li>if (FB &gt;= -32), return (long)FB;
* <li>if (FB in [-72, -33]), return (FB+52)&lt;&lt;8 + NB[0]&amp;0xff;
* <li>if (FB in [-104, -73]), return (FB+88)&lt;&lt;16 +
* (NB[0]&amp;0xff)&lt;&lt;8 + NB[1]&amp;0xff;
* <li>if (FB in [-120, -105]), return (FB+112)&lt;&lt;24 + (NB[0]&amp;0xff)
* &lt;&lt;16 + (NB[1]&amp;0xff)&lt;&lt;8 + NB[2]&amp;0xff;
* <li>if (FB in [-128, -121]), interpret NB[FB+129] as a signed
* big-endian integer and return it.
*
* </ul>
* @param in
* input stream
* @return the decoded long integer.

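A round-trip sketch of the encoding table above, assuming the Utils class in org.apache.hadoop.io.file.tfile is used directly: 100 falls in the one-byte range [-32, 127), while 2^33 needs the multi-byte form.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.file.tfile.Utils;

public class TFileVLongSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    Utils.writeVLong(out, 100L);      // in [-32, 127): single byte
    Utils.writeVLong(out, 1L << 33);  // needs the multi-byte form
    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray()));
    System.out.println(Utils.readVLong(in)); // 100
    System.out.println(Utils.readVLong(in)); // 8589934592
  }
}
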
View File

@@ -89,12 +89,12 @@ public class RetryProxy {
*
* @param iface the interface that the retry will implement
* @param proxyProvider provides implementation instances whose methods should be retried
* @param methodNameToPolicyMapa map of method names to retry policies
* @param methodNameToPolicyMap map of method names to retry policies
* @return the retry proxy
*/
public static <T> Object create(Class<T> iface,
FailoverProxyProvider<T> proxyProvider,
Map<String,RetryPolicy> methodNameToPolicyMap,
Map<String, RetryPolicy> methodNameToPolicyMap,
RetryPolicy defaultPolicy) {
return Proxy.newProxyInstance(
proxyProvider.getInterface().getClassLoader(),

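Alongside the failover-based overload shown in this hunk, a hedged sketch of the simpler single-policy form is below; MyProtocol and its implementation are hypothetical placeholders.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;

public class RetryProxySketch {
  // Hypothetical protocol interface whose calls should be retried.
  interface MyProtocol {
    String ping();
  }

  static MyProtocol withRetries(MyProtocol impl) {
    RetryPolicy policy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        3, 100, TimeUnit.MILLISECONDS);
    // Each method invocation on the proxy is retried per the policy.
    return (MyProtocol) RetryProxy.create(MyProtocol.class, impl, policy);
  }
}
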
View File

@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability;
/**
* <p>
* Provides a facility for deserializing objects of type <T> from an
* Provides a facility for deserializing objects of type {@literal <T>} from an
* {@link InputStream}.
* </p>
*

View File

@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability;
/**
* <p>
* Provides a facility for serializing objects of type <T> to an
* Provides a facility for serializing objects of type &lt;T&gt; to an
* {@link OutputStream}.
* </p>
*

View File

@@ -131,7 +131,7 @@ public final class CallerContext {
/**
* The thread local current caller context.
* <p/>
* <p>
* Internal class for the deferred singleton idiom.
* https://en.wikipedia.org/wiki/Initialization_on_demand_holder_idiom
*/

View File

@@ -1357,7 +1357,7 @@ public class Client implements AutoCloseable {
* @param remoteId - the target rpc server
* @param fallbackToSimpleAuth - set to true or false during this method to
* indicate if a secure client falls back to simple auth
* @returns the rpc response
* @return the rpc response
* Throws exceptions if there are network problems or if the remote code
* threw an exception.
*/
@@ -1392,7 +1392,7 @@ public class Client implements AutoCloseable {
* @param serviceClass - service class for RPC
* @param fallbackToSimpleAuth - set to true or false during this method to
* indicate if a secure client falls back to simple auth
* @returns the rpc response
* @return the rpc response
* Throws exceptions if there are network problems or if the remote code
* threw an exception.
*/
@@ -1461,7 +1461,7 @@ public class Client implements AutoCloseable {
/**
* Check if RPC is in asynchronous mode or not.
*
* @returns true, if RPC is in asynchronous mode, otherwise false for
* @return true, if RPC is in asynchronous mode, otherwise false for
* synchronous mode.
*/
@Unstable
@@ -1575,7 +1575,8 @@ public class Client implements AutoCloseable {
/**
* This class holds the address and the user ticket. The client connections
* to servers are uniquely identified by <remoteAddress, protocol, ticket>
* to servers are uniquely identified by {@literal <}remoteAddress, protocol,
* ticket{@literal >}
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving

View File

@@ -37,7 +37,7 @@ public class ClientCache {
new HashMap<SocketFactory, Client>();
/**
* Construct & cache an IPC client with the user-provided SocketFactory
* Construct &amp; cache an IPC client with the user-provided SocketFactory
* if no cached client exists.
*
* @param conf Configuration
@@ -66,7 +66,7 @@
}
/**
* Construct & cache an IPC client with the default SocketFactory
* Construct &amp; cache an IPC client with the default SocketFactory
* and default valueClass if no cached client exists.
*
* @param conf Configuration
@@ -77,7 +77,7 @@
}
/**
* Construct & cache an IPC client with the user-provided SocketFactory
* Construct &amp; cache an IPC client with the user-provided SocketFactory
* if no cached client exists. Default response type is ObjectWritable.
*
* @param conf Configuration

View File

@@ -76,7 +76,8 @@ public class DecayRpcScheduler implements RpcScheduler,
/**
* Decay factor controls how much each count is suppressed by on each sweep.
* Valid numbers are > 0 and < 1. Decay factor works in tandem with period
* Valid numbers are &gt; 0 and &lt; 1. Decay factor works in tandem with
* period
* to control how long the scheduler remembers an identity.
*/
public static final String IPC_SCHEDULER_DECAYSCHEDULER_FACTOR_KEY =

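A tuning sketch for the decay factor introduced above. The relative key comes from the public constant; the per-port "ipc.<port>." namespace prefix is an assumption about how scheduler keys are scoped, so treat it as illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.DecayRpcScheduler;

public class DecayFactorSketch {
  public static Configuration tuneDecay(int ipcPort) {
    Configuration conf = new Configuration();
    // The relative key is taken from the public constant above; the
    // "ipc.<port>." prefix here is an assumption for illustration.
    conf.setDouble("ipc." + ipcPort + "."
        + DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_FACTOR_KEY, 0.5);
    return conf;
  }
}
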
View File

@@ -28,7 +28,6 @@ public interface RefreshHandler {
* Implement this method to accept refresh requests from the administrator.
* @param identifier is the identifier you registered earlier
* @param args contains a list of string args from the administrator
* @throws Exception as a shorthand for a RefreshResponse(-1, message)
* @return a RefreshResponse
*/
RefreshResponse handleRefresh(String identifier, String[] args);

View File

@@ -102,7 +102,7 @@ public class RemoteException extends IOException {
* a <code>String</code> as a parameter.
* Otherwise it returns this.
*
* @return <code>Throwable
* @return <code>Throwable</code>
*/
public IOException unwrapRemoteException() {
try {

View File

@@ -2747,7 +2747,7 @@ public abstract class Server {
/**
* Constructs a server listening on the named port and address. Parameters passed must
* be of the named class. The <code>handlerCount</handlerCount> determines
* be of the named class. The <code>handlerCount</code> determines
* the number of handler threads that will be used to process calls.
* If queueSizePerHandler or numReaders are not -1 they will be used instead of parameters
* from configuration. Otherwise the configuration will be picked up.

View File

@@ -70,7 +70,7 @@ import java.util.Set;
* <p>
* The optional <code>get</code> parameter is used to query a specific
* attribute of a JMX bean. The format of the URL is
* <code>http://.../jmx?get=MXBeanName::AttributeName<code>
* <code>http://.../jmx?get=MXBeanName::AttributeName</code>
* <p>
* For example
* <code>
@@ -85,7 +85,7 @@ import java.util.Set;
* <p>
* The return format is JSON and in the form
* <p>
* <code><pre>
* <pre><code>
* {
* "beans" : [
* {
@@ -94,7 +94,7 @@ import java.util.Set;
* }
* ]
* }
* </pre></code>
* </code></pre>
* <p>
* The servlet attempts to convert the JMXBeans into JSON. Each
* bean's attributes will be converted to a JSON object member.

View File

@@ -62,10 +62,10 @@ import org.apache.hadoop.util.Timer;
* still maintaining overall information about how many large requests were
* received.
*
* <p/>This class can also be used to coordinate multiple logging points; see
* <p>This class can also be used to coordinate multiple logging points; see
* {@link #record(String, long, double...)} for more details.
*
* <p/>This class is not thread-safe.
* <p>This class is not thread-safe.
*/
public class LogThrottlingHelper {
@@ -175,7 +175,7 @@ public class LogThrottlingHelper {
* about the values specified since the last time the caller was expected to
* write to its log.
*
* <p/>Specifying multiple values will maintain separate summary statistics
* <p>Specifying multiple values will maintain separate summary statistics
* about each value. For example:
* <pre>{@code
* helper.record(1, 0);
@@ -230,7 +230,7 @@ public class LogThrottlingHelper {
* iteration as "pre", yet each one is able to maintain its own summary
* information.
*
* <p/>Other behavior is the same as {@link #record(double...)}.
* <p>Other behavior is the same as {@link #record(double...)}.
*
* @param recorderName The name of the recorder. This is used to check if the
* current recorder is the primary. Other names are

View File

@@ -251,7 +251,7 @@ public class MutableRollingAverages extends MutableMetric implements Closeable {
}
/**
* Retrieve a map of metric name -> (aggregate).
* Retrieve a map of metric name {@literal ->} (aggregate).
* Filter out entries that don't have at least minSamples.
*
* @return a map of peer DataNode Id to the average latency to that

View File

@@ -87,6 +87,7 @@
<h2><a name="gettingstarted">Getting started</a></h2>
<h3>Implementing metrics sources</h3>
<table width="99%" border="1" cellspacing="0" cellpadding="4">
<caption></caption>
<tbody>
<tr>
<th>Using annotations</th><th>Using MetricsSource interface</th>
@@ -289,6 +290,7 @@
backend that can handle multiple contexts (file, ganglia etc.):
</p>
<table width="99%" border="1" cellspacing="0" cellpadding="4">
<caption></caption>
<tbody>
<tr>
<th width="40%">Before</th><th>After</th>
@@ -310,6 +312,7 @@
using the context option in the sink options like the following:
</p>
<table width="99%" border="1" cellspacing="0" cellpadding="4">
<caption></caption>
<tbody>
<tr>
<th width="40%">Before</th><th>After</th>

View File

@@ -111,7 +111,7 @@ import org.apache.hadoop.security.UserGroupInformation;
* <i>unknown</i>.</p>
*
* <p>Instead of appending to an existing file, by default the sink
* will create a new file with a suffix of &quot;.&lt;n&gt;&quet;, where
* will create a new file with a suffix of &quot;.&lt;n&gt;&quot;, where
* <i>n</i> is the next lowest integer that isn't already used in a file name,
* similar to the Hadoop daemon logs. NOTE: the file with the <b>highest</b>
* sequence number is the <b>newest</b> file, unlike the Hadoop daemon logs.</p>

View File

@@ -47,10 +47,10 @@ import org.slf4j.LoggerFactory;
* a daemon that is running on the localhost and will add the
* hostname to the metric (such as the
* <a href="https://collectd.org/">CollectD</a> StatsD plugin).
* <br/>
* <br>
* To configure this plugin, you will need to add the following
* entries to your hadoop-metrics2.properties file:
* <br/>
* <br>
* <pre>
* *.sink.statsd.class=org.apache.hadoop.metrics2.sink.StatsDSink
* [prefix].sink.statsd.server.host=

View File

@@ -59,8 +59,9 @@ public final class MBeans {
/**
* Register the MBean using our standard MBeanName format
* "hadoop:service=<serviceName>,name=<nameName>"
* Where the <serviceName> and <nameName> are the supplied parameters.
* "hadoop:service={@literal <serviceName>,name=<nameName>}"
* Where the {@literal <serviceName> and <nameName>} are the supplied
* parameters.
*
* @param serviceName
* @param nameName
@@ -75,8 +76,9 @@ public final class MBeans {
/**
* Register the MBean using our standard MBeanName format
* "hadoop:service=<serviceName>,name=<nameName>"
* Where the <serviceName> and <nameName> are the supplied parameters.
* "hadoop:service={@literal <serviceName>,name=<nameName>}"
* Where the {@literal <serviceName> and <nameName>} are the supplied
* parameters.
*
* @param serviceName
* @param nameName

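A hedged sketch of the naming convention above; the service and bean names (and the example MXBean itself) are hypothetical.

import javax.management.ObjectName;
import org.apache.hadoop.metrics2.util.MBeans;

public class MBeansSketch {
  public interface ExampleMXBean {
    int getValue();
  }

  static class Example implements ExampleMXBean {
    @Override
    public int getValue() {
      return 42;
    }
  }

  public static void main(String[] args) {
    // Registers under the service/name pattern described above.
    ObjectName name = MBeans.register("MyService", "MyBean", new Example());
    System.out.println(name);
  }
}
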
View File

@@ -29,11 +29,11 @@ import java.util.Map;
import java.util.Set;
/**
* This is a base class for DNS to Switch mappings. <p/> It is not mandatory to
* This is a base class for DNS to Switch mappings. <p> It is not mandatory to
* derive {@link DNSToSwitchMapping} implementations from it, but it is strongly
* recommended, as it makes it easy for the Hadoop developers to add new methods
* to this base class that are automatically picked up by all implementations.
* <p/>
* <p>
*
* This class does not extend the <code>Configured</code>
* base class, and should not be changed to do so, as it causes problems
@@ -81,7 +81,7 @@ public abstract class AbstractDNSToSwitchMapping
* multi-rack. Subclasses may override this with methods that are more aware
* of their topologies.
*
* <p/>
* <p>
*
* This method is used when parts of Hadoop need to know whether to apply
* single rack vs multi-rack policies, such as during block placement.
@@ -140,7 +140,7 @@ public abstract class AbstractDNSToSwitchMapping
/**
* Query for a {@link DNSToSwitchMapping} instance being on a single
* switch.
* <p/>
* <p>
* This predicate simply assumes that all mappings not derived from
* this class are multi-switch.
* @param mapping the mapping to query

View File

@@ -141,7 +141,7 @@ public class DNS {
}
/**
* Like {@link DNS#getIPs(String, boolean), but returns all
* Like {@link DNS#getIPs(String, boolean)}, but returns all
* IPs associated with the given interface and its subinterfaces.
*/
public static String[] getIPs(String strInterface)

View File

@@ -40,7 +40,7 @@ public interface DNSToSwitchMapping {
* Note the hostname/ip-address is not part of the returned path.
* The network topology of the cluster would determine the number of
* components in the network path.
* <p/>
* <p>
*
* If a name cannot be resolved to a rack, the implementation
* should return {@link NetworkTopology#DEFAULT_RACK}. This

View File

@@ -146,8 +146,8 @@ public class NetUtils {
/**
* Util method to build socket addr from either:
* <host>:<port>
* <fs>://<host>:<port>/<path>
* {@literal <host>:<port>}
* {@literal <fs>://<host>:<port>/<path>}
*/
public static InetSocketAddress createSocketAddr(String target) {
return createSocketAddr(target, -1);
@@ -155,9 +155,9 @@ public class NetUtils {
/**
* Util method to build socket addr from either:
* <host>
* <host>:<port>
* <fs>://<host>:<port>/<path>
* {@literal <host>}
* {@literal <host>:<port>}
* {@literal <fs>://<host>:<port>/<path>}
*/
public static InetSocketAddress createSocketAddr(String target,
int defaultPort) {
@@ -938,7 +938,7 @@ public class NetUtils {
* Return a free port number. There is no guarantee it will remain free, so
* it should be used immediately.
*
* @returns A free port for binding a local socket
* @return A free port for binding a local socket
*/
public static int getFreeSocketPort() {
int port = 0;
@@ -959,7 +959,7 @@ public class NetUtils {
*
* @param localAddr
* @param bindWildCardAddress
* @returns InetAddress
* @return InetAddress
*/
public static InetAddress bindToLocalAddress(InetAddress localAddr, boolean
bindWildCardAddress) {

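The accepted address forms listed above, in a short sketch; the hostnames are placeholders.

import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

public class SocketAddrSketch {
  public static void main(String[] args) {
    // <host>:<port> form.
    InetSocketAddress plain =
        NetUtils.createSocketAddr("namenode.example.com:8020");
    // <fs>://<host>:<port>/<path> form; only host and port feed the address.
    InetSocketAddress fromUri =
        NetUtils.createSocketAddr("hdfs://namenode.example.com:8020/user");
    System.out.println(plain.getPort() + " " + fromUri.getHostName());
  }
}
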
View File

@@ -114,7 +114,7 @@ public class NetworkTopology {
}
/** Add a leaf node
* Update node counter & rack counter if necessary
* Update node counter &amp; rack counter if necessary
* @param node node to be added; can be null
* @exception IllegalArgumentException if a node is added to a leaf node,
or the node to be added is not a leaf
@@ -858,12 +858,12 @@ public class NetworkTopology {
/**
* Sort nodes array by network distance to <i>reader</i>.
* <p/>
* <p>
* In a three-level topology, a node can be either local, on the same rack,
* or on a different rack from the reader. Sorting the nodes based on network
* distance from the reader reduces network traffic and improves
* performance.
* <p/>
* <p>
* As an additional twist, we also randomize the nodes at each network
* distance. This helps with load balancing when there is data skew.
*
@@ -881,11 +881,11 @@ public class NetworkTopology {
/**
* Sort nodes array by network distance to <i>reader</i>.
* <p/> using network location. This is used when the reader
* <p> using network location. This is used when the reader
* is not a datanode. Sorting the nodes based on network distance
* from the reader reduces network traffic and improves
* performance.
* <p/>
* <p>
*
* @param reader Node where data will be read
* @param nodes Available replicas with the requested data
@@ -902,7 +902,7 @@ public class NetworkTopology {
/**
* Sort nodes array by network distance to <i>reader</i>.
* <p/>
* <p>
* As an additional twist, we also randomize the nodes at each network
* distance. This helps with load balancing when there is data skew.
*

View File

@@ -168,7 +168,7 @@ public class NetworkTopologyWithNodeGroup extends NetworkTopology {
}
/** Add a leaf node
* Update node counter & rack counter if necessary
* Update node counter &amp; rack counter if necessary
* @param node node to be added; can be null
* @exception IllegalArgumentException if a node is added to a leaf node,
* or the node to be added is not a leaf
@@ -272,7 +272,7 @@ public class NetworkTopologyWithNodeGroup extends NetworkTopology {
/**
* Sort nodes array by their distances to <i>reader</i>.
* <p/>
* <p>
* This is the same as {@link NetworkTopology#sortByDistance(Node, Node[],
* int)} except with a four-level network topology which contains the
* additional network distance of a "node group" which is between local and

View File

@@ -33,13 +33,13 @@ import org.slf4j.LoggerFactory;
* This class implements the {@link DNSToSwitchMapping} interface using a
* script configured via the
* {@link CommonConfigurationKeys#NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY} option.
* <p/>
* <p>
* It contains a static class <code>RawScriptBasedMapping</code> that performs
* the work: reading the configuration parameters, executing any defined
* script, handling errors and such like. The outer
* class extends {@link CachedDNSToSwitchMapping} to cache the delegated
* queries.
* <p/>
* <p>
* This DNS mapper's {@link #isSingleSwitch()} predicate returns
* true if and only if a script is defined.
*/
@@ -78,7 +78,7 @@ public class ScriptBasedMapping extends CachedDNSToSwitchMapping {
/**
* Create an instance with the default configuration.
* </p>
* <p>
* Calling {@link #setConf(Configuration)} will trigger a
* re-evaluation of the configuration settings and so be used to
* set up the mapping script.
@@ -125,7 +125,7 @@ public class ScriptBasedMapping extends CachedDNSToSwitchMapping {
/**
* {@inheritDoc}
* <p/>
* <p>
* This will get called in the superclass constructor, so a check is needed
* to ensure that the raw mapping is defined before trying to relay a null
* configuration.

View File

@@ -32,10 +32,9 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
* the {@link DNSToSwitchMappingWithDependency} interface using
* a script configured via the
* {@link CommonConfigurationKeys#NET_DEPENDENCY_SCRIPT_FILE_NAME_KEY} option.
* <p/>
* <p>
* It contains a static class <code>RawScriptBasedMappingWithDependency</code>
* that performs the getDependency work.
* <p/>
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
@@ -52,7 +51,7 @@ public class ScriptBasedMappingWithDependency extends ScriptBasedMapping
/**
* Create an instance with the default configuration.
* </p>
* <p>
* Calling {@link #setConf(Configuration)} will trigger a
* re-evaluation of the configuration settings and so be used to
* set up the mapping script.
@@ -76,7 +75,7 @@ public class ScriptBasedMappingWithDependency extends ScriptBasedMapping
/**
* {@inheritDoc}
* <p/>
* <p>
* This will get called in the superclass constructor, so a check is needed
* to ensure that the raw mapping is defined before trying to relay a null
* configuration.

View File

@@ -32,7 +32,6 @@ import java.nio.channels.WritableByteChannel;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.metrics2.lib.MutableRate;
/**
* This implements an output stream that can have a timeout while writing.
@@ -187,7 +186,7 @@ public class SocketOutputStream extends OutputStream
* @param count number of bytes to transfer.
* @param waitForWritableTime nanoseconds spent waiting for the socket
* to become writable
* @param transferTime nanoseconds spent transferring data
* @param transferToTime nanoseconds spent transferring data
*
* @throws EOFException
* If end of input file is reached before requested number of
@@ -253,7 +252,8 @@ public class SocketOutputStream extends OutputStream
/**
* Call
* {@link #transferToFully(FileChannel, long, int, MutableRate, MutableRate)}
* {@link #transferToFully(FileChannel, long, int, LongWritable, LongWritable)
* }
* with null <code>waitForWritableTime</code> and <code>transferToTime</code>
*/
public void transferToFully(FileChannel fileCh, long position, int count)

View File

@@ -31,11 +31,11 @@ import java.util.Map;
/**
* Initializes hadoop-auth AuthenticationFilter which provides support for
* Kerberos HTTP SPNEGO authentication.
* <p/>
* <p>
* It enables anonymous access, simple/pseudo and Kerberos HTTP SPNEGO
* authentication for Hadoop JobTracker, NameNode, DataNodes and
* TaskTrackers.
* <p/>
* <p>
* Refer to the <code>core-default.xml</code> file, after the comment
* 'HTTP Authentication' for details on the configuration options.
* All related configuration properties have 'hadoop.http.authentication.'
@@ -47,7 +47,7 @@ public class AuthenticationFilterInitializer extends FilterInitializer {
/**
* Initializes hadoop-auth AuthenticationFilter.
* <p/>
* <p>
* Propagates to hadoop-auth AuthenticationFilter configuration all Hadoop
* configuration properties prefixed with "hadoop.http.authentication."
*

View File

@@ -46,7 +46,7 @@ public class HadoopKerberosName extends KerberosName {
}
/**
* Set the static configuration to get the rules.
* <p/>
* <p>
* IMPORTANT: This method does a NOP if the rules have been set already.
* If there is a need to reset the rules, the {@link KerberosName#setRules(String)}
* method should be invoked directly.

Some files were not shown because too many files have changed in this diff.