diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
index de9bbdab7f7..a06ff0a5082 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
* not visible to the user except when getXAttr/getXAttrs is called on a file
* or directory in the /.reserved/raw HDFS directory hierarchy. These
* attributes can only be accessed by the superuser.
- * <p/>
+ * <p>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
*
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
index b0686d7c4b4..1b6ee4a42c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
@@ -20,11 +20,17 @@ package org.apache.hadoop.hdfs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+
+import java.util.EnumSet;
/**
* AddBlockFlag provides hints for new block allocation and placement.
* Users can use this flag to control per DFSOutputStream
- * {@see ClientProtocol#addBlock()} behavior.
+ * @see ClientProtocol#addBlock(String, String, ExtendedBlock, DatanodeInfo[],
+ * long, String[], EnumSet)
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
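For reference, the distinction the hunk above relies on (illustrative only, not part of the patch): @see is a block tag that must start its own line in the tag section, while {@link} is the inline form; "{@see}" is not a Javadoc tag at all, so doclint rejects it.

/**
 * Inline cross-reference, rendered mid-sentence: {@link java.util.EnumSet}.
 *
 * @see java.util.EnumSet
 */
public final class SeeTagExample {
  // A comment containing "{@see java.util.EnumSet}" would fail doclint with
  // "unknown tag: see", which is exactly what the AddBlockFlag fix removes.
}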
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 38072b21ddd..5511657e1b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1054,8 +1054,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
/**
* Call {@link #create(String, boolean, short, long, Progressable)} with
- * default <code>replication</code> and <code>blockSize</code> and null <code>
- * progress</code>.
+ * default <code>replication</code> and <code>blockSize</code> and null
+ * <code>progress</code>.
*/
public OutputStream create(String src, boolean overwrite)
throws IOException {
@@ -1065,7 +1065,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
/**
* Call {@link #create(String, boolean, short, long, Progressable)} with
- * default <code>replication</code> and <code>blockSize<code>.
+ * default <code>replication</code> and <code>blockSize</code>.
*/
public OutputStream create(String src,
boolean overwrite, Progressable progress) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
index 7fe1278a597..272d8de5c5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
@@ -294,12 +294,12 @@ public class DFSPacket {
}
/**
- * Add a trace parent span for this packet.<p/>
- *
+ * Add a trace parent span for this packet.
+ * <p>
* Trace parent spans for a packet are the trace spans responsible for
* adding data to that packet. We store them as an array of longs for
- * efficiency.<p/>
- *
+ * efficiency.
+ * <p>
* Protected by the DFSOutputStream dataQueue lock.
*/
public void addTraceParent(Span span) {
@@ -323,10 +323,10 @@ public class DFSPacket {
}
/**
- * Get the trace parent spans for this packet.<p/>
- *
- * Will always be non-null.<p/>
- *
+ * Get the trace parent spans for this packet.
+ * <p>
+ * Will always be non-null.
+ * <p>
* Protected by the DFSOutputStream dataQueue lock.
*/
public SpanId[] getTraceParents() {
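The two hunks above lean on the array-of-longs storage described in the comments. A minimal sketch of that idea (illustrative; class and field names here are hypothetical, not the real DFSPacket internals):

import java.util.Arrays;

class TraceParentsSketch {
  private long[] parents = new long[0]; // never null, as the javadoc promises

  // The real class relies on the DFSOutputStream dataQueue lock instead of
  // its own synchronization; this sketch assumes the same calling discipline.
  void addTraceParent(long spanId) {
    // Many chunks are added under the same writer span, so deduplicating
    // against the most recent entry is cheap and removes most repeats.
    if (parents.length > 0 && parents[parents.length - 1] == spanId) {
      return;
    }
    parents = Arrays.copyOf(parents, parents.length + 1);
    parents[parents.length - 1] = spanId;
  }

  long[] getTraceParents() {
    return parents;
  }
}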
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 8f785c79155..0686ed5b1fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -266,8 +266,8 @@ public class DFSUtilClient {
}
/**
- * Convert a List<LocatedBlock> to BlockLocation[]
+ * Convert a List&lt;LocatedBlock&gt; to BlockLocation[]
* @param blocks A List to be converted
* @return converted array of BlockLocation
*/
public static BlockLocation[] locatedBlocks2Locations(
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
index a092f02630d..897ecc8c163 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
@@ -67,7 +67,7 @@ import org.apache.hadoop.security.UserGroupInformation;
* non-HA-enabled client proxy as appropriate.
*
* For creating proxy objects with other protocols, please see
- * {@link NameNodeProxies#createProxy(Configuration, URI, Class)}.
+ * NameNodeProxies#createProxy(Configuration, URI, Class).
*/
@InterfaceAudience.Private
public class NameNodeProxiesClient {
@@ -118,7 +118,6 @@ public class NameNodeProxiesClient {
* @return an object containing both the proxy and the associated
* delegation token service it corresponds to
* @throws IOException if there is an error creating the proxy
- * @see {@link NameNodeProxies#createProxy(Configuration, URI, Class)}.
*/
public static ProxyAndInfo<ClientProtocol> createProxyWithClientProtocol(
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
* Since the list is fetched in batches, it does not represent a
* consistent snapshot of the entire list of encryption zones.
- * <p/>
+ * <p>
* This method can only be called by HDFS superusers.
*/
public RemoteIterator<EncryptionZone> listEncryptionZones()
- * <p/>
+ * <p>
* Inotify users may want to tune the following HDFS parameters to
* ensure that enough extra HDFS edits are saved to support inotify clients
* that fall behind the current state of the namespace while reading events.
@@ -438,7 +438,7 @@ public class HdfsAdmin {
* dfs.namenode.checkpoint.txns
* dfs.namenode.num.checkpoints.retained
* dfs.ha.log-roll.period
- * <p/>
+ * <p>
* It is recommended that local journaling be configured
* (dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
* so that edit transfers from the shared journal can be avoided.
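A minimal usage sketch of the event stream these parameters protect (illustrative, not part of the patch; the namenode URI is hypothetical):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.inotify.EventBatch;

public class InotifyDemo {
  public static void main(String[] args) throws Exception {
    HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"),
        new Configuration());
    DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
    EventBatch batch;
    // poll() returns null when no batch is ready; a client that falls too far
    // behind risks the missing-events condition the javadoc warns about.
    while ((batch = stream.poll()) != null) {
      System.out.println("txid=" + batch.getTxid()
          + " events=" + batch.getEvents().length);
    }
  }
}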
@@ -615,10 +615,10 @@ public class HdfsAdmin {
* Returns a RemoteIterator which can be used to list all open files
* currently managed by the NameNode. For large numbers of open files,
* iterator will fetch the list in batches of configured size.
- * <p/>
+ * <p>
* Since the list is fetched in batches, it does not represent a
* consistent snapshot of the all open files.
- * <p/>
+ * <p>
* This method can only be called by HDFS superusers.
*/
@Deprecated
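A minimal usage sketch of the iterator described above (illustrative, not part of the patch; the URI is hypothetical). Because results arrive in batches, the listing is not a point-in-time snapshot:

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;

public class ListOpenFilesDemo {
  public static void main(String[] args) throws IOException {
    HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"),
        new Configuration());
    RemoteIterator<OpenFileEntry> it = admin.listOpenFiles();
    while (it.hasNext()) { // each hasNext() call may fetch the next batch
      System.out.println(it.next().getFilePath());
    }
  }
}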
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
index 957c0a9bf68..e8ec41c5bfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
@@ -70,7 +70,7 @@ import org.slf4j.LoggerFactory;
* to renew the leases.
* </li>
* </ul>
- * </p>
*/
@InterfaceAudience.Private
public class LeaseRenewer {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index e2be9c6226e..7312f26d0a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -143,7 +143,7 @@ public interface ClientDatanodeProtocol {
/**
* Get the status of the previously issued reconfig task.
- * @see {@link org.apache.hadoop.conf.ReconfigurationTaskStatus}.
+ * @see org.apache.hadoop.conf.ReconfigurationTaskStatus
*/
ReconfigurationTaskStatus getReconfigurationStatus() throws IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index e8c881b624c..a55a0f7d959 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -839,7 +839,7 @@ public interface ClientProtocol {
* percentage called threshold of blocks, which satisfy the minimal
* replication condition.
* The minimal replication condition is that each block must have at least
- * dfs.namenode.replication.min replicas.
+ * {@code dfs.namenode.replication.min} replicas.
* When the threshold is reached the name node extends safe mode
* for a configurable amount of time
* to let the remaining data nodes to check in before it
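A minimal sketch of querying the state described above (illustrative, not part of the patch; assumes fs.defaultFS points at an HDFS cluster). SAFEMODE_GET only reports the state, it does not change it:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

public class SafeModeDemo {
  public static void main(String[] args) throws Exception {
    try (FileSystem fs = FileSystem.get(new Configuration())) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      boolean inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);
      System.out.println("NameNode in safe mode: " + inSafeMode);
    }
  }
}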
@@ -855,12 +855,13 @@ public interface ClientProtocol {
* Current state of the name node can be verified using
* {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean)
* setSafeMode(SafeModeAction.SAFEMODE_GET,false)}
- * Configuration parameters:
- * dfs.safemode.threshold.pct is the threshold parameter.
- * dfs.safemode.extension is the safe mode extension parameter.
- * dfs.namenode.replication.min is the minimal replication parameter.
+ * <p>
+ * Configuration parameters:
+ * {@code dfs.safemode.threshold.pct} is the threshold parameter.
+ * {@code dfs.safemode.extension} is the safe mode extension parameter.
+ * {@code dfs.namenode.replication.min} is the minimal replication parameter.
*
- * Special cases:
+ * <p>
+ * Special cases:
* If the threshold is set to 1 then all blocks need to have at least
@@ -1545,12 +1545,12 @@ public interface ClientProtocol {
* If xAttrs is null or empty, this is the same as getting all xattrs of the
* file or directory. Only those xattrs for which the logged-in user has
* permissions to view are returned.
- * <p/>
+ * <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param src file or directory
* @param xAttrs xAttrs to get
- * @return List<XAttr> XAttr list
+ * @return List&lt;XAttr&gt; XAttr list
* @throws IOException
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
index 837043882f3..540a4727e16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
@@ -45,7 +45,7 @@ public interface ReconfigurationProtocol {
/**
* Get the status of the previously issued reconfig task.
- * @see {@link org.apache.hadoop.conf.ReconfigurationTaskStatus}.
+ * @see org.apache.hadoop.conf.ReconfigurationTaskStatus
*/
@Idempotent
ReconfigurationTaskStatus getReconfigurationStatus() throws IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
index 5950752d1a1..0635fd0e75f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
@@ -182,13 +182,13 @@ public class BlockTokenIdentifier extends TokenIdentifier {
* because we know the first field is the Expiry date.
*
* In the case of the legacy buffer, the expiry date is a VInt, so the size
- * (which should always be >1) is encoded in the first byte - which is
+ * (which should always be &gt;1) is encoded in the first byte - which is
* always negative due to this encoding. However, there are sometimes null
* BlockTokenIdentifier written so we also need to handle the case there
* the first byte is also 0.
*
* In the case of protobuf, the first byte is a type tag for the expiry date
- * which is written as (field_number << 3 | wire_type).
+ * which is written as <code>(field_number &lt;&lt; 3 | wire_type)</code>.
* So as long as the field_number is less than 16, but also positive, then
* we know we have a Protobuf.
*
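A sketch of the first-byte heuristic described above (illustrative, not part of the patch; the real deserialization logic does more than this):

final class TokenFormatSniffer {
  static boolean looksLikeProtobuf(byte firstByte) {
    // A legacy Writable identifier starts with a VInt whose multi-byte length
    // prefix is negative, and a null identifier writes 0. A protobuf message
    // starts with the expiry field's key byte, (field_number << 3 | wire_type),
    // which stays a small positive value while field_number < 16.
    return firstByte > 0;
  }
}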
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInotifyEventInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInotifyEventInputStream.java
index bcdc1c8285d..a921a190e4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInotifyEventInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInotifyEventInputStream.java
@@ -139,7 +139,7 @@ public class DFSInotifyEventInputStream {
* are falling behind (i.e. transaction are being generated faster than the
* client is reading them). If a client falls too far behind events may be
* deleted before the client can read them.
- * <p/>
+ * <p>
* A return value of -1 indicates that an estimate could not be produced, and
* should be ignored. The value returned by this method is really only useful
* when compared to previous or subsequent returned values.
*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1860,7 +1859,7 @@ public class DistributedFileSystem extends FileSystem
return setSafeMode(SafeModeAction.SAFEMODE_GET, true);
}
- /** @see HdfsAdmin#allowSnapshot(Path) */
+ /** @see org.apache.hadoop.hdfs.client.HdfsAdmin#allowSnapshot(Path) */
public void allowSnapshot(final Path path) throws IOException {
statistics.incrementWriteOps(1);
storageStatistics.incrementOpCounter(OpType.ALLOW_SNAPSHOT);
@@ -1888,7 +1887,7 @@ public class DistributedFileSystem extends FileSystem
}.resolve(this, absF);
}
- /** @see HdfsAdmin#disallowSnapshot(Path) */
+ /** @see org.apache.hadoop.hdfs.client.HdfsAdmin#disallowSnapshot(Path) */
public void disallowSnapshot(final Path path) throws IOException {
statistics.incrementWriteOps(1);
storageStatistics.incrementOpCounter(OpType.DISALLOW_SNAPSHOT);
@@ -2207,7 +2206,7 @@ public class DistributedFileSystem extends FileSystem
}
/**
- * @see {@link #addCacheDirective(CacheDirectiveInfo, EnumSet)}
+ * @see #addCacheDirective(CacheDirectiveInfo, EnumSet)
*/
public long addCacheDirective(CacheDirectiveInfo info) throws IOException {
return addCacheDirective(info, EnumSet.noneOf(CacheFlag.class));
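A usage sketch of the overload pairing above (illustrative, not part of the patch; the pool name and path are hypothetical). The one-argument form is equivalent to passing an empty flag set, while CacheFlag.FORCE skips pool resource checks:

import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

class CacheDirectiveDemo {
  static long cacheHotFile(DistributedFileSystem dfs) throws IOException {
    CacheDirectiveInfo info = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/data/hot-file"))
        .setPool("testPool")
        .build();
    return dfs.addCacheDirective(info, EnumSet.of(CacheFlag.FORCE));
  }
}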
@@ -2234,7 +2233,7 @@ public class DistributedFileSystem extends FileSystem
}
/**
- * @see {@link #modifyCacheDirective(CacheDirectiveInfo, EnumSet)}
+ * @see #modifyCacheDirective(CacheDirectiveInfo, EnumSet)
*/
public void modifyCacheDirective(CacheDirectiveInfo info) throws IOException {
modifyCacheDirective(info, EnumSet.noneOf(CacheFlag.class));
@@ -3305,10 +3304,10 @@ public class DistributedFileSystem extends FileSystem
* Returns a RemoteIterator which can be used to list all open files
* currently managed by the NameNode. For large numbers of open files,
* iterator will fetch the list in batches of configured size.
- * <p/>
+ * <p>
* Since the list is fetched in batches, it does not represent a
* consistent snapshot of the all open files.
- * <p/>
+ * <p>
* This method can only be called by HDFS superusers.
*/
@Deprecated
@@ -1211,7 +1212,6 @@ public interface ClientProtocol {
* Get a valid Delegation Token.
*
* @param renewer the designated renewer for the token
- * @return Token<DelegationTokenIdentifier>
* @throws IOException
*/
@Idempotent
@@ -1561,11 +1561,11 @@ public interface ClientProtocol {
* List the xattrs names for a file or directory.
* Only the xattr names for which the logged in user has the permissions to
* access will be returned.
- * <p/>
+ * <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param src file or directory
- * @return List<XAttr> XAttr list
+ * @return List&lt;XAttr&gt; XAttr list
* @throws IOException
*/
@Idempotent
@@ -1576,7 +1576,7 @@ public interface ClientProtocol {
* Remove xattr of a file or directory.Value in xAttr parameter is ignored.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
- * <p/>
+ * <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param src file or directory
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
index 22620031126..6b3d8e07ce0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
@@ -57,9 +57,8 @@ import org.slf4j.LoggerFactory;
* slots in the set of existing segments, falling back to getting a new segment
* from the DataNode via {@link DataTransferProtocol#requestShortCircuitFds}.
*
- * The counterpart to this class on the DataNode is
- * {@link ShortCircuitRegistry}. See {@link ShortCircuitRegistry} for more
- * information on the communication protocol.
+ * The counterpart to this class on the DataNode is ShortCircuitRegistry.
+ * See ShortCircuitRegistry for more information on the communication protocol.
*/
@InterfaceAudience.Private
public class DfsClientShmManager implements Closeable {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
index a9adb7e2f71..164b03bf203 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
@@ -54,10 +54,10 @@ public abstract class ByteArrayManager {
/**
* @return the least power of two greater than or equal to n, i.e. return
- * the least integer x with x >= n and x a power of two.
+ * the least integer x with x &gt;= n and x a power of two.
*
* @throws HadoopIllegalArgumentException
- * if n <= 0.
+ * if n &lt;= 0.
*/
public static int leastPowerOfTwo(final int n) {
if (n <= 0) {
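The contract above can be cross-checked with a small sketch (illustrative, not part of the patch; a plain IllegalArgumentException stands in for Hadoop's HadoopIllegalArgumentException):

public final class LeastPowerOfTwoDemo {
  static int leastPowerOfTwo(int n) {
    if (n <= 0) {
      throw new IllegalArgumentException("n = " + n + " <= 0");
    }
    int highest = Integer.highestOneBit(n); // greatest power of two <= n
    return highest == n ? n : highest << 1; // round up; overflows past 2^30
  }

  public static void main(String[] args) {
    System.out.println(leastPowerOfTwo(1));  // 1
    System.out.println(leastPowerOfTwo(5));  // 8
    System.out.println(leastPowerOfTwo(64)); // 64
  }
}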
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
index 9bad45dfb84..042592779b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
@@ -53,7 +53,8 @@ import java.util.concurrent.TimeUnit;
* illustrated in the following diagram. Unless otherwise specified, all
* range-related calculations are inclusive (the end offset of the previous
* range should be 1 byte lower than the start offset of the next one).
- *
+ */
+ /*
* | <---- Block Group ----> | <- Block Group: logical unit composing
* | | striped HDFS files.
* blk_0 blk_1 blk_2 <- Internal Blocks: each internal block
@@ -492,9 +493,12 @@ public class StripedBlockUtil {
return stripes.toArray(new AlignedStripe[stripes.size()]);
}
+ /**
+ * Cell indexing convention defined in {@link StripingCell}.
+ */
private static void calcualteChunkPositionsInBuf(int cellSize,
AlignedStripe[] stripes, StripingCell[] cells, ByteBuffer buf) {
- /**
+ /*
* | <--------------- AlignedStripe --------------->|
*
* |<- length_0 ->|<-- length_1 -->|<- length_2 ->|
@@ -508,8 +512,6 @@ public class StripedBlockUtil {
* | cell_0_0_0 | cell_1_0_1 and cell_2_0_2 |cell_3_1_0 ...| <- buf
* | (partial) | (from blk_1 and blk_2) | |
* +----------------------------------------------------------+
- *
- * Cell indexing convention defined in {@link StripingCell}
*/
int done = 0;
for (StripingCell cell : cells) {
@@ -562,7 +564,11 @@ public class StripedBlockUtil {
* its start and end offsets -- e.g., the end logical offset of cell_0_0_0
* should be 1 byte lower than the start logical offset of cell_1_0_1.
*
- * | <------- Striped Block Group -------> |
+ * A StripingCell is a special instance of {@link StripingChunk} whose offset
+ * and size align with the cell used when writing data.
+ * TODO: consider parity cells
+ */
+ /* | <------- Striped Block Group -------> |
* blk_0 blk_1 blk_2
* | | |
* v v v
@@ -572,9 +578,6 @@ public class StripedBlockUtil {
* |cell_3_1_0| |cell_4_1_1| |cell_5_1_2| <- {@link #idxInBlkGroup} = 5
* +----------+ +----------+ +----------+ {@link #idxInInternalBlk} = 1
* {@link #idxInStripe} = 2
- * A StripingCell is a special instance of {@link StripingChunk} whose offset
- * and size align with the cell used when writing data.
- * TODO: consider parity cells
*/
@VisibleForTesting
public static class StripingCell {
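How the three indexes in the diagram relate, as a quick check (illustrative, not part of the patch; assumes the 3-data-block layout drawn above):

public final class StripingCellIndexDemo {
  public static void main(String[] args) {
    final int dataBlkNum = 3;    // blk_0..blk_2 in the diagram
    final int idxInBlkGroup = 5; // cell_5
    int idxInInternalBlk = idxInBlkGroup / dataBlkNum; // 1
    int idxInStripe = idxInBlkGroup % dataBlkNum;      // 2
    System.out.println(idxInInternalBlk + " / " + idxInStripe);
  }
}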
@@ -622,6 +625,18 @@ public class StripedBlockUtil {
* the diagram, any given byte range on a block group leads to 1~5
* AlignedStripe's.
*
+ * An AlignedStripe is the basic unit of reading from a striped block group,
+ * because within the AlignedStripe, all internal blocks can be processed in
+ * a uniform manner.
+ *
+ * The coverage of an AlignedStripe on an internal block is represented as a
+ * {@link StripingChunk}.
+ *
+ * To simplify the logic of reading a logical byte range from a block group,
+ * a StripingChunk is either completely in the requested byte range or
+ * completely outside the requested byte range.
+ */
+ /*
* |<-------- Striped Block Group -------->|
* blk_0 blk_1 blk_2 blk_3 blk_4
* +----+ | +----+ +----+
@@ -638,18 +653,7 @@ public class StripedBlockUtil {
* | | | | | | | <- AlignedStripe4:
* +----+ | +----+ +----+ last cell is partial
* |
- * <---- data blocks ----> | <--- parity --->
- *
- * An AlignedStripe is the basic unit of reading from a striped block group,
- * because within the AlignedStripe, all internal blocks can be processed in
- * a uniform manner.
- *
- * The coverage of an AlignedStripe on an internal block is represented as a
- * {@link StripingChunk}.
- *
- * To simplify the logic of reading a logical byte range from a block group,
- * a StripingChunk is either completely in the requested byte range or
- * completely outside the requested byte range.
+ * <---- data blocks ----> | <--- parity -->
*/
public static class AlignedStripe {
public VerticalRange range;
@@ -691,7 +695,8 @@ public class StripedBlockUtil {
* starting at {@link #offsetInBlock} and lasting for {@link #spanInBlock}
* bytes in an internal block. Note that VerticalRange doesn't necessarily
* align with {@link StripingCell}.
- *
+ */
+ /*
* |<- Striped Block Group ->|
* blk_0
* |
@@ -735,8 +740,8 @@ public class StripedBlockUtil {
/**
* Indicates the coverage of an {@link AlignedStripe} on an internal block,
* and the state of the chunk in the context of the read request.
- *
- * |<---------------- Striped Block Group --------------->|
+ */
+ /* |<---------------- Striped Block Group --------------->|
* blk_0 blk_1 blk_2 blk_3 blk_4
* +---------+ | +----+ +----+
* null null |REQUESTED| | |null| |null| <- AlignedStripe0
@@ -745,7 +750,7 @@ public class StripedBlockUtil {
* +---------+ +---------+ +---------+ | +----+ +----+
* |REQUESTED| |REQUESTED| ALLZERO | |null| |null| <- AlignedStripe2
* +---------+ +---------+ | +----+ +----+
- * <----------- data blocks ------------> | <--- parity --->
+ * <----------- data blocks ------------> | <--- parity -->
*/
public static class StripingChunk {
/** Chunk has been successfully fetched */
@@ -767,10 +772,12 @@ public class StripedBlockUtil {
/**
* If a chunk is completely in requested range, the state transition is:
- * REQUESTED (when AlignedStripe created) -> PENDING -> {FETCHED | MISSING}
+ * REQUESTED (when AlignedStripe created) -> PENDING ->
+ * {FETCHED | MISSING}
* If a chunk is completely outside requested range (including parity
* chunks), state transition is:
- * null (AlignedStripe created) -> REQUESTED (upon failure) -> PENDING ...
+ * null (AlignedStripe created) ->REQUESTED (upon failure) ->
+ * PENDING ...
*/
public int state = REQUESTED;
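The transitions above as a tiny state machine, for reference (illustrative, not part of the patch; the real class uses int constants on a public field, not an enum):

final class ChunkStateSketch {
  enum State { NULL, REQUESTED, PENDING, FETCHED, MISSING, ALLZERO }

  // A chunk inside the requested range starts REQUESTED and becomes PENDING
  // once a read for its internal block is dispatched.
  static State issueRead(State s) {
    return s == State.REQUESTED ? State.PENDING : s;
  }

  // A PENDING chunk ends FETCHED on success or MISSING on failure; MISSING
  // data chunks are what trigger decode-from-parity recovery.
  static State completeRead(State s, boolean success) {
    if (s != State.PENDING) {
      return s;
    }
    return success ? State.FETCHED : State.MISSING;
  }
}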
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index ec60a186c7b..d504cfeb3ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -176,7 +176,6 @@ public class WebHdfsFileSystem extends FileSystem
/**
* Return the protocol scheme for the FileSystem.
- * <p/>
*
* @return <code>webhdfs</code>
*/