HBASE-27530 Fix comment syntax errors (#4910)
Signed-off-by: Duo Zhang <zhangduo@apache.org>

parent 8b4e134f8c
commit f9518cc1a1

@@ -84,7 +84,7 @@ import org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler;
  * An asynchronous HDFS output stream implementation which fans out data to datanode and only
  * supports writing file with only one block.
  * <p>
- * Use the createOutput method in {@link FanOutOneBlockAsyncDFSOutputHelper} to create. The mainly
+ * Use the createOutput method in {@link FanOutOneBlockAsyncDFSOutputHelper} to create. The main
  * usage of this class is implementing WAL, so we only expose a little HDFS configurations in the
  * method. And we place it here under io package because we want to make it independent of WAL
  * implementation thus easier to move it to HDFS project finally.
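
The javadoc above points at the factory entry point. A minimal usage sketch, assuming a recent shape of the helper; the exact createOutput parameter list differs across HBase versions, so the arguments shown here are assumptions, not the definitive API:

    // Sketch only; parameter list of createOutput is assumed, check your HBase version.
    // Assumed imports: org.apache.hadoop.fs.Path, org.apache.hadoop.hdfs.DistributedFileSystem,
    // and EventLoopGroup/Channel from hbase-thirdparty netty.
    static void writeWalSketch(DistributedFileSystem dfs, EventLoopGroup group,
        Class<? extends Channel> channelClass, byte[] data) throws Exception {
      FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(
        dfs, new Path("/hbase/WALs/example.wal"), true /* overwrite */, false /* createParent */,
        (short) 3 /* replication */, dfs.getDefaultBlockSize(), group, channelClass);
      out.write(data);            // buffer the payload
      out.flush(false).get();     // flush(boolean sync) completes with the acked file length
      out.close();
    }
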
@@ -104,8 +104,8 @@ import org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler;
 @InterfaceAudience.Private
 public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
 
-  // The MAX_PACKET_SIZE is 16MB but it include the header size and checksum size. So here we set a
-  // smaller limit for data size.
+  // The MAX_PACKET_SIZE is 16MB, but it includes the header size and checksum size. So here we set
+  // a smaller limit for data size.
   private static final int MAX_DATA_LEN = 12 * 1024 * 1024;
 
   private final Configuration conf;
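
Rough arithmetic behind that 12 MB ceiling, assuming HDFS's common defaults of 512-byte checksum chunks and 4-byte CRC32C checksums (both values are configurable, so they are assumptions here):

    // Packet budget under assumed defaults (512-byte chunks, 4-byte CRC32C):
    int dataLen = 12 * 1024 * 1024;             // MAX_DATA_LEN = 12582912 bytes
    int chunkSize = 512;                        // assumed bytes of data per checksum chunk
    int checksumSize = 4;                       // assumed bytes per checksum
    int checksumLen = (dataLen / chunkSize) * checksumSize;  // 24576 * 4 = 98304 bytes (~96 KB)
    // 12 MB of data + ~96 KB of checksums + a small packet header stays well under the 16 MB cap.
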
@@ -173,7 +173,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
   private long nextPacketOffsetInBlock = 0L;
 
   // the length of the trailing partial chunk, this is because the packet start offset must be
-  // aligned with the length of checksum chunk so we need to resend the same data.
+  // aligned with the length of checksum chunk, so we need to resend the same data.
   private int trailingPartialChunkLength = 0;
 
   private long nextPacketSeqno = 0L;
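
A worked example of the alignment that comment describes, again assuming a 512-byte checksum chunk:

    // Say 1300 bytes of the block are already flushed and chunks are 512 bytes (assumed):
    long flushedBytes = 1300;
    int chunkSize = 512;
    int trailingPartialChunkLength = (int) (flushedBytes % chunkSize);        // 276
    long nextPacketOffsetInBlock = flushedBytes - trailingPartialChunkLength; // 1024, aligned
    // The next packet starts at the chunk-aligned offset 1024 and resends the same 276 bytes,
    // so every checksum it carries covers data starting from a chunk boundary.
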
@@ -437,7 +437,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
       checksumBuf.release();
       headerBuf.release();
 
-      // This method takes ownership of the dataBuf so we need release it before returning.
+      // This method takes ownership of the dataBuf, so we need release it before returning.
       dataBuf.release();
       return;
     }

@@ -72,14 +72,14 @@ public final class RecoverLeaseFSUtils {
  * file's primary node. If all is well, it should return near immediately. But, as is common, it
  * is the very primary node that has crashed and so the namenode will be stuck waiting on a socket
  * timeout before it will ask another datanode to start the recovery. It does not help if we call
- * recoverLease in the meantime and in particular, subsequent to the socket timeout, a
- * recoverLease invocation will cause us to start over from square one (possibly waiting on socket
- * timeout against primary node). So, in the below, we do the following: 1. Call recoverLease. 2.
- * If it returns true, break. 3. If it returns false, wait a few seconds and then call it again.
- * 4. If it returns true, break. 5. If it returns false, wait for what we think the datanode
- * socket timeout is (configurable) and then try again. 6. If it returns true, break. 7. If it
- * returns false, repeat starting at step 5. above. If HDFS-4525 is available, call it every
- * second and we might be able to exit early.
+ * recoverLease in the meantime and in particular, after the socket timeout, a recoverLease
+ * invocation will cause us to start over from square one (possibly waiting on socket timeout
+ * against primary node). So, in the below, we do the following: 1. Call recoverLease. 2. If it
+ * returns true, break. 3. If it returns false, wait a few seconds and then call it again. 4. If
+ * it returns true, break. 5. If it returns false, wait for what we think the datanode socket
+ * timeout is (configurable) and then try again. 6. If it returns true, break. 7. If it returns
+ * false, repeat starting at step 5. above. If HDFS-4525 is available, call it every second, and
+ * we might be able to exit early.
  */
 private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p,
   final Configuration conf, final CancelableProgressable reporter) throws IOException {
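
The numbered steps in that javadoc boil down to a retry loop. A minimal sketch under simplified timing, not the actual HBase implementation (which also honors the reporter and an overall recovery deadline):

    // Sketch of the recoverLease retry scheme described above (simplified).
    // Assumed imports: org.apache.hadoop.fs.Path, org.apache.hadoop.hdfs.DistributedFileSystem.
    static boolean recoverLeaseLoop(DistributedFileSystem dfs, Path p, long firstPause,
        long subsequentPause, long deadline) throws IOException, InterruptedException {
      for (int nbAttempt = 0; System.currentTimeMillis() < deadline; nbAttempt++) {
        if (dfs.recoverLease(p)) {      // steps 1/3/5: (re)trigger recovery at the namenode
          return true;                  // steps 2/4/6: lease recovered, break out
        }
        // step 3: short first pause; steps 5/7: wait about the datanode socket timeout
        Thread.sleep(nbAttempt == 0 ? firstPause : subsequentPause);
      }
      return false;
    }
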
@@ -89,10 +89,10 @@ public final class RecoverLeaseFSUtils {
     // usually needs 10 minutes before marking the nodes as dead. So we're putting ourselves
     // beyond that limit 'to be safe'.
     long recoveryTimeout = conf.getInt("hbase.lease.recovery.timeout", 900000) + startWaiting;
-    // This setting should be a little bit above what the cluster dfs heartbeat is set to.
+    // This setting should be a little above what the cluster dfs heartbeat is set to.
     long firstPause = conf.getInt("hbase.lease.recovery.first.pause", 4000);
     // This should be set to how long it'll take for us to timeout against primary datanode if it
-    // is dead. We set it to 64 seconds, 4 second than the default READ_TIMEOUT in HDFS, the
+    // is dead. We set it to 64 seconds, 4 seconds than the default READ_TIMEOUT in HDFS, the
     // default value for DFS_CLIENT_SOCKET_TIMEOUT_KEY. If recovery is still failing after this
     // timeout, then further recovery will take liner backoff with this base, to avoid endless
     // preemptions when this value is not properly configured.
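
Spelling out the arithmetic in those comments: 64 seconds is the 60-second default HDFS READ_TIMEOUT plus a 4-second margin, and later attempts back off linearly on that base. A sketch; the config key name below is an assumption:

    // Linear backoff on the 64s base; attempt n waits roughly n * base.
    static long pauseForAttempt(Configuration conf, int nbAttempt) {
      long base = conf.getLong("hbase.lease.recovery.dfs.timeout", 64_000L); // key name assumed
      return (long) nbAttempt * base;   // 64s, 128s, 192s, ... bounded by recoveryTimeout
    }
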
@@ -118,7 +118,7 @@ public final class RecoverLeaseFSUtils {
         Thread.sleep(firstPause);
       } else {
         // Cycle here until (subsequentPause * nbAttempt) elapses. While spinning, check
-        // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though.
+        // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though).
         long localStartWaiting = EnvironmentEdgeManager.currentTime();
         while (
           (EnvironmentEdgeManager.currentTime() - localStartWaiting)
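
The spin that comment describes can be sketched as follows. Calling isFileClosed directly assumes a Hadoop version that has HDFS-4525; the real code probes for the method so it can also run where it is missing:

    // Spin until (subsequentPause * nbAttempt) elapses, probing isFileClosed each second.
    static boolean waitCheckingIsFileClosed(DistributedFileSystem dfs, Path p,
        long subsequentPause, int nbAttempt) throws IOException, InterruptedException {
      long localStartWaiting = System.currentTimeMillis();
      while (System.currentTimeMillis() - localStartWaiting < subsequentPause * nbAttempt) {
        if (dfs.isFileClosed(p)) {   // HDFS-4525; lets us exit before the full pause elapses
          return true;
        }
        Thread.sleep(1000);          // re-check every second, per the javadoc
      }
      return false;
    }
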
@@ -50,13 +50,13 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 
 /**
- * The base class for load balancers. It provides the the functions used to by
- * {@code AssignmentManager} to assign regions in the edge cases. It doesn't provide an
- * implementation of the actual balancing algorithm.
+ * The base class for load balancers. It provides the functions used to by {@code AssignmentManager}
+ * to assign regions in the edge cases. It doesn't provide an implementation of the actual balancing
+ * algorithm.
  * <p/>
  * Since 3.0.0, all the balancers will be wrapped inside a {@code RSGroupBasedLoadBalancer}, it will
  * be in charge of the synchronization of balancing and configuration changing, so we do not need to
- * synchronized by ourselves.
+ * synchronize by ourselves.
  */
 @InterfaceAudience.Private
 public abstract class BaseLoadBalancer implements LoadBalancer {

@@ -297,7 +297,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
       return Collections.singletonMap(servers.get(0), new ArrayList<>(regions.keySet()));
     }
 
-    // Group all of the old assignments by their hostname.
+    // Group all the old assignments by their hostname.
     // We can't group directly by ServerName since the servers all have
     // new start-codes.
 
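
The grouping that comment calls for (a restarted region server keeps its hostname but comes back with a new start-code, so ServerName equality fails) might look like this sketch:

    // Group old assignments by hostname; a restarted server compares unequal as a
    // ServerName because of its new start-code, but its hostname is stable.
    // Assumed imports: java.util.*, java.util.stream.Collectors,
    // org.apache.hadoop.hbase.ServerName.
    static Map<String, List<ServerName>> groupByHostname(Collection<ServerName> oldServers) {
      return oldServers.stream().collect(Collectors.groupingBy(ServerName::getHostname));
    }
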
@@ -484,7 +484,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   }
 
   /**
-   * Round robin a list of regions to a list of servers
+   * Round-robin a list of regions to a list of servers
   */
   private void roundRobinAssignment(BalancerClusterState cluster, List<RegionInfo> regions,
     List<ServerName> servers, Map<ServerName, List<RegionInfo>> assignments) {
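
For illustration, a bare-bones round-robin over the two lists in that signature. A sketch only: the real method also threads assignments through BalancerClusterState for locality and bookkeeping:

    // Deal regions out to servers like cards: region i goes to server (i % servers.size()).
    // Assumed imports: java.util.*, org.apache.hadoop.hbase.ServerName,
    // org.apache.hadoop.hbase.client.RegionInfo.
    static Map<ServerName, List<RegionInfo>> roundRobin(List<RegionInfo> regions,
        List<ServerName> servers) {
      Map<ServerName, List<RegionInfo>> assignments = new HashMap<>();
      for (int i = 0; i < regions.size(); i++) {
        assignments.computeIfAbsent(servers.get(i % servers.size()), s -> new ArrayList<>())
          .add(regions.get(i));
      }
      return assignments;
    }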