HDDS-1166. Fix checkstyle line length issues.

Contributed by Nandakumar.
Anu Engineer 2019-02-23 20:31:39 -08:00
parent 1b87668a3b
commit 014e17af78
8 changed files with 66 additions and 36 deletions
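
Every hunk below exists only to bring a line under checkstyle's line-length limit (80 columns in Hadoop's standard checkstyle configuration; stated here as an assumption, since the config itself is not part of this diff). Three wrapping patterns recur. A minimal, compilable sketch of each, with hypothetical names rather than code from this commit:

// Sketch of the three wrapping patterns applied throughout this commit.
// All names are hypothetical; the 80-column limit is assumed from
// Hadoop's standard checkstyle configuration.

// Pattern 1: split a long import at a '.' boundary. This is legal Java,
// if odd looking, because whitespace -- including a newline -- may
// separate the tokens of a qualified name.
import static java.nio.charset
    .StandardCharsets.UTF_8;

public final class LineWrapPatterns {

  private LineWrapPatterns() {
  }

  // Pattern 2: break an over-long assignment after the '=' and indent
  // the continuation four extra spaces.
  static String wrapAssignment() {
    final String descriptiveVariableName =
        buildValueWithAVeryLongAndDescriptiveFactoryMethodName();
    return descriptiveVariableName;
  }

  // Pattern 3: move call arguments onto a continuation line instead of
  // letting the argument list run past the limit.
  static byte[] wrapArguments() {
    return wrapAssignment().getBytes(
        UTF_8);
  }

  // (This helper's declaration is itself wrapped to stay under the limit.)
  private static String
      buildValueWithAVeryLongAndDescriptiveFactoryMethodName() {
    return "value";
  }
}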


@@ -86,7 +86,8 @@ public class CloseContainerCommandHandler implements CommandHandler {
       return;
     }
-    if (container.getContainerState() == ContainerProtos.ContainerDataProto.State.CLOSED) {
+    if (container.getContainerState() ==
+        ContainerProtos.ContainerDataProto.State.CLOSED) {
       // Closing a container is an idempotent operation.
       return;
     }


@@ -220,17 +220,20 @@ public class TestCloseContainerCommandHandler {
       throws Exception {
     final OzoneConfiguration conf = new OzoneConfiguration();
     final DatanodeDetails datanodeDetails = randomDatanodeDetails();
-    final OzoneContainer ozoneContainer = getOzoneContainer(conf, datanodeDetails);
+    final OzoneContainer ozoneContainer = getOzoneContainer(
+        conf, datanodeDetails);
     ozoneContainer.start();
     try {
-      final Container container = createContainer(conf, datanodeDetails, ozoneContainer);
+      final Container container = createContainer(
+          conf, datanodeDetails, ozoneContainer);
       Mockito.verify(context.getParent(),
           Mockito.times(1)).triggerHeartbeat();
       final long containerId = container.getContainerData().getContainerID();
       final PipelineID pipelineId = PipelineID.valueOf(UUID.fromString(
           container.getContainerData().getOriginPipelineId()));
-      final CloseContainerCommandHandler closeHandler = new CloseContainerCommandHandler();
+      final CloseContainerCommandHandler closeHandler =
+          new CloseContainerCommandHandler();
       final CloseContainerCommand closeCommand = new CloseContainerCommand(
           containerId, pipelineId);
@@ -240,12 +243,14 @@ public class TestCloseContainerCommandHandler {
       ozoneContainer.getContainerSet().getContainer(containerId)
           .getContainerState());

-      // The container is closed, now we send close command with pipeline id which doesn't exist.
-      // This should cause the datanode to trigger quasi close, since the container is already
-      // closed, this should do nothing. The command should not fail either.
+      // The container is closed, now we send close command with
+      // pipeline id which doesn't exist.
+      // This should cause the datanode to trigger quasi close, since the
+      // container is already closed, this should do nothing.
+      // The command should not fail either.
       final PipelineID randomPipeline = PipelineID.randomId();
-      final CloseContainerCommand quasiCloseCommand = new CloseContainerCommand(
-          containerId, randomPipeline);
+      final CloseContainerCommand quasiCloseCommand =
+          new CloseContainerCommand(containerId, randomPipeline);
       closeHandler.handle(quasiCloseCommand, ozoneContainer, context, null);
       Assert.assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
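
The rewrapped comment above spells out the contract this part of the test exercises: a close command for an already-closed container must succeed as a no-op, even when it carries a pipeline id that does not exist. A toy sketch of that idempotency guard, using hypothetical names rather than the real handler API:

// Toy model of the idempotent-close contract tested above. State names
// and methods are hypothetical stand-ins, not the Ozone datanode API.
enum ContainerState { OPEN, CLOSING, CLOSED }

final class IdempotentCloser {
  private ContainerState state = ContainerState.OPEN;

  // Handles a close command; an unknown pipeline id is tolerated because
  // an already-CLOSED container makes the command a no-op.
  void handleClose(String pipelineId) {
    if (state == ContainerState.CLOSED) {
      // Closing a container is an idempotent operation.
      return;
    }
    state = ContainerState.CLOSED;
  }
}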


@@ -50,11 +50,16 @@ import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.INVALID_BLOCK_SIZE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
+    .INVALID_BLOCK_SIZE;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;

 /** Block Manager manages the block access for SCM. */
 public class BlockManagerImpl implements EventHandler<Boolean>,
@@ -83,8 +88,8 @@ public class BlockManagerImpl implements EventHandler<Boolean>,
    * @param scm
    * @throws IOException
    */
-  public BlockManagerImpl(final Configuration conf, StorageContainerManager scm)
-      throws IOException {
+  public BlockManagerImpl(final Configuration conf,
+      final StorageContainerManager scm) {
     Objects.requireNonNull(scm, "SCM cannot be null");
     this.pipelineManager = scm.getPipelineManager();
     this.containerManager = scm.getContainerManager();


@@ -31,10 +31,15 @@ import java.util.concurrent.locks.ReentrantLock;
 import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.DeleteBlockStatus;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto
+    .DeleteBlockTransactionResult;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.scm.command
+    .CommandStatusReportHandler.DeleteBlockStatus;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
@@ -50,8 +55,10 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import static java.lang.Math.min;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT;

 /**
  * A implement class of {@link DeletedBlockLog}, and it uses
@@ -328,12 +335,14 @@ public class DeletedBlockLogImpl
         ? extends Table.KeyValue<Long, DeletedBlocksTransaction>> iter =
         scmMetadataStore.getDeletedBlocksTXTable().iterator()) {
       while (iter.hasNext()) {
-        Table.KeyValue<Long, DeletedBlocksTransaction> keyValue = iter.next();
+        Table.KeyValue<Long, DeletedBlocksTransaction> keyValue =
+            iter.next();
         DeletedBlocksTransaction block = keyValue.getValue();
         if (block.getCount() > -1 && block.getCount() <= maxRetry) {
           if (transactions.addTransaction(block,
               transactionToDNsCommitMap.get(block.getTxID()))) {
-            deleteTransactionMap.put(block.getContainerID(), block.getTxID());
+            deleteTransactionMap.put(block.getContainerID(),
+                block.getTxID());
             transactionToDNsCommitMap
                 .putIfAbsent(block.getTxID(), new ConcurrentHashSet<>());
           }


@@ -22,7 +22,8 @@ package org.apache.hadoop.hdds.scm.metadata;
 import com.google.protobuf.InvalidProtocolBufferException;
 import java.io.IOException;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.utils.db.Codec;

 /**


@@ -24,12 +24,14 @@ import java.security.cert.X509Certificate;
 import java.util.concurrent.atomic.AtomicLong;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import java.io.IOException;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore;
+import org.apache.hadoop.hdds.security.x509.certificate.authority
+    .CertificateStore;
 import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.utils.db.DBStore;
 import org.apache.hadoop.utils.db.DBStoreBuilder;
 import org.apache.hadoop.utils.db.Table;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.utils.db.TableIterator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -75,7 +77,8 @@ public class SCMMetadataStoreRDBImpl implements SCMMetadataStore {
    * @param config - Ozone Configuration.
    * @throws IOException - on Failure.
    */
-  public SCMMetadataStoreRDBImpl(OzoneConfiguration config) throws IOException {
+  public SCMMetadataStoreRDBImpl(OzoneConfiguration config)
+      throws IOException {
     this.configuration = config;
     start(this.configuration);
     this.txID = new AtomicLong(this.getLargestRecordedTXID());
@@ -187,8 +190,8 @@ public class SCMMetadataStoreRDBImpl implements SCMMetadataStore {
   private void checkTableStatus(Table table, String name) throws IOException {
     String logMessage = "Unable to get a reference to %s table. Cannot " +
         "continue.";
-    String errMsg = "Inconsistent DB state, Table - %s. Please check the logs" +
-        "for more info.";
+    String errMsg = "Inconsistent DB state, Table - %s. Please check the" +
+        " logs for more info.";
     if (table == null) {
       LOG.error(String.format(logMessage, name));
       throw new IOException(String.format(errMsg, name));
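
Note that the errMsg change in this hunk is more than a re-wrap: the old concatenation dropped the space at the join, so the rendered message read "…check the logsfor more info." A quick runnable check of both joins (the strings are copied from the hunk; the class name and sample table name are made up):

// Demonstrates the concatenation fix in checkTableStatus(): the old
// split lost the space between "logs" and "for".
public final class ErrMsgJoinCheck {
  public static void main(String[] args) {
    String before =
        "Inconsistent DB state, Table - %s. Please check the logs" +
        "for more info.";
    String after =
        "Inconsistent DB state, Table - %s. Please check the" +
        " logs for more info.";
    // Prints "...check the logsfor more info." -- note the missing space.
    System.out.println(String.format(before, "deletedBlocksTX"));
    // Prints "...check the logs for more info."
    System.out.println(String.format(after, "deletedBlocksTX"));
  }
}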


@@ -27,7 +27,8 @@ import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer;
+import org.apache.hadoop.hdds.security.x509.certificate.authority
+    .CertificateServer;

 /**
  * This class acts as an SCM builder Class. This class is important for us


@@ -18,9 +18,12 @@ package org.apache.hadoop.hdds.scm;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineActionsProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.PipelineAction;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.PipelineActionsProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -28,7 +31,8 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;
+import org.apache.hadoop.hdds.scm.server
+    .SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;
 import org.apache.hadoop.hdds.scm.server
     .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
@@ -54,7 +58,8 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.Storage;
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client
+    .AuthenticationException;

 import java.io.File;
 import java.io.IOException;