HDDS-1103. Fix rat/findbug/checkstyle errors in ozone/hdds projects.
Contributed by Elek, Marton.
parent 5cb67cf044
commit 75e15cc0c4
@@ -35,7 +35,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.metrics2.util.MBeans;

@@ -219,7 +219,8 @@ public final class ScmConfigKeys {
"ozone.scm.https-address";
public static final String HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY =
"hdds.scm.kerberos.keytab.file";
public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY = "hdds.scm.kerberos.principal";
public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY =
"hdds.scm.kerberos.principal";
public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
@@ -65,8 +65,9 @@ public interface CertificateServer {
* approved.
* @throws SCMSecurityException - on Error.
*/
Future<X509CertificateHolder>
requestCertificate(PKCS10CertificationRequest csr, CertificateApprover.ApprovalType type)
Future<X509CertificateHolder> requestCertificate(
PKCS10CertificationRequest csr,
CertificateApprover.ApprovalType type)
throws SCMSecurityException;
@@ -34,6 +34,7 @@ import org.slf4j.Logger;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.InvalidKeyException;

@@ -524,10 +525,9 @@ public abstract class DefaultCertificateClient implements CertificateClient {
return FAILURE;
}
default:
getLogger().error("Unexpected case: {}, Private key:{} , " +
"public key:{}, certificate:{}", init,
((init.ordinal() & 1 << 2) == 1), ((init.ordinal() & 1 << 1) == 1),
((init.ordinal() & 1 << 0) == 1));
getLogger().error("Unexpected case: {} (private/public/cert)",
Integer.toBinaryString(init.ordinal()));
return FAILURE;
}
}

@@ -584,7 +584,8 @@ public abstract class DefaultCertificateClient implements CertificateClient {
* */
protected boolean validateKeyPair(PublicKey pubKey)
throws CertificateException {
byte[] challenge = RandomStringUtils.random(1000).getBytes();
byte[] challenge = RandomStringUtils.random(1000).getBytes(
StandardCharsets.UTF_8);
byte[] sign = signDataStream(new ByteArrayInputStream(challenge));
return verifySignature(challenge, sign, pubKey);
}
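A side note on the logging change above: beyond the checkstyle line-length fix, the old flag checks could never print true for the upper bits, because for a bit position above zero `(x & (1 << n))` evaluates to either 0 or `1 << n`, never 1. A minimal standalone sketch (hypothetical demo class, not part of the patch):

```java
// Hypothetical demo, not from the patch: why "(x & 1 << 2) == 1" is always false.
public class BitFlagCheckDemo {
  public static void main(String[] args) {
    int ordinal = 7;                                      // pretend all three flags are set
    System.out.println((ordinal & 1 << 2) == 1);          // false: the masked value is 0 or 4, never 1
    System.out.println((ordinal & 1 << 2) != 0);          // true: the conventional bit test
    System.out.println(Integer.toBinaryString(ordinal));  // "111", which is what the new code logs
  }
}
```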
@@ -90,10 +90,8 @@ public class OMCertificateClient extends DefaultCertificateClient {
return FAILURE;
}
default:
LOG.error("Unexpected case: {}, Private key:{} , " +
"public key:{}, certificate:{}", init,
((init.ordinal() & 1 << 2) == 1), ((init.ordinal() & 1 << 1) == 1),
((init.ordinal() & 1 << 0) == 1));
LOG.error("Unexpected case: {} (private/public/cert)",
Integer.toBinaryString(init.ordinal()));
return FAILURE;
}
}
@@ -73,8 +73,8 @@ data node not only sends the CSR but signs the request with a shared secret
with SCM. SCM then can issue a certificate without the intervention of a
human administrator.

The last, TESTING method which never should be used other than in development and
testing clusters, is merely a mechanism to bypass all identity checks. If
The last, TESTING method which never should be used other than in development
and testing clusters, is merely a mechanism to bypass all identity checks. If
this flag is setup, then CA will issue a CSR if the base approves all fields.

* Please do not use this mechanism(TESTING) for any purpose other than
@@ -56,6 +56,7 @@ import static org.junit.Assert.assertTrue;
* Test class for {@link DefaultCertificateClient}.
*/
@RunWith(Parameterized.class)
@SuppressWarnings("visibilitymodifier")
public class TestCertificateClientInit {

private CertificateClient dnCertificateClient;
@@ -18,4 +18,16 @@
<Match>
<Package name="org.apache.hadoop.hdds.protocol.proto"/>
</Match>
<Match>
<Class name="org.apache.hadoop.ozone.container.common.volume.AbstractFuture" />
<Bug pattern="DLS_DEAD_STORE_OF_CLASS_LITERAL" />
</Match>
<Match>
<Class name="org.apache.hadoop.ozone.container.common.volume.AbstractFuture" />
<Bug pattern="DLS_DEAD_LOCAL_STORE" />
</Match>
<Match>
<Class name="org.apache.hadoop.ozone.container.common.volume.AbstractFuture" />
<Bug pattern="NS_DANGEROUS_NON_SHORT_CIRCUIT" />
</Match>
</FindBugsFilter>
@@ -50,6 +50,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>snakeyaml</artifactId>
<version>1.8</version>
</dependency>
<dependency>
<groupId>com.google.code.findbugs</groupId>
<artifactId>findbugs</artifactId>
<version>3.0.1</version>
<scope>provided</scope>
</dependency>

<dependency>
<groupId>io.dropwizard.metrics</groupId>
@@ -18,7 +18,8 @@

package org.apache.hadoop.ozone.container.common.transport.server;

import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

@@ -28,8 +29,8 @@ import org.apache.hadoop.hdds.security.token.TokenVerifier;
import org.apache.hadoop.hdds.security.x509.SecurityConfig;
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;

import java.io.IOException;
import java.util.Objects;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;

/**
* A server endpoint that acts as the communication layer for Ozone containers.

@@ -40,7 +41,7 @@ public abstract class XceiverServer implements XceiverServerSpi {
private final TokenVerifier tokenVerifier;

public XceiverServer(Configuration conf) {
Objects.nonNull(conf);
Preconditions.checkNotNull(conf);
this.secConfig = new SecurityConfig(conf);
tokenVerifier = new BlockTokenVerifier(secConfig, getCaClient());
}
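A brief aside on the constructor change above: `java.util.Objects.nonNull` only returns a boolean (it is meant as a predicate), so calling it and discarding the result validates nothing, whereas Guava's `Preconditions.checkNotNull` fails fast. A small sketch with a hypothetical class name, not taken from the patch:

```java
import java.util.Objects;

import com.google.common.base.Preconditions;

// Hypothetical demo class illustrating the difference; not part of the commit.
class NullCheckDemo {
  static void init(Object conf) {
    Objects.nonNull(conf);            // returns a boolean that is silently dropped - no guard at all
    Preconditions.checkNotNull(conf); // throws NullPointerException immediately when conf is null
  }

  public static void main(String[] args) {
    init(new Object()); // fine
    init(null);         // fails fast inside checkNotNull
  }
}
```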
@@ -138,6 +138,7 @@ public class ContainerStateMachine extends BaseStateMachine {
*/
private final CSMMetrics metrics;

@SuppressWarnings("parameternumber")
public ContainerStateMachine(RaftGroupId gid, ContainerDispatcher dispatcher,
ThreadPoolExecutor chunkExecutor, XceiverServerRatis ratisServer,
List<ExecutorService> executors, long expiryInterval,
@ -29,6 +29,7 @@ import com.google.common.util.concurrent.ListeningExecutorService;
|
|||
import com.google.common.util.concurrent.ListenableFuture;
|
||||
import com.google.common.util.concurrent.SettableFuture;
|
||||
import com.google.common.util.concurrent.Uninterruptibles;
|
||||
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
|
||||
import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater
|
||||
.newUpdater;
|
||||
|
||||
|
@ -116,7 +117,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
|
|||
}
|
||||
|
||||
// Logger to log exceptions caught when running listeners.
|
||||
private static final Logger log = Logger
|
||||
private static final Logger LOG = Logger
|
||||
.getLogger(AbstractFuture.class.getName());
|
||||
|
||||
// A heuristic for timed gets. If the remaining timeout is less than this,
|
||||
|
@ -150,8 +151,8 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
|
|||
// the field is definitely there.
|
||||
// For these users fallback to a suboptimal implementation, based on
|
||||
// synchronized. This will be a definite performance hit to those users.
|
||||
log.log(Level.SEVERE, "UnsafeAtomicHelper is broken!", unsafeFailure);
|
||||
log.log(
|
||||
LOG.log(Level.SEVERE, "UnsafeAtomicHelper is broken!", unsafeFailure);
|
||||
LOG.log(
|
||||
Level.SEVERE, "SafeAtomicHelper is broken!",
|
||||
atomicReferenceFieldUpdaterFailure);
|
||||
helper = new SynchronizedHelper();
|
||||
|
@ -162,12 +163,14 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
|
|||
// Prevent rare disastrous classloading in first call to LockSupport.park.
|
||||
// See: https://bugs.openjdk.java.net/browse/JDK-8074773
|
||||
@SuppressWarnings("unused")
|
||||
@SuppressFBWarnings
|
||||
Class<?> ensureLoaded = LockSupport.class;
|
||||
}
|
||||
|
||||
/**
|
||||
* Waiter links form a Treiber stack, in the {@link #waiters} field.
|
||||
*/
|
||||
@SuppressWarnings("visibilitymodifier")
|
||||
private static final class Waiter {
|
||||
static final Waiter TOMBSTONE = new Waiter(false /* ignored param */);
|
||||
|
||||
|
@ -252,6 +255,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
|
|||
/**
|
||||
* Listeners also form a stack through the {@link #listeners} field.
|
||||
*/
|
||||
@SuppressWarnings("visibilitymodifier")
|
||||
private static final class Listener {
|
||||
static final Listener TOMBSTONE = new Listener(null, null);
|
||||
final Runnable task;
|
||||
|
@ -276,16 +280,17 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
|
|||
* A special value to represent failure, when {@link #setException} is
|
||||
* called successfully.
|
||||
*/
|
||||
@SuppressWarnings("visibilitymodifier")
|
||||
private static final class Failure {
|
||||
static final Failure FALLBACK_INSTANCE =
|
||||
new Failure(
|
||||
new Throwable("Failure occurred while trying to finish a future" +
|
||||
".") {
|
||||
@Override
|
||||
public synchronized Throwable fillInStackTrace() {
|
||||
return this; // no stack trace
|
||||
}
|
||||
});
|
||||
new Throwable("Failure occurred while trying to finish a future" +
|
||||
".") {
|
||||
@Override
|
||||
public synchronized Throwable fillInStackTrace() {
|
||||
return this; // no stack trace
|
||||
}
|
||||
});
|
||||
final Throwable exception;
|
||||
|
||||
Failure(Throwable exception) {
|
||||
|
@ -296,6 +301,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
|
|||
/**
|
||||
* A special value to represent cancellation and the 'wasInterrupted' bit.
|
||||
*/
|
||||
@SuppressWarnings("visibilitymodifier")
|
||||
private static final class Cancellation {
|
||||
final boolean wasInterrupted;
|
||||
@Nullable final Throwable cause;
|
||||
|
@ -309,6 +315,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
|
|||
/**
|
||||
* A special value that encodes the 'setFuture' state.
|
||||
*/
|
||||
@SuppressWarnings("visibilitymodifier")
|
||||
private static final class SetFuture<V> implements Runnable {
|
||||
final AbstractFuture<V> owner;
|
||||
final ListenableFuture<? extends V> future;
|
||||
|
@ -711,8 +718,8 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
|
|||
* @param value the value to be used as the result
|
||||
* @return true if the attempt was accepted, completing the {@code Future}
|
||||
*/
|
||||
protected boolean set(@Nullable V value) {
|
||||
Object valueToSet = value == null ? NULL : value;
|
||||
protected boolean set(@Nullable V val) {
|
||||
Object valueToSet = value == null ? NULL : val;
|
||||
if (ATOMIC_HELPER.casValue(this, null, valueToSet)) {
|
||||
complete(this);
|
||||
return true;
|
||||
|
@ -769,13 +776,14 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
|
|||
* @since 19.0
|
||||
*/
|
||||
@Beta
|
||||
@SuppressWarnings("deadstore")
|
||||
protected boolean setFuture(ListenableFuture<? extends V> future) {
|
||||
checkNotNull(future);
|
||||
Object localValue = value;
|
||||
if (localValue == null) {
|
||||
if (future.isDone()) {
|
||||
Object value = getFutureValue(future);
|
||||
if (ATOMIC_HELPER.casValue(this, null, value)) {
|
||||
Object val = getFutureValue(future);
|
||||
if (ATOMIC_HELPER.casValue(this, null, val)) {
|
||||
complete(this);
|
||||
return true;
|
||||
}
|
||||
|
@ -950,10 +958,8 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
|
|||
do {
|
||||
head = waiters;
|
||||
} while (!ATOMIC_HELPER.casWaiters(this, head, Waiter.TOMBSTONE));
|
||||
for (
|
||||
Waiter currentWaiter = head;
|
||||
currentWaiter != null;
|
||||
currentWaiter = currentWaiter.next) {
|
||||
for (Waiter currentWaiter = head;
|
||||
currentWaiter != null; currentWaiter = currentWaiter.next) {
|
||||
currentWaiter.unpark();
|
||||
}
|
||||
}
|
||||
|
@ -995,7 +1001,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
|
|||
// Log it and keep going -- bad runnable and/or executor. Don't punish
|
||||
// the other runnables if we're given a bad one. We only catch
|
||||
// RuntimeException because we want Errors to propagate up.
|
||||
log.log(
|
||||
LOG.log(
|
||||
Level.SEVERE,
|
||||
"RuntimeException while executing runnable " + runnable + " with " +
|
||||
"executor " + executor,
|
||||
|
@ -1147,6 +1153,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
|
|||
/**
|
||||
* {@link AtomicHelper} based on {@link AtomicReferenceFieldUpdater}.
|
||||
*/
|
||||
@SuppressWarnings("visibilitymodifier")
|
||||
private static final class SafeAtomicHelper extends AtomicHelper {
|
||||
final AtomicReferenceFieldUpdater<Waiter, Thread> waiterThreadUpdater;
|
||||
final AtomicReferenceFieldUpdater<Waiter, Waiter> waiterNextUpdater;
|
||||
|
|
|
@@ -66,6 +66,7 @@ import java.util.UUID;
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
@SuppressWarnings("finalclass")
public class HddsVolume
implements Checkable<Boolean, VolumeCheckResult> {
@@ -32,6 +32,8 @@ import org.apache.hadoop.hdfs.server.datanode.checker.AsyncChecker;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.Timer;

import static org.apache.hadoop.hdfs.server.datanode.DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -141,7 +143,7 @@ public class HddsVolumeChecker {

lastAllVolumesCheck = timer.monotonicNow() - minDiskCheckGapMs;

if (maxVolumeFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
if (maxVolumeFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
throw new DiskErrorException("Invalid value configured for "
+ DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
+ maxVolumeFailuresTolerated + " "
@@ -310,21 +312,21 @@ public class HddsVolumeChecker {

@Override
public void onSuccess(@Nonnull VolumeCheckResult result) {
switch(result) {
case HEALTHY:
case DEGRADED:
LOG.debug("Volume {} is {}.", volume, result);
markHealthy();
break;
case FAILED:
LOG.warn("Volume {} detected as being unhealthy", volume);
markFailed();
break;
default:
LOG.error("Unexpected health check result {} for volume {}",
result, volume);
markHealthy();
break;
switch (result) {
case HEALTHY:
case DEGRADED:
LOG.debug("Volume {} is {}.", volume, result);
markHealthy();
break;
case FAILED:
LOG.warn("Volume {} detected as being unhealthy", volume);
markFailed();
break;
default:
LOG.error("Unexpected health check result {} for volume {}",
result, volume);
markHealthy();
break;
}
cleanup();
}

@@ -378,7 +380,8 @@ public class HddsVolumeChecker {
try {
delegateChecker.shutdownAndWait(gracePeriod, timeUnit);
} catch (InterruptedException e) {
LOG.warn("{} interrupted during shutdown.", this.getClass().getSimpleName());
LOG.warn("{} interrupted during shutdown.",
this.getClass().getSimpleName());
Thread.currentThread().interrupt();
}
}
@@ -87,7 +87,8 @@ public class ThrottledAsyncChecker<K, V> implements AsyncChecker<K, V> {
* the results of the operation.
* Protected by the object lock.
*/
private final Map<Checkable, ThrottledAsyncChecker.LastCheckResult<V>> completedChecks;
private final Map<Checkable, ThrottledAsyncChecker.LastCheckResult<V>>
completedChecks;

public ThrottledAsyncChecker(final Timer timer,
final long minMsBetweenChecks,

@@ -125,7 +126,8 @@ public class ThrottledAsyncChecker<K, V> implements AsyncChecker<K, V> {
}

if (completedChecks.containsKey(target)) {
final ThrottledAsyncChecker.LastCheckResult<V> result = completedChecks.get(target);
final ThrottledAsyncChecker.LastCheckResult<V> result =
completedChecks.get(target);
final long msSinceLastCheck = timer.monotonicNow() - result.completedAt;
if (msSinceLastCheck < minMsBetweenChecks) {
LOG.debug("Skipped checking {}. Time since last check {}ms " +
@@ -94,7 +94,7 @@ final class TimeoutFuture<V> extends AbstractFuture.TrustedFuture<V> {
*/
private static final class Fire<V> implements Runnable {
@Nullable
TimeoutFuture<V> timeoutFutureRef;
private TimeoutFuture<V> timeoutFutureRef;

Fire(
TimeoutFuture<V> timeoutFuture) {
@ -18,33 +18,6 @@
|
|||
|
||||
package org.apache.hadoop.ozone.container.common.volume;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import org.apache.curator.shaded.com.google.common.collect.ImmutableSet;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.StorageType;
|
||||
|
||||
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
|
||||
import static org.apache.hadoop.util.RunJar.SHUTDOWN_HOOK_PRIORITY;
|
||||
|
||||
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.NodeReportProto;
|
||||
import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
|
||||
import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
|
||||
import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
|
||||
import org.apache.hadoop.ozone.container.common.volume.HddsVolume.VolumeState;
|
||||
import org.apache.hadoop.util.DiskChecker;
|
||||
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
|
||||
import org.apache.hadoop.util.ShutdownHookManager;
|
||||
import org.apache.hadoop.util.Timer;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
|
@ -53,14 +26,35 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.ThreadFactory;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.locks.ReentrantReadWriteLock;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.StorageType;
|
||||
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
|
||||
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
|
||||
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
|
||||
import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
|
||||
import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
|
||||
import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
|
||||
import org.apache.hadoop.ozone.container.common.volume.HddsVolume.VolumeState;
|
||||
import org.apache.hadoop.util.DiskChecker;
|
||||
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
|
||||
import org.apache.hadoop.util.ShutdownHookManager;
|
||||
import org.apache.hadoop.util.Timer;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
|
||||
import static org.apache.hadoop.util.RunJar.SHUTDOWN_HOOK_PRIORITY;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* VolumeSet to manage HDDS volumes in a DataNode.
|
||||
*/
|
||||
|
@ -91,8 +85,8 @@ public class VolumeSet {
|
|||
/**
|
||||
* An executor for periodic disk checks.
|
||||
*/
|
||||
final ScheduledExecutorService diskCheckerservice;
|
||||
final ScheduledFuture<?> periodicDiskChecker;
|
||||
private final ScheduledExecutorService diskCheckerservice;
|
||||
private final ScheduledFuture<?> periodicDiskChecker;
|
||||
|
||||
private static final long DISK_CHECK_INTERVAL_MINUTES = 15;
|
||||
|
||||
|
@ -125,21 +119,21 @@ public class VolumeSet {
|
|||
this.diskCheckerservice = Executors.newScheduledThreadPool(
|
||||
1, r -> new Thread(r, "Periodic HDDS volume checker"));
|
||||
this.periodicDiskChecker =
|
||||
diskCheckerservice.scheduleWithFixedDelay(() -> {
|
||||
try {
|
||||
checkAllVolumes();
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Exception while checking disks", e);
|
||||
}
|
||||
}, DISK_CHECK_INTERVAL_MINUTES, DISK_CHECK_INTERVAL_MINUTES,
|
||||
TimeUnit.MINUTES);
|
||||
diskCheckerservice.scheduleWithFixedDelay(() -> {
|
||||
try {
|
||||
checkAllVolumes();
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Exception while checking disks", e);
|
||||
}
|
||||
}, DISK_CHECK_INTERVAL_MINUTES, DISK_CHECK_INTERVAL_MINUTES,
|
||||
TimeUnit.MINUTES);
|
||||
initializeVolumeSet();
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
HddsVolumeChecker getVolumeChecker(Configuration conf)
|
||||
HddsVolumeChecker getVolumeChecker(Configuration configuration)
|
||||
throws DiskChecker.DiskErrorException {
|
||||
return new HddsVolumeChecker(conf, new Timer());
|
||||
return new HddsVolumeChecker(configuration, new Timer());
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -18,17 +18,28 @@
|
|||
|
||||
package org.apache.hadoop.ozone.container.common.volume;
|
||||
|
||||
import com.google.common.collect.Iterables;
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.curator.shaded.com.google.common.collect.ImmutableSet;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
|
||||
import org.apache.hadoop.hdfs.server.datanode.checker.AsyncChecker;
|
||||
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
|
||||
import org.apache.hadoop.util.Timer;
|
||||
|
||||
import com.google.common.collect.Iterables;
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.curator.shaded.com.google.common.collect.ImmutableSet;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
|
||||
import static org.hamcrest.CoreMatchers.is;
|
||||
import org.junit.After;
|
||||
import static org.junit.Assert.assertThat;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import org.junit.Rule;
|
||||
import org.junit.Test;
|
||||
import org.junit.rules.ExpectedException;
|
||||
|
@ -36,21 +47,6 @@ import org.junit.rules.Timeout;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.net.BindException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
|
||||
import static org.hamcrest.CoreMatchers.is;
|
||||
import static org.junit.Assert.assertThat;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
|
||||
/**
|
||||
* Verify that {@link VolumeSet} correctly checks for failed disks
|
||||
|
@ -66,7 +62,7 @@ public class TestVolumeSetDiskChecks {
|
|||
@Rule
|
||||
public ExpectedException thrown = ExpectedException.none();
|
||||
|
||||
Configuration conf = null;
|
||||
private Configuration conf = null;
|
||||
|
||||
/**
|
||||
* Cleanup volume directories.
|
||||
|
@ -117,14 +113,15 @@ public class TestVolumeSetDiskChecks {
|
|||
final VolumeSet volumeSet = new VolumeSet(
|
||||
UUID.randomUUID().toString(), conf) {
|
||||
@Override
|
||||
HddsVolumeChecker getVolumeChecker(Configuration conf)
|
||||
HddsVolumeChecker getVolumeChecker(Configuration configuration)
|
||||
throws DiskErrorException {
|
||||
return new DummyChecker(conf, new Timer(), numBadVolumes);
|
||||
return new DummyChecker(configuration, new Timer(), numBadVolumes);
|
||||
}
|
||||
};
|
||||
|
||||
assertThat(volumeSet.getFailedVolumesList().size(), is(numBadVolumes));
|
||||
assertThat(volumeSet.getVolumesList().size(), is(numVolumes - numBadVolumes));
|
||||
assertThat(volumeSet.getVolumesList().size(),
|
||||
is(numVolumes - numBadVolumes));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -139,9 +136,9 @@ public class TestVolumeSetDiskChecks {
|
|||
final VolumeSet volumeSet = new VolumeSet(
|
||||
UUID.randomUUID().toString(), conf) {
|
||||
@Override
|
||||
HddsVolumeChecker getVolumeChecker(Configuration conf)
|
||||
HddsVolumeChecker getVolumeChecker(Configuration configuration)
|
||||
throws DiskErrorException {
|
||||
return new DummyChecker(conf, new Timer(), numVolumes);
|
||||
return new DummyChecker(configuration, new Timer(), numVolumes);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
@ -153,13 +150,13 @@ public class TestVolumeSetDiskChecks {
|
|||
* @param numDirs
|
||||
*/
|
||||
private Configuration getConfWithDataNodeDirs(int numDirs) {
|
||||
final Configuration conf = new OzoneConfiguration();
|
||||
final Configuration ozoneConf = new OzoneConfiguration();
|
||||
final List<String> dirs = new ArrayList<>();
|
||||
for (int i = 0; i < numDirs; ++i) {
|
||||
dirs.add(GenericTestUtils.getRandomizedTestDir().getPath());
|
||||
}
|
||||
conf.set(DFS_DATANODE_DATA_DIR_KEY, String.join(",", dirs));
|
||||
return conf;
|
||||
ozoneConf.set(DFS_DATANODE_DATA_DIR_KEY, String.join(",", dirs));
|
||||
return ozoneConf;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -169,7 +166,7 @@ public class TestVolumeSetDiskChecks {
|
|||
static class DummyChecker extends HddsVolumeChecker {
|
||||
private final int numBadVolumes;
|
||||
|
||||
public DummyChecker(Configuration conf, Timer timer, int numBadVolumes)
|
||||
DummyChecker(Configuration conf, Timer timer, int numBadVolumes)
|
||||
throws DiskErrorException {
|
||||
super(conf, timer);
|
||||
this.numBadVolumes = numBadVolumes;
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.container.keyvalue;
|
|||
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
|
||||
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
|
||||
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
|
||||
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
|
||||
import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
|
||||
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
|
||||
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
|
||||
|
@ -167,10 +168,10 @@ public class TestKeyValueHandlerWithUnhealthyContainer {
|
|||
* @param cmdType type of the container command.
|
||||
* @return
|
||||
*/
|
||||
private ContainerProtos.ContainerCommandRequestProto getDummyCommandRequestProto(
|
||||
private ContainerCommandRequestProto getDummyCommandRequestProto(
|
||||
ContainerProtos.Type cmdType) {
|
||||
final ContainerProtos.ContainerCommandRequestProto.Builder builder =
|
||||
ContainerProtos.ContainerCommandRequestProto.newBuilder()
|
||||
final ContainerCommandRequestProto.Builder builder =
|
||||
ContainerCommandRequestProto.newBuilder()
|
||||
.setCmdType(cmdType)
|
||||
.setContainerID(DUMMY_CONTAINER_ID)
|
||||
.setDatanodeUuid(DATANODE_UUID);
|
||||
|
@ -190,36 +191,39 @@ public class TestKeyValueHandlerWithUnhealthyContainer {
|
|||
.build())
|
||||
.build();
|
||||
|
||||
switch(cmdType) {
|
||||
case ReadContainer:
|
||||
builder.setReadContainer(ContainerProtos.ReadContainerRequestProto.newBuilder().build());
|
||||
break;
|
||||
case GetBlock:
|
||||
builder.setGetBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
|
||||
.setBlockID(fakeBlockId).build());
|
||||
break;
|
||||
case GetCommittedBlockLength:
|
||||
builder.setGetCommittedBlockLength(
|
||||
ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder()
|
||||
.setBlockID(fakeBlockId).build());
|
||||
case ReadChunk:
|
||||
builder.setReadChunk(ContainerProtos.ReadChunkRequestProto.newBuilder()
|
||||
.setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build());
|
||||
break;
|
||||
case DeleteChunk:
|
||||
builder.setDeleteChunk(ContainerProtos.DeleteChunkRequestProto.newBuilder()
|
||||
.setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build());
|
||||
break;
|
||||
case GetSmallFile:
|
||||
builder.setGetSmallFile(ContainerProtos.GetSmallFileRequestProto.newBuilder()
|
||||
.setBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
|
||||
.setBlockID(fakeBlockId)
|
||||
.build())
|
||||
.build());
|
||||
break;
|
||||
switch (cmdType) {
|
||||
case ReadContainer:
|
||||
builder.setReadContainer(
|
||||
ContainerProtos.ReadContainerRequestProto.newBuilder().build());
|
||||
break;
|
||||
case GetBlock:
|
||||
builder.setGetBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
|
||||
.setBlockID(fakeBlockId).build());
|
||||
break;
|
||||
case GetCommittedBlockLength:
|
||||
builder.setGetCommittedBlockLength(
|
||||
ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder()
|
||||
.setBlockID(fakeBlockId).build());
|
||||
case ReadChunk:
|
||||
builder.setReadChunk(ContainerProtos.ReadChunkRequestProto.newBuilder()
|
||||
.setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build());
|
||||
break;
|
||||
case DeleteChunk:
|
||||
builder
|
||||
.setDeleteChunk(ContainerProtos.DeleteChunkRequestProto.newBuilder()
|
||||
.setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build());
|
||||
break;
|
||||
case GetSmallFile:
|
||||
builder
|
||||
.setGetSmallFile(ContainerProtos.GetSmallFileRequestProto.newBuilder()
|
||||
.setBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
|
||||
.setBlockID(fakeBlockId)
|
||||
.build())
|
||||
.build());
|
||||
break;
|
||||
|
||||
default:
|
||||
Assert.fail("Unhandled request type " + cmdType + " in unit test");
|
||||
default:
|
||||
Assert.fail("Unhandled request type " + cmdType + " in unit test");
|
||||
}
|
||||
|
||||
return builder.build();
|
||||
|
|
|
@@ -290,6 +290,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>findbugs-maven-plugin</artifactId>
<version>3.0.4</version>
<configuration>
<excludeFilterFile combine.self="override"></excludeFilterFile>
</configuration>
@@ -261,15 +261,13 @@ public final class RatisPipelineUtils {

for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor
.values()) {
while (true) {
try {
pipelineManager.createPipeline(type, factor);
} catch (IOException ioe) {
break;
} catch (Throwable t) {
LOG.error("Error while creating pipelines {}", t);
break;
}
try {
pipelineManager.createPipeline(type, factor);
} catch (IOException ioe) {
break;
} catch (Throwable t) {
LOG.error("Error while creating pipelines {}", t);
break;
}
}
isPipelineCreatorRunning.set(false);
@@ -138,6 +138,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>javax.servlet-api</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.google.code.findbugs</groupId>
<artifactId>findbugs</artifactId>
<version>3.0.1</version>
<scope>provided</scope>
</dependency>

<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
@@ -29,6 +29,7 @@ import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater
.newUpdater;
@ -17,30 +17,26 @@
|
|||
*/
|
||||
node("ubuntu") {
|
||||
docker.image('elek/ozone-build').pull()
|
||||
docker.image('elek/ozone-build').inside {
|
||||
docker.image('elek/ozone-build').inside("--privileged") {
|
||||
|
||||
stage('Checkout') {
|
||||
checkout scm
|
||||
//use this for external Jenkinsfile builds
|
||||
//checkout poll: false, scm: [$class: 'GitSCM', branches: [[name: env.branch]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: 'github-token', url: "https://github.com/${organization}/${repository}.git"]]]
|
||||
|
||||
}
|
||||
|
||||
stage('Clean') {
|
||||
status = sh returnStatus: true, script: 'mvn clean'
|
||||
status = sh returnStatus: true, script: 'mvn clean -P hdds -am -pl :hadoop-ozone-dist '
|
||||
}
|
||||
|
||||
stageRunner('Author', "author", {})
|
||||
|
||||
stageRunner('Isolation', "isolation", {})
|
||||
|
||||
|
||||
stageRunner('Build', "build", {})
|
||||
|
||||
stageRunner('Licence', "rat", {
|
||||
archiveArtifacts 'target/rat-aggregated.txt'
|
||||
}, 'artifact/target/rat-aggregated.txt/*view*/')
|
||||
|
||||
stageRunner('Unit test', "unit", {
|
||||
junit '**/target/surefire-reports/*.xml'
|
||||
}, 'testReport/')
|
||||
stageRunner('Build', "build", {})
|
||||
|
||||
stageRunner('Findbugs', "findbugs", {
|
||||
archiveArtifacts 'target/findbugs-all.txt'
|
||||
|
@ -48,9 +44,17 @@ node("ubuntu") {
|
|||
}, 'artifact/target/findbugs-all.txt/*view*/')
|
||||
|
||||
stageRunner('Checkstyle', "checkstyle", {
|
||||
checkstyle canComputeNew: false, canRunOnFailed: true, defaultEncoding: '', healthy: '', pattern: '**/checkstyle-result.xml', unHealthy: ''
|
||||
checkstyle canComputeNew: false, canRunOnFailed: true, defaultEncoding: '', healthy: '', pattern: '**/checkstyle-errors.xml', unHealthy: ''
|
||||
}, 'checkstyleResult')
|
||||
|
||||
stageRunner('Acceptance', "acceptance", {
|
||||
archiveArtifacts 'hadoop-ozone/dist/target/ozone-0.4.0-SNAPSHOT/smoketest/result/**'
|
||||
})
|
||||
|
||||
stageRunner('Unit test', "unit", {
|
||||
junit '**/target/surefire-reports/*.xml'
|
||||
}, 'testReport/')
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -70,35 +74,42 @@ def stageRunner(name, type, processResult, url = '') {
|
|||
}
|
||||
}
|
||||
|
||||
def githubStatus(name, status, description, url='') {
|
||||
commitId = sh(returnStdout: true, script: 'git rev-parse HEAD')
|
||||
context = 'ci/ozone/' + name
|
||||
if (url) {
|
||||
githubNotify account: 'apache', context: context, credentialsId: 'github-pr-ozone', description: description, repo: 'hadoop', sha: commitId, status: status, targetUrl: url
|
||||
} else {
|
||||
githubNotify account: 'apache', context: context, credentialsId: 'github-pr-ozone', description: description, repo: 'hadoop', sha: commitId, status: status
|
||||
}
|
||||
}
|
||||
def prStatusStart(name) {
|
||||
if (env.CHANGE_ID) {
|
||||
pullRequest.createStatus(status: "pending",
|
||||
context: 'continuous-integration/jenkins/pr-merge/' + name,
|
||||
description: name + " is started")
|
||||
}
|
||||
githubStatus(name,
|
||||
"PENDING",
|
||||
name + " is started")
|
||||
|
||||
|
||||
}
|
||||
|
||||
def prStatusResult(responseCode, name, url = '') {
|
||||
status = "error"
|
||||
status = "ERROR"
|
||||
desc = "failed"
|
||||
if (responseCode == 0) {
|
||||
status = "success"
|
||||
status = "SUCCESS"
|
||||
desc = "passed"
|
||||
}
|
||||
message = name + " is " + desc
|
||||
//System.out.println(responseCode)
|
||||
if (env.CHANGE_ID) {
|
||||
message = name + " check is " + desc
|
||||
if (url) {
|
||||
pullRequest.createStatus(status: status,
|
||||
context: 'continuous-integration/jenkins/pr-merge/' + name,
|
||||
description: message,
|
||||
targetUrl: env.BUILD_URL + url)
|
||||
githubStatus(name,
|
||||
status,
|
||||
message,
|
||||
env.BUILD_URL + url)
|
||||
} else {
|
||||
pullRequest.createStatus(status: status,
|
||||
context: 'continuous-integration/jenkins/pr-merge/' + name,
|
||||
description: message)
|
||||
githubStatus(name,
|
||||
status,
|
||||
message)
|
||||
}
|
||||
}
|
||||
|
||||
if (responseCode != 0) {
|
||||
throw new RuntimeException(message)
|
||||
}
|
||||
|
|
|
@ -17,9 +17,13 @@
|
|||
*/
|
||||
package org.apache.hadoop.ozone.client.io;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.hadoop.hdds.client.BlockID;
|
||||
import org.apache.hadoop.hdds.scm.XceiverClientManager;
|
||||
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
|
||||
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
|
||||
import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
|
||||
import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
|
||||
|
@ -27,15 +31,10 @@ import org.apache.hadoop.ozone.common.Checksum;
|
|||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Helper class used inside {@link BlockOutputStream}.
|
||||
* */
|
||||
public class BlockOutputStreamEntry extends OutputStream {
|
||||
public final class BlockOutputStreamEntry extends OutputStream {
|
||||
|
||||
private OutputStream outputStream;
|
||||
private BlockID blockID;
|
||||
|
@ -56,6 +55,7 @@ public class BlockOutputStreamEntry extends OutputStream {
|
|||
private final long watchTimeout;
|
||||
private List<ByteBuffer> bufferList;
|
||||
|
||||
@SuppressWarnings("parameternumber")
|
||||
private BlockOutputStreamEntry(BlockID blockID, String key,
|
||||
XceiverClientManager xceiverClientManager,
|
||||
Pipeline pipeline, String requestId, int chunkSize,
|
||||
|
@ -137,56 +137,48 @@ public class BlockOutputStreamEntry extends OutputStream {
|
|||
this.outputStream.close();
|
||||
// after closing the chunkOutPutStream, blockId would have been
|
||||
// reconstructed with updated bcsId
|
||||
if (this.outputStream instanceof BlockOutputStream) {
|
||||
this.blockID = ((BlockOutputStream) outputStream).getBlockID();
|
||||
}
|
||||
this.blockID = ((BlockOutputStream) outputStream).getBlockID();
|
||||
}
|
||||
}
|
||||
|
||||
long getTotalSuccessfulFlushedData() throws IOException {
|
||||
if (this.outputStream instanceof BlockOutputStream) {
|
||||
if (outputStream != null) {
|
||||
BlockOutputStream out = (BlockOutputStream) this.outputStream;
|
||||
blockID = out.getBlockID();
|
||||
return out.getTotalSuccessfulFlushedData();
|
||||
} else if (outputStream == null) {
|
||||
// For a pre allocated block for which no write has been initiated,
|
||||
// the OutputStream will be null here.
|
||||
// In such cases, the default blockCommitSequenceId will be 0
|
||||
return 0;
|
||||
}
|
||||
throw new IOException("Invalid Output Stream for Key: " + key);
|
||||
}
|
||||
|
||||
long getWrittenDataLength() throws IOException {
|
||||
if (this.outputStream instanceof BlockOutputStream) {
|
||||
BlockOutputStream out = (BlockOutputStream) this.outputStream;
|
||||
return out.getWrittenDataLength();
|
||||
} else if (outputStream == null) {
|
||||
} else {
|
||||
// For a pre allocated block for which no write has been initiated,
|
||||
// the OutputStream will be null here.
|
||||
// In such cases, the default blockCommitSequenceId will be 0
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
long getWrittenDataLength() throws IOException {
|
||||
if (outputStream != null) {
|
||||
BlockOutputStream out = (BlockOutputStream) this.outputStream;
|
||||
return out.getWrittenDataLength();
|
||||
} else {
|
||||
// For a pre allocated block for which no write has been initiated,
|
||||
// the OutputStream will be null here.
|
||||
// In such cases, the default blockCommitSequenceId will be 0
|
||||
return 0;
|
||||
}
|
||||
throw new IOException("Invalid Output Stream for Key: " + key);
|
||||
}
|
||||
|
||||
void cleanup(boolean invalidateClient) throws IOException {
|
||||
checkStream();
|
||||
if (this.outputStream instanceof BlockOutputStream) {
|
||||
BlockOutputStream out = (BlockOutputStream) this.outputStream;
|
||||
out.cleanup(invalidateClient);
|
||||
}
|
||||
BlockOutputStream out = (BlockOutputStream) this.outputStream;
|
||||
out.cleanup(invalidateClient);
|
||||
|
||||
}
|
||||
|
||||
void writeOnRetry(long len) throws IOException {
|
||||
checkStream();
|
||||
if (this.outputStream instanceof BlockOutputStream) {
|
||||
BlockOutputStream out = (BlockOutputStream) this.outputStream;
|
||||
out.writeOnRetry(len);
|
||||
this.currentPosition += len;
|
||||
} else {
|
||||
throw new IOException("Invalid Output Stream for Key: " + key);
|
||||
}
|
||||
BlockOutputStream out = (BlockOutputStream) this.outputStream;
|
||||
out.writeOnRetry(len);
|
||||
this.currentPosition += len;
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -229,8 +221,8 @@ public class BlockOutputStreamEntry extends OutputStream {
|
|||
return this;
|
||||
}
|
||||
|
||||
public Builder setPipeline(Pipeline pipeline) {
|
||||
this.pipeline = pipeline;
|
||||
public Builder setPipeline(Pipeline ppln) {
|
||||
this.pipeline = ppln;
|
||||
return this;
|
||||
}
|
||||
|
||||
|
@ -264,8 +256,8 @@ public class BlockOutputStreamEntry extends OutputStream {
|
|||
return this;
|
||||
}
|
||||
|
||||
public Builder setBufferList(List<ByteBuffer> bufferList) {
|
||||
this.bufferList = bufferList;
|
||||
public Builder setBufferList(List<ByteBuffer> bffrLst) {
|
||||
this.bufferList = bffrLst;
|
||||
return this;
|
||||
}
|
||||
|
||||
|
|
|
@@ -17,8 +17,6 @@

package org.apache.hadoop.ozone.client.io;

import org.apache.hadoop.hdds.scm.storage.BlockInputStream;

import java.io.IOException;
import java.io.InputStream;

@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.ozone.om.helpers;

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
|
@@ -221,7 +221,6 @@ public final class OmKeyInfo extends WithMetadata {
private long modificationTime;
private HddsProtos.ReplicationType type;
private HddsProtos.ReplicationFactor factor;
private boolean isMultipartKey;
private Map<String, String> metadata;

public Builder() {

@@ -275,11 +274,6 @@ public final class OmKeyInfo extends WithMetadata {
return this;
}

public Builder setIsMultipartKey(boolean isMultipart) {
this.isMultipartKey = isMultipart;
return this;
}

public Builder addMetadata(String key, String value) {
metadata.put(key, value);
return this;
@@ -25,6 +25,7 @@ import java.util.Map;
*/
public class WithMetadata {

@SuppressWarnings("visibilitymodifier")
protected Map<String, String> metadata = new HashMap<>();

/**

@@ -60,6 +60,7 @@ public abstract class OzoneSecretManager<T extends TokenIdentifier>
private OzoneSecretKey currentKey;
private AtomicInteger currentKeyId;
private AtomicInteger tokenSequenceNumber;
@SuppressWarnings("visibilitymodifier")
protected final Map<Integer, OzoneSecretKey> allKeys;

/**
@@ -22,9 +22,8 @@ touch "$FINDBUGS_ALL_FILE"

mvn -fn findbugs:check -Dfindbugs.failOnError=false -am -pl :hadoop-ozone-dist -Phdds

find hadoop-ozone -name findbugsXml.xml | xargs -n1 convertXmlToText >> "${FINDBUGS_ALL_FILE}"
find hadoop-hdds -name findbugsXml.xml | xargs -n1 convertXmlToText >> "${FINDBUGS_ALL_FILE}"

find hadoop-ozone -name findbugsXml.xml | xargs -n1 convertXmlToText | tee -a "${FINDBUGS_ALL_FILE}"
find hadoop-hdds -name findbugsXml.xml | xargs -n1 convertXmlToText | tee -a "${FINDBUGS_ALL_FILE}"

bugs=$(cat "$FINDBUGS_ALL_FILE" | wc -l)

@@ -32,4 +31,4 @@ if [[ ${bugs} -gt 0 ]]; then
exit -1
else
exit 0
fi
fi
@@ -35,7 +35,7 @@ wait_for_datanodes(){

#This line checks the number of HEALTHY datanodes registered in scm over the
# jmx HTTP servlet
datanodes=$(docker-compose -f "$1" exec scm curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value')
datanodes=$(docker-compose -f "$1" exec -T scm curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value')
if [[ "$datanodes" == "3" ]]; then

#It's up and running. Let's return from the function.

@@ -51,7 +51,6 @@ wait_for_datanodes(){

sleep 2
done

echo "WARNING! Datanodes are not started successfully. Please check the docker-compose files"
}

@@ -73,11 +72,13 @@ execute_tests(){
docker-compose -f "$COMPOSE_FILE" down
docker-compose -f "$COMPOSE_FILE" up -d --scale datanode=3
wait_for_datanodes "$COMPOSE_FILE"
#TODO: we need to wait for the OM here
sleep 10
for TEST in "${TESTS[@]}"; do
TITLE="Ozone $TEST tests with $COMPOSE_DIR cluster"
set +e
OUTPUT_NAME="$COMPOSE_DIR-${TEST//\//_}"
docker-compose -f "$COMPOSE_FILE" exec ozoneManager python -m robot --log NONE --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "smoketest/$RESULT_DIR/robot-$OUTPUT_NAME.xml" --logtitle "$TITLE" --reporttitle "$TITLE" "smoketest/$TEST"
docker-compose -f "$COMPOSE_FILE" exec -T ozoneManager python -m robot --log NONE --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "smoketest/$RESULT_DIR/robot-$OUTPUT_NAME.xml" --logtitle "$TITLE" --reporttitle "$TITLE" "smoketest/$TEST"
set -e
docker-compose -f "$COMPOSE_FILE" logs > "$DIR/$RESULT_DIR/docker-$OUTPUT_NAME.log"
done

@@ -140,12 +141,14 @@ if [ "$RUN_ALL" = true ]; then
#
# We select the test suites and execute them on multiple type of clusters
#
DEFAULT_TESTS=("basic")
execute_tests ozone "${DEFAULT_TESTS[@]}"
TESTS=("basic")
execute_tests ozone "${TESTS[@]}"
TESTS=("audiparser")
execute_tests ozone "${TESTS[@]}"
TESTS=("ozonefs")
execute_tests ozonefs "${TESTS[@]}"
TESTS=("ozone-hdfs")
execute_tests ozone-hdfs "${DEFAULT_TESTS[@]}"
TESTS=("basic")
execute_tests ozone-hdfs "${TESTS[@]}"
TESTS=("s3")
execute_tests ozones3 "${TESTS[@]}"
else

@@ -153,4 +156,4 @@ else
fi

#Generate the combined output and return with the right exit code (note: robot = execute test, rebot = generate output)
docker run --rm -it -v "$DIR/..:/opt/hadoop" apache/hadoop-runner rebot -d "smoketest/$RESULT_DIR" "smoketest/$RESULT_DIR/robot-*.xml"
docker run --rm -v "$DIR/..:/opt/hadoop" apache/hadoop-runner rebot -d "smoketest/$RESULT_DIR" "smoketest/$RESULT_DIR/robot-*.xml"
@@ -28,9 +28,6 @@ import java.util.Map;
import java.util.TreeMap;
import java.util.UUID;

import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.client.OzoneQuota;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;

@@ -83,6 +80,9 @@ import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.util.Time;

import static java.nio.charset.StandardCharsets.UTF_8;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.RandomUtils;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.either;
import org.junit.Assert;

@@ -76,7 +76,7 @@ public class TestReadRetries {
private static StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient;

private static String SCM_ID = UUID.randomUUID().toString();
private static final String SCM_ID = UUID.randomUUID().toString();

/**
@@ -78,7 +78,8 @@ public class TestContainerDeletionChoosingPolicy {
KeyValueContainer container = new KeyValueContainer(data, conf);
containerSet.addContainer(container);
Assert.assertTrue(
containerSet.getContainerMapCopy().containsKey(data.getContainerID()));
containerSet.getContainerMapCopy()
.containsKey(data.getContainerID()));
}

ContainerDeletionChoosingPolicy deletionPolicy =

@@ -154,7 +154,8 @@ public class TestSecureContainerServer {
XceiverClientSpi client = null;
String containerName = OzoneUtils.getRequestID();
try {
final Pipeline pipeline = ContainerTestHelper.createPipeline(numDatanodes);
final Pipeline pipeline =
ContainerTestHelper.createPipeline(numDatanodes);

initConf.accept(pipeline, CONF);
@@ -101,6 +101,10 @@ public final class OMNodeDetails {
return ratisPort;
}

public int getRpcPort() {
return rpcPort;
}

public String getRpcAddressString() {
return NetUtils.getHostPortString(rpcAddress);
}

@@ -176,6 +176,8 @@ public final class OzoneManagerRatisServer {
}
}

//TODO simplify it to make it shorter
@SuppressWarnings("methodlength")
private RaftProperties newRaftProperties(Configuration conf) {
final RaftProperties properties = new RaftProperties();

@@ -131,7 +131,7 @@ public class OzoneManagerRequestHandler {
this.impl = om;
}

//TODO: use map to make shorted methods
//TODO simplify it to make it shorter
@SuppressWarnings("methodlength")
public OMResponse handle(OMRequest request) {
LOG.debug("Received OMRequest: {}, ", request);
@@ -26,6 +26,7 @@ import org.apache.hadoop.security.token.Token;
import picocli.CommandLine;
import picocli.CommandLine.Command;

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

@@ -63,7 +64,8 @@ public class CancelTokenHandler extends Handler {
}
Token token = new Token();
token.decodeFromUrlString(
new String(Files.readAllBytes(Paths.get(tokenFile))));
new String(Files.readAllBytes(Paths.get(tokenFile)),
StandardCharsets.UTF_8));
client.getObjectStore().cancelDelegationToken(token);
return null;
}

@@ -25,6 +25,7 @@ import org.apache.hadoop.security.token.Token;
import picocli.CommandLine;
import picocli.CommandLine.Command;

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

@@ -58,7 +59,8 @@ public class PrintTokenHandler extends Handler {
return null;
}

String encodedToken = new String(Files.readAllBytes(Paths.get(tokenFile)));
String encodedToken = new String(Files.readAllBytes(Paths.get(tokenFile)),
StandardCharsets.UTF_8);
Token token = new Token();
token.decodeFromUrlString(encodedToken);

@@ -26,6 +26,7 @@ import org.apache.hadoop.security.token.Token;
import picocli.CommandLine;
import picocli.CommandLine.Command;

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

@@ -63,7 +64,8 @@ public class RenewTokenHandler extends Handler {
}
Token token = new Token();
token.decodeFromUrlString(
new String(Files.readAllBytes(Paths.get(tokenFile))));
new String(Files.readAllBytes(Paths.get(tokenFile)),
StandardCharsets.UTF_8));
long expiryTime = client.getObjectStore().renewDelegationToken(token);

System.out.printf("Token renewed successfully, expiry time: %s",
@@ -91,6 +91,13 @@
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>findbugs-maven-plugin</artifactId>
<configuration>
<skip>true</skip>
</configuration>
</plugin>
</plugins>
</build>

@@ -76,6 +76,13 @@
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>findbugs-maven-plugin</artifactId>
<configuration>
<skip>true</skip>
</configuration>
</plugin>
</plugins>
</build>

@@ -132,6 +132,13 @@
<artifactId>hadoop-ozone-objectstore-service</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.google.code.findbugs</groupId>
<artifactId>findbugs</artifactId>
<version>3.0.1</version>
<scope>provided</scope>
</dependency>

<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
@@ -24,6 +24,7 @@ import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;

import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -38,6 +39,7 @@ public final class OzoneClientAdapterFactory {
private OzoneClientAdapterFactory() {
}

@SuppressFBWarnings("DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED")
public static OzoneClientAdapter createAdapter(
String volumeStr,
String bucketStr, OzoneFSStorageStatistics storageStatistics)

@@ -207,7 +207,7 @@ public class OzoneFileSystem extends FileSystem {
short replication, long blockSize,
Progressable progress) throws IOException {
LOG.trace("create() path:{}", f);
storageStatistics.incrementCounter(Statistic.INVOCATION_CREATE, 1);
storageStatistics.incrementCounter(Statistic.INVOCATION_CREATE, 1);
statistics.incrementWriteOps(1);
final String key = pathToKey(f);
final FileStatus status;
@@ -308,6 +308,7 @@
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>findbugs-maven-plugin</artifactId>
<version>3.0.4</version>
<configuration>
<excludeFilterFile combine.self="override"/>
</configuration>

@@ -70,6 +70,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>jmh-generator-annprocess</artifactId>
<version>1.19</version>
</dependency>
<dependency>
<groupId>com.google.code.findbugs</groupId>
<artifactId>findbugs</artifactId>
<version>3.0.1</version>
<scope>provided</scope>
</dependency>

<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-integration-test</artifactId>
@@ -36,6 +36,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import io.opentracing.Scope;
import io.opentracing.util.GlobalTracer;
import org.apache.commons.codec.digest.DigestUtils;

@@ -552,6 +553,7 @@ public final class RandomKeyGenerator implements Callable<Void> {
}

@Override
@SuppressFBWarnings("REC_CATCH_EXCEPTION")
public void run() {
LOG.trace("Creating volume: {}", volumeName);
long start = System.nanoTime();
@@ -1,18 +1,19 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
*
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.fsck;

@@ -79,4 +80,4 @@ public class BlockIdDetails {
public int hashCode() {
return Objects.hash(bucketName, blockVol, keyName);
}
}
}