HDDS-1103. Fix rat/findbug/checkstyle errors in ozone/hdds projects.
Contributed by Elek, Marton.

parent 5cb67cf044
commit 75e15cc0c4
@@ -35,7 +35,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.metrics2.util.MBeans;
@@ -219,7 +219,8 @@ public final class ScmConfigKeys {
       "ozone.scm.https-address";
   public static final String HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY =
       "hdds.scm.kerberos.keytab.file";
-  public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY = "hdds.scm.kerberos.principal";
+  public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY =
+      "hdds.scm.kerberos.principal";
   public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
   public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
   public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
@@ -65,8 +65,9 @@ public interface CertificateServer {
    * approved.
    * @throws SCMSecurityException - on Error.
    */
-  Future<X509CertificateHolder>
-      requestCertificate(PKCS10CertificationRequest csr, CertificateApprover.ApprovalType type)
+  Future<X509CertificateHolder> requestCertificate(
+      PKCS10CertificationRequest csr,
+      CertificateApprover.ApprovalType type)
       throws SCMSecurityException;

@@ -34,6 +34,7 @@ import org.slf4j.Logger;
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.security.InvalidKeyException;
@@ -524,10 +525,9 @@ public abstract class DefaultCertificateClient implements CertificateClient {
         return FAILURE;
       }
     default:
-      getLogger().error("Unexpected case: {}, Private key:{} , " +
-          "public key:{}, certificate:{}", init,
-          ((init.ordinal() & 1 << 2) == 1), ((init.ordinal() & 1 << 1) == 1),
-          ((init.ordinal() & 1 << 0) == 1));
+      getLogger().error("Unexpected case: {} (private/public/cert)",
+          Integer.toBinaryString(init.ordinal()));
       return FAILURE;
     }
   }
@@ -584,7 +584,8 @@ public abstract class DefaultCertificateClient implements CertificateClient {
    * */
   protected boolean validateKeyPair(PublicKey pubKey)
       throws CertificateException {
-    byte[] challenge = RandomStringUtils.random(1000).getBytes();
+    byte[] challenge = RandomStringUtils.random(1000).getBytes(
+        StandardCharsets.UTF_8);
     byte[] sign = signDataStream(new ByteArrayInputStream(challenge));
     return verifySignature(challenge, sign, pubKey);
   }
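Aside (not part of the patch): the default-branch logging change above also removes a latent bug, because masking the ordinal with 1 << 2 or 1 << 1 and comparing the result to 1 can never be true, so the old message always reported false for those flags. A minimal, self-contained Java sketch with a hypothetical InitState enum (not the real Ozone type) showing the broken test, a correct one, and the binary-string form the patch switches to:

    // Hypothetical enum standing in for the client's init state; bit 2 = private key,
    // bit 1 = public key, bit 0 = certificate, as the old log message implied.
    enum InitState { NONE, CERT, PUBLIC_KEY, PUBLIC_KEY_CERT, PRIVATE_KEY }

    class InitStateLogging {
      static void describe(InitState init) {
        int bits = init.ordinal();
        boolean wrong = (bits & 1 << 2) == 1;   // always false: the masked value is 0 or 4, never 1
        boolean right = (bits & 1 << 2) != 0;   // true exactly when bit 2 is set
        // The patch sidesteps per-bit decoding and logs the whole state instead:
        System.out.println("Unexpected case: " + Integer.toBinaryString(bits)
            + " (wrong=" + wrong + ", right=" + right + ")");
      }
    }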
@@ -90,10 +90,8 @@ public class OMCertificateClient extends DefaultCertificateClient {
         return FAILURE;
       }
     default:
-      LOG.error("Unexpected case: {}, Private key:{} , " +
-          "public key:{}, certificate:{}", init,
-          ((init.ordinal() & 1 << 2) == 1), ((init.ordinal() & 1 << 1) == 1),
-          ((init.ordinal() & 1 << 0) == 1));
+      LOG.error("Unexpected case: {} (private/public/cert)",
+          Integer.toBinaryString(init.ordinal()));
       return FAILURE;
     }
   }
@@ -73,8 +73,8 @@ data node not only sends the CSR but signs the request with a shared secret
 with SCM. SCM then can issue a certificate without the intervention of a
 human administrator.

-The last, TESTING method which never should be used other than in development and
-testing clusters, is merely a mechanism to bypass all identity checks. If
+The last, TESTING method which never should be used other than in development
+and testing clusters, is merely a mechanism to bypass all identity checks. If
 this flag is setup, then CA will issue a CSR if the base approves all fields.

 * Please do not use this mechanism(TESTING) for any purpose other than
@@ -56,6 +56,7 @@ import static org.junit.Assert.assertTrue;
  * Test class for {@link DefaultCertificateClient}.
  */
 @RunWith(Parameterized.class)
+@SuppressWarnings("visibilitymodifier")
 public class TestCertificateClientInit {

   private CertificateClient dnCertificateClient;
@@ -18,4 +18,16 @@
   <Match>
     <Package name="org.apache.hadoop.hdds.protocol.proto"/>
   </Match>
+  <Match>
+    <Class name="org.apache.hadoop.ozone.container.common.volume.AbstractFuture" />
+    <Bug pattern="DLS_DEAD_STORE_OF_CLASS_LITERAL" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.ozone.container.common.volume.AbstractFuture" />
+    <Bug pattern="DLS_DEAD_LOCAL_STORE" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.ozone.container.common.volume.AbstractFuture" />
+    <Bug pattern="NS_DANGEROUS_NON_SHORT_CIRCUIT" />
+  </Match>
 </FindBugsFilter>
@@ -50,6 +50,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>snakeyaml</artifactId>
       <version>1.8</version>
     </dependency>
+    <dependency>
+      <groupId>com.google.code.findbugs</groupId>
+      <artifactId>findbugs</artifactId>
+      <version>3.0.1</version>
+      <scope>provided</scope>
+    </dependency>

     <dependency>
       <groupId>io.dropwizard.metrics</groupId>
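Aside (a sketch, not taken from the patch): the provided-scope findbugs dependency added above is what makes the @SuppressFBWarnings annotation, used later in the AbstractFuture hunks, resolvable at compile time without shipping the jar at runtime. A minimal Java usage example:

    import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
    import java.util.concurrent.locks.LockSupport;

    class ClassLoadingWarmup {
      @SuppressFBWarnings(value = "DLS_DEAD_LOCAL_STORE",
          justification = "deliberate dead store to force early class loading")
      static void warmUp() {
        @SuppressWarnings("unused")
        Class<?> ensureLoaded = LockSupport.class; // intentional, mirrors AbstractFuture
      }
    }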
@@ -18,7 +18,8 @@

 package org.apache.hadoop.ozone.container.common.transport.server;

-import com.google.common.annotations.VisibleForTesting;
+import java.io.IOException;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -28,8 +29,8 @@ import org.apache.hadoop.hdds.security.token.TokenVerifier;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;

-import java.io.IOException;
-import java.util.Objects;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;

 /**
  * A server endpoint that acts as the communication layer for Ozone containers.
@@ -40,7 +41,7 @@ public abstract class XceiverServer implements XceiverServerSpi {
   private final TokenVerifier tokenVerifier;

   public XceiverServer(Configuration conf) {
-    Objects.nonNull(conf);
+    Preconditions.checkNotNull(conf);
     this.secConfig = new SecurityConfig(conf);
     tokenVerifier = new BlockTokenVerifier(secConfig, getCaClient());
   }
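Aside (a sketch, not part of the patch): the one-line XceiverServer change above is a behavioral fix rather than style. java.util.Objects.nonNull only returns a boolean and is meant for stream predicates, so calling it as a statement validates nothing, while Guava's Preconditions.checkNotNull actually throws on null. A small Java illustration, assuming a null configuration should fail fast:

    import java.util.Objects;
    import com.google.common.base.Preconditions;

    final class NullCheckDemo {
      static void broken(Object conf) {
        Objects.nonNull(conf);            // boolean result ignored; a null conf sails through
      }

      static void fixed(Object conf) {
        Preconditions.checkNotNull(conf); // throws NullPointerException when conf is null
      }
    }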
@@ -138,6 +138,7 @@ public class ContainerStateMachine extends BaseStateMachine {
    */
   private final CSMMetrics metrics;

+  @SuppressWarnings("parameternumber")
   public ContainerStateMachine(RaftGroupId gid, ContainerDispatcher dispatcher,
       ThreadPoolExecutor chunkExecutor, XceiverServerRatis ratisServer,
       List<ExecutorService> executors, long expiryInterval,
@@ -29,6 +29,7 @@ import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
 import com.google.common.util.concurrent.Uninterruptibles;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater
     .newUpdater;

@@ -116,7 +117,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
   }

   // Logger to log exceptions caught when running listeners.
-  private static final Logger log = Logger
+  private static final Logger LOG = Logger
       .getLogger(AbstractFuture.class.getName());

   // A heuristic for timed gets. If the remaining timeout is less than this,
@@ -150,8 +151,8 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
       // the field is definitely there.
       // For these users fallback to a suboptimal implementation, based on
       // synchronized. This will be a definite performance hit to those users.
-      log.log(Level.SEVERE, "UnsafeAtomicHelper is broken!", unsafeFailure);
-      log.log(
+      LOG.log(Level.SEVERE, "UnsafeAtomicHelper is broken!", unsafeFailure);
+      LOG.log(
           Level.SEVERE, "SafeAtomicHelper is broken!",
           atomicReferenceFieldUpdaterFailure);
       helper = new SynchronizedHelper();
@@ -162,12 +163,14 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
     // Prevent rare disastrous classloading in first call to LockSupport.park.
     // See: https://bugs.openjdk.java.net/browse/JDK-8074773
     @SuppressWarnings("unused")
+    @SuppressFBWarnings
     Class<?> ensureLoaded = LockSupport.class;
   }

   /**
    * Waiter links form a Treiber stack, in the {@link #waiters} field.
    */
+  @SuppressWarnings("visibilitymodifier")
   private static final class Waiter {
     static final Waiter TOMBSTONE = new Waiter(false /* ignored param */);

@@ -252,6 +255,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
   /**
    * Listeners also form a stack through the {@link #listeners} field.
    */
+  @SuppressWarnings("visibilitymodifier")
   private static final class Listener {
     static final Listener TOMBSTONE = new Listener(null, null);
     final Runnable task;
@@ -276,16 +280,17 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
    * A special value to represent failure, when {@link #setException} is
    * called successfully.
    */
+  @SuppressWarnings("visibilitymodifier")
   private static final class Failure {
     static final Failure FALLBACK_INSTANCE =
         new Failure(
             new Throwable("Failure occurred while trying to finish a future" +
                 ".") {
               @Override
               public synchronized Throwable fillInStackTrace() {
                 return this; // no stack trace
               }
             });
     final Throwable exception;

     Failure(Throwable exception) {
@@ -296,6 +301,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
   /**
    * A special value to represent cancellation and the 'wasInterrupted' bit.
    */
+  @SuppressWarnings("visibilitymodifier")
   private static final class Cancellation {
     final boolean wasInterrupted;
     @Nullable final Throwable cause;
@@ -309,6 +315,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
   /**
    * A special value that encodes the 'setFuture' state.
    */
+  @SuppressWarnings("visibilitymodifier")
   private static final class SetFuture<V> implements Runnable {
     final AbstractFuture<V> owner;
     final ListenableFuture<? extends V> future;
@@ -711,8 +718,8 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
    * @param value the value to be used as the result
    * @return true if the attempt was accepted, completing the {@code Future}
    */
-  protected boolean set(@Nullable V value) {
-    Object valueToSet = value == null ? NULL : value;
+  protected boolean set(@Nullable V val) {
+    Object valueToSet = value == null ? NULL : val;
     if (ATOMIC_HELPER.casValue(this, null, valueToSet)) {
       complete(this);
       return true;
@@ -769,13 +776,14 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
    * @since 19.0
    */
   @Beta
+  @SuppressWarnings("deadstore")
   protected boolean setFuture(ListenableFuture<? extends V> future) {
     checkNotNull(future);
     Object localValue = value;
     if (localValue == null) {
       if (future.isDone()) {
-        Object value = getFutureValue(future);
-        if (ATOMIC_HELPER.casValue(this, null, value)) {
+        Object val = getFutureValue(future);
+        if (ATOMIC_HELPER.casValue(this, null, val)) {
           complete(this);
           return true;
         }
@@ -950,10 +958,8 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
     do {
       head = waiters;
     } while (!ATOMIC_HELPER.casWaiters(this, head, Waiter.TOMBSTONE));
-    for (
-        Waiter currentWaiter = head;
-        currentWaiter != null;
-        currentWaiter = currentWaiter.next) {
+    for (Waiter currentWaiter = head;
+        currentWaiter != null; currentWaiter = currentWaiter.next) {
       currentWaiter.unpark();
     }
   }
@@ -995,7 +1001,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
       // Log it and keep going -- bad runnable and/or executor. Don't punish
       // the other runnables if we're given a bad one. We only catch
       // RuntimeException because we want Errors to propagate up.
-      log.log(
+      LOG.log(
           Level.SEVERE,
           "RuntimeException while executing runnable " + runnable + " with " +
               "executor " + executor,
@@ -1147,6 +1153,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
   /**
    * {@link AtomicHelper} based on {@link AtomicReferenceFieldUpdater}.
    */
+  @SuppressWarnings("visibilitymodifier")
   private static final class SafeAtomicHelper extends AtomicHelper {
     final AtomicReferenceFieldUpdater<Waiter, Thread> waiterThreadUpdater;
     final AtomicReferenceFieldUpdater<Waiter, Waiter> waiterNextUpdater;
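Aside (a sketch, not from the patch): most AbstractFuture hunks only rename the logger and suppress checkstyle's visibility-modifier rule, but the statically imported newUpdater they keep is the heart of the SafeAtomicHelper fallback. A self-contained Java example, with a hypothetical Node class rather than the Guava-derived code, of how AtomicReferenceFieldUpdater gives CAS semantics on a volatile field without Unsafe:

    import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

    class Node {
      volatile Node next;   // updated via CAS; must be volatile, non-static, and non-final

      private static final AtomicReferenceFieldUpdater<Node, Node> NEXT_UPDATER =
          AtomicReferenceFieldUpdater.newUpdater(Node.class, Node.class, "next");

      boolean casNext(Node expect, Node update) {
        return NEXT_UPDATER.compareAndSet(this, expect, update);
      }
    }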
@@ -66,6 +66,7 @@ import java.util.UUID;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
+@SuppressWarnings("finalclass")
 public class HddsVolume
     implements Checkable<Boolean, VolumeCheckResult> {

@@ -32,6 +32,8 @@ import org.apache.hadoop.hdfs.server.datanode.checker.AsyncChecker;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.Timer;

+import static org.apache.hadoop.hdfs.server.datanode.DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -141,7 +143,7 @@ public class HddsVolumeChecker {

     lastAllVolumesCheck = timer.monotonicNow() - minDiskCheckGapMs;

-    if (maxVolumeFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
+    if (maxVolumeFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
       throw new DiskErrorException("Invalid value configured for "
           + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
           + maxVolumeFailuresTolerated + " "
@@ -310,21 +312,21 @@ public class HddsVolumeChecker {

     @Override
     public void onSuccess(@Nonnull VolumeCheckResult result) {
-      switch(result) {
+      switch (result) {
       case HEALTHY:
       case DEGRADED:
         LOG.debug("Volume {} is {}.", volume, result);
         markHealthy();
         break;
       case FAILED:
         LOG.warn("Volume {} detected as being unhealthy", volume);
         markFailed();
         break;
       default:
         LOG.error("Unexpected health check result {} for volume {}",
             result, volume);
         markHealthy();
         break;
       }
       cleanup();
     }
@@ -378,7 +380,8 @@ public class HddsVolumeChecker {
     try {
       delegateChecker.shutdownAndWait(gracePeriod, timeUnit);
     } catch (InterruptedException e) {
-      LOG.warn("{} interrupted during shutdown.", this.getClass().getSimpleName());
+      LOG.warn("{} interrupted during shutdown.",
+          this.getClass().getSimpleName());
       Thread.currentThread().interrupt();
     }
   }
@@ -87,7 +87,8 @@ public class ThrottledAsyncChecker<K, V> implements AsyncChecker<K, V> {
    * the results of the operation.
    * Protected by the object lock.
    */
-  private final Map<Checkable, ThrottledAsyncChecker.LastCheckResult<V>> completedChecks;
+  private final Map<Checkable, ThrottledAsyncChecker.LastCheckResult<V>>
+      completedChecks;

   public ThrottledAsyncChecker(final Timer timer,
       final long minMsBetweenChecks,
@@ -125,7 +126,8 @@ public class ThrottledAsyncChecker<K, V> implements AsyncChecker<K, V> {
     }

     if (completedChecks.containsKey(target)) {
-      final ThrottledAsyncChecker.LastCheckResult<V> result = completedChecks.get(target);
+      final ThrottledAsyncChecker.LastCheckResult<V> result =
+          completedChecks.get(target);
       final long msSinceLastCheck = timer.monotonicNow() - result.completedAt;
       if (msSinceLastCheck < minMsBetweenChecks) {
         LOG.debug("Skipped checking {}. Time since last check {}ms " +
@@ -94,7 +94,7 @@ final class TimeoutFuture<V> extends AbstractFuture.TrustedFuture<V> {
    */
   private static final class Fire<V> implements Runnable {
     @Nullable
-    TimeoutFuture<V> timeoutFutureRef;
+    private TimeoutFuture<V> timeoutFutureRef;

     Fire(
         TimeoutFuture<V> timeoutFuture) {
@@ -18,33 +18,6 @@

 package org.apache.hadoop.ozone.container.common.volume;

-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import org.apache.curator.shaded.com.google.common.collect.ImmutableSet;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.StorageType;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.util.RunJar.SHUTDOWN_HOOK_PRIORITY;
-
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
-import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume.VolumeState;
-import org.apache.hadoop.util.DiskChecker;
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.util.ShutdownHookManager;
-import org.apache.hadoop.util.Timer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -53,14 +26,35 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantReadWriteLock;

+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
+import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume.VolumeState;
+import org.apache.hadoop.util.DiskChecker;
+import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+import org.apache.hadoop.util.ShutdownHookManager;
+import org.apache.hadoop.util.Timer;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.apache.hadoop.util.RunJar.SHUTDOWN_HOOK_PRIORITY;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 /**
  * VolumeSet to manage HDDS volumes in a DataNode.
  */
@@ -91,8 +85,8 @@ public class VolumeSet {
   /**
    * An executor for periodic disk checks.
    */
-  final ScheduledExecutorService diskCheckerservice;
-  final ScheduledFuture<?> periodicDiskChecker;
+  private final ScheduledExecutorService diskCheckerservice;
+  private final ScheduledFuture<?> periodicDiskChecker;

   private static final long DISK_CHECK_INTERVAL_MINUTES = 15;

@@ -125,21 +119,21 @@ public class VolumeSet {
     this.diskCheckerservice = Executors.newScheduledThreadPool(
         1, r -> new Thread(r, "Periodic HDDS volume checker"));
     this.periodicDiskChecker =
         diskCheckerservice.scheduleWithFixedDelay(() -> {
           try {
             checkAllVolumes();
           } catch (IOException e) {
             LOG.warn("Exception while checking disks", e);
           }
         }, DISK_CHECK_INTERVAL_MINUTES, DISK_CHECK_INTERVAL_MINUTES,
         TimeUnit.MINUTES);
     initializeVolumeSet();
   }

   @VisibleForTesting
-  HddsVolumeChecker getVolumeChecker(Configuration conf)
+  HddsVolumeChecker getVolumeChecker(Configuration configuration)
       throws DiskChecker.DiskErrorException {
-    return new HddsVolumeChecker(conf, new Timer());
+    return new HddsVolumeChecker(configuration, new Timer());
   }

   /**
@@ -18,17 +18,28 @@

 package org.apache.hadoop.ozone.container.common.volume;

-import com.google.common.collect.Iterables;
-import org.apache.commons.io.FileUtils;
-import org.apache.curator.shaded.com.google.common.collect.ImmutableSet;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.server.datanode.checker.AsyncChecker;
-import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.Timer;

+import com.google.common.collect.Iterables;
+import org.apache.commons.io.FileUtils;
+import org.apache.curator.shaded.com.google.common.collect.ImmutableSet;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.hamcrest.CoreMatchers.is;
 import org.junit.After;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -36,21 +47,6 @@ import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import java.io.File;
-import java.io.IOException;
-import java.net.BindException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-
 /**
  * Verify that {@link VolumeSet} correctly checks for failed disks
@@ -66,7 +62,7 @@ public class TestVolumeSetDiskChecks {
   @Rule
   public ExpectedException thrown = ExpectedException.none();

-  Configuration conf = null;
+  private Configuration conf = null;

   /**
    * Cleanup volume directories.
@@ -117,14 +113,15 @@ public class TestVolumeSetDiskChecks {
     final VolumeSet volumeSet = new VolumeSet(
         UUID.randomUUID().toString(), conf) {
       @Override
-      HddsVolumeChecker getVolumeChecker(Configuration conf)
+      HddsVolumeChecker getVolumeChecker(Configuration configuration)
           throws DiskErrorException {
-        return new DummyChecker(conf, new Timer(), numBadVolumes);
+        return new DummyChecker(configuration, new Timer(), numBadVolumes);
       }
     };

     assertThat(volumeSet.getFailedVolumesList().size(), is(numBadVolumes));
-    assertThat(volumeSet.getVolumesList().size(), is(numVolumes - numBadVolumes));
+    assertThat(volumeSet.getVolumesList().size(),
+        is(numVolumes - numBadVolumes));
   }

   /**
@@ -139,9 +136,9 @@ public class TestVolumeSetDiskChecks {
     final VolumeSet volumeSet = new VolumeSet(
         UUID.randomUUID().toString(), conf) {
       @Override
-      HddsVolumeChecker getVolumeChecker(Configuration conf)
+      HddsVolumeChecker getVolumeChecker(Configuration configuration)
           throws DiskErrorException {
-        return new DummyChecker(conf, new Timer(), numVolumes);
+        return new DummyChecker(configuration, new Timer(), numVolumes);
       }
     };
   }
@@ -153,13 +150,13 @@ public class TestVolumeSetDiskChecks {
    * @param numDirs
    */
   private Configuration getConfWithDataNodeDirs(int numDirs) {
-    final Configuration conf = new OzoneConfiguration();
+    final Configuration ozoneConf = new OzoneConfiguration();
     final List<String> dirs = new ArrayList<>();
     for (int i = 0; i < numDirs; ++i) {
       dirs.add(GenericTestUtils.getRandomizedTestDir().getPath());
     }
-    conf.set(DFS_DATANODE_DATA_DIR_KEY, String.join(",", dirs));
-    return conf;
+    ozoneConf.set(DFS_DATANODE_DATA_DIR_KEY, String.join(",", dirs));
+    return ozoneConf;
   }

   /**
@@ -169,7 +166,7 @@ public class TestVolumeSetDiskChecks {
   static class DummyChecker extends HddsVolumeChecker {
     private final int numBadVolumes;

-    public DummyChecker(Configuration conf, Timer timer, int numBadVolumes)
+    DummyChecker(Configuration conf, Timer timer, int numBadVolumes)
         throws DiskErrorException {
       super(conf, timer);
       this.numBadVolumes = numBadVolumes;
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.container.keyvalue;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
@@ -167,10 +168,10 @@ public class TestKeyValueHandlerWithUnhealthyContainer {
    * @param cmdType type of the container command.
    * @return
    */
-  private ContainerProtos.ContainerCommandRequestProto getDummyCommandRequestProto(
+  private ContainerCommandRequestProto getDummyCommandRequestProto(
       ContainerProtos.Type cmdType) {
-    final ContainerProtos.ContainerCommandRequestProto.Builder builder =
-        ContainerProtos.ContainerCommandRequestProto.newBuilder()
+    final ContainerCommandRequestProto.Builder builder =
+        ContainerCommandRequestProto.newBuilder()
             .setCmdType(cmdType)
             .setContainerID(DUMMY_CONTAINER_ID)
             .setDatanodeUuid(DATANODE_UUID);
@@ -190,36 +191,39 @@ public class TestKeyValueHandlerWithUnhealthyContainer {
             .build())
         .build();

-    switch(cmdType) {
-    case ReadContainer:
-      builder.setReadContainer(ContainerProtos.ReadContainerRequestProto.newBuilder().build());
-      break;
-    case GetBlock:
-      builder.setGetBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
-          .setBlockID(fakeBlockId).build());
-      break;
-    case GetCommittedBlockLength:
-      builder.setGetCommittedBlockLength(
-          ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder()
-              .setBlockID(fakeBlockId).build());
-    case ReadChunk:
-      builder.setReadChunk(ContainerProtos.ReadChunkRequestProto.newBuilder()
-          .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build());
-      break;
-    case DeleteChunk:
-      builder.setDeleteChunk(ContainerProtos.DeleteChunkRequestProto.newBuilder()
-          .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build());
-      break;
-    case GetSmallFile:
-      builder.setGetSmallFile(ContainerProtos.GetSmallFileRequestProto.newBuilder()
-          .setBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
-              .setBlockID(fakeBlockId)
-              .build())
-          .build());
-      break;
+    switch (cmdType) {
+    case ReadContainer:
+      builder.setReadContainer(
+          ContainerProtos.ReadContainerRequestProto.newBuilder().build());
+      break;
+    case GetBlock:
+      builder.setGetBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
+          .setBlockID(fakeBlockId).build());
+      break;
+    case GetCommittedBlockLength:
+      builder.setGetCommittedBlockLength(
+          ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder()
+              .setBlockID(fakeBlockId).build());
+    case ReadChunk:
+      builder.setReadChunk(ContainerProtos.ReadChunkRequestProto.newBuilder()
+          .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build());
+      break;
+    case DeleteChunk:
+      builder
+          .setDeleteChunk(ContainerProtos.DeleteChunkRequestProto.newBuilder()
+              .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build());
+      break;
+    case GetSmallFile:
+      builder
+          .setGetSmallFile(ContainerProtos.GetSmallFileRequestProto.newBuilder()
+              .setBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
+                  .setBlockID(fakeBlockId)
+                  .build())
+              .build());
+      break;

     default:
       Assert.fail("Unhandled request type " + cmdType + " in unit test");
     }

     return builder.build();
@@ -290,6 +290,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>findbugs-maven-plugin</artifactId>
+        <version>3.0.4</version>
         <configuration>
           <excludeFilterFile combine.self="override"></excludeFilterFile>
         </configuration>
@@ -261,15 +261,13 @@ public final class RatisPipelineUtils {

     for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor
         .values()) {
-      while (true) {
-        try {
-          pipelineManager.createPipeline(type, factor);
-        } catch (IOException ioe) {
-          break;
-        } catch (Throwable t) {
-          LOG.error("Error while creating pipelines {}", t);
-          break;
-        }
+      try {
+        pipelineManager.createPipeline(type, factor);
+      } catch (IOException ioe) {
+        break;
+      } catch (Throwable t) {
+        LOG.error("Error while creating pipelines {}", t);
+        break;
       }
     }
     isPipelineCreatorRunning.set(false);
@@ -138,6 +138,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>javax.servlet-api</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>com.google.code.findbugs</groupId>
+      <artifactId>findbugs</artifactId>
+      <version>3.0.1</version>
+      <scope>provided</scope>
+    </dependency>
+
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
@@ -29,6 +29,7 @@ import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
 import com.google.common.util.concurrent.Uninterruptibles;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater
     .newUpdater;

@@ -17,30 +17,26 @@
  */
 node("ubuntu") {
     docker.image('elek/ozone-build').pull()
-    docker.image('elek/ozone-build').inside {
+    docker.image('elek/ozone-build').inside("--privileged") {

         stage('Checkout') {
             checkout scm
+            //use this for external Jenkinsfile builds
+            //checkout poll: false, scm: [$class: 'GitSCM', branches: [[name: env.branch]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: 'github-token', url: "https://github.com/${organization}/${repository}.git"]]]

         }

         stage('Clean') {
-            status = sh returnStatus: true, script: 'mvn clean'
+            status = sh returnStatus: true, script: 'mvn clean -P hdds -am -pl :hadoop-ozone-dist '
         }

         stageRunner('Author', "author", {})

-        stageRunner('Isolation', "isolation", {})
-
-
-        stageRunner('Build', "build", {})
-
         stageRunner('Licence', "rat", {
             archiveArtifacts 'target/rat-aggregated.txt'
         }, 'artifact/target/rat-aggregated.txt/*view*/')

-        stageRunner('Unit test', "unit", {
-            junit '**/target/surefire-reports/*.xml'
-        }, 'testReport/')
+        stageRunner('Build', "build", {})

         stageRunner('Findbugs', "findbugs", {
             archiveArtifacts 'target/findbugs-all.txt'
@@ -48,9 +44,17 @@ node("ubuntu") {
         }, 'artifact/target/findbugs-all.txt/*view*/')

         stageRunner('Checkstyle', "checkstyle", {
-            checkstyle canComputeNew: false, canRunOnFailed: true, defaultEncoding: '', healthy: '', pattern: '**/checkstyle-result.xml', unHealthy: ''
+            checkstyle canComputeNew: false, canRunOnFailed: true, defaultEncoding: '', healthy: '', pattern: '**/checkstyle-errors.xml', unHealthy: ''
         }, 'checkstyleResult')

+        stageRunner('Acceptance', "acceptance", {
+            archiveArtifacts 'hadoop-ozone/dist/target/ozone-0.4.0-SNAPSHOT/smoketest/result/**'
+        })
+
+        stageRunner('Unit test', "unit", {
+            junit '**/target/surefire-reports/*.xml'
+        }, 'testReport/')
+
     }

 }
@@ -70,35 +74,42 @@ def stageRunner(name, type, processResult, url = '') {
     }
 }

+def githubStatus(name, status, description, url='') {
+    commitId = sh(returnStdout: true, script: 'git rev-parse HEAD')
+    context = 'ci/ozone/' + name
+    if (url) {
+        githubNotify account: 'apache', context: context, credentialsId: 'github-pr-ozone', description: description, repo: 'hadoop', sha: commitId, status: status, targetUrl: url
+    } else {
+        githubNotify account: 'apache', context: context, credentialsId: 'github-pr-ozone', description: description, repo: 'hadoop', sha: commitId, status: status
+    }
+}
 def prStatusStart(name) {
-    if (env.CHANGE_ID) {
-        pullRequest.createStatus(status: "pending",
-                context: 'continuous-integration/jenkins/pr-merge/' + name,
-                description: name + " is started")
-    }
+    githubStatus(name,
+            "PENDING",
+            name + " is started")
 }

 def prStatusResult(responseCode, name, url = '') {
-    status = "error"
+    status = "ERROR"
     desc = "failed"
     if (responseCode == 0) {
-        status = "success"
+        status = "SUCCESS"
         desc = "passed"
     }
-    message = name + " is " + desc
-    //System.out.println(responseCode)
-    if (env.CHANGE_ID) {
+    message = name + " check is " + desc
     if (url) {
-        pullRequest.createStatus(status: status,
-                context: 'continuous-integration/jenkins/pr-merge/' + name,
-                description: message,
-                targetUrl: env.BUILD_URL + url)
+        githubStatus(name,
+                status,
+                message,
+                env.BUILD_URL + url)
     } else {
-        pullRequest.createStatus(status: status,
-                context: 'continuous-integration/jenkins/pr-merge/' + name,
-                description: message)
+        githubStatus(name,
+                status,
+                message)
     }
-    }
     if (responseCode != 0) {
         throw new RuntimeException(message)
     }
@ -17,9 +17,13 @@
|
||||||
*/
|
*/
|
||||||
package org.apache.hadoop.ozone.client.io;
|
package org.apache.hadoop.ozone.client.io;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.OutputStream;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
import org.apache.hadoop.hdds.client.BlockID;
|
import org.apache.hadoop.hdds.client.BlockID;
|
||||||
import org.apache.hadoop.hdds.scm.XceiverClientManager;
|
import org.apache.hadoop.hdds.scm.XceiverClientManager;
|
||||||
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
|
|
||||||
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
|
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
|
||||||
import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
|
import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
|
||||||
import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
|
import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
|
||||||
|
@ -27,15 +31,10 @@ import org.apache.hadoop.ozone.common.Checksum;
|
||||||
import org.apache.hadoop.security.UserGroupInformation;
|
import org.apache.hadoop.security.UserGroupInformation;
|
||||||
import org.apache.hadoop.security.token.Token;
|
import org.apache.hadoop.security.token.Token;
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.io.OutputStream;
|
|
||||||
import java.nio.ByteBuffer;
|
|
||||||
import java.util.List;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Helper class used inside {@link BlockOutputStream}.
|
* Helper class used inside {@link BlockOutputStream}.
|
||||||
* */
|
* */
|
||||||
public class BlockOutputStreamEntry extends OutputStream {
|
public final class BlockOutputStreamEntry extends OutputStream {
|
||||||
|
|
||||||
private OutputStream outputStream;
|
private OutputStream outputStream;
|
||||||
private BlockID blockID;
|
private BlockID blockID;
|
||||||
|
@ -56,6 +55,7 @@ public class BlockOutputStreamEntry extends OutputStream {
|
||||||
private final long watchTimeout;
|
private final long watchTimeout;
|
||||||
private List<ByteBuffer> bufferList;
|
private List<ByteBuffer> bufferList;
|
||||||
|
|
||||||
|
@SuppressWarnings("parameternumber")
|
||||||
private BlockOutputStreamEntry(BlockID blockID, String key,
|
private BlockOutputStreamEntry(BlockID blockID, String key,
|
||||||
XceiverClientManager xceiverClientManager,
|
XceiverClientManager xceiverClientManager,
|
||||||
Pipeline pipeline, String requestId, int chunkSize,
|
Pipeline pipeline, String requestId, int chunkSize,
|
||||||
@@ -137,56 +137,48 @@ public class BlockOutputStreamEntry extends OutputStream {
       this.outputStream.close();
       // after closing the chunkOutPutStream, blockId would have been
       // reconstructed with updated bcsId
-      if (this.outputStream instanceof BlockOutputStream) {
-        this.blockID = ((BlockOutputStream) outputStream).getBlockID();
-      }
+      this.blockID = ((BlockOutputStream) outputStream).getBlockID();
     }
   }

   long getTotalSuccessfulFlushedData() throws IOException {
-    if (this.outputStream instanceof BlockOutputStream) {
+    if (outputStream != null) {
       BlockOutputStream out = (BlockOutputStream) this.outputStream;
       blockID = out.getBlockID();
       return out.getTotalSuccessfulFlushedData();
-    } else if (outputStream == null) {
+    } else {
       // For a pre allocated block for which no write has been initiated,
       // the OutputStream will be null here.
       // In such cases, the default blockCommitSequenceId will be 0
       return 0;
     }
-    throw new IOException("Invalid Output Stream for Key: " + key);
   }

   long getWrittenDataLength() throws IOException {
-    if (this.outputStream instanceof BlockOutputStream) {
+    if (outputStream != null) {
       BlockOutputStream out = (BlockOutputStream) this.outputStream;
       return out.getWrittenDataLength();
-    } else if (outputStream == null) {
+    } else {
       // For a pre allocated block for which no write has been initiated,
       // the OutputStream will be null here.
       // In such cases, the default blockCommitSequenceId will be 0
       return 0;
     }
-    throw new IOException("Invalid Output Stream for Key: " + key);
   }

   void cleanup(boolean invalidateClient) throws IOException {
     checkStream();
-    if (this.outputStream instanceof BlockOutputStream) {
-      BlockOutputStream out = (BlockOutputStream) this.outputStream;
-      out.cleanup(invalidateClient);
-    }
+    BlockOutputStream out = (BlockOutputStream) this.outputStream;
+    out.cleanup(invalidateClient);
   }

   void writeOnRetry(long len) throws IOException {
     checkStream();
-    if (this.outputStream instanceof BlockOutputStream) {
-      BlockOutputStream out = (BlockOutputStream) this.outputStream;
-      out.writeOnRetry(len);
-      this.currentPosition += len;
-    } else {
-      throw new IOException("Invalid Output Stream for Key: " + key);
-    }
+    BlockOutputStream out = (BlockOutputStream) this.outputStream;
+    out.writeOnRetry(len);
+    this.currentPosition += len;
   }

   /**
@@ -229,8 +221,8 @@ public class BlockOutputStreamEntry extends OutputStream {
       return this;
     }

-    public Builder setPipeline(Pipeline pipeline) {
-      this.pipeline = pipeline;
+    public Builder setPipeline(Pipeline ppln) {
+      this.pipeline = ppln;
       return this;
     }

@@ -264,8 +256,8 @@ public class BlockOutputStreamEntry extends OutputStream {
       return this;
     }

-    public Builder setBufferList(List<ByteBuffer> bufferList) {
-      this.bufferList = bufferList;
+    public Builder setBufferList(List<ByteBuffer> bffrLst) {
+      this.bufferList = bffrLst;
       return this;
     }

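The BlockOutputStreamEntry hunks above swap the instanceof guards for a plain null check: once the wrapped stream exists it is assumed to be a BlockOutputStream, so the only case left to guard is a pre-allocated block that has not been written to yet. A minimal sketch of that pattern, with made-up names (Delegate, Entry, flushedBytes) rather than the real Ozone classes:

    import java.io.OutputStream;

    final class Delegate extends OutputStream {
      private long flushed;

      @Override
      public void write(int b) {
        flushed++;
      }

      long flushedBytes() {
        return flushed;
      }
    }

    final class Entry {
      // Stays null for a pre-allocated block until the first write arrives.
      private OutputStream outputStream;

      long totalFlushed() {
        if (outputStream != null) {
          // The stream is only ever created as a Delegate, so the cast is safe.
          return ((Delegate) outputStream).flushedBytes();
        }
        // Nothing written yet: report 0 instead of throwing.
        return 0;
      }
    }
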
@@ -17,8 +17,6 @@

 package org.apache.hadoop.ozone.client.io;

-import org.apache.hadoop.hdds.scm.storage.BlockInputStream;
-
 import java.io.IOException;
 import java.io.InputStream;

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.ozone.om.helpers;

-import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;

@@ -221,7 +221,6 @@ public final class OmKeyInfo extends WithMetadata {
     private long modificationTime;
     private HddsProtos.ReplicationType type;
     private HddsProtos.ReplicationFactor factor;
-    private boolean isMultipartKey;
     private Map<String, String> metadata;

     public Builder() {

@@ -275,11 +274,6 @@ public final class OmKeyInfo extends WithMetadata {
       return this;
     }

-    public Builder setIsMultipartKey(boolean isMultipart) {
-      this.isMultipartKey = isMultipart;
-      return this;
-    }
-
     public Builder addMetadata(String key, String value) {
       metadata.put(key, value);
       return this;

@@ -25,6 +25,7 @@ import java.util.Map;
  */
 public class WithMetadata {

+  @SuppressWarnings("visibilitymodifier")
   protected Map<String, String> metadata = new HashMap<>();

   /**

@@ -60,6 +60,7 @@ public abstract class OzoneSecretManager<T extends TokenIdentifier>
   private OzoneSecretKey currentKey;
   private AtomicInteger currentKeyId;
   private AtomicInteger tokenSequenceNumber;
+  @SuppressWarnings("visibilitymodifier")
   protected final Map<Integer, OzoneSecretKey> allKeys;

   /**
@@ -22,9 +22,8 @@ touch "$FINDBUGS_ALL_FILE"

mvn -fn findbugs:check -Dfindbugs.failOnError=false -am -pl :hadoop-ozone-dist -Phdds

-find hadoop-ozone -name findbugsXml.xml | xargs -n1 convertXmlToText >> "${FINDBUGS_ALL_FILE}"
-find hadoop-hdds -name findbugsXml.xml | xargs -n1 convertXmlToText >> "${FINDBUGS_ALL_FILE}"
+find hadoop-ozone -name findbugsXml.xml | xargs -n1 convertXmlToText | tee -a "${FINDBUGS_ALL_FILE}"
+find hadoop-hdds -name findbugsXml.xml | xargs -n1 convertXmlToText | tee -a "${FINDBUGS_ALL_FILE}"

bugs=$(cat "$FINDBUGS_ALL_FILE" | wc -l)

@@ -32,4 +31,4 @@ if [[ ${bugs} -gt 0 ]]; then
   exit -1
else
   exit 0
fi

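Replacing ">>" with "| tee -a" keeps appending the converted findbugs reports to FINDBUGS_ALL_FILE while also copying them to stdout, so the warnings are visible directly in the CI console instead of only in the aggregate file. A small illustration of the difference (the file name is a placeholder, not the one used by the script):

    # output lands only in the aggregate file
    echo "sample warning" >> all-findbugs.txt

    # output is appended to the file and still shown on stdout
    echo "sample warning" | tee -a all-findbugs.txt
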
@@ -35,7 +35,7 @@ wait_for_datanodes(){

     #This line checks the number of HEALTHY datanodes registered in scm over the
     # jmx HTTP servlet
-    datanodes=$(docker-compose -f "$1" exec scm curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value')
+    datanodes=$(docker-compose -f "$1" exec -T scm curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value')
     if [[ "$datanodes" == "3" ]]; then

        #It's up and running. Let's return from the function.

@@ -51,7 +51,6 @@ wait_for_datanodes(){

     sleep 2
  done

  echo "WARNING! Datanodes are not started successfully. Please check the docker-compose files"
}

@@ -73,11 +72,13 @@ execute_tests(){
  docker-compose -f "$COMPOSE_FILE" down
  docker-compose -f "$COMPOSE_FILE" up -d --scale datanode=3
  wait_for_datanodes "$COMPOSE_FILE"
+  #TODO: we need to wait for the OM here
+  sleep 10
  for TEST in "${TESTS[@]}"; do
     TITLE="Ozone $TEST tests with $COMPOSE_DIR cluster"
     set +e
     OUTPUT_NAME="$COMPOSE_DIR-${TEST//\//_}"
-     docker-compose -f "$COMPOSE_FILE" exec ozoneManager python -m robot --log NONE --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "smoketest/$RESULT_DIR/robot-$OUTPUT_NAME.xml" --logtitle "$TITLE" --reporttitle "$TITLE" "smoketest/$TEST"
+     docker-compose -f "$COMPOSE_FILE" exec -T ozoneManager python -m robot --log NONE --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "smoketest/$RESULT_DIR/robot-$OUTPUT_NAME.xml" --logtitle "$TITLE" --reporttitle "$TITLE" "smoketest/$TEST"
     set -e
     docker-compose -f "$COMPOSE_FILE" logs > "$DIR/$RESULT_DIR/docker-$OUTPUT_NAME.log"
  done

@@ -140,12 +141,14 @@ if [ "$RUN_ALL" = true ]; then
  #
  # We select the test suites and execute them on multiple type of clusters
  #
-  DEFAULT_TESTS=("basic")
-  execute_tests ozone "${DEFAULT_TESTS[@]}"
+  TESTS=("basic")
+  execute_tests ozone "${TESTS[@]}"
+  TESTS=("audiparser")
+  execute_tests ozone "${TESTS[@]}"
  TESTS=("ozonefs")
  execute_tests ozonefs "${TESTS[@]}"
-  TESTS=("ozone-hdfs")
-  execute_tests ozone-hdfs "${DEFAULT_TESTS[@]}"
+  TESTS=("basic")
+  execute_tests ozone-hdfs "${TESTS[@]}"
  TESTS=("s3")
  execute_tests ozones3 "${TESTS[@]}"
else

@@ -153,4 +156,4 @@ else
fi

#Generate the combined output and return with the right exit code (note: robot = execute test, rebot = generate output)
-docker run --rm -it -v "$DIR/..:/opt/hadoop" apache/hadoop-runner rebot -d "smoketest/$RESULT_DIR" "smoketest/$RESULT_DIR/robot-*.xml"
+docker run --rm -v "$DIR/..:/opt/hadoop" apache/hadoop-runner rebot -d "smoketest/$RESULT_DIR" "smoketest/$RESULT_DIR/robot-*.xml"

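The -T flag added to the docker-compose exec calls (and the -it dropped from the final docker run) disables pseudo-TTY allocation, which these commands need when the script runs in a non-interactive CI job where no terminal is attached. A hedged example, assuming a compose service named scm:

    # can fail when stdin is not a terminal (e.g. in a CI executor)
    docker-compose exec scm ls /

    # safe from scripts: do not allocate a pseudo-TTY
    docker-compose exec -T scm ls /
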
@@ -28,9 +28,6 @@ import java.util.Map;
 import java.util.TreeMap;
 import java.util.UUID;

-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.client.OzoneQuota;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;

@@ -83,6 +80,9 @@ import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.Time;

 import static java.nio.charset.StandardCharsets.UTF_8;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.RandomUtils;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.either;
 import org.junit.Assert;

@@ -76,7 +76,7 @@ public class TestReadRetries {
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;

-  private static String SCM_ID = UUID.randomUUID().toString();
+  private static final String SCM_ID = UUID.randomUUID().toString();


   /**

@@ -78,7 +78,8 @@ public class TestContainerDeletionChoosingPolicy {
       KeyValueContainer container = new KeyValueContainer(data, conf);
       containerSet.addContainer(container);
       Assert.assertTrue(
-          containerSet.getContainerMapCopy().containsKey(data.getContainerID()));
+          containerSet.getContainerMapCopy()
+              .containsKey(data.getContainerID()));
     }

     ContainerDeletionChoosingPolicy deletionPolicy =

@@ -154,7 +154,8 @@ public class TestSecureContainerServer {
     XceiverClientSpi client = null;
     String containerName = OzoneUtils.getRequestID();
     try {
-      final Pipeline pipeline = ContainerTestHelper.createPipeline(numDatanodes);
+      final Pipeline pipeline =
+          ContainerTestHelper.createPipeline(numDatanodes);

       initConf.accept(pipeline, CONF);


@@ -101,6 +101,10 @@ public final class OMNodeDetails {
     return ratisPort;
   }

+  public int getRpcPort() {
+    return rpcPort;
+  }
+
   public String getRpcAddressString() {
     return NetUtils.getHostPortString(rpcAddress);
   }

@@ -176,6 +176,8 @@ public final class OzoneManagerRatisServer {
     }
   }

+  //TODO simplify it to make it shorter
+  @SuppressWarnings("methodlength")
   private RaftProperties newRaftProperties(Configuration conf) {
     final RaftProperties properties = new RaftProperties();


@@ -131,7 +131,7 @@ public class OzoneManagerRequestHandler {
     this.impl = om;
   }

-  //TODO: use map to make shorted methods
+  //TODO simplify it to make it shorter
   @SuppressWarnings("methodlength")
   public OMResponse handle(OMRequest request) {
     LOG.debug("Received OMRequest: {}, ", request);

@@ -26,6 +26,7 @@ import org.apache.hadoop.security.token.Token;
 import picocli.CommandLine;
 import picocli.CommandLine.Command;

+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Paths;

@@ -63,7 +64,8 @@ public class CancelTokenHandler extends Handler {
     }
     Token token = new Token();
     token.decodeFromUrlString(
-        new String(Files.readAllBytes(Paths.get(tokenFile))));
+        new String(Files.readAllBytes(Paths.get(tokenFile)),
+            StandardCharsets.UTF_8));
     client.getObjectStore().cancelDelegationToken(token);
     return null;
   }

@@ -25,6 +25,7 @@ import org.apache.hadoop.security.token.Token;
 import picocli.CommandLine;
 import picocli.CommandLine.Command;

+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Paths;

@@ -58,7 +59,8 @@ public class PrintTokenHandler extends Handler {
       return null;
     }

-    String encodedToken = new String(Files.readAllBytes(Paths.get(tokenFile)));
+    String encodedToken = new String(Files.readAllBytes(Paths.get(tokenFile)),
+        StandardCharsets.UTF_8);
     Token token = new Token();
     token.decodeFromUrlString(encodedToken);

@@ -26,6 +26,7 @@ import org.apache.hadoop.security.token.Token;
 import picocli.CommandLine;
 import picocli.CommandLine.Command;

+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Paths;

@@ -63,7 +64,8 @@ public class RenewTokenHandler extends Handler {
     }
     Token token = new Token();
     token.decodeFromUrlString(
-        new String(Files.readAllBytes(Paths.get(tokenFile))));
+        new String(Files.readAllBytes(Paths.get(tokenFile)),
+            StandardCharsets.UTF_8));
     long expiryTime = client.getObjectStore().renewDelegationToken(token);

     System.out.printf("Token renewed successfully, expiry time: %s",

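All three token handlers now pass an explicit charset when turning the token file into a String: new String(byte[]) without one uses the platform default encoding, which findbugs flags and which could corrupt the URL-encoded token on an unusual locale. A minimal sketch of the fixed pattern, with TokenFiles, readToken and the tokenFile argument as illustrative names:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    final class TokenFiles {
      private TokenFiles() {
      }

      // Read the whole file and decode it with an explicit charset.
      static String readToken(String tokenFile) throws IOException {
        return new String(Files.readAllBytes(Paths.get(tokenFile)),
            StandardCharsets.UTF_8);
      }
    }
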
@@ -91,6 +91,13 @@
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
     </plugins>
   </build>

@@ -76,6 +76,13 @@
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
     </plugins>
   </build>

@@ -132,6 +132,13 @@
       <artifactId>hadoop-ozone-objectstore-service</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>com.google.code.findbugs</groupId>
+      <artifactId>findbugs</artifactId>
+      <version>3.0.1</version>
+      <scope>provided</scope>
+    </dependency>
+
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>

@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.Enumeration;
 import java.util.List;

+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -38,6 +39,7 @@ public final class OzoneClientAdapterFactory {
   private OzoneClientAdapterFactory() {
   }

+  @SuppressFBWarnings("DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED")
   public static OzoneClientAdapter createAdapter(
       String volumeStr,
       String bucketStr, OzoneFSStorageStatistics storageStatistics)

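The SuppressFBWarnings import here (and in RandomKeyGenerator further down) is what the new provided-scope com.google.code.findbugs:findbugs dependency in the poms is for: the annotation has to be on the compile classpath but is not needed at runtime. A sketch of suppressing one findbugs warning on a single method, assuming that dependency is available; the class and method names are invented for illustration:

    import java.net.URL;
    import java.net.URLClassLoader;

    import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;

    final class IsolatedLoaderFactory {
      private IsolatedLoaderFactory() {
      }

      @SuppressFBWarnings("DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED")
      static ClassLoader newIsolatedLoader() {
        // The loader is created directly on purpose; the findbugs warning about
        // wrapping this in doPrivileged is suppressed for this method only.
        return new URLClassLoader(new URL[0],
            IsolatedLoaderFactory.class.getClassLoader());
      }
    }
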
@@ -207,7 +207,7 @@ public class OzoneFileSystem extends FileSystem {
       short replication, long blockSize,
       Progressable progress) throws IOException {
     LOG.trace("create() path:{}", f);
     storageStatistics.incrementCounter(Statistic.INVOCATION_CREATE, 1);
     statistics.incrementWriteOps(1);
     final String key = pathToKey(f);
     final FileStatus status;

@@ -308,6 +308,7 @@
         <plugin>
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>findbugs-maven-plugin</artifactId>
+          <version>3.0.4</version>
           <configuration>
             <excludeFilterFile combine.self="override"/>
           </configuration>

@@ -70,6 +70,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>jmh-generator-annprocess</artifactId>
       <version>1.19</version>
     </dependency>
+    <dependency>
+      <groupId>com.google.code.findbugs</groupId>
+      <artifactId>findbugs</artifactId>
+      <version>3.0.1</version>
+      <scope>provided</scope>
+    </dependency>
+
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-integration-test</artifactId>

@@ -36,6 +36,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Supplier;

+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import io.opentracing.Scope;
 import io.opentracing.util.GlobalTracer;
 import org.apache.commons.codec.digest.DigestUtils;

@@ -552,6 +553,7 @@ public final class RandomKeyGenerator implements Callable<Void> {
     }

     @Override
+    @SuppressFBWarnings("REC_CATCH_EXCEPTION")
     public void run() {
       LOG.trace("Creating volume: {}", volumeName);
       long start = System.nanoTime();

@@ -1,18 +1,19 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
  * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 package org.apache.hadoop.ozone.fsck;

@@ -79,4 +80,4 @@ public class BlockIdDetails {
   public int hashCode() {
     return Objects.hash(bucketName, blockVol, keyName);
   }
 }