Merge branch 'trunk' into HDDS-1535

commit 9da62f33be
@@ -334,6 +334,16 @@
       <artifactId>dnsjava</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.wildfly.openssl</groupId>
+      <artifactId>wildfly-openssl</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.assertj</groupId>
+      <artifactId>assertj-core</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>

   <build>
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.protocolPB;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.StringInterner;

 import java.io.IOException;

@@ -91,8 +92,8 @@ public final class PBHelper {
     mtime = proto.getModificationTime();
     atime = proto.getAccessTime();
     permission = convert(proto.getPermission());
-    owner = proto.getOwner();
-    group = proto.getGroup();
+    owner = StringInterner.weakIntern(proto.getOwner());
+    group = StringInterner.weakIntern(proto.getGroup());
     int flags = proto.getFlags();
     FileStatus fileStatus = new FileStatus(length, isdir, blockReplication,
         blocksize, mtime, atime, permission, owner, group, symlink, path,
@@ -107,7 +107,7 @@ public class MutableQuantiles extends MutableMetric {
     estimator = new SampleQuantiles(quantiles);

     this.interval = interval;
-    scheduledTask = scheduler.scheduleAtFixedRate(new RolloverSample(this),
+    scheduledTask = scheduler.scheduleWithFixedDelay(new RolloverSample(this),
         interval, interval, TimeUnit.SECONDS);
   }

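The switch to scheduleWithFixedDelay matters when a rollover takes longer than the sampling interval: at a fixed rate the overdue runs fire back-to-back, while a fixed delay always leaves a full interval after the previous run finishes. A self-contained JDK sketch of that behaviour (not part of the patch):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public final class FixedDelayDemo {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
    // Simulate a rollover that overruns its 1-second interval. With
    // scheduleAtFixedRate the missed runs would fire back-to-back; with
    // scheduleWithFixedDelay each run starts a full second after the
    // previous one finished.
    scheduler.scheduleWithFixedDelay(() -> {
      try {
        Thread.sleep(1500);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      System.out.println("rollover finished at " + System.nanoTime());
    }, 1, 1, TimeUnit.SECONDS);
    Thread.sleep(10_000);
    scheduler.shutdownNow();
  }
}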
@@ -16,7 +16,7 @@
  * limitations under the License.
  */

-package org.apache.hadoop.fs.azurebfs.utils;
+package org.apache.hadoop.security.ssl;

 import java.io.IOException;
 import java.net.InetAddress;
@@ -42,11 +42,11 @@ import org.wildfly.openssl.SSL;
  * performance.
  *
  */
-public final class SSLSocketFactoryEx extends SSLSocketFactory {
+public final class OpenSSLSocketFactory extends SSLSocketFactory {

   /**
    * Default indicates Ordered, preferred OpenSSL, if failed to load then fall
-   * back to Default_JSSE
+   * back to Default_JSSE.
    */
   public enum SSLChannelMode {
     OpenSSL,
@@ -54,9 +54,9 @@ public final class SSLSocketFactoryEx extends SSLSocketFactory {
     Default_JSSE
   }

-  private static SSLSocketFactoryEx instance = null;
+  private static OpenSSLSocketFactory instance = null;
   private static final Logger LOG = LoggerFactory.getLogger(
-      SSLSocketFactoryEx.class);
+      OpenSSLSocketFactory.class);
   private String providerName;
   private SSLContext ctx;
   private String[] ciphers;
@@ -71,7 +71,7 @@ public final class SSLSocketFactoryEx extends SSLSocketFactory {
   public static synchronized void initializeDefaultFactory(
       SSLChannelMode preferredMode) throws IOException {
     if (instance == null) {
-      instance = new SSLSocketFactoryEx(preferredMode);
+      instance = new OpenSSLSocketFactory(preferredMode);
     }
   }

@@ -84,7 +84,7 @@ public final class SSLSocketFactoryEx extends SSLSocketFactory {
    * @return instance of the SSLSocketFactory, instance must be initialized by
    *         initializeDefaultFactory.
    */
-  public static SSLSocketFactoryEx getDefaultFactory() {
+  public static OpenSSLSocketFactory getDefaultFactory() {
     return instance;
   }

@@ -92,7 +92,7 @@ public final class SSLSocketFactoryEx extends SSLSocketFactory {
     OpenSSLProvider.register();
   }

-  private SSLSocketFactoryEx(SSLChannelMode preferredChannelMode)
+  private OpenSSLSocketFactory(SSLChannelMode preferredChannelMode)
       throws IOException {
     try {
       initializeSSLContext(preferredChannelMode);
@@ -120,11 +120,13 @@ public final class SSLSocketFactoryEx extends SSLSocketFactory {
     switch (preferredChannelMode) {
     case Default:
       try {
-        java.util.logging.Logger logger = java.util.logging.Logger.getLogger(SSL.class.getName());
+        java.util.logging.Logger logger = java.util.logging.Logger.getLogger(
+            SSL.class.getName());
         logger.setLevel(Level.WARNING);
         ctx = SSLContext.getInstance("openssl.TLS");
         ctx.init(null, null, null);
-        // Strong reference needs to be kept to logger until initialization of SSLContext finished (see HADOOP-16174):
+        // Strong reference needs to be kept to logger until initialization of
+        // SSLContext finished (see HADOOP-16174):
         logger.setLevel(Level.INFO);
         channelMode = SSLChannelMode.OpenSSL;
       } catch (NoSuchAlgorithmException e) {
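After the move to org.apache.hadoop.security.ssl, callers initialize the singleton once and then treat it as a plain SSLSocketFactory; the new TestOpenSSLSocketFactory below exercises the same calls. A minimal sketch using only the methods shown in these hunks (the wrapper class name is illustrative):

import java.io.IOException;
import javax.net.ssl.SSLSocketFactory;

import org.apache.hadoop.security.ssl.OpenSSLSocketFactory;

public final class OpenSslFactoryUsage {
  private OpenSslFactoryUsage() { }

  public static SSLSocketFactory sslSocketFactory() throws IOException {
    // Default mode prefers wildfly-openssl and falls back to JSSE if the
    // native OpenSSL provider cannot be loaded.
    OpenSSLSocketFactory.initializeDefaultFactory(
        OpenSSLSocketFactory.SSLChannelMode.Default);
    return OpenSSLSocketFactory.getDefaultFactory();
  }
}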
@@ -1904,15 +1904,15 @@
   <name>fs.s3a.change.detection.mode</name>
   <value>server</value>
   <description>
-    Determines how change detection is applied to alert to S3 objects
-    rewritten while being read. Value 'server' indicates to apply the attribute
-    constraint directly on GetObject requests to S3. Value 'client' means to do a
-    client-side comparison of the attribute value returned in the response. Value
-    'server' would not work with third-party S3 implementations that do not
-    support these constraints on GetObject. Values 'server' and 'client' generate
-    RemoteObjectChangedException when a mismatch is detected. Value 'warn' works
-    like 'client' but generates only a warning. Value 'none' will ignore change
-    detection completely.
+    Determines how change detection is applied to alert to inconsistent S3
+    objects read during or after an overwrite. Value 'server' indicates to apply
+    the attribute constraint directly on GetObject requests to S3. Value 'client'
+    means to do a client-side comparison of the attribute value returned in the
+    response. Value 'server' would not work with third-party S3 implementations
+    that do not support these constraints on GetObject. Values 'server' and
+    'client' generate RemoteObjectChangedException when a mismatch is detected.
+    Value 'warn' works like 'client' but generates only a warning. Value 'none'
+    will ignore change detection completely.
   </description>
 </property>

@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security.ssl;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.junit.Test;
+
+import org.apache.hadoop.util.NativeCodeLoader;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assume.assumeTrue;
+
+/**
+ * Tests for {@link OpenSSLSocketFactory}.
+ */
+public class TestOpenSSLSocketFactory {
+
+  @Test
+  public void testOpenSSL() throws IOException {
+    assumeTrue("Unable to load native libraries",
+        NativeCodeLoader.isNativeCodeLoaded());
+    assumeTrue("Build was not compiled with support for OpenSSL",
+        NativeCodeLoader.buildSupportsOpenssl());
+    OpenSSLSocketFactory.initializeDefaultFactory(
+        OpenSSLSocketFactory.SSLChannelMode.OpenSSL);
+    assertThat(OpenSSLSocketFactory.getDefaultFactory()
+        .getProviderName()).contains("openssl");
+  }
+
+  @Test
+  public void testJSEEJava8() throws IOException {
+    assumeTrue("Not running on Java 8",
+        System.getProperty("java.version").startsWith("1.8"));
+    OpenSSLSocketFactory.initializeDefaultFactory(
+        OpenSSLSocketFactory.SSLChannelMode.Default_JSSE);
+    assertThat(Arrays.stream(OpenSSLSocketFactory.getDefaultFactory()
+        .getSupportedCipherSuites())).noneMatch("GCM"::contains);
+  }
+}
@@ -178,7 +178,7 @@ public final class HddsUtils {
    * @return {@link SCMSecurityProtocol}
    * @throws IOException
    */
-  public static SCMSecurityProtocol getScmSecurityClient(
+  public static SCMSecurityProtocolClientSideTranslatorPB getScmSecurityClient(
       OzoneConfiguration conf, InetSocketAddress address) throws IOException {
     RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class,
         ProtobufRpcEngine.class);
@@ -23,6 +23,7 @@ import java.io.IOException;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCACertificateRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto.Builder;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto;
@@ -79,18 +80,8 @@ public class SCMSecurityProtocolClientSideTranslatorPB implements
   @Override
   public String getDataNodeCertificate(DatanodeDetailsProto dataNodeDetails,
       String certSignReq) throws IOException {
-    SCMGetDataNodeCertRequestProto.Builder builder =
-        SCMGetDataNodeCertRequestProto
-            .newBuilder()
-            .setCSR(certSignReq)
-            .setDatanodeDetails(dataNodeDetails);
-    try {
-      return rpcProxy
-          .getDataNodeCertificate(NULL_RPC_CONTROLLER, builder.build())
-          .getX509Certificate();
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+    return getDataNodeCertificateChain(dataNodeDetails, certSignReq)
+        .getX509Certificate();
   }

   /**
@@ -103,13 +94,25 @@ public class SCMSecurityProtocolClientSideTranslatorPB implements
   @Override
   public String getOMCertificate(OzoneManagerDetailsProto omDetails,
       String certSignReq) throws IOException {
+    return getOMCertChain(omDetails, certSignReq).getX509Certificate();
+  }
+
+  /**
+   * Get SCM signed certificate for OM.
+   *
+   * @param omDetails - OzoneManager Details.
+   * @param certSignReq - Certificate signing request.
+   * @return byte[] - SCM signed certificate.
+   */
+  public SCMGetCertResponseProto getOMCertChain(
+      OzoneManagerDetailsProto omDetails, String certSignReq)
+      throws IOException {
     SCMGetOMCertRequestProto.Builder builder = SCMGetOMCertRequestProto
         .newBuilder()
         .setCSR(certSignReq)
         .setOmDetails(omDetails);
     try {
-      return rpcProxy.getOMCertificate(NULL_RPC_CONTROLLER, builder.build())
-          .getX509Certificate();
+      return rpcProxy.getOMCertificate(NULL_RPC_CONTROLLER, builder.build());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -135,6 +138,28 @@ public class SCMSecurityProtocolClientSideTranslatorPB implements
     }
   }

+  /**
+   * Get SCM signed certificate for Datanode.
+   *
+   * @param dnDetails - Datanode Details.
+   * @param certSignReq - Certificate signing request.
+   * @return byte[] - SCM signed certificate.
+   */
+  public SCMGetCertResponseProto getDataNodeCertificateChain(
+      DatanodeDetailsProto dnDetails, String certSignReq)
+      throws IOException {
+    SCMGetDataNodeCertRequestProto.Builder builder =
+        SCMGetDataNodeCertRequestProto.newBuilder()
+            .setCSR(certSignReq)
+            .setDatanodeDetails(dnDetails);
+    try {
+      return rpcProxy.getDataNodeCertificate(NULL_RPC_CONTROLLER,
+          builder.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
   /**
    * Get CA certificate.
    *
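The refactor routes the old String-returning methods through two new public methods that return the full SCMGetCertResponseProto, so callers can read the signed certificate and the CA certificate from one response. A rough caller-side sketch; the wrapper class name and any import paths not visible in this diff (HddsUtils, OzoneConfiguration, the translator's package) are assumptions:

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;

public final class OmCertBootstrapSketch {
  private OmCertBootstrapSketch() { }

  /** Fetch the OM certificate and the CA certificate in one round trip. */
  static String[] fetchOmCertAndCa(OzoneConfiguration conf,
      InetSocketAddress scmAddress, OzoneManagerDetailsProto omDetails,
      String pemEncodedCsr) throws IOException {
    SCMSecurityProtocolClientSideTranslatorPB client =
        HddsUtils.getScmSecurityClient(conf, scmAddress);
    SCMGetCertResponseProto response =
        client.getOMCertChain(omDetails, pemEncodedCsr);
    // x509CACertificate is the optional field added to the proto later
    // in this diff.
    return new String[] {
        response.getX509Certificate(), response.getX509CACertificate()};
  }
}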
@@ -61,7 +61,9 @@ public class SCMSecurityProtocolServerSideTranslatorPB implements
           SCMGetCertResponseProto
               .newBuilder()
               .setResponseCode(ResponseCode.success)
-              .setX509Certificate(certificate);
+              .setX509Certificate(certificate)
+              .setX509CACertificate(impl.getCACertificate());
+
       return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -87,7 +89,8 @@ public class SCMSecurityProtocolServerSideTranslatorPB implements
           SCMGetCertResponseProto
               .newBuilder()
               .setResponseCode(ResponseCode.success)
-              .setX509Certificate(certificate);
+              .setX509Certificate(certificate)
+              .setX509CACertificate(impl.getCACertificate());
       return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -260,7 +260,7 @@ public final class ScmConfigKeys {
   public static final String OZONE_SCM_STALENODE_INTERVAL =
       "ozone.scm.stale.node.interval";
   public static final String OZONE_SCM_STALENODE_INTERVAL_DEFAULT =
-      "90s";
+      "5m";

   public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT =
       "ozone.scm.heartbeat.rpc-timeout";
@@ -330,7 +330,7 @@ public final class ScmConfigKeys {
       "ozone.scm.pipeline.destroy.timeout";

   public static final String OZONE_SCM_PIPELINE_DESTROY_TIMEOUT_DEFAULT =
-      "300s";
+      "66s";

   public static final String OZONE_SCM_PIPELINE_CREATION_INTERVAL =
       "ozone.scm.pipeline.creation.interval";
@@ -141,6 +141,19 @@ public interface CertificateClient {
   void storeCertificate(String pemEncodedCert, boolean force)
       throws CertificateException;

+  /**
+   * Stores the Certificate for this client. Don't use this api to add
+   * trusted certificates of others.
+   *
+   * @param pemEncodedCert - pem encoded X509 Certificate
+   * @param force - override any existing file
+   * @param caCert - Is CA certificate.
+   * @throws CertificateException - on Error.
+   *
+   */
+  void storeCertificate(String pemEncodedCert, boolean force, boolean caCert)
+      throws CertificateException;
+
   /**
    * Stores the trusted chain of certificates.
    *
@@ -80,6 +80,7 @@ import static org.apache.hadoop.hdds.security.x509.exceptions.CertificateExcepti
 public abstract class DefaultCertificateClient implements CertificateClient {

   private static final String CERT_FILE_NAME_FORMAT = "%s.crt";
+  private static final String CA_CERT_PREFIX = "CA-";
   private final Logger logger;
   private final SecurityConfig securityConfig;
   private final KeyCodec keyCodec;
@@ -460,6 +461,22 @@ public abstract class DefaultCertificateClient implements CertificateClient {
   @Override
   public void storeCertificate(String pemEncodedCert, boolean force)
       throws CertificateException {
+    this.storeCertificate(pemEncodedCert, force, false);
+  }
+
+  /**
+   * Stores the Certificate for this client. Don't use this api to add trusted
+   * certificates of others.
+   *
+   * @param pemEncodedCert - pem encoded X509 Certificate
+   * @param force - override any existing file
+   * @param caCert - Is CA certificate.
+   * @throws CertificateException - on Error.
+   *
+   */
+  @Override
+  public void storeCertificate(String pemEncodedCert, boolean force,
+      boolean caCert) throws CertificateException {
     CertificateCodec certificateCodec = new CertificateCodec(securityConfig);
     try {
       Path basePath = securityConfig.getCertificateLocation();
@@ -469,6 +486,10 @@ public abstract class DefaultCertificateClient implements CertificateClient {
       String certName = String.format(CERT_FILE_NAME_FORMAT,
           cert.getSerialNumber().toString());

+      if(caCert) {
+        certName = CA_CERT_PREFIX + certName;
+      }
+
       certificateCodec.writeCertificate(basePath, certName,
           pemEncodedCert, force);
       certificateMap.putIfAbsent(cert.getSerialNumber().toString(), cert);
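The new overload only changes the on-disk file name: with caCert set, the CA_CERT_PREFIX keeps the CA certificate from colliding with the client's own serial-numbered .crt file. A small sketch of the intended call pattern; the CertificateClient import path is an assumption, the rest follows the hunks above:

import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;

public final class CertStoreSketch {
  private CertStoreSketch() { }

  static void persistCertificates(CertificateClient certClient,
      String pemEncodedSignedCert, String pemEncodedCaCert)
      throws CertificateException {
    // Own certificate: stored as <serial>.crt.
    certClient.storeCertificate(pemEncodedSignedCert, true);
    // CA certificate: caCert == true stores it as CA-<serial>.crt.
    certClient.storeCertificate(pemEncodedCaCert, true, true);
  }
}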
@@ -56,7 +56,7 @@ public final class OzoneConfigKeys {

   public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY =
       "dfs.container.chunk.write.sync";
-  public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = true;
+  public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false;
   /**
    * Ratis Port where containers listen to.
    */
@@ -52,12 +52,11 @@ public final class OzoneConsts {
   public static final String OZONE_ACL_USER_TYPE = "user";
   public static final String OZONE_ACL_GROUP_TYPE = "group";
   public static final String OZONE_ACL_WORLD_TYPE = "world";
+  public static final String OZONE_ACL_ANONYMOUS_TYPE = "anonymous";
   public static final String OZONE_ACL_IP_TYPE = "ip";

   public static final String OZONE_ACL_READ = "r";
   public static final String OZONE_ACL_WRITE = "w";
-  public static final String OZONE_ACL_READ_WRITE = "rw";
-  public static final String OZONE_ACL_WRITE_READ = "wr";
   public static final String OZONE_ACL_DELETE = "d";
   public static final String OZONE_ACL_LIST = "l";
   public static final String OZONE_ACL_ALL = "a";
@@ -44,6 +44,7 @@ public interface DBStore extends AutoCloseable {
    */
   Table<byte[], byte[]> getTable(String name) throws IOException;

+
   /**
    * Gets an existing TableStore with implicit key/value conversion.
    *
@@ -298,4 +298,8 @@ public class RDBStore implements DBStore {
   public File getDbLocation() {
     return dbLocation;
   }
+
+  public CodecRegistry getCodecRegistry() {
+    return codecRegistry;
+  }
 }
@@ -22,6 +22,7 @@ package org.apache.hadoop.utils.db;
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;

+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSUtil;

 import org.rocksdb.ColumnFamilyHandle;
@@ -33,9 +34,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 /**
- * RocksDB implementation of ozone metadata store.
+ * RocksDB implementation of ozone metadata store. This class should be only
+ * used as part of TypedTable as it's underlying implementation to access the
+ * metadata store content. All other user's using Table should use TypedTable.
  */
-public class RDBTable implements Table<byte[], byte[]> {
+@InterfaceAudience.Private
+class RDBTable implements Table<byte[], byte[]> {


   private static final Logger LOG =
@@ -52,7 +56,7 @@ public class RDBTable implements Table<byte[], byte[]> {
    * @param handle - ColumnFamily Handle.
    * @param writeOptions - RocksDB write Options.
    */
-  public RDBTable(RocksDB db, ColumnFamilyHandle handle,
+  RDBTable(RocksDB db, ColumnFamilyHandle handle,
       WriteOptions writeOptions) {
     this.db = db;
     this.handle = handle;
@@ -21,8 +21,10 @@ package org.apache.hadoop.utils.db;

 import java.io.IOException;

+import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.utils.db.cache.CacheKey;
+import org.apache.hadoop.utils.db.cache.CacheValue;
 /**
  * Interface for key-value store that stores ozone metadata. Ozone metadata is
  * stored as key value pairs, both key and value are arbitrary byte arrays. Each
@@ -97,6 +99,28 @@ public interface Table<KEY, VALUE> extends AutoCloseable {
    */
   String getName() throws IOException;

+  /**
+   * Add entry to the table cache.
+   *
+   * If the cacheKey already exists, it will override the entry.
+   * @param cacheKey
+   * @param cacheValue
+   */
+  default void addCacheEntry(CacheKey<KEY> cacheKey,
+      CacheValue<VALUE> cacheValue) {
+    throw new NotImplementedException("addCacheEntry is not implemented");
+  }
+
+  /**
+   * Removes all the entries from the table cache which are having epoch value
+   * less
+   * than or equal to specified epoch value.
+   * @param epoch
+   */
+  default void cleanupCache(long epoch) {
+    throw new NotImplementedException("cleanupCache is not implemented");
+  }
+
   /**
    * Class used to represent the key and value pair of a db entry.
    */
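Together with the CacheKey and CacheValue classes added later in this change, these default methods let a table buffer epoch-tagged updates ahead of the RocksDB flush. A sketch distilled from TestTypedRDBTableStore further down, assuming a TypedTable-backed table; the key and value strings are illustrative:

import com.google.common.base.Optional;

import org.apache.hadoop.utils.db.Table;
import org.apache.hadoop.utils.db.cache.CacheKey;
import org.apache.hadoop.utils.db.cache.CacheValue;

public final class TableCacheUsageSketch {
  private TableCacheUsageSketch() { }

  static void bufferWrites(Table<String, String> table) throws Exception {
    // Buffer two updates, each tagged with the epoch (e.g. the Ratis log
    // index) that produced it.
    table.addCacheEntry(new CacheKey<>("volume1"),
        new CacheValue<>(Optional.of("owner=alice"), 1));
    // An absent value marks a delete that has not reached RocksDB yet.
    table.addCacheEntry(new CacheKey<>("volume2"),
        new CacheValue<>(Optional.absent(), 2));

    String v1 = table.get("volume1");   // served from the cache
    String v2 = table.get("volume2");   // null: deleted in the cache

    // After entries up to epoch 2 are flushed to RocksDB, drop them.
    table.cleanupCache(2);
  }
}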
@@ -20,6 +20,12 @@ package org.apache.hadoop.utils.db;

 import java.io.IOException;

+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.utils.db.cache.CacheKey;
+import org.apache.hadoop.utils.db.cache.CacheValue;
+import org.apache.hadoop.utils.db.cache.PartialTableCache;
+import org.apache.hadoop.utils.db.cache.TableCache;
+
 /**
  * Strongly typed table implementation.
  * <p>
@@ -31,13 +37,16 @@ import java.io.IOException;
  */
 public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {

-  private Table<byte[], byte[]> rawTable;
+  private final Table<byte[], byte[]> rawTable;

-  private CodecRegistry codecRegistry;
+  private final CodecRegistry codecRegistry;

-  private Class<KEY> keyType;
+  private final Class<KEY> keyType;

-  private Class<VALUE> valueType;
+  private final Class<VALUE> valueType;
+
+  private final TableCache<CacheKey<KEY>, CacheValue<VALUE>> cache;

   public TypedTable(
       Table<byte[], byte[]> rawTable,
@@ -47,6 +56,7 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
     this.codecRegistry = codecRegistry;
     this.keyType = keyType;
     this.valueType = valueType;
+    cache = new PartialTableCache<>();
   }

   @Override
@@ -69,8 +79,34 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
     return rawTable.isEmpty();
   }

+  /**
+   * Returns the value mapped to the given key in byte array or returns null
+   * if the key is not found.
+   *
+   * Caller's of this method should use synchronization mechanism, when
+   * accessing. First it will check from cache, if it has entry return the
+   * value, otherwise get from the RocksDB table.
+   *
+   * @param key metadata key
+   * @return VALUE
+   * @throws IOException
+   */
   @Override
   public VALUE get(KEY key) throws IOException {
+    // Here the metadata lock will guarantee that cache is not updated for same
+    // key during get key.
+    CacheValue<VALUE> cacheValue = cache.get(new CacheKey<>(key));
+    if (cacheValue == null) {
+      // If no cache for the table or if it does not exist in cache get from
+      // RocksDB table.
+      return getFromTable(key);
+    } else {
+      // We have a value in cache, return the value.
+      return cacheValue.getValue();
+    }
+  }
+
+  private VALUE getFromTable(KEY key) throws IOException {
     byte[] keyBytes = codecRegistry.asRawData(key);
     byte[] valueBytes = rawTable.get(keyBytes);
     return codecRegistry.asObject(valueBytes, valueType);
@@ -106,6 +142,40 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {

   }

+  @Override
+  public void addCacheEntry(CacheKey<KEY> cacheKey,
+      CacheValue<VALUE> cacheValue) {
+    // This will override the entry if there is already entry for this key.
+    cache.put(cacheKey, cacheValue);
+  }
+
+  @Override
+  public void cleanupCache(long epoch) {
+    cache.cleanup(epoch);
+  }
+
+  @VisibleForTesting
+  TableCache<CacheKey<KEY>, CacheValue<VALUE>> getCache() {
+    return cache;
+  }
+
+  public Table<byte[], byte[]> getRawTable() {
+    return rawTable;
+  }
+
+  public CodecRegistry getCodecRegistry() {
+    return codecRegistry;
+  }
+
+  public Class<KEY> getKeyType() {
+    return keyType;
+  }
+
+  public Class<VALUE> getValueType() {
+    return valueType;
+  }
+
   /**
    * Key value implementation for strongly typed tables.
    */
hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheKey.java (new file, 56 lines)
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.utils.db.cache;
+
+import java.util.Objects;
+
+/**
+ * CacheKey for the RocksDB table.
+ * @param <KEY>
+ */
+public class CacheKey<KEY> {
+
+  private final KEY key;
+
+  public CacheKey(KEY key) {
+    Objects.requireNonNull(key, "Key Should not be null in CacheKey");
+    this.key = key;
+  }
+
+  public KEY getKey() {
+    return key;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    CacheKey<?> cacheKey = (CacheKey<?>) o;
+    return Objects.equals(key, cacheKey.key);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(key);
+  }
+}
hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheValue.java (new file, 47 lines)
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.utils.db.cache;
+
+import com.google.common.base.Optional;
+
+/**
+ * CacheValue for the RocksDB Table.
+ * @param <VALUE>
+ */
+public class CacheValue<VALUE> {
+
+  private Optional<VALUE> value;
+  // This value is used for evict entries from cache.
+  // This value is set with ratis transaction context log entry index.
+  private long epoch;
+
+  public CacheValue(Optional<VALUE> value, long epoch) {
+    this.value = value;
+    this.epoch = epoch;
+  }
+
+  public VALUE getValue() {
+    return value.orNull();
+  }
+
+  public long getEpoch() {
+    return epoch;
+  }
+
+}
hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/EpochEntry.java (new file, 74 lines)
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.utils.db.cache;
+
+import java.util.Objects;
+
+/**
+ * Class used which describes epoch entry. This will be used during deletion
+ * entries from cache for partial table cache.
+ * @param <CACHEKEY>
+ */
+public class EpochEntry<CACHEKEY> implements Comparable<CACHEKEY> {
+
+  private long epoch;
+  private CACHEKEY cachekey;
+
+  EpochEntry(long epoch, CACHEKEY cachekey) {
+    this.epoch = epoch;
+    this.cachekey = cachekey;
+  }
+
+  public long getEpoch() {
+    return epoch;
+  }
+
+  public CACHEKEY getCachekey() {
+    return cachekey;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    EpochEntry<?> that = (EpochEntry<?>) o;
+    return epoch == that.epoch && cachekey == that.cachekey;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(epoch, cachekey);
+  }
+
+  public int compareTo(Object o) {
+    if(this.epoch == ((EpochEntry<?>)o).epoch) {
+      return 0;
+    } else if (this.epoch < ((EpochEntry<?>)o).epoch) {
+      return -1;
+    } else {
+      return 1;
+    }
+  }
+
+}
hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java (new file, 97 lines)
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.utils.db.cache;
+
+import java.util.Iterator;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+
+/**
+ * Cache implementation for the table, this cache is partial cache, this will
+ * be cleaned up, after entries are flushed to DB.
+ */
+@Private
+@Evolving
+public class PartialTableCache<CACHEKEY extends CacheKey,
+    CACHEVALUE extends CacheValue> implements TableCache<CACHEKEY, CACHEVALUE> {
+
+  private final ConcurrentHashMap<CACHEKEY, CACHEVALUE> cache;
+  private final TreeSet<EpochEntry<CACHEKEY>> epochEntries;
+  private ExecutorService executorService;
+
+
+
+  public PartialTableCache() {
+    cache = new ConcurrentHashMap<>();
+    epochEntries = new TreeSet<>();
+    // Created a singleThreadExecutor, so one cleanup will be running at a
+    // time.
+    ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true)
+        .setNameFormat("PartialTableCache Cleanup Thread - %d").build();
+    executorService = Executors.newSingleThreadExecutor(build);
+
+  }
+
+  @Override
+  public CACHEVALUE get(CACHEKEY cachekey) {
+    return cache.get(cachekey);
+  }
+
+  @Override
+  public void put(CACHEKEY cacheKey, CACHEVALUE value) {
+    cache.put(cacheKey, value);
+    epochEntries.add(new EpochEntry<>(value.getEpoch(), cacheKey));
+  }
+
+  @Override
+  public void cleanup(long epoch) {
+    executorService.submit(() -> evictCache(epoch));
+  }
+
+  @Override
+  public int size() {
+    return cache.size();
+  }
+
+  private void evictCache(long epoch) {
+    EpochEntry<CACHEKEY> currentEntry = null;
+    for (Iterator<EpochEntry<CACHEKEY>> iterator = epochEntries.iterator();
+         iterator.hasNext();) {
+      currentEntry = iterator.next();
+      CACHEKEY cachekey = currentEntry.getCachekey();
+      CacheValue cacheValue = cache.get(cachekey);
+      if (cacheValue.getEpoch() <= epoch) {
+        cache.remove(cachekey);
+        iterator.remove();
+      } else {
+        // If currentEntry epoch is greater than epoch, we have deleted all
+        // entries less than specified epoch. So, we can break.
+        break;
+      }
+    }
+  }
+}
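cleanup(epoch) only schedules eviction on the single background thread, so entries at or below the given epoch disappear asynchronously rather than immediately. A standalone sketch mirroring TestPartialTableCache at the end of this diff; the key and value strings are illustrative:

import com.google.common.base.Optional;

import org.apache.hadoop.utils.db.cache.CacheKey;
import org.apache.hadoop.utils.db.cache.CacheValue;
import org.apache.hadoop.utils.db.cache.PartialTableCache;
import org.apache.hadoop.utils.db.cache.TableCache;

public final class PartialTableCacheSketch {
  public static void main(String[] args) {
    TableCache<CacheKey<String>, CacheValue<String>> cache =
        new PartialTableCache<>();

    // Each entry carries the epoch (e.g. Ratis log index) that wrote it.
    for (int epoch = 0; epoch < 10; epoch++) {
      cache.put(new CacheKey<>("key-" + epoch),
          new CacheValue<>(Optional.of("value-" + epoch), epoch));
    }

    System.out.println(cache.get(new CacheKey<>("key-3")).getValue());

    // Entries with epoch <= 4 were flushed to RocksDB; schedule eviction.
    // The cache size drops on the cleanup thread, not synchronously here.
    cache.cleanup(4);
  }
}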
hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java (new file, 63 lines)
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.utils.db.cache;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+
+/**
+ * Cache used for RocksDB tables.
+ * @param <CACHEKEY>
+ * @param <CACHEVALUE>
+ */
+
+@Private
+@Evolving
+public interface TableCache<CACHEKEY extends CacheKey,
+    CACHEVALUE extends CacheValue> {
+
+  /**
+   * Return the value for the key if it is present, otherwise return null.
+   * @param cacheKey
+   * @return CACHEVALUE
+   */
+  CACHEVALUE get(CACHEKEY cacheKey);
+
+  /**
+   * Add an entry to the cache, if the key already exists it overrides.
+   * @param cacheKey
+   * @param value
+   */
+  void put(CACHEKEY cacheKey, CACHEVALUE value);
+
+  /**
+   * Removes all the entries from the cache which are having epoch value less
+   * than or equal to specified epoch value. For FullTable Cache this is a
+   * do-nothing operation.
+   * @param epoch
+   */
+  void cleanup(long epoch);
+
+  /**
+   * Return the size of the cache.
+   * @return size
+   */
+  int size();
+}
hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/package-info.java (new file, 18 lines)
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.utils.db.cache;
@@ -76,6 +76,7 @@ message SCMGetCertResponseProto {
   }
   required ResponseCode responseCode = 1;
   required string x509Certificate = 2; // Base64 encoded X509 certificate.
+  optional string x509CACertificate = 3; // Base64 encoded CA X509 certificate.
 }

@@ -54,7 +54,7 @@
   </property>
   <property>
     <name>dfs.container.chunk.write.sync</name>
-    <value>true</value>
+    <value>false</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>
     <description>Determines whether the chunk writes in the container happen as
       sync I/0 or buffered I/O operation.
@@ -540,14 +540,6 @@
       the address of the OM.
     </description>
   </property>
-  <property>
-    <name>ozone.om.group.rights</name>
-    <value>READ_WRITE</value>
-    <tag>OM, SECURITY</tag>
-    <description>
-      Default group permissions in Ozone OM.
-    </description>
-  </property>
   <property>
     <name>ozone.om.handler.count.key</name>
     <value>20</value>
@@ -640,14 +632,6 @@
       of buckets or keys inside each bucket a user can create.
     </description>
   </property>
-  <property>
-    <name>ozone.om.user.rights</name>
-    <value>READ_WRITE</value>
-    <tag>OM, SECURITY</tag>
-    <description>
-      Default user permissions used in OM.
-    </description>
-  </property>
   <property>
     <name>ozone.om.db.dirs</name>
    <value/>
@@ -1052,7 +1036,7 @@
   </property>
   <property>
     <name>ozone.scm.stale.node.interval</name>
-    <value>90s</value>
+    <value>5m</value>
     <tag>OZONE, MANAGEMENT</tag>
     <description>
       The interval for stale node flagging. Please
@@ -1291,7 +1275,7 @@
   </property>
   <property>
     <name>ozone.scm.pipeline.destroy.timeout</name>
-    <value>300s</value>
+    <value>66s</value>
     <tag>OZONE, SCM, PIPELINE</tag>
     <description>
       Once a pipeline is closed, SCM should wait for the above configured time
@ -26,10 +26,14 @@ import java.util.LinkedList;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
|
|
||||||
|
import com.google.common.base.Optional;
|
||||||
import org.apache.hadoop.hdfs.DFSUtil;
|
import org.apache.hadoop.hdfs.DFSUtil;
|
||||||
|
import org.apache.hadoop.test.GenericTestUtils;
|
||||||
import org.apache.hadoop.utils.db.Table.KeyValue;
|
import org.apache.hadoop.utils.db.Table.KeyValue;
|
||||||
|
|
||||||
import org.apache.commons.lang3.RandomStringUtils;
|
import org.apache.commons.lang3.RandomStringUtils;
|
||||||
|
import org.apache.hadoop.utils.db.cache.CacheKey;
|
||||||
|
import org.apache.hadoop.utils.db.cache.CacheValue;
|
||||||
import org.junit.After;
|
import org.junit.After;
|
||||||
import org.junit.Assert;
|
import org.junit.Assert;
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
|
@ -51,7 +55,7 @@ public class TestTypedRDBTableStore {
|
||||||
Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
|
Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
|
||||||
"First", "Second", "Third",
|
"First", "Second", "Third",
|
||||||
"Fourth", "Fifth",
|
"Fourth", "Fifth",
|
||||||
"Sixth");
|
"Sixth", "Seven");
|
||||||
@Rule
|
@Rule
|
||||||
public TemporaryFolder folder = new TemporaryFolder();
|
public TemporaryFolder folder = new TemporaryFolder();
|
||||||
private RDBStore rdbStore = null;
|
private RDBStore rdbStore = null;
|
||||||
|
@ -236,4 +240,80 @@ public class TestTypedRDBTableStore {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testTypedTableWithCache() throws Exception {
|
||||||
|
int iterCount = 10;
|
||||||
|
try (Table<String, String> testTable = createTypedTable(
|
||||||
|
"Seven")) {
|
||||||
|
|
||||||
|
for (int x = 0; x < iterCount; x++) {
|
||||||
|
String key = Integer.toString(x);
|
||||||
|
String value = Integer.toString(x);
|
||||||
|
testTable.addCacheEntry(new CacheKey<>(key),
|
||||||
|
new CacheValue<>(Optional.of(value),
|
||||||
|
x));
|
||||||
|
}
|
||||||
|
|
||||||
|
// As we have added to cache, so get should return value even if it
|
||||||
|
// does not exist in DB.
|
||||||
|
for (int x = 0; x < iterCount; x++) {
|
||||||
|
Assert.assertEquals(Integer.toString(1),
|
||||||
|
testTable.get(Integer.toString(1)));
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testTypedTableWithCacheWithFewDeletedOperationType()
|
||||||
|
throws Exception {
|
||||||
|
int iterCount = 10;
|
||||||
|
try (Table<String, String> testTable = createTypedTable(
|
||||||
|
"Seven")) {
|
||||||
|
|
||||||
|
for (int x = 0; x < iterCount; x++) {
|
||||||
|
String key = Integer.toString(x);
|
||||||
|
String value = Integer.toString(x);
|
||||||
|
if (x % 2 == 0) {
|
||||||
|
testTable.addCacheEntry(new CacheKey<>(key),
|
||||||
|
new CacheValue<>(Optional.of(value), x));
|
||||||
|
} else {
|
||||||
|
testTable.addCacheEntry(new CacheKey<>(key),
|
||||||
|
new CacheValue<>(Optional.absent(),
|
||||||
|
x));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// As we have added to cache, so get should return value even if it
|
||||||
|
// does not exist in DB.
|
||||||
|
for (int x = 0; x < iterCount; x++) {
|
||||||
|
if (x % 2 == 0) {
|
||||||
|
Assert.assertEquals(Integer.toString(x),
|
||||||
|
testTable.get(Integer.toString(x)));
|
||||||
|
} else {
|
||||||
|
Assert.assertNull(testTable.get(Integer.toString(x)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
testTable.cleanupCache(5);
|
||||||
|
|
||||||
|
GenericTestUtils.waitFor(() ->
|
||||||
|
((TypedTable<String, String>) testTable).getCache().size() == 4,
|
||||||
|
100, 5000);
|
||||||
|
|
||||||
|
|
||||||
|
//Check remaining values
|
||||||
|
for (int x = 6; x < iterCount; x++) {
|
||||||
|
if (x % 2 == 0) {
|
||||||
|
Assert.assertEquals(Integer.toString(x),
|
||||||
|
testTable.get(Integer.toString(x)));
|
||||||
|
} else {
|
||||||
|
Assert.assertNull(testTable.get(Integer.toString(x)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
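The two cache tests above pin down the lookup order a typed table is expected to follow: a cached CacheValue holding Optional.of(value) is served directly, a cached Optional.absent() acts as a delete marker, and only a cache miss falls through to RocksDB. The self-contained sketch below mirrors that behaviour with java.util.Optional and a plain map standing in for the DB; it is illustrative only and not the actual TypedTable implementation.

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

// Sketch of the cache-before-DB lookup order the tests above assert.
public class CachedTableSketch {
  private final Map<String, Optional<String>> cache = new HashMap<>();
  private final Map<String, String> store = new HashMap<>();   // stands in for RocksDB

  public void addCacheEntry(String key, Optional<String> value) {
    cache.put(key, value);
  }

  public String get(String key) {
    Optional<String> cached = cache.get(key);
    if (cached != null) {
      // Present value: serve it from the cache. Empty: a pending delete, so
      // the key is reported as missing even if the DB still has it.
      return cached.orElse(null);
    }
    return store.get(key);   // cache miss: fall through to the DB
  }
}

In testTypedTableWithCacheWithFewDeletedOperationType the odd keys are exactly the Optional.absent() case, which is why get() is expected to return null for them even before the delete reaches the DB.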
hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java (new file, 142 lines)
|
@ -0,0 +1,142 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.utils.db.cache;
|
||||||
|
|
||||||
|
import java.util.concurrent.CompletableFuture;
|
||||||
|
|
||||||
|
import com.google.common.base.Optional;
|
||||||
|
import org.apache.hadoop.test.GenericTestUtils;
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import static org.junit.Assert.fail;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Class tests partial table cache.
|
||||||
|
*/
|
||||||
|
public class TestPartialTableCache {
|
||||||
|
private TableCache<CacheKey<String>, CacheValue<String>> tableCache;
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void create() {
|
||||||
|
tableCache = new PartialTableCache<>();
|
||||||
|
}
|
||||||
|
@Test
|
||||||
|
public void testPartialTableCache() {
|
||||||
|
|
||||||
|
|
||||||
|
for (int i = 0; i< 10; i++) {
|
||||||
|
tableCache.put(new CacheKey<>(Integer.toString(i)),
|
||||||
|
new CacheValue<>(Optional.of(Integer.toString(i)), i));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
for (int i=0; i < 10; i++) {
|
||||||
|
Assert.assertEquals(Integer.toString(i),
|
||||||
|
tableCache.get(new CacheKey<>(Integer.toString(i))).getValue());
|
||||||
|
}
|
||||||
|
|
||||||
|
// On a full table cache, if someone calls cleanup it is a no-op.
|
||||||
|
tableCache.cleanup(4);
|
||||||
|
|
||||||
|
for (int i=5; i < 10; i++) {
|
||||||
|
Assert.assertEquals(Integer.toString(i),
|
||||||
|
tableCache.get(new CacheKey<>(Integer.toString(i))).getValue());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testPartialTableCacheParallel() throws Exception {
|
||||||
|
|
||||||
|
int totalCount = 0;
|
||||||
|
CompletableFuture<Integer> future =
|
||||||
|
CompletableFuture.supplyAsync(() -> {
|
||||||
|
try {
|
||||||
|
return writeToCache(10, 1, 0);
|
||||||
|
} catch (InterruptedException ex) {
|
||||||
|
fail("writeToCache got interrupt exception");
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
});
|
||||||
|
int value = future.get();
|
||||||
|
Assert.assertEquals(10, value);
|
||||||
|
|
||||||
|
totalCount += value;
|
||||||
|
|
||||||
|
future =
|
||||||
|
CompletableFuture.supplyAsync(() -> {
|
||||||
|
try {
|
||||||
|
return writeToCache(10, 11, 100);
|
||||||
|
} catch (InterruptedException ex) {
|
||||||
|
fail("writeToCache got interrupt exception");
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
});
|
||||||
|
|
||||||
|
// Check we have first 10 entries in cache.
|
||||||
|
for (int i=1; i <= 10; i++) {
|
||||||
|
Assert.assertEquals(Integer.toString(i),
|
||||||
|
tableCache.get(new CacheKey<>(Integer.toString(i))).getValue());
|
||||||
|
}
|
||||||
|
|
||||||
|
int deleted = 5;
|
||||||
|
// cleanup the first 5 entries
|
||||||
|
tableCache.cleanup(deleted);
|
||||||
|
|
||||||
|
value = future.get();
|
||||||
|
Assert.assertEquals(10, value);
|
||||||
|
|
||||||
|
totalCount += value;
|
||||||
|
|
||||||
|
// We should have totalCount - deleted entries in the cache.
|
||||||
|
final int tc = totalCount;
|
||||||
|
GenericTestUtils.waitFor(() -> (tc - deleted == tableCache.size()), 100,
|
||||||
|
5000);
|
||||||
|
|
||||||
|
// Check if we have remaining entries.
|
||||||
|
for (int i=6; i <= totalCount; i++) {
|
||||||
|
Assert.assertEquals(Integer.toString(i),
|
||||||
|
tableCache.get(new CacheKey<>(Integer.toString(i))).getValue());
|
||||||
|
}
|
||||||
|
|
||||||
|
tableCache.cleanup(10);
|
||||||
|
|
||||||
|
tableCache.cleanup(totalCount);
|
||||||
|
|
||||||
|
// Cleaned up all entries, so cache size should be zero.
|
||||||
|
GenericTestUtils.waitFor(() -> (0 == tableCache.size()), 100,
|
||||||
|
5000);
|
||||||
|
}
|
||||||
|
|
||||||
|
private int writeToCache(int count, int startVal, long sleep)
|
||||||
|
throws InterruptedException {
|
||||||
|
int counter = 1;
|
||||||
|
while (counter <= count){
|
||||||
|
tableCache.put(new CacheKey<>(Integer.toString(startVal)),
|
||||||
|
new CacheValue<>(Optional.of(Integer.toString(startVal)), startVal));
|
||||||
|
startVal++;
|
||||||
|
counter++;
|
||||||
|
Thread.sleep(sleep);
|
||||||
|
}
|
||||||
|
return count;
|
||||||
|
}
|
||||||
|
}
|
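Both tests above assume an epoch-based eviction contract: every cached value carries the epoch at which it was added, and cleanup(epoch) asynchronously drops all entries whose epoch is less than or equal to the given one while newer entries survive, which is why the assertions go through GenericTestUtils.waitFor. A minimal self-contained sketch of that contract follows; it is not the actual PartialTableCache code.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Epoch-based cache sketch: cleanup(n) evicts everything added at epoch <= n.
public class EpochCacheSketch<K, V> {

  private static final class Entry<V> {
    private final V value;
    private final long epoch;

    Entry(V value, long epoch) {
      this.value = value;
      this.epoch = epoch;
    }
  }

  private final Map<K, Entry<V>> cache = new ConcurrentHashMap<>();
  private final ExecutorService cleaner = Executors.newSingleThreadExecutor();

  public void put(K key, V value, long epoch) {
    cache.put(key, new Entry<>(value, epoch));
  }

  public V get(K key) {
    Entry<V> entry = cache.get(key);
    return entry == null ? null : entry.value;
  }

  public int size() {
    return cache.size();
  }

  // Asynchronous, like the real cache: callers poll size() to observe it.
  public void cleanup(long epoch) {
    cleaner.submit(() ->
        cache.entrySet().removeIf(e -> e.getValue().epoch <= epoch));
  }
}

With entries added at epochs 1..10, cleanup(5) removes the first five, which is exactly the tc - deleted == tableCache.size() condition testPartialTableCacheParallel waits for.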
hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/package-info.java (new file, 22 lines)
|
@ -0,0 +1,22 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* Tests for the DB Cache Utilities.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.utils.db.cache;
|
|
@ -253,25 +253,15 @@ public final class HddsServerUtil {
|
||||||
//
|
//
|
||||||
// Here we check that staleNodeInterval is at least five times more than the
|
// Here we check that staleNodeInterval is at least five times more than the
|
||||||
// frequency at which the accounting thread is going to run.
|
// frequency at which the accounting thread is going to run.
|
||||||
try {
|
staleNodeIntervalMs = sanitizeUserArgs(OZONE_SCM_STALENODE_INTERVAL,
|
||||||
sanitizeUserArgs(staleNodeIntervalMs, heartbeatThreadFrequencyMs,
|
staleNodeIntervalMs, OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
|
||||||
5, 1000);
|
heartbeatThreadFrequencyMs, 5, 1000);
|
||||||
} catch (IllegalArgumentException ex) {
|
|
||||||
LOG.error("Stale Node Interval is cannot be honored due to " +
|
|
||||||
"mis-configured {}. ex: {}",
|
|
||||||
OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, ex);
|
|
||||||
throw ex;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure that stale node value is greater than configured value that
|
// Make sure that stale node value is greater than configured value that
|
||||||
// datanodes are going to send HBs.
|
// datanodes are going to send HBs.
|
||||||
try {
|
staleNodeIntervalMs = sanitizeUserArgs(OZONE_SCM_STALENODE_INTERVAL,
|
||||||
sanitizeUserArgs(staleNodeIntervalMs, heartbeatIntervalMs, 3, 1000);
|
staleNodeIntervalMs, HDDS_HEARTBEAT_INTERVAL, heartbeatIntervalMs, 3,
|
||||||
} catch (IllegalArgumentException ex) {
|
1000);
|
||||||
LOG.error("Stale Node Interval MS is cannot be honored due to " +
|
|
||||||
"mis-configured {}. ex: {}", HDDS_HEARTBEAT_INTERVAL, ex);
|
|
||||||
throw ex;
|
|
||||||
}
|
|
||||||
return staleNodeIntervalMs;
|
return staleNodeIntervalMs;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
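This hunk, and the dead-node hunk that follows, replaces the hand-rolled try/catch-and-log blocks with a single sanitizeUserArgs call that also receives the offending configuration key, so the range check and the error message live in one place. A rough sketch of what such a helper could look like, inferred only from the call sites here; the real HddsServerUtil signature and message text may differ.

// Hypothetical sketch of a sanitizeUserArgs-style range check: the value must
// lie between minFactor and maxFactor times the base value, otherwise an
// IllegalArgumentException naming both configuration keys is thrown.
final class ConfigSanitySketch {
  static long sanitizeUserArgs(String valueKey, long value,
      String baseKey, long baseValue, long minFactor, long maxFactor) {
    long min = baseValue * minFactor;
    long max = baseValue * maxFactor;
    if (value < min || value > max) {
      throw new IllegalArgumentException(String.format(
          "%s = %d ms is out of range; it must be between %d and %d times "
              + "%s (= %d ms).",
          valueKey, value, minFactor, maxFactor, baseKey, baseValue));
    }
    return value;
  }
}

The factors in the calls encode the invariants spelled out in the comments: the stale-node interval must be 5 to 1000 times the heartbeat processing interval and at least 3 times the heartbeat interval itself, while the dead-node interval must be 2 to 1000 times the stale-node interval.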
@ -290,16 +280,10 @@ public final class HddsServerUtil {
|
||||||
OZONE_SCM_DEADNODE_INTERVAL_DEFAULT,
|
OZONE_SCM_DEADNODE_INTERVAL_DEFAULT,
|
||||||
TimeUnit.MILLISECONDS);
|
TimeUnit.MILLISECONDS);
|
||||||
|
|
||||||
try {
|
|
||||||
// Make sure that dead nodes Ms is at least twice the time for staleNodes
|
// Make sure that dead nodes Ms is at least twice the time for staleNodes
|
||||||
// with a max of 1000 times the staleNodes.
|
// with a max of 1000 times the staleNodes.
|
||||||
sanitizeUserArgs(deadNodeIntervalMs, staleNodeIntervalMs, 2, 1000);
|
return sanitizeUserArgs(OZONE_SCM_DEADNODE_INTERVAL, deadNodeIntervalMs,
|
||||||
} catch (IllegalArgumentException ex) {
|
OZONE_SCM_STALENODE_INTERVAL, staleNodeIntervalMs, 2, 1000);
|
||||||
LOG.error("Dead Node Interval MS is cannot be honored due to " +
|
|
||||||
"mis-configured {}. ex: {}", OZONE_SCM_STALENODE_INTERVAL, ex);
|
|
||||||
throw ex;
|
|
||||||
}
|
|
||||||
return deadNodeIntervalMs;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@ -26,7 +26,8 @@ import org.apache.hadoop.hdds.cli.GenericCli;
|
||||||
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
|
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
|
||||||
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
|
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
|
||||||
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
|
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
|
||||||
import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
|
import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
|
||||||
|
import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
|
||||||
import org.apache.hadoop.hdds.scm.HddsServerUtil;
|
import org.apache.hadoop.hdds.scm.HddsServerUtil;
|
||||||
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
|
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
|
||||||
import org.apache.hadoop.hdds.security.x509.SecurityConfig;
|
import org.apache.hadoop.hdds.security.x509.SecurityConfig;
|
||||||
|
@ -271,16 +272,25 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin {
|
||||||
try {
|
try {
|
||||||
PKCS10CertificationRequest csr = getCSR(config);
|
PKCS10CertificationRequest csr = getCSR(config);
|
||||||
// TODO: For SCM CA we should fetch certificate from multiple SCMs.
|
// TODO: For SCM CA we should fetch certificate from multiple SCMs.
|
||||||
SCMSecurityProtocol secureScmClient =
|
SCMSecurityProtocolClientSideTranslatorPB secureScmClient =
|
||||||
HddsUtils.getScmSecurityClient(config,
|
HddsUtils.getScmSecurityClient(config,
|
||||||
HddsUtils.getScmAddressForSecurityProtocol(config));
|
HddsUtils.getScmAddressForSecurityProtocol(config));
|
||||||
|
SCMGetCertResponseProto response = secureScmClient.
|
||||||
String pemEncodedCert = secureScmClient.getDataNodeCertificate(
|
getDataNodeCertificateChain(datanodeDetails.getProtoBufMessage(),
|
||||||
datanodeDetails.getProtoBufMessage(), getEncodedString(csr));
|
getEncodedString(csr));
|
||||||
|
// Persist certificates.
|
||||||
|
if(response.hasX509CACertificate()) {
|
||||||
|
String pemEncodedCert = response.getX509Certificate();
|
||||||
dnCertClient.storeCertificate(pemEncodedCert, true);
|
dnCertClient.storeCertificate(pemEncodedCert, true);
|
||||||
|
dnCertClient.storeCertificate(response.getX509CACertificate(), true,
|
||||||
|
true);
|
||||||
datanodeDetails.setCertSerialId(getX509Certificate(pemEncodedCert).
|
datanodeDetails.setCertSerialId(getX509Certificate(pemEncodedCert).
|
||||||
getSerialNumber().toString());
|
getSerialNumber().toString());
|
||||||
persistDatanodeDetails(datanodeDetails);
|
persistDatanodeDetails(datanodeDetails);
|
||||||
|
} else {
|
||||||
|
throw new RuntimeException("Unable to retrieve datanode certificate " +
|
||||||
|
"chain");
|
||||||
|
}
|
||||||
} catch (IOException | CertificateException e) {
|
} catch (IOException | CertificateException e) {
|
||||||
LOG.error("Error while storing SCM signed certificate.", e);
|
LOG.error("Error while storing SCM signed certificate.", e);
|
||||||
throw new RuntimeException(e);
|
throw new RuntimeException(e);
|
||||||
|
|
|
@ -24,6 +24,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Res
|
||||||
import static org.apache.hadoop.ozone.container.common.impl.ContainerData.CHARSET_ENCODING;
|
import static org.apache.hadoop.ozone.container.common.impl.ContainerData.CHARSET_ENCODING;
|
||||||
|
|
||||||
import java.io.File;
|
import java.io.File;
|
||||||
|
import java.io.FileInputStream;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.nio.file.Paths;
|
import java.nio.file.Paths;
|
||||||
import java.security.MessageDigest;
|
import java.security.MessageDigest;
|
||||||
|
@ -35,6 +36,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
|
||||||
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
|
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
|
||||||
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
|
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
|
||||||
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
|
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
|
||||||
|
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
||||||
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
|
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
|
||||||
import org.apache.hadoop.ozone.OzoneConsts;
|
import org.apache.hadoop.ozone.OzoneConsts;
|
||||||
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
|
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
|
||||||
|
@ -51,6 +53,9 @@ import com.google.common.base.Preconditions;
|
||||||
*/
|
*/
|
||||||
public final class ContainerUtils {
|
public final class ContainerUtils {
|
||||||
|
|
||||||
|
private static final Logger LOG =
|
||||||
|
LoggerFactory.getLogger(ContainerUtils.class);
|
||||||
|
|
||||||
private ContainerUtils() {
|
private ContainerUtils() {
|
||||||
//never constructed.
|
//never constructed.
|
||||||
}
|
}
|
||||||
|
@ -198,7 +203,7 @@ public final class ContainerUtils {
|
||||||
throw new IOException("Unable to overwrite the datanode ID file.");
|
throw new IOException("Unable to overwrite the datanode ID file.");
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if(!path.getParentFile().exists() &&
|
if (!path.getParentFile().exists() &&
|
||||||
!path.getParentFile().mkdirs()) {
|
!path.getParentFile().mkdirs()) {
|
||||||
throw new IOException("Unable to create datanode ID directories.");
|
throw new IOException("Unable to create datanode ID directories.");
|
||||||
}
|
}
|
||||||
|
@ -221,8 +226,16 @@ public final class ContainerUtils {
|
||||||
try {
|
try {
|
||||||
return DatanodeIdYaml.readDatanodeIdFile(path);
|
return DatanodeIdYaml.readDatanodeIdFile(path);
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
|
LOG.warn("Error loading DatanodeDetails yaml from " +
|
||||||
|
path.getAbsolutePath(), e);
|
||||||
|
// Try to load as protobuf before giving up
|
||||||
|
try (FileInputStream in = new FileInputStream(path)) {
|
||||||
|
return DatanodeDetails.getFromProtoBuf(
|
||||||
|
HddsProtos.DatanodeDetailsProto.parseFrom(in));
|
||||||
|
} catch (IOException io) {
|
||||||
throw new IOException("Failed to parse DatanodeDetails from "
|
throw new IOException("Failed to parse DatanodeDetails from "
|
||||||
+ path.getAbsolutePath(), e);
|
+ path.getAbsolutePath(), io);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
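The new catch block turns a hard failure into a format fallback: when the datanode ID file no longer parses as YAML, it is re-read as the legacy protobuf encoding, and only the secondary failure is wrapped and rethrown. The same read-with-fallback shape, reduced to a self-contained sketch; the Reader interface below is a placeholder, not the real DatanodeIdYaml or HddsProtos API.

import java.io.File;
import java.io.IOException;

// Generic read-with-fallback sketch: try the new format first, fall back to
// the old one, and surface only the fallback's failure when both fail.
final class FallbackReaderSketch {

  interface Reader<T> {   // placeholder for the YAML and protobuf readers
    T read(File path) throws IOException;
  }

  static <T> T read(File path, Reader<T> primary, Reader<T> fallback)
      throws IOException {
    try {
      return primary.read(path);
    } catch (IOException primaryFailure) {
      // Log-and-fall-back, as the DatanodeDetails loader above does.
      try {
        return fallback.read(path);
      } catch (IOException fallbackFailure) {
        throw new IOException(
            "Failed to parse " + path.getAbsolutePath(), fallbackFailure);
      }
    }
  }
}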
@ -48,7 +48,7 @@ import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
|
||||||
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
|
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
|
||||||
import org.apache.hadoop.util.Time;
|
import org.apache.hadoop.util.Time;
|
||||||
import org.apache.hadoop.utils.BatchOperation;
|
import org.apache.hadoop.utils.BatchOperation;
|
||||||
import org.apache.hadoop.utils.MetadataStore;
|
import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
@ -198,18 +198,19 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
|
||||||
}
|
}
|
||||||
|
|
||||||
int newDeletionBlocks = 0;
|
int newDeletionBlocks = 0;
|
||||||
MetadataStore containerDB = BlockUtils.getDB(containerData, conf);
|
try(ReferenceCountedDB containerDB =
|
||||||
|
BlockUtils.getDB(containerData, conf)) {
|
||||||
for (Long blk : delTX.getLocalIDList()) {
|
for (Long blk : delTX.getLocalIDList()) {
|
||||||
BatchOperation batch = new BatchOperation();
|
BatchOperation batch = new BatchOperation();
|
||||||
byte[] blkBytes = Longs.toByteArray(blk);
|
byte[] blkBytes = Longs.toByteArray(blk);
|
||||||
byte[] blkInfo = containerDB.get(blkBytes);
|
byte[] blkInfo = containerDB.getStore().get(blkBytes);
|
||||||
if (blkInfo != null) {
|
if (blkInfo != null) {
|
||||||
byte[] deletingKeyBytes =
|
byte[] deletingKeyBytes =
|
||||||
DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + blk);
|
DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + blk);
|
||||||
byte[] deletedKeyBytes =
|
byte[] deletedKeyBytes =
|
||||||
DFSUtil.string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blk);
|
DFSUtil.string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blk);
|
||||||
if (containerDB.get(deletingKeyBytes) != null
|
if (containerDB.getStore().get(deletingKeyBytes) != null
|
||||||
|| containerDB.get(deletedKeyBytes) != null) {
|
|| containerDB.getStore().get(deletedKeyBytes) != null) {
|
||||||
LOG.debug(String.format(
|
LOG.debug(String.format(
|
||||||
"Ignoring delete for block %d in container %d."
|
"Ignoring delete for block %d in container %d."
|
||||||
+ " Entry already added.", blk, containerId));
|
+ " Entry already added.", blk, containerId));
|
||||||
|
@ -220,7 +221,7 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
|
||||||
batch.put(deletingKeyBytes, blkInfo);
|
batch.put(deletingKeyBytes, blkInfo);
|
||||||
batch.delete(blkBytes);
|
batch.delete(blkBytes);
|
||||||
try {
|
try {
|
||||||
containerDB.writeBatch(batch);
|
containerDB.getStore().writeBatch(batch);
|
||||||
newDeletionBlocks++;
|
newDeletionBlocks++;
|
||||||
LOG.debug("Transited Block {} to DELETING state in container {}",
|
LOG.debug("Transited Block {} to DELETING state in container {}",
|
||||||
blk, containerId);
|
blk, containerId);
|
||||||
|
@ -237,7 +238,7 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
containerDB
|
containerDB.getStore()
|
||||||
.put(DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX),
|
.put(DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX),
|
||||||
Longs.toByteArray(delTX.getTxID()));
|
Longs.toByteArray(delTX.getTxID()));
|
||||||
containerData
|
containerData
|
||||||
|
@ -245,6 +246,7 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
|
||||||
// update pending deletion blocks count in in-memory container status
|
// update pending deletion blocks count in in-memory container status
|
||||||
containerData.incrPendingDeletionBlocks(newDeletionBlocks);
|
containerData.incrPendingDeletionBlocks(newDeletionBlocks);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public SCMCommandProto.Type getCommandType() {
|
public SCMCommandProto.Type getCommandType() {
|
||||||
|
|
|
@ -28,8 +28,11 @@ import org.apache.hadoop.utils.MetadataStoreBuilder;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
import java.io.Closeable;
|
||||||
import java.io.File;
|
import java.io.File;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
import java.util.concurrent.atomic.AtomicBoolean;
|
||||||
|
import java.util.concurrent.atomic.AtomicInteger;
|
||||||
import java.util.concurrent.locks.Lock;
|
import java.util.concurrent.locks.Lock;
|
||||||
import java.util.concurrent.locks.ReentrantLock;
|
import java.util.concurrent.locks.ReentrantLock;
|
||||||
|
|
||||||
|
@ -92,8 +95,8 @@ public final class ContainerCache extends LRUMap {
|
||||||
MapIterator iterator = cache.mapIterator();
|
MapIterator iterator = cache.mapIterator();
|
||||||
while (iterator.hasNext()) {
|
while (iterator.hasNext()) {
|
||||||
iterator.next();
|
iterator.next();
|
||||||
MetadataStore db = (MetadataStore) iterator.getValue();
|
ReferenceCountedDB db = (ReferenceCountedDB) iterator.getValue();
|
||||||
closeDB((String)iterator.getKey(), db);
|
db.setEvicted(true);
|
||||||
}
|
}
|
||||||
// reset the cache
|
// reset the cache
|
||||||
cache.clear();
|
cache.clear();
|
||||||
|
@ -107,11 +110,11 @@ public final class ContainerCache extends LRUMap {
|
||||||
*/
|
*/
|
||||||
@Override
|
@Override
|
||||||
protected boolean removeLRU(LinkEntry entry) {
|
protected boolean removeLRU(LinkEntry entry) {
|
||||||
MetadataStore db = (MetadataStore) entry.getValue();
|
ReferenceCountedDB db = (ReferenceCountedDB) entry.getValue();
|
||||||
String dbFile = (String)entry.getKey();
|
String dbFile = (String)entry.getKey();
|
||||||
lock.lock();
|
lock.lock();
|
||||||
try {
|
try {
|
||||||
closeDB(dbFile, db);
|
db.setEvicted(false);
|
||||||
return true;
|
return true;
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
LOG.error("Eviction for db:{} failed", dbFile, e);
|
LOG.error("Eviction for db:{} failed", dbFile, e);
|
||||||
|
@ -128,26 +131,30 @@ public final class ContainerCache extends LRUMap {
|
||||||
* @param containerDBType - DB type of the container.
|
* @param containerDBType - DB type of the container.
|
||||||
* @param containerDBPath - DB path of the container.
|
* @param containerDBPath - DB path of the container.
|
||||||
* @param conf - Hadoop Configuration.
|
* @param conf - Hadoop Configuration.
|
||||||
* @return MetadataStore.
|
* @return ReferenceCountedDB.
|
||||||
*/
|
*/
|
||||||
public MetadataStore getDB(long containerID, String containerDBType,
|
public ReferenceCountedDB getDB(long containerID, String containerDBType,
|
||||||
String containerDBPath, Configuration conf)
|
String containerDBPath, Configuration conf)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
Preconditions.checkState(containerID >= 0,
|
Preconditions.checkState(containerID >= 0,
|
||||||
"Container ID cannot be negative.");
|
"Container ID cannot be negative.");
|
||||||
lock.lock();
|
lock.lock();
|
||||||
try {
|
try {
|
||||||
MetadataStore db = (MetadataStore) this.get(containerDBPath);
|
ReferenceCountedDB db = (ReferenceCountedDB) this.get(containerDBPath);
|
||||||
|
|
||||||
if (db == null) {
|
if (db == null) {
|
||||||
db = MetadataStoreBuilder.newBuilder()
|
MetadataStore metadataStore =
|
||||||
|
MetadataStoreBuilder.newBuilder()
|
||||||
.setDbFile(new File(containerDBPath))
|
.setDbFile(new File(containerDBPath))
|
||||||
.setCreateIfMissing(false)
|
.setCreateIfMissing(false)
|
||||||
.setConf(conf)
|
.setConf(conf)
|
||||||
.setDBType(containerDBType)
|
.setDBType(containerDBType)
|
||||||
.build();
|
.build();
|
||||||
|
db = new ReferenceCountedDB(metadataStore, containerDBPath);
|
||||||
this.put(containerDBPath, db);
|
this.put(containerDBPath, db);
|
||||||
}
|
}
|
||||||
|
// increment the reference before returning the object
|
||||||
|
db.incrementReference();
|
||||||
return db;
|
return db;
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
LOG.error("Error opening DB. Container:{} ContainerPath:{}",
|
LOG.error("Error opening DB. Container:{} ContainerPath:{}",
|
||||||
|
@ -161,16 +168,70 @@ public final class ContainerCache extends LRUMap {
|
||||||
/**
|
/**
|
||||||
* Remove a DB handler from cache.
|
* Remove a DB handler from cache.
|
||||||
*
|
*
|
||||||
* @param containerPath - path of the container db file.
|
* @param containerDBPath - path of the container db file.
|
||||||
*/
|
*/
|
||||||
public void removeDB(String containerPath) {
|
public void removeDB(String containerDBPath) {
|
||||||
lock.lock();
|
lock.lock();
|
||||||
try {
|
try {
|
||||||
MetadataStore db = (MetadataStore)this.get(containerPath);
|
ReferenceCountedDB db = (ReferenceCountedDB)this.get(containerDBPath);
|
||||||
closeDB(containerPath, db);
|
if (db != null) {
|
||||||
this.remove(containerPath);
|
// marking it as evicted will close the db as well.
|
||||||
|
db.setEvicted(true);
|
||||||
|
}
|
||||||
|
this.remove(containerDBPath);
|
||||||
} finally {
|
} finally {
|
||||||
lock.unlock();
|
lock.unlock();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Class to implement reference counting over instances handed by Container
|
||||||
|
* Cache.
|
||||||
|
*/
|
||||||
|
public class ReferenceCountedDB implements Closeable {
|
||||||
|
private final AtomicInteger referenceCount;
|
||||||
|
private final AtomicBoolean isEvicted;
|
||||||
|
private final MetadataStore store;
|
||||||
|
private final String containerDBPath;
|
||||||
|
|
||||||
|
public ReferenceCountedDB(MetadataStore store, String containerDBPath) {
|
||||||
|
this.referenceCount = new AtomicInteger(0);
|
||||||
|
this.isEvicted = new AtomicBoolean(false);
|
||||||
|
this.store = store;
|
||||||
|
this.containerDBPath = containerDBPath;
|
||||||
|
}
|
||||||
|
|
||||||
|
private void incrementReference() {
|
||||||
|
this.referenceCount.incrementAndGet();
|
||||||
|
}
|
||||||
|
|
||||||
|
private void decrementReference() {
|
||||||
|
this.referenceCount.decrementAndGet();
|
||||||
|
cleanup();
|
||||||
|
}
|
||||||
|
|
||||||
|
private void setEvicted(boolean checkNoReferences) {
|
||||||
|
Preconditions.checkState(!checkNoReferences ||
|
||||||
|
(referenceCount.get() == 0),
|
||||||
|
"checkNoReferences:%b, referencount:%d",
|
||||||
|
checkNoReferences, referenceCount.get());
|
||||||
|
isEvicted.set(true);
|
||||||
|
cleanup();
|
||||||
|
}
|
||||||
|
|
||||||
|
private void cleanup() {
|
||||||
|
if (referenceCount.get() == 0 && isEvicted.get() && store != null) {
|
||||||
|
closeDB(containerDBPath, store);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public MetadataStore getStore() {
|
||||||
|
return store;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void close() {
|
||||||
|
decrementReference();
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
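With the handle now reference counted, every call site updated in this change follows the same shape: acquire through BlockUtils.getDB (which increments the count), do all reads and writes through getStore(), and let try-with-resources call close() to drop the count. A condensed usage sketch of that pattern, where containerData, conf and blockKey stand for whatever the caller already has:

// Acquire, use, and release a reference-counted DB handle. The underlying
// MetadataStore is only closed once the handle has been evicted from the
// ContainerCache and the last reference has been released.
try (ReferenceCountedDB db = BlockUtils.getDB(containerData, conf)) {
  byte[] blockInfo = db.getStore().get(blockKey);   // all I/O goes via getStore()
  // ... use blockInfo ...
}                                                   // close() decrements the count

The design point behind setEvicted(): LRU eviction, cache clear and removeDB no longer close the store eagerly; they only mark the handle evicted, and cleanup() closes it lazily once the reference count reaches zero, so a concurrent reader can never have its DB closed out from under it.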
@ -39,7 +39,8 @@ public final class VolumeInfo {
|
||||||
private final StorageType storageType;
|
private final StorageType storageType;
|
||||||
|
|
||||||
// Space usage calculator
|
// Space usage calculator
|
||||||
private VolumeUsage usage;
|
private final VolumeUsage usage;
|
||||||
|
|
||||||
// Capacity configured. This is useful when we want to
|
// Capacity configured. This is useful when we want to
|
||||||
// limit the visible capacity for tests. If negative, then we just
|
// limit the visible capacity for tests. If negative, then we just
|
||||||
// query from the filesystem.
|
// query from the filesystem.
|
||||||
|
@ -97,37 +98,22 @@ public final class VolumeInfo {
|
||||||
|
|
||||||
public long getCapacity() throws IOException {
|
public long getCapacity() throws IOException {
|
||||||
if (configuredCapacity < 0) {
|
if (configuredCapacity < 0) {
|
||||||
if (usage == null) {
|
|
||||||
throw new IOException("Volume Usage thread is not running. This error" +
|
|
||||||
" is usually seen during DataNode shutdown.");
|
|
||||||
}
|
|
||||||
return usage.getCapacity();
|
return usage.getCapacity();
|
||||||
}
|
}
|
||||||
return configuredCapacity;
|
return configuredCapacity;
|
||||||
}
|
}
|
||||||
|
|
||||||
public long getAvailable() throws IOException {
|
public long getAvailable() throws IOException {
|
||||||
if (usage == null) {
|
|
||||||
throw new IOException("Volume Usage thread is not running. This error " +
|
|
||||||
"is usually seen during DataNode shutdown.");
|
|
||||||
}
|
|
||||||
return usage.getAvailable();
|
return usage.getAvailable();
|
||||||
}
|
}
|
||||||
|
|
||||||
public long getScmUsed() throws IOException {
|
public long getScmUsed() throws IOException {
|
||||||
if (usage == null) {
|
|
||||||
throw new IOException("Volume Usage thread is not running. This error " +
|
|
||||||
"is usually seen during DataNode shutdown.");
|
|
||||||
}
|
|
||||||
return usage.getScmUsed();
|
return usage.getScmUsed();
|
||||||
}
|
}
|
||||||
|
|
||||||
protected void shutdownUsageThread() {
|
protected void shutdownUsageThread() {
|
||||||
if (usage != null) {
|
|
||||||
usage.shutdown();
|
usage.shutdown();
|
||||||
}
|
}
|
||||||
usage = null;
|
|
||||||
}
|
|
||||||
|
|
||||||
public String getRootDir() {
|
public String getRootDir() {
|
||||||
return this.rootDir;
|
return this.rootDir;
|
||||||
|
|
|
@ -19,6 +19,7 @@
|
||||||
package org.apache.hadoop.ozone.container.common.volume;
|
package org.apache.hadoop.ozone.container.common.volume;
|
||||||
|
|
||||||
import com.google.common.annotations.VisibleForTesting;
|
import com.google.common.annotations.VisibleForTesting;
|
||||||
|
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
import org.apache.hadoop.fs.CachingGetSpaceUsed;
|
import org.apache.hadoop.fs.CachingGetSpaceUsed;
|
||||||
import org.apache.hadoop.fs.DF;
|
import org.apache.hadoop.fs.DF;
|
||||||
|
@ -35,6 +36,7 @@ import java.io.IOException;
|
||||||
import java.io.OutputStreamWriter;
|
import java.io.OutputStreamWriter;
|
||||||
import java.nio.charset.StandardCharsets;
|
import java.nio.charset.StandardCharsets;
|
||||||
import java.util.Scanner;
|
import java.util.Scanner;
|
||||||
|
import java.util.concurrent.atomic.AtomicReference;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Class that wraps the space df of the Datanode Volumes used by SCM
|
* Class that wraps the space df of the Datanode Volumes used by SCM
|
||||||
|
@ -46,7 +48,8 @@ public class VolumeUsage {
|
||||||
private final File rootDir;
|
private final File rootDir;
|
||||||
private final DF df;
|
private final DF df;
|
||||||
private final File scmUsedFile;
|
private final File scmUsedFile;
|
||||||
private GetSpaceUsed scmUsage;
|
private AtomicReference<GetSpaceUsed> scmUsage;
|
||||||
|
private boolean shutdownComplete;
|
||||||
|
|
||||||
private static final String DU_CACHE_FILE = "scmUsed";
|
private static final String DU_CACHE_FILE = "scmUsed";
|
||||||
private volatile boolean scmUsedSaved = false;
|
private volatile boolean scmUsedSaved = false;
|
||||||
|
@ -65,10 +68,11 @@ public class VolumeUsage {
|
||||||
|
|
||||||
void startScmUsageThread(Configuration conf) throws IOException {
|
void startScmUsageThread(Configuration conf) throws IOException {
|
||||||
// get SCM specific df
|
// get SCM specific df
|
||||||
this.scmUsage = new CachingGetSpaceUsed.Builder().setPath(rootDir)
|
scmUsage = new AtomicReference<>(
|
||||||
|
new CachingGetSpaceUsed.Builder().setPath(rootDir)
|
||||||
.setConf(conf)
|
.setConf(conf)
|
||||||
.setInitialUsed(loadScmUsed())
|
.setInitialUsed(loadScmUsed())
|
||||||
.build();
|
.build());
|
||||||
}
|
}
|
||||||
|
|
||||||
long getCapacity() {
|
long getCapacity() {
|
||||||
|
@ -89,14 +93,18 @@ public class VolumeUsage {
|
||||||
}
|
}
|
||||||
|
|
||||||
long getScmUsed() throws IOException{
|
long getScmUsed() throws IOException{
|
||||||
return scmUsage.getUsed();
|
return scmUsage.get().getUsed();
|
||||||
}
|
}
|
||||||
|
|
||||||
public void shutdown() {
|
public synchronized void shutdown() {
|
||||||
|
if (!shutdownComplete) {
|
||||||
saveScmUsed();
|
saveScmUsed();
|
||||||
|
|
||||||
if (scmUsage instanceof CachingGetSpaceUsed) {
|
if (scmUsage.get() instanceof CachingGetSpaceUsed) {
|
||||||
IOUtils.cleanupWithLogger(null, ((CachingGetSpaceUsed) scmUsage));
|
IOUtils.cleanupWithLogger(
|
||||||
|
null, ((CachingGetSpaceUsed) scmUsage.get()));
|
||||||
|
}
|
||||||
|
shutdownComplete = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -175,7 +183,11 @@ public class VolumeUsage {
|
||||||
* Only for testing. Do not use otherwise.
|
* Only for testing. Do not use otherwise.
|
||||||
*/
|
*/
|
||||||
@VisibleForTesting
|
@VisibleForTesting
|
||||||
|
@SuppressFBWarnings(
|
||||||
|
value = "IS2_INCONSISTENT_SYNC",
|
||||||
|
justification = "scmUsage is an AtomicReference. No additional " +
|
||||||
|
"synchronization is needed.")
|
||||||
public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
|
public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
|
||||||
this.scmUsage = scmUsageForTest;
|
scmUsage.set(scmUsageForTest);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
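Two things change here: scmUsage moves into an AtomicReference so the test hook can swap the tracker without racing readers, and shutdown() becomes synchronized and guarded by a shutdownComplete flag instead of null-ing the field, which is what previously forced the null checks removed from VolumeInfo above. A self-contained sketch of that shape; the names are illustrative, not the actual VolumeUsage members.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;

// Sketch of an AtomicReference-held usage tracker with idempotent shutdown.
class UsageTrackerSketch {

  interface SpaceUsed {   // stands in for GetSpaceUsed
    long getUsed() throws IOException;
  }

  private final AtomicReference<SpaceUsed> scmUsage = new AtomicReference<>();
  private boolean shutdownComplete;

  void start(SpaceUsed usage) {
    scmUsage.set(usage);                 // publish the tracker to readers
  }

  long getScmUsed() throws IOException {
    return scmUsage.get().getUsed();     // callers are expected to start() first
  }

  void setForTesting(SpaceUsed usage) {
    scmUsage.set(usage);                 // swap the reference, never null the field
  }

  synchronized void shutdown() {
    if (!shutdownComplete) {
      // persisting the cached usage and stopping the refresh thread go here
      shutdownComplete = true;           // repeated calls become no-ops
    }
  }
}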
@ -31,11 +31,12 @@ import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocat
|
||||||
import org.apache.hadoop.utils.MetaStoreIterator;
|
import org.apache.hadoop.utils.MetaStoreIterator;
|
||||||
import org.apache.hadoop.utils.MetadataKeyFilters;
|
import org.apache.hadoop.utils.MetadataKeyFilters;
|
||||||
import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
|
import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
|
||||||
import org.apache.hadoop.utils.MetadataStore;
|
import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
|
||||||
import org.apache.hadoop.utils.MetadataStore.KeyValue;
|
import org.apache.hadoop.utils.MetadataStore.KeyValue;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
import java.io.Closeable;
|
||||||
import java.io.File;
|
import java.io.File;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.NoSuchElementException;
|
import java.util.NoSuchElementException;
|
||||||
|
@ -48,12 +49,14 @@ import java.util.NoSuchElementException;
|
||||||
* {@link MetadataKeyFilters#getNormalKeyFilter()}
|
* {@link MetadataKeyFilters#getNormalKeyFilter()}
|
||||||
*/
|
*/
|
||||||
@InterfaceAudience.Public
|
@InterfaceAudience.Public
|
||||||
public class KeyValueBlockIterator implements BlockIterator<BlockData> {
|
public class KeyValueBlockIterator implements BlockIterator<BlockData>,
|
||||||
|
Closeable {
|
||||||
|
|
||||||
private static final Logger LOG = LoggerFactory.getLogger(
|
private static final Logger LOG = LoggerFactory.getLogger(
|
||||||
KeyValueBlockIterator.class);
|
KeyValueBlockIterator.class);
|
||||||
|
|
||||||
private MetaStoreIterator<KeyValue> blockIterator;
|
private MetaStoreIterator<KeyValue> blockIterator;
|
||||||
|
private final ReferenceCountedDB db;
|
||||||
private static KeyPrefixFilter defaultBlockFilter = MetadataKeyFilters
|
private static KeyPrefixFilter defaultBlockFilter = MetadataKeyFilters
|
||||||
.getNormalKeyFilter();
|
.getNormalKeyFilter();
|
||||||
private KeyPrefixFilter blockFilter;
|
private KeyPrefixFilter blockFilter;
|
||||||
|
@ -91,9 +94,9 @@ public class KeyValueBlockIterator implements BlockIterator<BlockData> {
|
||||||
containerData;
|
containerData;
|
||||||
keyValueContainerData.setDbFile(KeyValueContainerLocationUtil
|
keyValueContainerData.setDbFile(KeyValueContainerLocationUtil
|
||||||
.getContainerDBFile(metdataPath, containerId));
|
.getContainerDBFile(metdataPath, containerId));
|
||||||
MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, new
|
db = BlockUtils.getDB(keyValueContainerData, new
|
||||||
OzoneConfiguration());
|
OzoneConfiguration());
|
||||||
blockIterator = metadataStore.iterator();
|
blockIterator = db.getStore().iterator();
|
||||||
blockFilter = filter;
|
blockFilter = filter;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -145,4 +148,8 @@ public class KeyValueBlockIterator implements BlockIterator<BlockData> {
|
||||||
nextBlock = null;
|
nextBlock = null;
|
||||||
blockIterator.seekToLast();
|
blockIterator.seekToLast();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void close() {
|
||||||
|
db.close();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
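Since the iterator now pins a ReferenceCountedDB handle for its whole lifetime, it implements Closeable and callers are expected to wrap it in try-with-resources, as KeyValueContainerCheck does in the hunks below. A condensed usage sketch; nextBlock() is assumed from the BlockIterator interface and may be named differently in the actual code.

// Closing the iterator releases the reference-counted DB handle it holds.
try (KeyValueBlockIterator kvIter = new KeyValueBlockIterator(containerID,
    new File(containerData.getContainerPath()))) {
  while (kvIter.hasNext()) {
    BlockData block = kvIter.nextBlock();   // assumed BlockIterator accessor
    // ... verify that every chunk referenced by this block exists on disk ...
  }
}                                           // close() drops the DB reference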
@ -54,7 +54,6 @@ import org.apache.hadoop.ozone.container.keyvalue.helpers
|
||||||
.KeyValueContainerLocationUtil;
|
.KeyValueContainerLocationUtil;
|
||||||
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
|
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
|
||||||
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
|
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
|
||||||
import org.apache.hadoop.utils.MetadataStore;
|
|
||||||
|
|
||||||
import com.google.common.base.Preconditions;
|
import com.google.common.base.Preconditions;
|
||||||
import org.apache.commons.io.FileUtils;
|
import org.apache.commons.io.FileUtils;
|
||||||
|
@ -74,6 +73,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
|
||||||
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
|
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
|
||||||
.Result.UNSUPPORTED_REQUEST;
|
.Result.UNSUPPORTED_REQUEST;
|
||||||
|
|
||||||
|
import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
@ -349,11 +349,12 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
|
||||||
|
|
||||||
void compactDB() throws StorageContainerException {
|
void compactDB() throws StorageContainerException {
|
||||||
try {
|
try {
|
||||||
MetadataStore db = BlockUtils.getDB(containerData, config);
|
try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
|
||||||
db.compactDB();
|
db.getStore().compactDB();
|
||||||
LOG.info("Container {} is closed with bcsId {}.",
|
LOG.info("Container {} is closed with bcsId {}.",
|
||||||
containerData.getContainerID(),
|
containerData.getContainerID(),
|
||||||
containerData.getBlockCommitSequenceId());
|
containerData.getBlockCommitSequenceId());
|
||||||
|
}
|
||||||
} catch (StorageContainerException ex) {
|
} catch (StorageContainerException ex) {
|
||||||
throw ex;
|
throw ex;
|
||||||
} catch (IOException ex) {
|
} catch (IOException ex) {
|
||||||
|
|
|
@ -30,12 +30,12 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container;
|
||||||
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
|
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
|
||||||
import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
|
import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
|
||||||
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
|
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
|
||||||
import org.apache.hadoop.utils.MetadataStore;
|
|
||||||
|
|
||||||
import java.io.File;
|
import java.io.File;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
|
||||||
|
import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
@ -236,19 +236,19 @@ public class KeyValueContainerCheck {
|
||||||
|
|
||||||
|
|
||||||
onDiskContainerData.setDbFile(dbFile);
|
onDiskContainerData.setDbFile(dbFile);
|
||||||
MetadataStore db = BlockUtils
|
try(ReferenceCountedDB db =
|
||||||
.getDB(onDiskContainerData, checkConfig);
|
BlockUtils.getDB(onDiskContainerData, checkConfig)) {
|
||||||
|
|
||||||
iterateBlockDB(db);
|
iterateBlockDB(db);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
private void iterateBlockDB(MetadataStore db)
|
private void iterateBlockDB(ReferenceCountedDB db)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
Preconditions.checkState(db != null);
|
Preconditions.checkState(db != null);
|
||||||
|
|
||||||
// get "normal" keys from the Block DB
|
// get "normal" keys from the Block DB
|
||||||
KeyValueBlockIterator kvIter = new KeyValueBlockIterator(containerID,
|
try(KeyValueBlockIterator kvIter = new KeyValueBlockIterator(containerID,
|
||||||
new File(onDiskContainerData.getContainerPath()));
|
new File(onDiskContainerData.getContainerPath()))) {
|
||||||
|
|
||||||
// ensure there is a chunk file for each key in the DB
|
// ensure there is a chunk file for each key in the DB
|
||||||
while (kvIter.hasNext()) {
|
while (kvIter.hasNext()) {
|
||||||
|
@ -262,7 +262,7 @@ public class KeyValueContainerCheck {
|
||||||
|
|
||||||
if (!chunkFile.exists()) {
|
if (!chunkFile.exists()) {
|
||||||
// concurrent mutation in Block DB? lookup the block again.
|
// concurrent mutation in Block DB? lookup the block again.
|
||||||
byte[] bdata = db.get(
|
byte[] bdata = db.getStore().get(
|
||||||
Longs.toByteArray(block.getBlockID().getLocalID()));
|
Longs.toByteArray(block.getBlockID().getLocalID()));
|
||||||
if (bdata == null) {
|
if (bdata == null) {
|
||||||
LOG.trace("concurrency with delete, ignoring deleted block");
|
LOG.trace("concurrency with delete, ignoring deleted block");
|
||||||
|
@ -276,6 +276,7 @@ public class KeyValueContainerCheck {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
private void loadContainerData() throws IOException {
|
private void loadContainerData() throws IOException {
|
||||||
|
|
||||||
|
|
|
@ -37,7 +37,7 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData;
|
||||||
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
|
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
|
||||||
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
|
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
|
||||||
import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
|
import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
|
||||||
import org.apache.hadoop.utils.MetadataStore;
|
import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
|
||||||
|
@ -66,7 +66,7 @@ public final class BlockUtils {
|
||||||
* @return MetadataStore handle.
|
* @return MetadataStore handle.
|
||||||
* @throws StorageContainerException
|
* @throws StorageContainerException
|
||||||
*/
|
*/
|
||||||
public static MetadataStore getDB(KeyValueContainerData containerData,
|
public static ReferenceCountedDB getDB(KeyValueContainerData containerData,
|
||||||
Configuration conf) throws
|
Configuration conf) throws
|
||||||
StorageContainerException {
|
StorageContainerException {
|
||||||
Preconditions.checkNotNull(containerData);
|
Preconditions.checkNotNull(containerData);
|
||||||
|
|
|
@ -39,6 +39,7 @@ import org.apache.hadoop.utils.MetadataStoreBuilder;
|
||||||
|
|
||||||
import com.google.common.base.Preconditions;
|
import com.google.common.base.Preconditions;
|
||||||
import org.apache.commons.io.FileUtils;
|
import org.apache.commons.io.FileUtils;
|
||||||
|
import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
@ -174,11 +175,13 @@ public final class KeyValueContainerUtil {
|
||||||
}
|
}
|
||||||
kvContainerData.setDbFile(dbFile);
|
kvContainerData.setDbFile(dbFile);
|
||||||
|
|
||||||
MetadataStore metadata = BlockUtils.getDB(kvContainerData, config);
|
try(ReferenceCountedDB metadata =
|
||||||
|
BlockUtils.getDB(kvContainerData, config)) {
|
||||||
long bytesUsed = 0;
|
long bytesUsed = 0;
|
||||||
List<Map.Entry<byte[], byte[]>> liveKeys = metadata
|
List<Map.Entry<byte[], byte[]>> liveKeys = metadata.getStore()
|
||||||
.getRangeKVs(null, Integer.MAX_VALUE,
|
.getRangeKVs(null, Integer.MAX_VALUE,
|
||||||
MetadataKeyFilters.getNormalKeyFilter());
|
MetadataKeyFilters.getNormalKeyFilter());
|
||||||
|
|
||||||
bytesUsed = liveKeys.parallelStream().mapToLong(e-> {
|
bytesUsed = liveKeys.parallelStream().mapToLong(e-> {
|
||||||
BlockData blockData;
|
BlockData blockData;
|
||||||
try {
|
try {
|
||||||
|
@ -191,6 +194,7 @@ public final class KeyValueContainerUtil {
|
||||||
kvContainerData.setBytesUsed(bytesUsed);
|
kvContainerData.setBytesUsed(bytesUsed);
|
||||||
kvContainerData.setKeyCount(liveKeys.size());
|
kvContainerData.setKeyCount(liveKeys.size());
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns the path where data or chunks live for a given container.
|
* Returns the path where data or chunks live for a given container.
|
||||||
|
|
|
@ -35,7 +35,7 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
|
||||||
import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
|
import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
|
||||||
import org.apache.hadoop.utils.BatchOperation;
|
import org.apache.hadoop.utils.BatchOperation;
|
||||||
import org.apache.hadoop.utils.MetadataKeyFilters;
|
import org.apache.hadoop.utils.MetadataKeyFilters;
|
||||||
import org.apache.hadoop.utils.MetadataStore;
|
import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
@ -84,16 +84,15 @@ public class BlockManagerImpl implements BlockManager {
|
||||||
"cannot be negative");
|
"cannot be negative");
|
||||||
// We are not locking the key manager since LevelDb serializes all actions
|
// We are not locking the key manager since LevelDb serializes all actions
|
||||||
// against a single DB. We rely on DB level locking to avoid conflicts.
|
// against a single DB. We rely on DB level locking to avoid conflicts.
|
||||||
MetadataStore db = BlockUtils.getDB((KeyValueContainerData) container
|
try(ReferenceCountedDB db = BlockUtils.
|
||||||
.getContainerData(), config);
|
getDB((KeyValueContainerData) container.getContainerData(), config)) {
|
||||||
|
|
||||||
// This is a post condition that acts as a hint to the user.
|
// This is a post condition that acts as a hint to the user.
|
||||||
// Should never fail.
|
// Should never fail.
|
||||||
Preconditions.checkNotNull(db, "DB cannot be null here");
|
Preconditions.checkNotNull(db, "DB cannot be null here");
|
||||||
|
|
||||||
long bcsId = data.getBlockCommitSequenceId();
|
long bcsId = data.getBlockCommitSequenceId();
|
||||||
long containerBCSId = ((KeyValueContainerData) container.getContainerData())
|
long containerBCSId = ((KeyValueContainerData) container.
|
||||||
.getBlockCommitSequenceId();
|
getContainerData()).getBlockCommitSequenceId();
|
||||||
|
|
||||||
// default blockCommitSequenceId for any block is 0. If the putBlock
|
// default blockCommitSequenceId for any block is 0. If the putBlock
|
||||||
// request is not coming via Ratis(for test scenarios), it will be 0.
|
// request is not coming via Ratis(for test scenarios), it will be 0.
|
||||||
|
@ -117,7 +116,7 @@ public class BlockManagerImpl implements BlockManager {
|
||||||
data.getProtoBufMessage().toByteArray());
|
data.getProtoBufMessage().toByteArray());
|
||||||
batch.put(blockCommitSequenceIdKey,
|
batch.put(blockCommitSequenceIdKey,
|
||||||
Longs.toByteArray(bcsId));
|
Longs.toByteArray(bcsId));
|
||||||
db.writeBatch(batch);
|
db.getStore().writeBatch(batch);
|
||||||
container.updateBlockCommitSequenceId(bcsId);
|
container.updateBlockCommitSequenceId(bcsId);
|
||||||
// Increment keycount here
|
// Increment keycount here
|
||||||
container.getContainerData().incrKeyCount();
|
container.getContainerData().incrKeyCount();
|
||||||
|
@ -126,6 +125,7 @@ public class BlockManagerImpl implements BlockManager {
|
||||||
+ bcsId + " chunk size " + data.getChunks().size());
|
+ bcsId + " chunk size " + data.getChunks().size());
|
||||||
return data.getSize();
|
return data.getSize();
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Gets an existing block.
|
* Gets an existing block.
|
||||||
|
@ -146,7 +146,7 @@ public class BlockManagerImpl implements BlockManager {
|
||||||
|
|
||||||
KeyValueContainerData containerData = (KeyValueContainerData) container
|
KeyValueContainerData containerData = (KeyValueContainerData) container
|
||||||
.getContainerData();
|
.getContainerData();
|
||||||
MetadataStore db = BlockUtils.getDB(containerData, config);
|
try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
|
||||||
// This is a post condition that acts as a hint to the user.
|
// This is a post condition that acts as a hint to the user.
|
||||||
// Should never fail.
|
// Should never fail.
|
||||||
Preconditions.checkNotNull(db, "DB cannot be null here");
|
Preconditions.checkNotNull(db, "DB cannot be null here");
|
||||||
|
@ -158,10 +158,10 @@ public class BlockManagerImpl implements BlockManager {
|
||||||
+ container.getContainerData().getContainerID() + " bcsId is "
|
+ container.getContainerData().getContainerID() + " bcsId is "
|
||||||
+ containerBCSId + ".", UNKNOWN_BCSID);
|
+ containerBCSId + ".", UNKNOWN_BCSID);
|
||||||
}
|
}
|
||||||
byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
|
byte[] kData = db.getStore().get(Longs.toByteArray(blockID.getLocalID()));
|
||||||
if (kData == null) {
|
if (kData == null) {
|
||||||
throw new StorageContainerException("Unable to find the block." + blockID,
|
throw new StorageContainerException("Unable to find the block." +
|
||||||
NO_SUCH_BLOCK);
|
blockID, NO_SUCH_BLOCK);
|
||||||
}
|
}
|
||||||
ContainerProtos.BlockData blockData =
|
ContainerProtos.BlockData blockData =
|
||||||
ContainerProtos.BlockData.parseFrom(kData);
|
ContainerProtos.BlockData.parseFrom(kData);
|
||||||
|
@ -173,6 +173,7 @@ public class BlockManagerImpl implements BlockManager {
|
||||||
}
|
}
|
||||||
return BlockData.getFromProtoBuf(blockData);
|
return BlockData.getFromProtoBuf(blockData);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns the length of the committed block.
|
* Returns the length of the committed block.
|
||||||
|
@ -187,11 +188,11 @@ public class BlockManagerImpl implements BlockManager {
|
||||||
throws IOException {
|
throws IOException {
|
||||||
KeyValueContainerData containerData = (KeyValueContainerData) container
|
KeyValueContainerData containerData = (KeyValueContainerData) container
|
||||||
.getContainerData();
|
.getContainerData();
|
||||||
MetadataStore db = BlockUtils.getDB(containerData, config);
|
try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
|
||||||
// This is a post condition that acts as a hint to the user.
|
// This is a post condition that acts as a hint to the user.
|
||||||
// Should never fail.
|
// Should never fail.
|
||||||
Preconditions.checkNotNull(db, "DB cannot be null here");
|
Preconditions.checkNotNull(db, "DB cannot be null here");
|
||||||
byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
|
byte[] kData = db.getStore().get(Longs.toByteArray(blockID.getLocalID()));
|
||||||
if (kData == null) {
|
if (kData == null) {
|
||||||
throw new StorageContainerException("Unable to find the block.",
|
throw new StorageContainerException("Unable to find the block.",
|
 NO_SUCH_BLOCK);

@@ -200,6 +201,7 @@ public class BlockManagerImpl implements BlockManager {
 ContainerProtos.BlockData.parseFrom(kData);
 return blockData.getSize();
 }
+}

 /**
 * Deletes an existing block.

@@ -218,7 +220,7 @@ public class BlockManagerImpl implements BlockManager {

 KeyValueContainerData cData = (KeyValueContainerData) container
 .getContainerData();
-MetadataStore db = BlockUtils.getDB(cData, config);
+try(ReferenceCountedDB db = BlockUtils.getDB(cData, config)) {
 // This is a post condition that acts as a hint to the user.
 // Should never fail.
 Preconditions.checkNotNull(db, "DB cannot be null here");

@@ -227,16 +229,16 @@ public class BlockManagerImpl implements BlockManager {
 // to delete a Block which might have just gotten inserted after
 // the get check.
 byte[] kKey = Longs.toByteArray(blockID.getLocalID());
-byte[] kData = db.get(kKey);
-if (kData == null) {
+try {
+db.getStore().delete(kKey);
+} catch (IOException e) {
 throw new StorageContainerException("Unable to find the block.",
 NO_SUCH_BLOCK);
 }
-db.delete(kKey);

 // Decrement blockcount here
 container.getContainerData().decrKeyCount();
 }
+}

 /**
 * List blocks in a container.

@@ -258,11 +260,11 @@ public class BlockManagerImpl implements BlockManager {
 List<BlockData> result = null;
 KeyValueContainerData cData = (KeyValueContainerData) container
 .getContainerData();
-MetadataStore db = BlockUtils.getDB(cData, config);
+try(ReferenceCountedDB db = BlockUtils.getDB(cData, config)) {
 result = new ArrayList<>();
 byte[] startKeyInBytes = Longs.toByteArray(startLocalID);
 List<Map.Entry<byte[], byte[]>> range =
-db.getSequentialRangeKVs(startKeyInBytes, count,
+db.getStore().getSequentialRangeKVs(startKeyInBytes, count,
 MetadataKeyFilters.getNormalKeyFilter());
 for (Map.Entry<byte[], byte[]> entry : range) {
 BlockData value = BlockUtils.getBlockData(entry.getValue());

@@ -271,6 +273,7 @@ public class BlockManagerImpl implements BlockManager {
 }
 return result;
 }
+}

 /**
 * Shutdown KeyValueContainerManager.

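The hunks above replace direct MetadataStore handles with a reference-counted wrapper released through try-with-resources. A minimal sketch of that usage pattern, assuming (as the diff implies, not shown here) that ReferenceCountedDB implements Closeable and exposes the wrapped store via getStore(); the helper method and its arguments are illustrative stand-ins for the real BlockManagerImpl code:

// Sketch only, not the committed code.
private long readBlockSize(KeyValueContainerData containerData,
    Configuration config, byte[] key) throws IOException {
  try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
    byte[] data = db.getStore().get(key);   // all reads/writes go through getStore()
    return ContainerProtos.BlockData.parseFrom(data).getSize();
  }                                          // close() releases the cached reference
}
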
@@ -43,7 +43,7 @@ import org.apache.hadoop.utils.BackgroundTaskQueue;
 import org.apache.hadoop.utils.BackgroundTaskResult;
 import org.apache.hadoop.utils.BatchOperation;
 import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -185,13 +185,13 @@ public class BlockDeletingService extends BackgroundService{
 ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult();
 long startTime = Time.monotonicNow();
 // Scan container's db and get list of under deletion blocks
-MetadataStore meta = BlockUtils.getDB(
-(KeyValueContainerData) containerData, conf);
+try (ReferenceCountedDB meta = BlockUtils.getDB(containerData, conf)) {
 // # of blocks to delete is throttled
 KeyPrefixFilter filter =
 new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX);
 List<Map.Entry<byte[], byte[]>> toDeleteBlocks =
-meta.getSequentialRangeKVs(null, blockLimitPerTask, filter);
+meta.getStore().getSequentialRangeKVs(null, blockLimitPerTask,
+filter);
 if (toDeleteBlocks.isEmpty()) {
 LOG.debug("No under deletion block found in container : {}",
 containerData.getContainerID());

@@ -227,7 +227,8 @@ public class BlockDeletingService extends BackgroundService{
 }
 });

-// Once files are deleted... replace deleting entries with deleted entries
+// Once files are deleted... replace deleting entries with deleted
+// entries
 BatchOperation batch = new BatchOperation();
 succeedBlocks.forEach(entry -> {
 String blockId =

@@ -237,7 +238,7 @@ public class BlockDeletingService extends BackgroundService{
 DFSUtil.string2Bytes(blockId));
 batch.delete(DFSUtil.string2Bytes(entry));
 });
-meta.writeBatch(batch);
+meta.getStore().writeBatch(batch);
 // update count of pending deletion blocks in in-memory container status
 containerData.decrPendingDeletionBlocks(succeedBlocks.size());

@@ -249,6 +250,7 @@ public class BlockDeletingService extends BackgroundService{
 crr.addAll(succeedBlocks);
 return crr;
 }
+}

 @Override
 public int getPriority() {

@@ -41,7 +41,7 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
 import org.apache.hadoop.utils.MetadataKeyFilters;
-import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -191,23 +191,25 @@ public class ContainerReader implements Runnable {
 KeyValueContainerUtil.parseKVContainerData(kvContainerData, config);
 KeyValueContainer kvContainer = new KeyValueContainer(
 kvContainerData, config);
-MetadataStore containerDB = BlockUtils.getDB(kvContainerData, config);
+try(ReferenceCountedDB containerDB = BlockUtils.getDB(kvContainerData,
+config)) {
 MetadataKeyFilters.KeyPrefixFilter filter =
 new MetadataKeyFilters.KeyPrefixFilter()
 .addFilter(OzoneConsts.DELETING_KEY_PREFIX);
 int numPendingDeletionBlocks =
-containerDB.getSequentialRangeKVs(null, Integer.MAX_VALUE, filter)
+containerDB.getStore().getSequentialRangeKVs(null,
+Integer.MAX_VALUE, filter)
 .size();
 kvContainerData.incrPendingDeletionBlocks(numPendingDeletionBlocks);
-byte[] delTxnId = containerDB.get(
+byte[] delTxnId = containerDB.getStore().get(
 DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX));
 if (delTxnId != null) {
 kvContainerData
 .updateDeleteTransactionId(Longs.fromByteArray(delTxnId));
 }
 // sets the BlockCommitSequenceId.
-byte[] bcsId = containerDB.get(
-DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX));
+byte[] bcsId = containerDB.getStore().get(DFSUtil.string2Bytes(
+OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX));
 if (bcsId != null) {
 kvContainerData
 .updateBlockCommitSequenceId(Longs.fromByteArray(bcsId));

@@ -218,6 +220,7 @@ public class ContainerReader implements Runnable {
 initializeUsedBytes(kvContainer);
 }
 containerSet.addContainer(kvContainer);
+}
 } else {
 throw new StorageContainerException("Container File is corrupted. " +
 "ContainerType is KeyValueContainer but cast to " +

@@ -29,14 +29,12 @@ import org.junit.rules.TemporaryFolder;
 import org.mockito.Mockito;

 import java.io.File;
-import java.io.IOException;
 import java.util.Properties;
 import java.util.UUID;

 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;

 /**
 * Unit tests for {@link HddsVolume}.

@@ -134,15 +132,8 @@ public class TestHddsVolume {
 assertTrue("scmUsed cache file should be saved on shutdown",
 scmUsedFile.exists());

-try {
-// Volume.getAvailable() should fail with IOException
-// as usage thread is shutdown.
+// Volume.getAvailable() should succeed even when usage thread
+// is shutdown.
 volume.getAvailable();
-fail("HddsVolume#shutdown test failed");
-} catch (Exception ex) {
-assertTrue(ex instanceof IOException);
-assertTrue(ex.getMessage().contains(
-"Volume Usage thread is not running."));
-}
 }
 }

@@ -35,7 +35,6 @@ import static org.apache.hadoop.ozone.container.common.volume.HddsVolume
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;

 import org.junit.After;
 import org.junit.Assert;

@@ -213,18 +212,10 @@ public class TestVolumeSet {

 volumeSet.shutdown();

-// Verify that the volumes are shutdown and the volumeUsage is set to null.
+// Verify that volume usage can be queried during shutdown.
 for (HddsVolume volume : volumesList) {
-Assert.assertNull(volume.getVolumeInfo().getUsageForTesting());
-try {
-// getAvailable() should throw null pointer exception as usage is null.
+Assert.assertNotNull(volume.getVolumeInfo().getUsageForTesting());
 volume.getAvailable();
-fail("Volume shutdown failed.");
-} catch (IOException ex) {
-// Do Nothing. Exception is expected.
-assertTrue(ex.getMessage().contains(
-"Volume Usage thread is not running."));
-}
 }
 }

@@ -34,7 +34,7 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.utils.MetadataKeyFilters;
-import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;

@@ -109,11 +109,11 @@ public class TestKeyValueBlockIterator {
 createContainerWithBlocks(containerID, normalBlocks, deletedBlocks);
 String containerPath = new File(containerData.getMetadataPath())
 .getParent();
-KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-containerID, new File(containerPath));
+try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
+containerID, new File(containerPath))) {

 int counter = 0;
-while(keyValueBlockIterator.hasNext()) {
+while (keyValueBlockIterator.hasNext()) {
 BlockData blockData = keyValueBlockIterator.nextBlock();
 assertEquals(blockData.getLocalID(), counter++);
 }

@@ -122,7 +122,7 @@ public class TestKeyValueBlockIterator {

 keyValueBlockIterator.seekToFirst();
 counter = 0;
-while(keyValueBlockIterator.hasNext()) {
+while (keyValueBlockIterator.hasNext()) {
 BlockData blockData = keyValueBlockIterator.nextBlock();
 assertEquals(blockData.getLocalID(), counter++);
 }

@@ -135,6 +135,7 @@ public class TestKeyValueBlockIterator {
 "for ContainerID " + containerID, ex);
 }
 }
+}

 @Test
 public void testKeyValueBlockIteratorWithNextBlock() throws Exception {

@@ -142,8 +143,8 @@ public class TestKeyValueBlockIterator {
 createContainerWithBlocks(containerID, 2, 0);
 String containerPath = new File(containerData.getMetadataPath())
 .getParent();
-KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-containerID, new File(containerPath));
+try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
+containerID, new File(containerPath))) {
 long blockID = 0L;
 assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());
 assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());

@@ -155,6 +156,7 @@ public class TestKeyValueBlockIterator {
 "for ContainerID " + containerID, ex);
 }
 }
+}

 @Test
 public void testKeyValueBlockIteratorWithHasNext() throws Exception {

@@ -162,8 +164,8 @@ public class TestKeyValueBlockIterator {
 createContainerWithBlocks(containerID, 2, 0);
 String containerPath = new File(containerData.getMetadataPath())
 .getParent();
-KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-containerID, new File(containerPath));
+try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
+containerID, new File(containerPath))) {
 long blockID = 0L;

 // Even calling multiple times hasNext() should not move entry forward.

@@ -196,8 +198,7 @@ public class TestKeyValueBlockIterator {
 GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
 "for ContainerID " + containerID, ex);
 }
+}

 }

 @Test

@@ -208,16 +209,17 @@ public class TestKeyValueBlockIterator {
 createContainerWithBlocks(containerId, normalBlocks, deletedBlocks);
 String containerPath = new File(containerData.getMetadataPath())
 .getParent();
-KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
+try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
 containerId, new File(containerPath), MetadataKeyFilters
-.getDeletingKeyFilter());
+.getDeletingKeyFilter())) {

 int counter = 5;
-while(keyValueBlockIterator.hasNext()) {
+while (keyValueBlockIterator.hasNext()) {
 BlockData blockData = keyValueBlockIterator.nextBlock();
 assertEquals(blockData.getLocalID(), counter++);
 }
 }
+}

 @Test
 public void testKeyValueBlockIteratorWithOnlyDeletedBlocks() throws

@@ -226,12 +228,13 @@ public class TestKeyValueBlockIterator {
 createContainerWithBlocks(containerId, 0, 5);
 String containerPath = new File(containerData.getMetadataPath())
 .getParent();
-KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-containerId, new File(containerPath));
+try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
+containerId, new File(containerPath))) {
 //As all blocks are deleted blocks, blocks does not match with normal key
 // filter.
 assertFalse(keyValueBlockIterator.hasNext());
 }
+}

 /**
 * Creates a container with specified number of normal blocks and deleted

@@ -251,28 +254,31 @@ public class TestKeyValueBlockIterator {
 container = new KeyValueContainer(containerData, conf);
 container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID
 .randomUUID().toString());
-MetadataStore metadataStore = BlockUtils.getDB(containerData, conf);
+try(ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData,
+conf)) {

 List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
 ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024);
 chunkList.add(info.getProtoBufMessage());

-for (int i=0; i<normalBlocks; i++) {
+for (int i = 0; i < normalBlocks; i++) {
 BlockID blockID = new BlockID(containerId, i);
 BlockData blockData = new BlockData(blockID);
 blockData.setChunks(chunkList);
-metadataStore.put(Longs.toByteArray(blockID.getLocalID()), blockData
+metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()),
+blockData
 .getProtoBufMessage().toByteArray());
 }

-for (int i=normalBlocks; i<deletedBlocks; i++) {
+for (int i = normalBlocks; i < deletedBlocks; i++) {
 BlockID blockID = new BlockID(containerId, i);
 BlockData blockData = new BlockData(blockID);
 blockData.setChunks(chunkList);
-metadataStore.put(DFSUtil.string2Bytes(OzoneConsts
+metadataStore.getStore().put(DFSUtil.string2Bytes(OzoneConsts
 .DELETING_KEY_PREFIX + blockID.getLocalID()), blockData
 .getProtoBufMessage().toByteArray());
 }
 }
+}

 }

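The same resource-scoping idea is applied to KeyValueBlockIterator in these tests: the iterator is now opened in try-with-resources so the underlying DB handle is released when iteration ends. A hedged sketch of the resulting usage, assuming the iterator implements Closeable as the new try(...) form requires:

// Sketch only: iterate a container's blocks and let close() release the handle.
try (KeyValueBlockIterator iter = new KeyValueBlockIterator(
    containerID, new File(containerPath))) {
  while (iter.hasNext()) {
    BlockData blockData = iter.nextBlock();
    // ... assertions or per-block processing ...
  }
}
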
@@ -36,7 +36,7 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker;
-import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;

 import org.junit.Assert;
 import org.junit.Before;

@@ -132,9 +132,9 @@ public class TestKeyValueContainer {
 private void addBlocks(int count) throws Exception {
 long containerId = keyValueContainerData.getContainerID();

-MetadataStore metadataStore = BlockUtils.getDB(keyValueContainer
-.getContainerData(), conf);
-for (int i=0; i < count; i++) {
+try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer
+.getContainerData(), conf)) {
+for (int i = 0; i < count; i++) {
 // Creating BlockData
 BlockID blockID = new BlockID(containerId, i);
 BlockData blockData = new BlockData(blockID);

@@ -145,10 +145,11 @@ public class TestKeyValueContainer {
 .getLocalID(), 0), 0, 1024);
 chunkList.add(info.getProtoBufMessage());
 blockData.setChunks(chunkList);
-metadataStore.put(Longs.toByteArray(blockID.getLocalID()), blockData
+metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()),
+blockData
 .getProtoBufMessage().toByteArray());
 }
+}
 }

 @SuppressWarnings("RedundantCast")

@@ -191,9 +192,12 @@ public class TestKeyValueContainer {

 int numberOfKeysToWrite = 12;
 //write one few keys to check the key count after import
-MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, conf);
+try(ReferenceCountedDB metadataStore =
+BlockUtils.getDB(keyValueContainerData, conf)) {
 for (int i = 0; i < numberOfKeysToWrite; i++) {
-metadataStore.put(("test" + i).getBytes(UTF_8), "test".getBytes(UTF_8));
+metadataStore.getStore().put(("test" + i).getBytes(UTF_8),
+"test".getBytes(UTF_8));
+}
 }
 BlockUtils.removeDB(keyValueContainerData, conf);

@@ -35,7 +35,7 @@ import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingP
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;

@@ -149,7 +149,8 @@ import static org.junit.Assert.assertTrue;
 container = new KeyValueContainer(containerData, conf);
 container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
 UUID.randomUUID().toString());
-MetadataStore metadataStore = BlockUtils.getDB(containerData, conf);
+try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData,
+conf)) {
 chunkManager = new ChunkManagerImpl(true);

 assertTrue(containerData.getChunksPath() != null);

@@ -184,14 +185,15 @@ import static org.junit.Assert.assertTrue;

 if (i >= normalBlocks) {
 // deleted key
-metadataStore.put(DFSUtil.string2Bytes(
+metadataStore.getStore().put(DFSUtil.string2Bytes(
 OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID()),
 blockData.getProtoBufMessage().toByteArray());
 } else {
 // normal key
-metadataStore.put(Longs.toByteArray(blockID.getLocalID()),
+metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()),
 blockData.getProtoBufMessage().toByteArray());
 }
 }
 }
+}
 }

@@ -47,23 +47,34 @@ public final class ServerUtils {
 * For example, sanitizeUserArgs(17, 3, 5, 10)
 * ensures that 17 is greater/equal than 3 * 5 and less/equal to 3 * 10.
 *
+ * @param key - config key of the value
 * @param valueTocheck - value to check
+ * @param baseKey - config key of the baseValue
 * @param baseValue - the base value that is being used.
 * @param minFactor - range min - a 2 here makes us ensure that value
 * valueTocheck is at least twice the baseValue.
 * @param maxFactor - range max
 * @return long
 */
-public static long sanitizeUserArgs(long valueTocheck, long baseValue,
-long minFactor, long maxFactor)
-throws IllegalArgumentException {
-if ((valueTocheck >= (baseValue * minFactor)) &&
-(valueTocheck <= (baseValue * maxFactor))) {
-return valueTocheck;
+public static long sanitizeUserArgs(String key, long valueTocheck,
+String baseKey, long baseValue, long minFactor, long maxFactor) {
+long minLimit = baseValue * minFactor;
+long maxLimit = baseValue * maxFactor;
+if (valueTocheck < minLimit) {
+LOG.warn(
+"{} value = {} is smaller than min = {} based on"
++ " the key value of {}, reset to the min value {}.",
+key, valueTocheck, minLimit, baseKey, minLimit);
+valueTocheck = minLimit;
+} else if (valueTocheck > maxLimit) {
+LOG.warn(
+"{} value = {} is larger than max = {} based on"
++ " the key value of {}, reset to the max value {}.",
+key, valueTocheck, maxLimit, baseKey, maxLimit);
+valueTocheck = maxLimit;
 }
-String errMsg = String.format("%d is not within min = %d or max = " +
-"%d", valueTocheck, baseValue * minFactor, baseValue * maxFactor);
-throw new IllegalArgumentException(errMsg);
+return valueTocheck;
 }

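The rewritten sanitizeUserArgs no longer throws IllegalArgumentException for out-of-range values; it clamps the value into [baseValue * minFactor, baseValue * maxFactor] and logs a warning naming both config keys. A small illustrative call, with hypothetical key names and numbers chosen only to show the clamping, not taken from the real configuration:

// Hypothetical keys and numbers, purely to show the clamping behaviour:
// base = 100 with factors 900 and 1000 gives an allowed range of [90000, 100000].
long v = ServerUtils.sanitizeUserArgs("example.value.key", 300000L,
    "example.base.key", 100L, 900L, 1000L);
// v == 100000 (clamped to the max); a warning is logged instead of an exception.
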
@@ -182,18 +182,27 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
 pipelineManager
 .getPipelines(type, factor, Pipeline.PipelineState.OPEN,
 excludeList.getDatanodes(), excludeList.getPipelineIds());
-Pipeline pipeline;
+Pipeline pipeline = null;
 if (availablePipelines.size() == 0) {
 try {
 // TODO: #CLUTIL Remove creation logic when all replication types and
 // factors are handled by pipeline creator
 pipeline = pipelineManager.createPipeline(type, factor);
 } catch (IOException e) {
-LOG.error("Pipeline creation failed for type:{} factor:{}",
-type, factor, e);
+LOG.warn("Pipeline creation failed for type:{} factor:{}. Retrying " +
+"get pipelines call once.", type, factor, e);
+availablePipelines = pipelineManager
+.getPipelines(type, factor, Pipeline.PipelineState.OPEN,
+excludeList.getDatanodes(), excludeList.getPipelineIds());
+if (availablePipelines.size() == 0) {
+LOG.info("Could not find available pipeline of type:{} and " +
+"factor:{} even after retrying", type, factor);
 break;
 }
-} else {
+}
+}
+
+if (null == pipeline) {
 // TODO: #CLUTIL Make the selection policy driven.
 pipeline = availablePipelines
 .get((int) (Math.random() * availablePipelines.size()));

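The allocateBlock change above turns a hard failure into a single retry: if pipeline creation throws, the allocator re-queries the open pipelines once before giving up, and only then picks a pipeline at random. A compact sketch of that control flow, with simplified, hypothetical helpers (lookup() and create() stand in for pipelineManager.getPipelines(...) and pipelineManager.createPipeline(...); the real code runs inside a loop and uses break rather than return):

Pipeline pipeline = null;
List<Pipeline> available = lookup();
if (available.isEmpty()) {
  try {
    pipeline = create();
  } catch (IOException e) {
    available = lookup();        // retry the query once after a failed create
    if (available.isEmpty()) {
      return null;               // simplified: caller handles the miss
    }
  }
}
if (pipeline == null) {
  pipeline = available.get((int) (Math.random() * available.size()));
}
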
@@ -263,11 +263,15 @@ public class ContainerStateManager {
 }
 pipeline = pipelines.get((int) containerCount.get() % pipelines.size());
 }
+synchronized (pipeline) {
 return allocateContainer(pipelineManager, owner, pipeline);
 }
+}

 /**
 * Allocates a new container based on the type, replication etc.
+ * This method should be called only after the lock on the pipeline is held
+ * on which the container will be allocated.
 *
 * @param pipelineManager - Pipeline Manager class.
 * @param owner - Owner of the container.

@@ -296,10 +300,10 @@ public class ContainerStateManager {
 .setReplicationFactor(pipeline.getFactor())
 .setReplicationType(pipeline.getType())
 .build();
-pipelineManager.addContainerToPipeline(pipeline.getId(),
-ContainerID.valueof(containerID));
 Preconditions.checkNotNull(containerInfo);
 containers.addContainer(containerInfo);
+pipelineManager.addContainerToPipeline(pipeline.getId(),
+ContainerID.valueof(containerID));
 containerStateCount.incrementAndGet(containerInfo.getState());
 LOG.trace("New container allocated: {}", containerInfo);
 return containerInfo;

@@ -386,18 +386,17 @@ public class SCMContainerManager implements ContainerManager {

 public ContainerInfo getMatchingContainer(final long sizeRequired,
 String owner, Pipeline pipeline, List<ContainerID> excludedContainers) {
+NavigableSet<ContainerID> containerIDs;
 try {
+synchronized (pipeline) {
 //TODO: #CLUTIL See if lock is required here
-NavigableSet<ContainerID> containerIDs =
+containerIDs =
 pipelineManager.getContainersInPipeline(pipeline.getId());

 containerIDs = getContainersForOwner(containerIDs, owner);
 if (containerIDs.size() < numContainerPerOwnerInPipeline) {
-synchronized (pipeline) {
 // TODO: #CLUTIL Maybe we can add selection logic inside synchronized
 // as well
-containerIDs = getContainersForOwner(
-pipelineManager.getContainersInPipeline(pipeline.getId()), owner);
 if (containerIDs.size() < numContainerPerOwnerInPipeline) {
 ContainerInfo containerInfo =
 containerStateManager.allocateContainer(pipelineManager, owner,

@@ -61,4 +61,8 @@ public final class PipelineFactory {
 List<DatanodeDetails> nodes) {
 return providers.get(type).create(factor, nodes);
 }
+
+public void shutdown() {
+providers.values().forEach(provider -> provider.shutdown());
+}
 }

@@ -33,4 +33,5 @@ public interface PipelineProvider {

 Pipeline create(ReplicationFactor factor, List<DatanodeDetails> nodes);
+
+void shutdown();
 }

@@ -24,17 +24,39 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState;
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
+import org.apache.hadoop.io.MultipleIOException;
+import org.apache.ratis.RatisHelper;
+import org.apache.ratis.client.RaftClient;
+import org.apache.ratis.grpc.GrpcTlsConfig;
+import org.apache.ratis.protocol.RaftClientReply;
+import org.apache.ratis.protocol.RaftGroup;
+import org.apache.ratis.protocol.RaftPeer;
+import org.apache.ratis.retry.RetryPolicy;
+import org.apache.ratis.rpc.SupportedRpcType;
+import org.apache.ratis.util.TimeDuration;
+import org.apache.ratis.util.function.CheckedBiConsumer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.ForkJoinWorkerThread;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;

 /**

@@ -42,10 +64,28 @@ import java.util.stream.Collectors;
 */
 public class RatisPipelineProvider implements PipelineProvider {

+private static final Logger LOG =
+LoggerFactory.getLogger(RatisPipelineProvider.class);
+
 private final NodeManager nodeManager;
 private final PipelineStateManager stateManager;
 private final Configuration conf;
+
+// Set parallelism at 3, as now in Ratis we create 1 and 3 node pipelines.
+private final int parallelismForPool = 3;
+
+private final ForkJoinPool.ForkJoinWorkerThreadFactory factory =
+(pool -> {
+final ForkJoinWorkerThread worker = ForkJoinPool.
+defaultForkJoinWorkerThreadFactory.newThread(pool);
+worker.setName("RATISCREATEPIPELINE" + worker.getPoolIndex());
+return worker;
+});
+
+private final ForkJoinPool forkJoinPool = new ForkJoinPool(
+parallelismForPool, factory, null, false);
+
 RatisPipelineProvider(NodeManager nodeManager,
 PipelineStateManager stateManager, Configuration conf) {
 this.nodeManager = nodeManager;

@@ -53,6 +93,7 @@ public class RatisPipelineProvider implements PipelineProvider {
 this.conf = conf;
 }

+
 /**
 * Create pluggable container placement policy implementation instance.
 *

@@ -133,7 +174,81 @@ public class RatisPipelineProvider implements PipelineProvider {
 .build();
 }

+@Override
+public void shutdown() {
+forkJoinPool.shutdownNow();
+try {
+forkJoinPool.awaitTermination(60, TimeUnit.SECONDS);
+} catch (Exception e) {
+LOG.error("Unexpected exception occurred during shutdown of " +
+"RatisPipelineProvider", e);
+}
+}
+
 protected void initializePipeline(Pipeline pipeline) throws IOException {
-RatisPipelineUtils.createPipeline(pipeline, conf);
+final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
+LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group);
+callRatisRpc(pipeline.getNodes(),
+(raftClient, peer) -> {
+RaftClientReply reply = raftClient.groupAdd(group, peer.getId());
+if (reply == null || !reply.isSuccess()) {
+String msg = "Pipeline initialization failed for pipeline:"
++ pipeline.getId() + " node:" + peer.getId();
+LOG.error(msg);
+throw new IOException(msg);
+}
+});
+}
+
+private void callRatisRpc(List<DatanodeDetails> datanodes,
+CheckedBiConsumer<RaftClient, RaftPeer, IOException> rpc)
+throws IOException {
+if (datanodes.isEmpty()) {
+return;
+}
+
+final String rpcType = conf
+.get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
+ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
+final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(conf);
+final List<IOException> exceptions =
+Collections.synchronizedList(new ArrayList<>());
+final int maxOutstandingRequests =
+HddsClientUtils.getMaxOutstandingRequests(conf);
+final GrpcTlsConfig tlsConfig = RatisHelper.createTlsClientConfig(new
+SecurityConfig(conf));
+final TimeDuration requestTimeout =
+RatisHelper.getClientRequestTimeout(conf);
+try {
+forkJoinPool.submit(() -> {
+datanodes.parallelStream().forEach(d -> {
+final RaftPeer p = RatisHelper.toRaftPeer(d);
+try (RaftClient client = RatisHelper
+.newRaftClient(SupportedRpcType.valueOfIgnoreCase(rpcType), p,
+retryPolicy, maxOutstandingRequests, tlsConfig,
+requestTimeout)) {
+rpc.accept(client, p);
+} catch (IOException ioe) {
+String errMsg =
+"Failed invoke Ratis rpc " + rpc + " for " + d.getUuid();
+LOG.error(errMsg, ioe);
+exceptions.add(new IOException(errMsg, ioe));
+}
+});
+}).get();
+} catch (ExecutionException | RejectedExecutionException ex) {
+LOG.error(ex.getClass().getName() + " exception occurred during " +
+"createPipeline", ex);
+throw new IOException(ex.getClass().getName() + " exception occurred " +
+"during createPipeline", ex);
+} catch (InterruptedException ex) {
+Thread.currentThread().interrupt();
+throw new IOException("Interrupt exception occurred during " +
+"createPipeline", ex);
+}
+if (!exceptions.isEmpty()) {
+throw MultipleIOException.createIOException(exceptions);
+}
 }
 }

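RatisPipelineProvider now owns a small ForkJoinPool with renamed worker threads, so the parallel groupAdd calls issued during pipeline creation are visible in thread dumps and can be shut down with the provider. A self-contained sketch of that thread-factory idiom; the class name, parallelism, and printed output are illustrative, not part of the commit:

import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinWorkerThread;

public class NamedPoolExample {
  // Worker threads are renamed as they are created, mirroring the diff above.
  private static final ForkJoinPool.ForkJoinWorkerThreadFactory FACTORY = pool -> {
    ForkJoinWorkerThread worker =
        ForkJoinPool.defaultForkJoinWorkerThreadFactory.newThread(pool);
    worker.setName("RATISCREATEPIPELINE" + worker.getPoolIndex());
    return worker;
  };

  private static final ForkJoinPool POOL =
      new ForkJoinPool(3, FACTORY, null, false);

  public static void main(String[] args) throws Exception {
    // Parallel work submitted to the pool runs on the named workers.
    POOL.submit(() ->
        java.util.stream.IntStream.range(0, 5).parallel()
            .forEach(i -> System.out.println(Thread.currentThread().getName()))
    ).get();
    POOL.shutdownNow();
  }
}
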
@@ -17,66 +17,37 @@
 */
 package org.apache.hadoop.hdds.scm.pipeline;

+import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.io.MultipleIOException;
 import org.apache.ratis.RatisHelper;
 import org.apache.ratis.client.RaftClient;
 import org.apache.ratis.grpc.GrpcTlsConfig;
-import org.apache.ratis.protocol.RaftClientReply;
 import org.apache.ratis.protocol.RaftGroup;
 import org.apache.ratis.protocol.RaftGroupId;
 import org.apache.ratis.protocol.RaftPeer;
 import org.apache.ratis.retry.RetryPolicy;
 import org.apache.ratis.rpc.SupportedRpcType;
 import org.apache.ratis.util.TimeDuration;
-import org.apache.ratis.util.function.CheckedBiConsumer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;

 /**
 * Utility class for Ratis pipelines. Contains methods to create and destroy
 * ratis pipelines.
 */
-final class RatisPipelineUtils {
+public final class RatisPipelineUtils {

 private static final Logger LOG =
 LoggerFactory.getLogger(RatisPipelineUtils.class);

 private RatisPipelineUtils() {
 }

-/**
- * Sends ratis command to create pipeline on all the datanodes.
- *
- * @param pipeline - Pipeline to be created
- * @param ozoneConf - Ozone Confinuration
- * @throws IOException if creation fails
- */
-public static void createPipeline(Pipeline pipeline, Configuration ozoneConf)
-throws IOException {
-final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
-LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group);
-callRatisRpc(pipeline.getNodes(), ozoneConf,
-(raftClient, peer) -> {
-RaftClientReply reply = raftClient.groupAdd(group, peer.getId());
-if (reply == null || !reply.isSuccess()) {
-String msg = "Pipeline initialization failed for pipeline:"
-+ pipeline.getId() + " node:" + peer.getId();
-LOG.error(msg);
-throw new IOException(msg);
-}
-});
-}
-
 /**
 * Removes pipeline from SCM. Sends ratis command to destroy pipeline on all
 * the datanodes.

@@ -125,42 +96,4 @@ final class RatisPipelineUtils {
 client
 .groupRemove(RaftGroupId.valueOf(pipelineID.getId()), true, p.getId());
 }
-
-private static void callRatisRpc(List<DatanodeDetails> datanodes,
-Configuration ozoneConf,
-CheckedBiConsumer<RaftClient, RaftPeer, IOException> rpc)
-throws IOException {
-if (datanodes.isEmpty()) {
-return;
-}
-
-final String rpcType = ozoneConf
-.get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
-ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
-final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
-final List<IOException> exceptions =
-Collections.synchronizedList(new ArrayList<>());
-final int maxOutstandingRequests =
-HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
-final GrpcTlsConfig tlsConfig = RatisHelper.createTlsClientConfig(new
-SecurityConfig(ozoneConf));
-final TimeDuration requestTimeout =
-RatisHelper.getClientRequestTimeout(ozoneConf);
-datanodes.parallelStream().forEach(d -> {
-final RaftPeer p = RatisHelper.toRaftPeer(d);
-try (RaftClient client = RatisHelper
-.newRaftClient(SupportedRpcType.valueOfIgnoreCase(rpcType), p,
-retryPolicy, maxOutstandingRequests, tlsConfig, requestTimeout)) {
-rpc.accept(client, p);
-} catch (IOException ioe) {
-String errMsg =
-"Failed invoke Ratis rpc " + rpc + " for " + d.getUuid();
-LOG.error(errMsg, ioe);
-exceptions.add(new IOException(errMsg, ioe));
-}
-});
-if (!exceptions.isEmpty()) {
-throw MultipleIOException.createIOException(exceptions);
-}
-}
 }

@@ -87,7 +87,8 @@ public class SCMPipelineManager implements PipelineManager {
 this.lock = new ReentrantReadWriteLock();
 this.conf = conf;
 this.stateManager = new PipelineStateManager(conf);
-this.pipelineFactory = new PipelineFactory(nodeManager, stateManager, conf);
+this.pipelineFactory = new PipelineFactory(nodeManager, stateManager,
+conf);
 // TODO: See if thread priority needs to be set for these threads
 scheduler = new Scheduler("RatisPipelineUtilsThread", false, 1);
 this.backgroundPipelineCreator =

@@ -419,5 +420,7 @@ public class SCMPipelineManager implements PipelineManager {
 if(metrics != null) {
 metrics.unRegister();
 }
+// shutdown pipeline provider.
+pipelineFactory.shutdown();
 }
 }

@@ -72,4 +72,9 @@ public class SimplePipelineProvider implements PipelineProvider {
 .setNodes(nodes)
 .build();
 }
+
+@Override
+public void shutdown() {
+// Do nothing.
+}
 }

@ -19,7 +19,13 @@
|
||||||
package org.apache.hadoop.hdds.scm;
|
package org.apache.hadoop.hdds.scm;
|
||||||
|
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.hdds.HddsConfigKeys;
|
||||||
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
|
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
|
||||||
|
import org.apache.hadoop.hdds.server.ServerUtils;
|
||||||
|
import org.apache.hadoop.test.PathUtils;
|
||||||
|
|
||||||
|
import org.apache.commons.io.FileUtils;
|
||||||
|
import static org.junit.Assert.assertTrue;
|
||||||
import org.junit.Rule;
|
import org.junit.Rule;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
import org.junit.rules.ExpectedException;
|
import org.junit.rules.ExpectedException;
|
||||||
|
@ -27,12 +33,15 @@ import org.junit.rules.Timeout;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
import java.io.File;
|
||||||
import java.net.InetSocketAddress;
|
import java.net.InetSocketAddress;
|
||||||
|
import java.util.concurrent.TimeUnit;
|
||||||
|
|
||||||
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
|
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
import static org.junit.Assert.assertEquals;

/**
@ -154,4 +163,67 @@ public class TestHddsServerUtils {
    HddsServerUtil.getScmAddressForDataNodes(conf);
  }

  /**
   * Test {@link ServerUtils#getScmDbDir}.
   */
  @Test
  public void testGetScmDbDir() {
    final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class);
    final File dbDir = new File(testDir, "scmDbDir");
    final File metaDir = new File(testDir, "metaDir"); // should be ignored.
    final Configuration conf = new OzoneConfiguration();
    conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dbDir.getPath());
    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());

    try {
      assertEquals(dbDir, ServerUtils.getScmDbDir(conf));
      assertTrue(dbDir.exists()); // should have been created.
    } finally {
      FileUtils.deleteQuietly(dbDir);
    }
  }

  /**
   * Test {@link ServerUtils#getScmDbDir} with fallback to OZONE_METADATA_DIRS
   * when OZONE_SCM_DB_DIRS is undefined.
   */
  @Test
  public void testGetScmDbDirWithFallback() {
    final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class);
    final File metaDir = new File(testDir, "metaDir");
    final Configuration conf = new OzoneConfiguration();
    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
    try {
      assertEquals(metaDir, ServerUtils.getScmDbDir(conf));
      assertTrue(metaDir.exists()); // should have been created.
    } finally {
      FileUtils.deleteQuietly(metaDir);
    }
  }

  @Test
  public void testNoScmDbDirConfigured() {
    thrown.expect(IllegalArgumentException.class);
    ServerUtils.getScmDbDir(new OzoneConfiguration());
  }

  @Test
  public void testGetStaleNodeInterval() {
    final Configuration conf = new OzoneConfiguration();

    // Reset OZONE_SCM_STALENODE_INTERVAL to 300s, which is larger than the
    // max limit value.
    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 300, TimeUnit.SECONDS);
    conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100);
    // the max limit value will be returned
    assertEquals(100000, HddsServerUtil.getStaleNodeInterval(conf));

    // Reset OZONE_SCM_STALENODE_INTERVAL to 10ms, which is smaller than the
    // min limit value.
    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 10,
        TimeUnit.MILLISECONDS);
    conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100);
    // the min limit value will be returned
    assertEquals(90000, HddsServerUtil.getStaleNodeInterval(conf));
  }
}
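The two assertions above encode a clamping rule: the configured stale-node interval is pulled back inside a [min, max] window derived from the heartbeat settings. A minimal sketch of that behavior, using only the bounds the assertions imply (90000 ms and 100000 ms here) rather than HddsServerUtil's actual derivation:

    static long clampStaleNodeInterval(long configuredMs, long minLimitMs,
        long maxLimitMs) {
      // values outside [minLimitMs, maxLimitMs] are pulled to the nearest bound
      return Math.min(Math.max(configuredMs, minLimitMs), maxLimitMs);
    }

    // clampStaleNodeInterval(300_000, 90_000, 100_000) == 100000  (max wins)
    // clampStaleNodeInterval(10,      90_000, 100_000) == 90000   (min wins)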
@ -19,7 +19,13 @@ package org.apache.hadoop.hdds.scm.block;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeoutException;

import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@ -132,6 +138,43 @@ public class TestBlockManager {
    Assert.assertNotNull(block);
  }

  @Test
  public void testAllocateBlockInParallel() throws Exception {
    eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus);
    GenericTestUtils.waitFor(() -> {
      return !blockManager.isScmInSafeMode();
    }, 10, 1000 * 5);
    int threadCount = 20;
    List<ExecutorService> executors = new ArrayList<>(threadCount);
    for (int i = 0; i < threadCount; i++) {
      executors.add(Executors.newSingleThreadExecutor());
    }
    List<CompletableFuture<AllocatedBlock>> futureList =
        new ArrayList<>(threadCount);
    for (int i = 0; i < threadCount; i++) {
      final CompletableFuture<AllocatedBlock> future =
          new CompletableFuture<>();
      CompletableFuture.supplyAsync(() -> {
        try {
          future.complete(blockManager
              .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner,
                  new ExcludeList()));
        } catch (IOException e) {
          future.completeExceptionally(e);
        }
        return future;
      }, executors.get(i));
      futureList.add(future);
    }
    try {
      CompletableFuture
          .allOf(futureList.toArray(new CompletableFuture[futureList.size()]))
          .get();
    } catch (Exception e) {
      Assert.fail("testAllocateBlockInParallel failed");
    }
  }

  @Test
  public void testAllocateOversizedBlock() throws Exception {
    eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus);
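The new parallel tests in this change all follow the same fan-out shape: one single-thread executor per task, a CompletableFuture completed inside the task, and CompletableFuture.allOf to join. A stripped-down sketch of that pattern, with a placeholder work() call that is not part of the change:

    List<ExecutorService> executors = new ArrayList<>(threadCount);
    List<CompletableFuture<Long>> futures = new ArrayList<>(threadCount);
    for (int i = 0; i < threadCount; i++) {
      ExecutorService executor = Executors.newSingleThreadExecutor();
      executors.add(executor);
      // supplyAsync already returns a future, so the result of work() can be
      // returned directly instead of completing a separate future by hand.
      futures.add(CompletableFuture.supplyAsync(() -> work(), executor));
    }
    CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get();
    executors.forEach(ExecutorService::shutdown);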
@ -43,14 +43,20 @@ import org.junit.rules.ExpectedException;

import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
import java.util.Iterator;
import java.util.Optional;
import java.util.List;
import java.util.ArrayList;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

import java.util.stream.Collectors;
import java.util.stream.IntStream;

@ -144,6 +150,43 @@ public class TestSCMContainerManager {
    Assert.assertTrue(pipelineList.size() > 5);
  }

  @Test
  public void testAllocateContainerInParallel() throws Exception {
    int threadCount = 20;
    List<ExecutorService> executors = new ArrayList<>(threadCount);
    for (int i = 0; i < threadCount; i++) {
      executors.add(Executors.newSingleThreadExecutor());
    }
    List<CompletableFuture<ContainerInfo>> futureList =
        new ArrayList<>(threadCount);
    for (int i = 0; i < threadCount; i++) {
      final CompletableFuture<ContainerInfo> future = new CompletableFuture<>();
      CompletableFuture.supplyAsync(() -> {
        try {
          ContainerInfo containerInfo = containerManager
              .allocateContainer(xceiverClientManager.getType(),
                  xceiverClientManager.getFactor(), containerOwner);

          Assert.assertNotNull(containerInfo);
          Assert.assertNotNull(containerInfo.getPipelineID());
          future.complete(containerInfo);
          return containerInfo;
        } catch (IOException e) {
          future.completeExceptionally(e);
        }
        return future;
      }, executors.get(i));
      futureList.add(future);
    }
    try {
      CompletableFuture
          .allOf(futureList.toArray(new CompletableFuture[futureList.size()]))
          .get();
    } catch (Exception e) {
      Assert.fail("testAllocateBlockInParallel failed");
    }
  }

  @Test
  public void testGetContainer() throws IOException {
    ContainerInfo containerInfo = containerManager.allocateContainer(
@ -68,7 +68,6 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
    .HEALTHY;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
import static org.hamcrest.core.StringStartsWith.startsWith;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

@ -234,37 +233,6 @@ public class TestSCMNodeManager {
    }
  }

  /**
   * Asserts that if user provides a value less than 5 times the heartbeat
   * interval as the StaleNode Value, we throw since that is a QoS that we
   * cannot maintain.
   *
   * @throws IOException
   * @throws InterruptedException
   * @throws TimeoutException
   */
  @Test
  public void testScmSanityOfUserConfig1()
      throws IOException, AuthenticationException {
    OzoneConfiguration conf = getConf();
    final int interval = 100;
    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
        MILLISECONDS);
    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);

    // This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL
    // and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL
    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, interval, MILLISECONDS);

    thrown.expect(IllegalArgumentException.class);

    // This string is a multiple of the interval value
    thrown.expectMessage(
        startsWith("100 is not within min = 500 or max = 100000"));
    createNodeManager(conf);
  }

  /**
   * Asserts that if Stale Interval value is more than 5 times the value of HB
   * processing thread it is a sane value.
@ -37,4 +37,9 @@ public class MockRatisPipelineProvider extends RatisPipelineProvider {
  protected void initializePipeline(Pipeline pipeline) throws IOException {
    // do nothing as the datanodes do not exist
  }

  @Override
  public void shutdown() {
    // Do nothing.
  }
}
@ -609,7 +609,13 @@ public class WebHdfsFileSystem extends FileSystem
      boolean pathAlreadyEncoded = false;
      try {
        fspathUriDecoded = URLDecoder.decode(fspathUri.getPath(), "UTF-8");
        // The condition below was added as part of HDFS-14323 to make sure
        // pathAlreadyEncoded is not set when the input URL does not contain
        // any encoded sequence. This helps when pulling data from a 2.x
        // Hadoop cluster to 3.x with the 3.x distcp client.
        if (!fspathUri.getPath().equals(fspathUriDecoded)) {
          pathAlreadyEncoded = true;
        }
      } catch (IllegalArgumentException ex) {
        LOG.trace("Cannot decode URL encoded file", ex);
      }
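A small standalone sketch of the HDFS-14323 check above: a path is treated as already URL-encoded only when decoding actually changes it. This is an illustration, not the WebHdfsFileSystem code path itself; the class and method names are made up for the example.

import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;

final class EncodedPathCheck {
  static boolean isAlreadyEncoded(String path) {
    try {
      String decoded = URLDecoder.decode(path, StandardCharsets.UTF_8);
      return !path.equals(decoded); // "%2F" decodes to "/", so they differ
    } catch (IllegalArgumentException e) {
      return false; // malformed escape sequence: treat as not encoded
    }
  }

  public static void main(String[] args) {
    System.out.println(isAlreadyEncoded("/d/a%2Fb")); // true
    System.out.println(isAlreadyEncoded("/d/plain")); // false
  }
}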
@ -170,4 +170,8 @@ public final class ErasureCodingWorker {
    stripedReconstructionPool.shutdown();
    stripedReadPool.shutdown();
  }

  public float getXmitWeight() {
    return xmitWeight;
  }
}
@ -67,7 +67,11 @@ class StripedBlockReconstructor extends StripedReconstructor
      LOG.warn("Failed to reconstruct striped block: {}", getBlockGroup(), e);
      getDatanode().getMetrics().incrECFailedReconstructionTasks();
    } finally {
      getDatanode().decrementXmitsInProgress(getXmits());
      float xmitWeight = getErasureCodingWorker().getXmitWeight();
      // if the xmits is smaller than 1, xmitsSubmitted should be set to 1,
      // because if it were zero we could not measure the xmits submitted
      int xmitsSubmitted = Math.max((int) (getXmits() * xmitWeight), 1);
      getDatanode().decrementXmitsInProgress(xmitsSubmitted);
      final DataNodeMetrics metrics = getDatanode().getMetrics();
      metrics.incrECReconstructionTasks();
      metrics.incrECReconstructionBytesRead(getBytesRead());
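The arithmetic introduced above can be read in isolation: the xmits charged for an EC reconstruction task are scaled by the configured weight and floored at 1 so the task never disappears from the accounting. A one-method sketch of the same expression:

    static int weightedXmits(int xmits, float xmitWeight) {
      return Math.max((int) (xmits * xmitWeight), 1);
    }
    // weightedXmits(6, 0.5f) == 3, weightedXmits(6, 0f) == 1,
    // weightedXmits(6, 10f) == 60 -- matching the expectations in
    // TestReconstructStripedFile#testErasureCodingWorkerXmitsWeight below.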
@ -275,4 +275,8 @@ abstract class StripedReconstructor {
  DataNode getDatanode() {
    return datanode;
  }

  public ErasureCodingWorker getErasureCodingWorker() {
    return erasureCodingWorker;
  }
}
@ -412,7 +412,7 @@ Usage:
| `-getDatanodeInfo` \<datanode\_host:ipc\_port\> | Get the information about the given datanode. See [Rolling Upgrade document](./HdfsRollingUpgrade.html#dfsadmin_-getDatanodeInfo) for the detail. |
| `-metasave` filename | Save Namenode's primary data structures to *filename* in the directory specified by hadoop.log.dir property. *filename* is overwritten if it exists. *filename* will contain one line for each of the following<br/>1. Datanodes heart beating with Namenode<br/>2. Blocks waiting to be replicated<br/>3. Blocks currently being replicated<br/>4. Blocks waiting to be deleted |
| `-triggerBlockReport` `[-incremental]` \<datanode\_host:ipc\_port\> | Trigger a block report for the given datanode. If 'incremental' is specified, it will be an incremental block report; otherwise, it will be a full block report. |
| `-listOpenFiles` `[-blockingDecommission]` `[-path <path>]` | List all open files currently managed by the NameNode along with client name and client machine accessing them. Open files list will be filtered by given type and path. |
| `-listOpenFiles` `[-blockingDecommission]` `[-path <path>]` | List all open files currently managed by the NameNode along with client name and client machine accessing them. Open files list will be filtered by given type and path. Add -blockingDecommission option if you only want to list open files that are blocking the DataNode decommissioning. |
| `-help` [cmd] | Displays help for the given command or all commands if none is specified. |

Runs a HDFS dfsadmin client.
@ -514,6 +514,8 @@ public class TestReconstructStripedFile {

  @Test(timeout = 180000)
  public void testErasureCodingWorkerXmitsWeight() throws Exception {
    testErasureCodingWorkerXmitsWeight(0.5f,
        (int) (ecPolicy.getNumDataUnits() * 0.5f));
    testErasureCodingWorkerXmitsWeight(1f, ecPolicy.getNumDataUnits());
    testErasureCodingWorkerXmitsWeight(0f, 1);
    testErasureCodingWorkerXmitsWeight(10f, 10 * ecPolicy.getNumDataUnits());
@ -567,6 +569,10 @@ public class TestReconstructStripedFile {
    } finally {
      barrier.await();
      DataNodeFaultInjector.set(oldInjector);
      for (final DataNode curDn : cluster.getDataNodes()) {
        GenericTestUtils.waitFor(() -> curDn.getXceiverCount() > 1, 10, 60000);
        assertEquals(0, curDn.getXmitsInProgress());
      }
    }
  }
}
@ -192,7 +192,8 @@ public class TaskHeartbeatHandler extends AbstractService {
          (currentTime > (entry.getValue().getLastProgress() + taskTimeOut));
      // when the container in the NM has not started for a long time,
      // we consider the taskAttempt stuck
      boolean taskStuck = (!entry.getValue().isReported()) &&
      boolean taskStuck = (taskStuckTimeOut > 0) &&
          (!entry.getValue().isReported()) &&
          (currentTime >
              (entry.getValue().getLastProgress() + taskStuckTimeOut));

@ -225,7 +226,7 @@ public class TaskHeartbeatHandler extends AbstractService {
  }

  @VisibleForTesting
  ConcurrentMap getRunningAttempts(){
  ConcurrentMap<TaskAttemptId, ReportTime> getRunningAttempts(){
    return runningAttempts;
  }
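The added taskStuckTimeOut > 0 guard means a non-positive stuck timeout disables the check entirely. A minimal sketch of the resulting predicate, for illustration only:

    static boolean isTaskStuck(boolean reported, long lastProgressMs,
        long nowMs, long taskStuckTimeOutMs) {
      return taskStuckTimeOutMs > 0
          && !reported
          && nowMs > lastProgressMs + taskStuckTimeOutMs;
    }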
@ -968,16 +968,20 @@ public class RMContainerAllocator extends RMContainerRequestor

  @VisibleForTesting
  public TaskAttemptEvent createContainerFinishedEvent(ContainerStatus cont,
      TaskAttemptId attemptID) {
    if (cont.getExitStatus() == ContainerExitStatus.ABORTED
        || cont.getExitStatus() == ContainerExitStatus.PREEMPTED) {
      // killed by framework
      return new TaskAttemptEvent(attemptID,
          TaskAttemptEventType.TA_KILL);
    } else {
      return new TaskAttemptEvent(attemptID,
      TaskAttemptId attemptId) {
    TaskAttemptEvent event;
    switch (cont.getExitStatus()) {
    case ContainerExitStatus.ABORTED:
    case ContainerExitStatus.PREEMPTED:
    case ContainerExitStatus.KILLED_BY_CONTAINER_SCHEDULER:
      // killed by YARN
      event = new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL);
      break;
    default:
      event = new TaskAttemptEvent(attemptId,
          TaskAttemptEventType.TA_CONTAINER_COMPLETED);
    }
    return event;
  }

  @SuppressWarnings("unchecked")
@ -21,6 +21,7 @@ package org.apache.hadoop.mapreduce.v2.app;
import static org.junit.Assert.assertFalse;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

@ -48,7 +49,7 @@ import java.util.concurrent.ConcurrentMap;

public class TestTaskHeartbeatHandler {

  @SuppressWarnings({ "rawtypes", "unchecked" })
  @SuppressWarnings("unchecked")
  @Test
  public void testTaskTimeout() throws InterruptedException {
    EventHandler mockHandler = mock(EventHandler.class);
@ -81,6 +82,46 @@ public class TestTaskHeartbeatHandler {
    }
  }

  @Test
  @SuppressWarnings("unchecked")
  public void testTaskTimeoutDisable() throws InterruptedException {
    EventHandler mockHandler = mock(EventHandler.class);
    Clock clock = SystemClock.getInstance();
    TaskHeartbeatHandler hb = new TaskHeartbeatHandler(mockHandler, clock, 1);

    Configuration conf = new Configuration();
    conf.setLong(MRJobConfig.TASK_STUCK_TIMEOUT_MS, 0); // no timeout
    conf.setInt(MRJobConfig.TASK_TIMEOUT, 0); // no timeout
    // set TASK_PROGRESS_REPORT_INTERVAL to a value smaller than TASK_TIMEOUT
    // so that TASK_TIMEOUT is not overridden
    conf.setLong(MRJobConfig.TASK_PROGRESS_REPORT_INTERVAL, 0);
    conf.setInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 10); // 10 ms

    hb.init(conf);
    hb.start();
    try {
      ApplicationId appId = ApplicationId.newInstance(0L, 5);
      JobId jobId = MRBuilderUtils.newJobId(appId, 4);
      TaskId tid = MRBuilderUtils.newTaskId(jobId, 3, TaskType.MAP);
      TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 2);
      hb.register(taid);

      ConcurrentMap<TaskAttemptId, TaskHeartbeatHandler.ReportTime>
          runningAttempts = hb.getRunningAttempts();
      for (Map.Entry<TaskAttemptId, TaskHeartbeatHandler.ReportTime> entry
          : runningAttempts.entrySet()) {
        assertFalse(entry.getValue().isReported());
      }

      Thread.sleep(100);

      // Timeout is disabled, so the task should not be canceled
      verify(mockHandler, never()).handle(any(Event.class));
    } finally {
      hb.stop();
    }
  }

  @SuppressWarnings("unchecked")
  @Test
  public void testTaskStuck() throws InterruptedException {
@ -2430,6 +2430,8 @@ public class TestRMContainerAllocator {
    ApplicationId applicationId = ApplicationId.newInstance(1, 1);
    ApplicationAttemptId applicationAttemptId =
        ApplicationAttemptId.newInstance(applicationId, 1);

    // ABORTED
    ContainerId containerId =
        ContainerId.newContainerId(applicationAttemptId, 1);
    ContainerStatus status = ContainerStatus.newInstance(
@ -2448,6 +2450,7 @@ public class TestRMContainerAllocator {
        abortedStatus, attemptId);
    Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent.getType());

    // PREEMPTED
    ContainerId containerId2 =
        ContainerId.newContainerId(applicationAttemptId, 2);
    ContainerStatus status2 = ContainerStatus.newInstance(containerId2,
@ -2464,6 +2467,25 @@ public class TestRMContainerAllocator {
    TaskAttemptEvent abortedEvent2 = allocator.createContainerFinishedEvent(
        preemptedStatus, attemptId);
    Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent2.getType());

    // KILLED_BY_CONTAINER_SCHEDULER
    ContainerId containerId3 =
        ContainerId.newContainerId(applicationAttemptId, 3);
    ContainerStatus status3 = ContainerStatus.newInstance(containerId3,
        ContainerState.RUNNING, "", 0);

    ContainerStatus killedByContainerSchedulerStatus =
        ContainerStatus.newInstance(containerId3, ContainerState.RUNNING, "",
            ContainerExitStatus.KILLED_BY_CONTAINER_SCHEDULER);

    TaskAttemptEvent event3 = allocator.createContainerFinishedEvent(status3,
        attemptId);
    Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED,
        event3.getType());

    TaskAttemptEvent abortedEvent3 = allocator.createContainerFinishedEvent(
        killedByContainerSchedulerStatus, attemptId);
    Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent3.getType());
  }

  @Test
@ -282,6 +282,7 @@
  <description>The max timeout before receiving remote task's first heartbeat.
    This parameter avoids waiting indefinitely for the container to start,
    which would otherwise leave the task stuck in the NEW state.
    A value of 0 disables the timeout.
  </description>
</property>
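For reference, the same switch can be flipped in code; the constants below are the ones the handler and its test use, while the surrounding job-setup context is assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    Configuration conf = new Configuration();
    conf.setLong(MRJobConfig.TASK_STUCK_TIMEOUT_MS, 0L); // 0 disables the stuck-task check
    conf.setInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 10_000); // check every 10s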
@ -528,6 +528,23 @@ public class OzoneBucket extends WithMetadata {
        recursive);
  }

  /**
   * List the status for a file or a directory and its contents.
   *
   * @param keyName    Absolute path of the entry to be listed
   * @param recursive  For a directory if true all the descendants of a
   *                   particular directory are listed
   * @param startKey   Key from which listing needs to start. If startKey exists
   *                   its status is included in the final list.
   * @param numEntries Number of entries to list from the start key
   * @return list of file status
   */
  public List<OzoneFileStatus> listStatus(String keyName, boolean recursive,
      String startKey, long numEntries) throws IOException {
    return proxy
        .listStatus(volumeName, name, keyName, recursive, startKey, numEntries);
  }

  /**
   * An Iterator to iterate over {@link OzoneKey} list.
   */
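A hedged usage sketch for the new listStatus API; the volume and bucket names are illustrative placeholders, and error handling is omitted:

    OzoneConfiguration conf = new OzoneConfiguration();
    try (OzoneClient client = OzoneClientFactory.getClient(conf)) {
      OzoneBucket bucket = client.getObjectStore()
          .getVolume("vol1").getBucket("bucket1");
      // list up to 100 entries under dir1, non-recursive, from the beginning
      List<OzoneFileStatus> statuses = bucket.listStatus("dir1", false, "", 100);
      for (OzoneFileStatus status : statuses) {
        System.out.println(status.getPath());
      }
    }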
@ -1,4 +1,4 @@
/**
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@ -604,4 +604,21 @@ public interface ClientProtocol {
  OzoneOutputStream createFile(String volumeName, String bucketName,
      String keyName, long size, ReplicationType type, ReplicationFactor factor,
      boolean overWrite, boolean recursive) throws IOException;

  /**
   * List the status for a file or a directory and its contents.
   *
   * @param volumeName Volume name
   * @param bucketName Bucket name
   * @param keyName    Absolute path of the entry to be listed
   * @param recursive  For a directory if true all the descendants of a
   *                   particular directory are listed
   * @param startKey   Key from which listing needs to start. If startKey exists
   *                   its status is included in the final list.
   * @param numEntries Number of entries to list from the start key
   * @return list of file status
   */
  List<OzoneFileStatus> listStatus(String volumeName, String bucketName,
      String keyName, boolean recursive, String startKey, long numEntries)
      throws IOException;
}
@ -112,7 +112,7 @@ public class RestClient implements ClientProtocol {
|
||||||
private final URI ozoneRestUri;
|
private final URI ozoneRestUri;
|
||||||
private final CloseableHttpClient httpClient;
|
private final CloseableHttpClient httpClient;
|
||||||
private final UserGroupInformation ugi;
|
private final UserGroupInformation ugi;
|
||||||
private final OzoneAcl.OzoneACLRights userRights;
|
// private final OzoneAcl.OzoneACLRights userRights;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Creates RestClient instance with the given configuration.
|
* Creates RestClient instance with the given configuration.
|
||||||
|
@ -161,8 +161,8 @@ public class RestClient implements ClientProtocol {
|
||||||
.build())
|
.build())
|
||||||
.build();
|
.build();
|
||||||
|
|
||||||
this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS,
|
// this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS,
|
||||||
OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT);
|
// OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT);
|
||||||
|
|
||||||
// TODO: Add new configuration parameter to configure RestServerSelector.
|
// TODO: Add new configuration parameter to configure RestServerSelector.
|
||||||
RestServerSelector defaultSelector = new DefaultRestServerSelector();
|
RestServerSelector defaultSelector = new DefaultRestServerSelector();
|
||||||
|
@ -1113,4 +1113,12 @@ public class RestClient implements ClientProtocol {
|
||||||
throw new UnsupportedOperationException(
|
throw new UnsupportedOperationException(
|
||||||
"Ozone REST protocol does not " + "support this operation.");
|
"Ozone REST protocol does not " + "support this operation.");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<OzoneFileStatus> listStatus(String volumeName, String bucketName,
|
||||||
|
String keyName, boolean recursive, String startKey, long numEntries)
|
||||||
|
throws IOException {
|
||||||
|
throw new UnsupportedOperationException(
|
||||||
|
"Ozone REST protocol does not " + "support this operation.");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -73,7 +73,6 @@ import org.apache.hadoop.ozone.om.protocolPB
|
||||||
.OzoneManagerProtocolClientSideTranslatorPB;
|
.OzoneManagerProtocolClientSideTranslatorPB;
|
||||||
import org.apache.hadoop.net.NetUtils;
|
import org.apache.hadoop.net.NetUtils;
|
||||||
import org.apache.hadoop.ozone.OzoneAcl;
|
import org.apache.hadoop.ozone.OzoneAcl;
|
||||||
import org.apache.hadoop.ozone.om.OMConfigKeys;
|
|
||||||
import org.apache.hadoop.ozone.protocol.proto
|
import org.apache.hadoop.ozone.protocol.proto
|
||||||
.OzoneManagerProtocolProtos.ServicePort;
|
.OzoneManagerProtocolProtos.ServicePort;
|
||||||
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
||||||
|
@ -85,6 +84,9 @@ import org.apache.hadoop.hdds.scm.protocolPB
|
||||||
import org.apache.hadoop.hdds.scm.protocolPB
|
import org.apache.hadoop.hdds.scm.protocolPB
|
||||||
.StorageContainerLocationProtocolPB;
|
.StorageContainerLocationProtocolPB;
|
||||||
import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
|
import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
|
||||||
|
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
|
||||||
|
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
|
||||||
|
import org.apache.hadoop.ozone.security.acl.OzoneAclConfig;
|
||||||
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
|
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
|
||||||
import org.apache.hadoop.security.UserGroupInformation;
|
import org.apache.hadoop.security.UserGroupInformation;
|
||||||
import org.apache.hadoop.security.token.Token;
|
import org.apache.hadoop.security.token.Token;
|
||||||
|
@ -121,8 +123,8 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
|
||||||
private final int bytesPerChecksum;
|
private final int bytesPerChecksum;
|
||||||
private boolean verifyChecksum;
|
private boolean verifyChecksum;
|
||||||
private final UserGroupInformation ugi;
|
private final UserGroupInformation ugi;
|
||||||
private final OzoneAcl.OzoneACLRights userRights;
|
private final ACLType userRights;
|
||||||
private final OzoneAcl.OzoneACLRights groupRights;
|
private final ACLType groupRights;
|
||||||
private final long streamBufferFlushSize;
|
private final long streamBufferFlushSize;
|
||||||
private final long streamBufferMaxSize;
|
private final long streamBufferMaxSize;
|
||||||
private final long blockSize;
|
private final long blockSize;
|
||||||
|
@ -141,10 +143,11 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
|
||||||
Preconditions.checkNotNull(conf);
|
Preconditions.checkNotNull(conf);
|
||||||
this.conf = new OzoneConfiguration(conf);
|
this.conf = new OzoneConfiguration(conf);
|
||||||
this.ugi = UserGroupInformation.getCurrentUser();
|
this.ugi = UserGroupInformation.getCurrentUser();
|
||||||
this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS,
|
// Get default acl rights for user and group.
|
||||||
OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT);
|
OzoneAclConfig aclConfig = this.conf.getObject(OzoneAclConfig.class);
|
||||||
this.groupRights = conf.getEnum(OMConfigKeys.OZONE_OM_GROUP_RIGHTS,
|
this.userRights = aclConfig.getUserDefaultRights();
|
||||||
OMConfigKeys.OZONE_OM_GROUP_RIGHTS_DEFAULT);
|
this.groupRights = aclConfig.getGroupDefaultRights();
|
||||||
|
|
||||||
this.ozoneManagerClient = new OzoneManagerProtocolClientSideTranslatorPB(
|
this.ozoneManagerClient = new OzoneManagerProtocolClientSideTranslatorPB(
|
||||||
this.conf, clientId.toString(), ugi);
|
this.conf, clientId.toString(), ugi);
|
||||||
long scmVersion =
|
long scmVersion =
|
||||||
|
@ -256,13 +259,13 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
|
||||||
OzoneQuota.parseQuota(volArgs.getQuota()).sizeInBytes();
|
OzoneQuota.parseQuota(volArgs.getQuota()).sizeInBytes();
|
||||||
List<OzoneAcl> listOfAcls = new ArrayList<>();
|
List<OzoneAcl> listOfAcls = new ArrayList<>();
|
||||||
//User ACL
|
//User ACL
|
||||||
listOfAcls.add(new OzoneAcl(OzoneAcl.OzoneACLType.USER,
|
listOfAcls.add(new OzoneAcl(ACLIdentityType.USER,
|
||||||
owner, userRights));
|
owner, userRights));
|
||||||
//Group ACLs of the User
|
//Group ACLs of the User
|
||||||
List<String> userGroups = Arrays.asList(UserGroupInformation
|
List<String> userGroups = Arrays.asList(UserGroupInformation
|
||||||
.createRemoteUser(owner).getGroupNames());
|
.createRemoteUser(owner).getGroupNames());
|
||||||
userGroups.stream().forEach((group) -> listOfAcls.add(
|
userGroups.stream().forEach((group) -> listOfAcls.add(
|
||||||
new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights)));
|
new OzoneAcl(ACLIdentityType.GROUP, group, groupRights)));
|
||||||
//ACLs from VolumeArgs
|
//ACLs from VolumeArgs
|
||||||
if(volArgs.getAcls() != null) {
|
if(volArgs.getAcls() != null) {
|
||||||
listOfAcls.addAll(volArgs.getAcls());
|
listOfAcls.addAll(volArgs.getAcls());
|
||||||
|
@ -403,13 +406,13 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
|
||||||
|
|
||||||
List<OzoneAcl> listOfAcls = new ArrayList<>();
|
List<OzoneAcl> listOfAcls = new ArrayList<>();
|
||||||
//User ACL
|
//User ACL
|
||||||
listOfAcls.add(new OzoneAcl(OzoneAcl.OzoneACLType.USER,
|
listOfAcls.add(new OzoneAcl(ACLIdentityType.USER,
|
||||||
ugi.getUserName(), userRights));
|
ugi.getUserName(), userRights));
|
||||||
//Group ACLs of the User
|
//Group ACLs of the User
|
||||||
List<String> userGroups = Arrays.asList(UserGroupInformation
|
List<String> userGroups = Arrays.asList(UserGroupInformation
|
||||||
.createRemoteUser(ugi.getUserName()).getGroupNames());
|
.createRemoteUser(ugi.getUserName()).getGroupNames());
|
||||||
userGroups.stream().forEach((group) -> listOfAcls.add(
|
userGroups.stream().forEach((group) -> listOfAcls.add(
|
||||||
new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights)));
|
new OzoneAcl(ACLIdentityType.GROUP, group, groupRights)));
|
||||||
//ACLs from BucketArgs
|
//ACLs from BucketArgs
|
||||||
if(bucketArgs.getAcls() != null) {
|
if(bucketArgs.getAcls() != null) {
|
||||||
listOfAcls.addAll(bucketArgs.getAcls());
|
listOfAcls.addAll(bucketArgs.getAcls());
|
||||||
|
@ -993,6 +996,19 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
|
||||||
factor);
|
factor);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<OzoneFileStatus> listStatus(String volumeName, String bucketName,
|
||||||
|
String keyName, boolean recursive, String startKey, long numEntries)
|
||||||
|
throws IOException {
|
||||||
|
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
|
||||||
|
.setVolumeName(volumeName)
|
||||||
|
.setBucketName(bucketName)
|
||||||
|
.setKeyName(keyName)
|
||||||
|
.build();
|
||||||
|
return ozoneManagerClient
|
||||||
|
.listStatus(keyArgs, recursive, startKey, numEntries);
|
||||||
|
}
|
||||||
|
|
||||||
private OzoneInputStream createInputStream(OmKeyInfo keyInfo,
|
private OzoneInputStream createInputStream(OmKeyInfo keyInfo,
|
||||||
String requestId) throws IOException {
|
String requestId) throws IOException {
|
||||||
LengthInputStream lengthInputStream = KeyInputStream
|
LengthInputStream lengthInputStream = KeyInputStream
|
||||||
|
|
|
@ -189,6 +189,7 @@ public final class OmUtils {
  case ListMultiPartUploadParts:
  case GetFileStatus:
  case LookupFile:
  case ListStatus:
    return true;
  case CreateVolume:
  case SetVolumeProperty:
@ -19,6 +19,11 @@
|
||||||
|
|
||||||
package org.apache.hadoop.ozone;
|
package org.apache.hadoop.ozone;
|
||||||
|
|
||||||
|
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
|
||||||
|
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
|
||||||
|
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
import java.util.Objects;
|
import java.util.Objects;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -32,9 +37,9 @@ import java.util.Objects;
|
||||||
* </ul>
|
* </ul>
|
||||||
*/
|
*/
|
||||||
public class OzoneAcl {
|
public class OzoneAcl {
|
||||||
private OzoneACLType type;
|
private ACLIdentityType type;
|
||||||
private String name;
|
private String name;
|
||||||
private OzoneACLRights rights;
|
private List<ACLType> rights;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Constructor for OzoneAcl.
|
* Constructor for OzoneAcl.
|
||||||
|
@ -47,16 +52,37 @@ public class OzoneAcl {
|
||||||
*
|
*
|
||||||
* @param type - Type
|
* @param type - Type
|
||||||
* @param name - Name of user
|
* @param name - Name of user
|
||||||
* @param rights - Rights
|
* @param acl - Rights
|
||||||
*/
|
*/
|
||||||
public OzoneAcl(OzoneACLType type, String name, OzoneACLRights rights) {
|
public OzoneAcl(ACLIdentityType type, String name, ACLType acl) {
|
||||||
this.name = name;
|
this.name = name;
|
||||||
this.rights = rights;
|
this.rights = new ArrayList<>();
|
||||||
|
this.rights.add(acl);
|
||||||
this.type = type;
|
this.type = type;
|
||||||
if (type == OzoneACLType.WORLD && name.length() != 0) {
|
if (type == ACLIdentityType.WORLD && name.length() != 0) {
|
||||||
throw new IllegalArgumentException("Unexpected name part in world type");
|
throw new IllegalArgumentException("Unexpected name part in world type");
|
||||||
}
|
}
|
||||||
if (((type == OzoneACLType.USER) || (type == OzoneACLType.GROUP))
|
if (((type == ACLIdentityType.USER) || (type == ACLIdentityType.GROUP))
|
||||||
|
&& (name.length() == 0)) {
|
||||||
|
throw new IllegalArgumentException("User or group name is required");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Constructor for OzoneAcl.
|
||||||
|
*
|
||||||
|
* @param type - Type
|
||||||
|
* @param name - Name of user
|
||||||
|
* @param acls - Rights
|
||||||
|
*/
|
||||||
|
public OzoneAcl(ACLIdentityType type, String name, List<ACLType> acls) {
|
||||||
|
this.name = name;
|
||||||
|
this.rights = acls;
|
||||||
|
this.type = type;
|
||||||
|
if (type == ACLIdentityType.WORLD && name.length() != 0) {
|
||||||
|
throw new IllegalArgumentException("Unexpected name part in world type");
|
||||||
|
}
|
||||||
|
if (((type == ACLIdentityType.USER) || (type == ACLIdentityType.GROUP))
|
||||||
&& (name.length() == 0)) {
|
&& (name.length() == 0)) {
|
||||||
throw new IllegalArgumentException("User or group name is required");
|
throw new IllegalArgumentException("User or group name is required");
|
||||||
}
|
}
|
||||||
|
@ -78,17 +104,20 @@ public class OzoneAcl {
|
||||||
throw new IllegalArgumentException("ACLs are not in expected format");
|
throw new IllegalArgumentException("ACLs are not in expected format");
|
||||||
}
|
}
|
||||||
|
|
||||||
OzoneACLType aclType = OzoneACLType.valueOf(parts[0].toUpperCase());
|
ACLIdentityType aclType = ACLIdentityType.valueOf(parts[0].toUpperCase());
|
||||||
OzoneACLRights rights = OzoneACLRights.getACLRight(parts[2].toLowerCase());
|
List<ACLType> acls = new ArrayList<>();
|
||||||
|
for (char ch : parts[2].toCharArray()) {
|
||||||
|
acls.add(ACLType.getACLRight(String.valueOf(ch)));
|
||||||
|
}
|
||||||
|
|
||||||
// TODO : Support sanitation of these user names by calling into
|
// TODO : Support sanitation of these user names by calling into
|
||||||
// userAuth Interface.
|
// userAuth Interface.
|
||||||
return new OzoneAcl(aclType, parts[1], rights);
|
return new OzoneAcl(aclType, parts[1], acls);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return type + ":" + name + ":" + OzoneACLRights.getACLRightsString(rights);
|
return type + ":" + name + ":" + ACLType.getACLString(rights);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -120,7 +149,7 @@ public class OzoneAcl {
|
||||||
*
|
*
|
||||||
* @return - Rights
|
* @return - Rights
|
||||||
*/
|
*/
|
||||||
public OzoneACLRights getRights() {
|
public List<ACLType> getRights() {
|
||||||
return rights;
|
return rights;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -129,7 +158,7 @@ public class OzoneAcl {
|
||||||
*
|
*
|
||||||
* @return type
|
* @return type
|
||||||
*/
|
*/
|
||||||
public OzoneACLType getType() {
|
public ACLIdentityType getType() {
|
||||||
return type;
|
return type;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -150,9 +179,7 @@ public class OzoneAcl {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
OzoneAcl otherAcl = (OzoneAcl) obj;
|
OzoneAcl otherAcl = (OzoneAcl) obj;
|
||||||
return otherAcl.getName().equals(this.getName()) &&
|
return otherAcl.toString().equals(this.toString());
|
||||||
otherAcl.getRights() == this.getRights() &&
|
|
||||||
otherAcl.getType() == this.getType();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -177,57 +204,4 @@ public class OzoneAcl {
|
||||||
value = val;
|
value = val;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* ACL rights.
|
|
||||||
*/
|
|
||||||
public enum OzoneACLRights {
|
|
||||||
READ, WRITE, READ_WRITE;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the ACL rights based on passed in String.
|
|
||||||
*
|
|
||||||
* @param type ACL right string
|
|
||||||
*
|
|
||||||
* @return OzoneACLRights
|
|
||||||
*/
|
|
||||||
public static OzoneACLRights getACLRight(String type) {
|
|
||||||
if (type == null || type.isEmpty()) {
|
|
||||||
throw new IllegalArgumentException("ACL right cannot be empty");
|
|
||||||
}
|
|
||||||
|
|
||||||
switch (type) {
|
|
||||||
case OzoneConsts.OZONE_ACL_READ:
|
|
||||||
return OzoneACLRights.READ;
|
|
||||||
case OzoneConsts.OZONE_ACL_WRITE:
|
|
||||||
return OzoneACLRights.WRITE;
|
|
||||||
case OzoneConsts.OZONE_ACL_READ_WRITE:
|
|
||||||
case OzoneConsts.OZONE_ACL_WRITE_READ:
|
|
||||||
return OzoneACLRights.READ_WRITE;
|
|
||||||
default:
|
|
||||||
throw new IllegalArgumentException("ACL right is not recognized");
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns String representation of ACL rights.
|
|
||||||
* @param acl OzoneACLRights
|
|
||||||
* @return String representation of acl
|
|
||||||
*/
|
|
||||||
public static String getACLRightsString(OzoneACLRights acl) {
|
|
||||||
switch(acl) {
|
|
||||||
case READ:
|
|
||||||
return OzoneConsts.OZONE_ACL_READ;
|
|
||||||
case WRITE:
|
|
||||||
return OzoneConsts.OZONE_ACL_WRITE;
|
|
||||||
case READ_WRITE:
|
|
||||||
return OzoneConsts.OZONE_ACL_READ_WRITE;
|
|
||||||
default:
|
|
||||||
throw new IllegalArgumentException("ACL right is not recognized");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
|
@ -58,7 +58,8 @@ public enum OMAction implements AuditAction {
  GET_FILE_STATUS,
  CREATE_DIRECTORY,
  CREATE_FILE,
  LOOKUP_FILE;
  LOOKUP_FILE,
  LIST_STATUS;

  @Override
  public String getAction() {
@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om;

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.ratis.util.TimeDuration;

/**
@ -78,17 +77,6 @@ public final class OMConfigKeys {
      "ozone.om.user.max.volume";
  public static final int OZONE_OM_USER_MAX_VOLUME_DEFAULT = 1024;

  // OM Default user/group permissions
  public static final String OZONE_OM_USER_RIGHTS =
      "ozone.om.user.rights";
  public static final OzoneAcl.OzoneACLRights OZONE_OM_USER_RIGHTS_DEFAULT =
      OzoneAcl.OzoneACLRights.READ_WRITE;

  public static final String OZONE_OM_GROUP_RIGHTS =
      "ozone.om.group.rights";
  public static final OzoneAcl.OzoneACLRights OZONE_OM_GROUP_RIGHTS_DEFAULT =
      OzoneAcl.OzoneACLRights.READ_WRITE;

  public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK =
      "ozone.key.deleting.limit.per.task";
  public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
@ -37,7 +37,7 @@ import java.util.HashMap;
|
||||||
@SuppressWarnings("ProtocolBufferOrdinal")
|
@SuppressWarnings("ProtocolBufferOrdinal")
|
||||||
public class OmOzoneAclMap {
|
public class OmOzoneAclMap {
|
||||||
// per Acl Type user:rights map
|
// per Acl Type user:rights map
|
||||||
private ArrayList<Map<String, OzoneAclRights>> aclMaps;
|
private ArrayList<Map<String, List<OzoneAclRights>>> aclMaps;
|
||||||
|
|
||||||
OmOzoneAclMap() {
|
OmOzoneAclMap() {
|
||||||
aclMaps = new ArrayList<>();
|
aclMaps = new ArrayList<>();
|
||||||
|
@ -46,51 +46,75 @@ public class OmOzoneAclMap {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private Map<String, OzoneAclRights> getMap(OzoneAclType type) {
|
private Map<String, List<OzoneAclRights>> getMap(OzoneAclType type) {
|
||||||
return aclMaps.get(type.ordinal());
|
return aclMaps.get(type.ordinal());
|
||||||
}
|
}
|
||||||
|
|
||||||
// For a given acl type and user, get the stored acl
|
// For a given acl type and user, get the stored acl
|
||||||
private OzoneAclRights getAcl(OzoneAclType type, String user) {
|
private List<OzoneAclRights> getAcl(OzoneAclType type, String user) {
|
||||||
return getMap(type).get(user);
|
return getMap(type).get(user);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add a new acl to the map
|
// Add a new acl to the map
|
||||||
public void addAcl(OzoneAclInfo acl) {
|
public void addAcl(OzoneAclInfo acl) {
|
||||||
getMap(acl.getType()).put(acl.getName(), acl.getRights());
|
getMap(acl.getType()).put(acl.getName(), acl.getRightsList());
|
||||||
}
|
}
|
||||||
|
|
||||||
// for a given acl, check if the user has access rights
|
// for a given acl, check if the user has access rights
|
||||||
public boolean hasAccess(OzoneAclInfo acl) {
|
public boolean hasAccess(OzoneAclInfo acl) {
|
||||||
OzoneAclRights storedRights = getAcl(acl.getType(), acl.getName());
|
if (acl == null) {
|
||||||
if (storedRights != null) {
|
return false;
|
||||||
switch (acl.getRights()) {
|
}
|
||||||
case READ:
|
|
||||||
return (storedRights == OzoneAclRights.READ)
|
List<OzoneAclRights> storedRights = getAcl(acl.getType(), acl.getName());
|
||||||
|| (storedRights == OzoneAclRights.READ_WRITE);
|
if(storedRights == null) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (OzoneAclRights right : storedRights) {
|
||||||
|
switch (right) {
|
||||||
|
case CREATE:
|
||||||
|
return (right == OzoneAclRights.CREATE)
|
||||||
|
|| (right == OzoneAclRights.ALL);
|
||||||
|
case LIST:
|
||||||
|
return (right == OzoneAclRights.LIST)
|
||||||
|
|| (right == OzoneAclRights.ALL);
|
||||||
case WRITE:
|
case WRITE:
|
||||||
return (storedRights == OzoneAclRights.WRITE)
|
return (right == OzoneAclRights.WRITE)
|
||||||
|| (storedRights == OzoneAclRights.READ_WRITE);
|
|| (right == OzoneAclRights.ALL);
|
||||||
case READ_WRITE:
|
case READ:
|
||||||
return (storedRights == OzoneAclRights.READ_WRITE);
|
return (right == OzoneAclRights.READ)
|
||||||
|
|| (right == OzoneAclRights.ALL);
|
||||||
|
case DELETE:
|
||||||
|
return (right == OzoneAclRights.DELETE)
|
||||||
|
|| (right == OzoneAclRights.ALL);
|
||||||
|
case READ_ACL:
|
||||||
|
return (right == OzoneAclRights.READ_ACL)
|
||||||
|
|| (right == OzoneAclRights.ALL);
|
||||||
|
case WRITE_ACL:
|
||||||
|
return (right == OzoneAclRights.WRITE_ACL)
|
||||||
|
|| (right == OzoneAclRights.ALL);
|
||||||
|
case ALL:
|
||||||
|
return (right == OzoneAclRights.ALL);
|
||||||
|
case NONE:
|
||||||
|
return !(right == OzoneAclRights.NONE);
|
||||||
default:
|
default:
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Convert this map to OzoneAclInfo Protobuf List
|
// Convert this map to OzoneAclInfo Protobuf List
|
||||||
public List<OzoneAclInfo> ozoneAclGetProtobuf() {
|
public List<OzoneAclInfo> ozoneAclGetProtobuf() {
|
||||||
List<OzoneAclInfo> aclList = new LinkedList<>();
|
List<OzoneAclInfo> aclList = new LinkedList<>();
|
||||||
for (OzoneAclType type: OzoneAclType.values()) {
|
for (OzoneAclType type: OzoneAclType.values()) {
|
||||||
for (Map.Entry<String, OzoneAclRights> entry :
|
for (Map.Entry<String, List<OzoneAclRights>> entry :
|
||||||
aclMaps.get(type.ordinal()).entrySet()) {
|
aclMaps.get(type.ordinal()).entrySet()) {
|
||||||
OzoneAclInfo aclInfo = OzoneAclInfo.newBuilder()
|
OzoneAclInfo aclInfo = OzoneAclInfo.newBuilder()
|
||||||
.setName(entry.getKey())
|
.setName(entry.getKey())
|
||||||
.setType(type)
|
.setType(type)
|
||||||
.setRights(entry.getValue())
|
.addAllRights(entry.getValue())
|
||||||
.build();
|
.build();
|
||||||
aclList.add(aclInfo);
|
aclList.add(aclInfo);
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,89 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.ozone.om.helpers;
|
||||||
|
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
|
|
||||||
|
import java.nio.file.Paths;
|
||||||
|
|
||||||
|
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Utility class for OzoneFileSystem.
|
||||||
|
*/
|
||||||
|
public final class OzoneFSUtils {
|
||||||
|
|
||||||
|
private OzoneFSUtils() {}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns string representation of path after removing the leading slash.
|
||||||
|
*/
|
||||||
|
public static String pathToKey(Path path) {
|
||||||
|
return path.toString().substring(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns string representation of the input path parent. The function adds
|
||||||
|
* a trailing slash if it does not exist and returns an empty string if the
|
||||||
|
* parent is root.
|
||||||
|
*/
|
||||||
|
public static String getParent(String keyName) {
|
||||||
|
java.nio.file.Path parentDir = Paths.get(keyName).getParent();
|
||||||
|
if (parentDir == null) {
|
||||||
|
return "";
|
||||||
|
}
|
||||||
|
return addTrailingSlashIfNeeded(parentDir.toString());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function returns immediate child of given ancestor in a particular
|
||||||
|
* descendant. For example if ancestor is /a/b and descendant is /a/b/c/d/e
|
||||||
|
* the function should return /a/b/c/. If the descendant itself is the
|
||||||
|
* immediate child then it is returned as is without adding a trailing slash.
|
||||||
|
* This is done to distinguish files from a directory as in ozone files do
|
||||||
|
* not carry a trailing slash.
|
||||||
|
*/
|
||||||
|
public static String getImmediateChild(String descendant, String ancestor) {
|
||||||
|
ancestor =
|
||||||
|
!ancestor.isEmpty() ? addTrailingSlashIfNeeded(ancestor) : ancestor;
|
||||||
|
if (!descendant.startsWith(ancestor)) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
java.nio.file.Path descendantPath = Paths.get(descendant);
|
||||||
|
java.nio.file.Path ancestorPath = Paths.get(ancestor);
|
||||||
|
int ancestorPathNameCount =
|
||||||
|
ancestor.isEmpty() ? 0 : ancestorPath.getNameCount();
|
||||||
|
if (descendantPath.getNameCount() - ancestorPathNameCount > 1) {
|
||||||
|
return addTrailingSlashIfNeeded(
|
||||||
|
ancestor + descendantPath.getName(ancestorPathNameCount));
|
||||||
|
}
|
||||||
|
return descendant;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static String addTrailingSlashIfNeeded(String key) {
|
||||||
|
if (!key.endsWith(OZONE_URI_DELIMITER)) {
|
||||||
|
return key + OZONE_URI_DELIMITER;
|
||||||
|
} else {
|
||||||
|
return key;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public static boolean isFile(String keyName) {
|
||||||
|
return !keyName.endsWith(OZONE_URI_DELIMITER);
|
||||||
|
}
|
||||||
|
}
|
|
@ -18,11 +18,11 @@

 package org.apache.hadoop.ozone.om.helpers;

-import org.apache.hadoop.fs.FSProtos.FileStatusProto;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.protocolPB.PBHelper;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto;

 import java.io.IOException;
 import java.net.URI;

@ -53,13 +53,14 @@ public class OzoneFileStatus extends FileStatus {
     super(0, true, 0, 0, 0, getPath(keyName));
   }

-  public FileStatusProto getProtobuf() throws IOException {
-    return PBHelper.convert(this);
+  public OzoneFileStatusProto getProtobuf() throws IOException {
+    return OzoneFileStatusProto.newBuilder().setStatus(PBHelper.convert(this))
+        .build();
   }

-  public static OzoneFileStatus getFromProtobuf(FileStatusProto response)
+  public static OzoneFileStatus getFromProtobuf(OzoneFileStatusProto response)
       throws IOException {
-    return new OzoneFileStatus(PBHelper.convert(response));
+    return new OzoneFileStatus(PBHelper.convert(response.getStatus()));
   }

   public static Path getPath(String keyName) {
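A hedged sketch of the new round trip: the status is now wrapped in the Ozone-specific OzoneFileStatusProto rather than serialized directly as a hadoop.fs FileStatusProto. The single-argument OzoneFileStatus constructor is assumed from the super(0, true, ...) call in this hunk; everything else uses only methods shown above.

import java.io.IOException;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto;

public class FileStatusRoundTrip {
  public static void main(String[] args) throws IOException {
    OzoneFileStatus dirStatus = new OzoneFileStatus("dir1/");       // directory key (constructor assumed)
    OzoneFileStatusProto proto = dirStatus.getProtobuf();           // wraps the hadoop.fs FileStatusProto
    OzoneFileStatus decoded = OzoneFileStatus.getFromProtobuf(proto);
    System.out.println(decoded.getPath() + " isDir=" + decoded.isDirectory());
  }
}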
@ -450,5 +450,19 @@ public interface OzoneManagerProtocol
    * invalid arguments
    */
   OmKeyInfo lookupFile(OmKeyArgs keyArgs) throws IOException;
+
+  /**
+   * List the status of a file or a directory and its contents.
+   *
+   * @param keyArgs Key args
+   * @param recursive For a directory, if true, all descendants of the
+   *                  directory are listed
+   * @param startKey Key from which the listing starts. If startKey exists,
+   *                 its status is included in the returned list.
+   * @param numEntries Number of entries to list from the start key
+   * @return list of file statuses
+   */
+  List<OzoneFileStatus> listStatus(OmKeyArgs keyArgs, boolean recursive,
+      String startKey, long numEntries) throws IOException;
 }
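A hedged usage sketch of the new listStatus call against an OzoneManagerProtocol handle; how the `om` proxy is obtained and the exact OmKeyArgs.Builder shape are assumptions, since neither appears in this hunk.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;

public final class ListStatusExample {
  static void printChildren(OzoneManagerProtocol om) throws IOException {
    OmKeyArgs args = new OmKeyArgs.Builder()     // builder shape assumed
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setKeyName("dir1/")
        .build();
    // non-recursive listing of up to 1000 immediate children, starting from ""
    List<OzoneFileStatus> statuses = om.listStatus(args, false, "", 1000);
    for (OzoneFileStatus s : statuses) {
      System.out.println((s.isDirectory() ? "d " : "- ") + s.getPath());
    }
  }
}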
@ -55,7 +55,13 @@ import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateDirectoryRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusRequest;

@ -1281,14 +1287,13 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
         .setBucketName(args.getBucketName())
         .setKeyName(args.getKeyName())
         .build();
-    OzoneManagerProtocolProtos.LookupFileRequest lookupFileRequest =
-        OzoneManagerProtocolProtos.LookupFileRequest.newBuilder()
+    LookupFileRequest lookupFileRequest = LookupFileRequest.newBuilder()
             .setKeyArgs(keyArgs)
             .build();
     OMRequest omRequest = createOMRequest(Type.LookupFile)
         .setLookupFileRequest(lookupFileRequest)
         .build();
-    OzoneManagerProtocolProtos.LookupFileResponse resp =
+    LookupFileResponse resp =
         handleError(submitRequest(omRequest)).getLookupFileResponse();
     return OmKeyInfo.getFromProtobuf(resp.getKeyInfo());
   }

@ -1304,8 +1309,7 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
         .setType(args.getType())
         .setFactor(args.getFactor())
         .build();
-    OzoneManagerProtocolProtos.CreateFileRequest createFileRequest =
-        OzoneManagerProtocolProtos.CreateFileRequest.newBuilder()
+    CreateFileRequest createFileRequest = CreateFileRequest.newBuilder()
             .setKeyArgs(keyArgs)
             .setIsOverwrite(overWrite)
             .setIsRecursive(recursive)

@ -1313,9 +1317,38 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
     OMRequest omRequest = createOMRequest(Type.CreateFile)
         .setCreateFileRequest(createFileRequest)
         .build();
-    OzoneManagerProtocolProtos.CreateFileResponse resp =
+    CreateFileResponse resp =
         handleError(submitRequest(omRequest)).getCreateFileResponse();
     return new OpenKeySession(resp.getID(),
         OmKeyInfo.getFromProtobuf(resp.getKeyInfo()), resp.getOpenVersion());
   }
+
+  @Override
+  public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive,
+      String startKey, long numEntries) throws IOException {
+    KeyArgs keyArgs = KeyArgs.newBuilder()
+        .setVolumeName(args.getVolumeName())
+        .setBucketName(args.getBucketName())
+        .setKeyName(args.getKeyName())
+        .build();
+    ListStatusRequest listStatusRequest =
+        ListStatusRequest.newBuilder()
+            .setKeyArgs(keyArgs)
+            .setRecursive(recursive)
+            .setStartKey(startKey)
+            .setNumEntries(numEntries)
+            .build();
+    OMRequest omRequest = createOMRequest(Type.ListStatus)
+        .setListStatusRequest(listStatusRequest)
+        .build();
+    ListStatusResponse listStatusResponse =
+        handleError(submitRequest(omRequest)).getListStatusResponse();
+    List<OzoneFileStatus> statusList =
+        new ArrayList<>(listStatusResponse.getStatusesCount());
+    for (OzoneFileStatusProto fileStatus : listStatusResponse
+        .getStatusesList()) {
+      statusList.add(OzoneFileStatus.getFromProtobuf(fileStatus));
+    }
+    return statusList;
+  }
 }
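Because the javadoc above states that startKey, when it exists, is re-included in the result, a caller paging through a large directory has to drop the first entry of every batch after the first. The sketch below is hedged: the OmKeyArgs handling and the way the next startKey is derived from the last status are assumptions for illustration, not a confirmed contract.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;

public final class ListStatusPager {
  static List<OzoneFileStatus> listAll(OzoneManagerProtocol om, OmKeyArgs dirArgs)
      throws IOException {
    final long batchSize = 1000;
    List<OzoneFileStatus> all = new ArrayList<>();
    String startKey = "";
    boolean firstBatch = true;
    while (true) {
      List<OzoneFileStatus> batch = om.listStatus(dirArgs, false, startKey, batchSize);
      int from = firstBatch ? 0 : 1;   // startKey itself is re-included after page 1
      for (int i = from; i < batch.size(); i++) {
        all.add(batch.get(i));
      }
      if (batch.size() < batchSize) {
        break;                          // short batch => listing exhausted
      }
      // continue from the last entry seen; using the last path component as the
      // next startKey is an assumption about how keys map to paths
      startKey = batch.get(batch.size() - 1).getPath().getName();
      firstBatch = false;
    }
    return all;
  }
}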
@ -41,9 +41,15 @@ import org.apache.hadoop.ozone.protocol.proto
 import org.apache.hadoop.ozone.protocol.proto
     .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
+
+import java.util.List;
+import java.util.ArrayList;

 /**
  * Utilities for converting protobuf classes.
  */

@ -59,7 +65,7 @@ public final class OMPBHelper {
    */
   public static OzoneAclInfo convertOzoneAcl(OzoneAcl acl) {
     OzoneAclInfo.OzoneAclType aclType;
-    switch(acl.getType()) {
+    switch (acl.getType()) {
     case USER:
       aclType = OzoneAclType.USER;
       break;

@ -69,27 +75,24 @@
     case WORLD:
       aclType = OzoneAclType.WORLD;
       break;
+    case ANONYMOUS:
+      aclType = OzoneAclType.ANONYMOUS;
+      break;
+    case CLIENT_IP:
+      aclType = OzoneAclType.CLIENT_IP;
+      break;
     default:
       throw new IllegalArgumentException("ACL type is not recognized");
     }
-    OzoneAclInfo.OzoneAclRights aclRights;
-    switch(acl.getRights()) {
-    case READ:
-      aclRights = OzoneAclRights.READ;
-      break;
-    case WRITE:
-      aclRights = OzoneAclRights.WRITE;
-      break;
-    case READ_WRITE:
-      aclRights = OzoneAclRights.READ_WRITE;
-      break;
-    default:
-      throw new IllegalArgumentException("ACL right is not recognized");
-    }
+    List<OzoneAclRights> aclRights = new ArrayList<>();
+    for (ACLType right : acl.getRights()) {
+      aclRights.add(OzoneAclRights.valueOf(right.name()));
+    }

     return OzoneAclInfo.newBuilder().setType(aclType)
         .setName(acl.getName())
-        .setRights(aclRights)
+        .addAllRights(aclRights)
         .build();
   }

@ -98,35 +101,31 @@
    * @return OzoneAcl
    */
   public static OzoneAcl convertOzoneAcl(OzoneAclInfo aclInfo) {
-    OzoneAcl.OzoneACLType aclType;
-    switch(aclInfo.getType()) {
+    ACLIdentityType aclType;
+    switch (aclInfo.getType()) {
     case USER:
-      aclType = OzoneAcl.OzoneACLType.USER;
+      aclType = ACLIdentityType.USER;
       break;
     case GROUP:
-      aclType = OzoneAcl.OzoneACLType.GROUP;
+      aclType = ACLIdentityType.GROUP;
       break;
     case WORLD:
-      aclType = OzoneAcl.OzoneACLType.WORLD;
+      aclType = ACLIdentityType.WORLD;
+      break;
+    case ANONYMOUS:
+      aclType = ACLIdentityType.ANONYMOUS;
+      break;
+    case CLIENT_IP:
+      aclType = ACLIdentityType.CLIENT_IP;
       break;
     default:
       throw new IllegalArgumentException("ACL type is not recognized");
     }
-    OzoneAcl.OzoneACLRights aclRights;
-    switch(aclInfo.getRights()) {
-    case READ:
-      aclRights = OzoneAcl.OzoneACLRights.READ;
-      break;
-    case WRITE:
-      aclRights = OzoneAcl.OzoneACLRights.WRITE;
-      break;
-    case READ_WRITE:
-      aclRights = OzoneAcl.OzoneACLRights.READ_WRITE;
-      break;
-    default:
-      throw new IllegalArgumentException("ACL right is not recognized");
-    }
+
+    List<IAccessAuthorizer.ACLType> aclRights = new ArrayList<>();
+    for (OzoneAclRights acl : aclInfo.getRightsList()) {
+      aclRights.add(ACLType.valueOf(acl.name()));
+    }
     return new OzoneAcl(aclType, aclInfo.getName(), aclRights);
   }
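A hedged round-trip sketch of the new multi-right conversion path. The OzoneAcl constructor taking (ACLIdentityType, name, List of ACLType) and the helper methods are taken from the hunks above; the import package of OMPBHelper and the surrounding setup are assumptions.

import java.util.Arrays;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.protocolPB.OMPBHelper;   // package path assumed
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;

public final class AclConversionExample {
  public static void main(String[] args) {
    OzoneAcl acl = new OzoneAcl(ACLIdentityType.USER, "bilbo",
        Arrays.asList(ACLType.READ, ACLType.WRITE, ACLType.LIST));

    OzoneAclInfo proto = OMPBHelper.convertOzoneAcl(acl);  // rights now a repeated field
    OzoneAcl back = OMPBHelper.convertOzoneAcl(proto);     // round trip

    System.out.println(proto.getRightsList());             // order follows the input list
    System.out.println(back.getRights());                  // [READ, WRITE, LIST]
  }
}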
@ -20,6 +20,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.ozone.OzoneConsts;
+
+import java.util.List;

 /**
  * Public API for Ozone ACLs. Security providers providing support for Ozone
  * ACLs should implement this.

@ -84,7 +86,8 @@ public interface IAccessAuthorizer {
     case OzoneConsts.OZONE_ACL_NONE:
       return ACLType.NONE;
     default:
-      throw new IllegalArgumentException("ACL right is not recognized");
+      throw new IllegalArgumentException(type + " ACL right is not " +
+          "recognized");
     }

   }

@ -92,10 +95,18 @@ public interface IAccessAuthorizer {
     /**
      * Returns String representation of ACL rights.
      *
-     * @param acl ACLType
+     * @param acls ACLType
      * @return String representation of acl
      */
-    public static String getACLRightsString(ACLType acl) {
+    public static String getACLString(List<ACLType> acls) {
+      StringBuffer sb = new StringBuffer();
+      acls.forEach(acl -> {
+        sb.append(getAclString(acl));
+      });
+      return sb.toString();
+    }
+
+    public static String getAclString(ACLType acl) {
       switch (acl) {
       case READ:
         return OzoneConsts.OZONE_ACL_READ;

@ -129,7 +140,8 @@ public interface IAccessAuthorizer {
     USER(OzoneConsts.OZONE_ACL_USER_TYPE),
     GROUP(OzoneConsts.OZONE_ACL_GROUP_TYPE),
     CLIENT_IP(OzoneConsts.OZONE_ACL_IP_TYPE),
-    WORLD(OzoneConsts.OZONE_ACL_WORLD_TYPE);
+    WORLD(OzoneConsts.OZONE_ACL_WORLD_TYPE),
+    ANONYMOUS(OzoneConsts.OZONE_ACL_ANONYMOUS_TYPE);

     @Override
     public String toString() {
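A hedged sketch of the new list-based string helper. It assumes getACLString is reachable via the ACLType enum (as the hunk context suggests) and that the single-letter constants match what the parse tests further down imply (OZONE_ACL_READ = "r", OZONE_ACL_WRITE = "w", OZONE_ACL_LIST = "l"); neither is confirmed in this diff.

import java.util.Arrays;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;

public final class AclStringExample {
  public static void main(String[] args) {
    // concatenates one shorthand letter per right, in list order
    String s = ACLType.getACLString(
        Arrays.asList(ACLType.READ, ACLType.WRITE, ACLType.LIST));
    System.out.println(s);   // expected "rwl" under the assumed constants
  }
}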
@ -0,0 +1,65 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ozone.security.acl;

import org.apache.hadoop.hdds.conf.Config;
import org.apache.hadoop.hdds.conf.ConfigGroup;
import org.apache.hadoop.hdds.conf.ConfigTag;
import org.apache.hadoop.hdds.conf.ConfigType;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;

/**
 * Ozone ACL config pojo.
 */
@ConfigGroup(prefix = "ozone.om")
public class OzoneAclConfig {
  // OM Default user/group permissions
  private ACLType userDefaultRights = ACLType.ALL;
  private ACLType groupDefaultRights = ACLType.ALL;

  @Config(key = "user.rights",
      defaultValue = "ALL",
      type = ConfigType.STRING,
      tags = {ConfigTag.OM, ConfigTag.SECURITY},
      description = "Default user permissions set for an object in " +
          "OzoneManager."
  )
  public void setUserDefaultRights(String userRights) {
    this.userDefaultRights = ACLType.valueOf(userRights);
  }

  @Config(key = "group.rights",
      defaultValue = "ALL",
      type = ConfigType.STRING,
      tags = {ConfigTag.OM, ConfigTag.SECURITY},
      description = "Default group permissions set for an object in " +
          "OzoneManager."
  )
  public void setGroupDefaultRights(String groupRights) {
    this.groupDefaultRights = ACLType.valueOf(groupRights);
  }

  public ACLType getUserDefaultRights() {
    return userDefaultRights;
  }

  public ACLType getGroupDefaultRights() {
    return groupDefaultRights;
  }

}
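The @ConfigGroup/@Config annotations above suggest this POJO is populated from the keys ozone.om.user.rights and ozone.om.group.rights by the HDDS config framework; the injection API itself is not shown in this diff, so the sketch below only exercises the POJO directly.

import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
import org.apache.hadoop.ozone.security.acl.OzoneAclConfig;

public final class AclConfigExample {
  public static void main(String[] args) {
    OzoneAclConfig aclConfig = new OzoneAclConfig();
    // both default to ACLType.ALL until a setter (i.e. the config key) overrides them
    aclConfig.setUserDefaultRights("WRITE");   // would come from ozone.om.user.rights
    aclConfig.setGroupDefaultRights("READ");   // would come from ozone.om.group.rights
    System.out.println(aclConfig.getUserDefaultRights());   // WRITE
    System.out.println(aclConfig.getGroupDefaultRights());  // READ
  }
}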
@ -85,6 +85,7 @@ enum Type {
   CreateDirectory = 71;
   CreateFile = 72;
   LookupFile = 73;
+  ListStatus = 74;
 }

 message OMRequest {

@ -141,6 +142,7 @@ message OMRequest {
   optional CreateDirectoryRequest createDirectoryRequest = 71;
   optional CreateFileRequest createFileRequest = 72;
   optional LookupFileRequest lookupFileRequest = 73;
+  optional ListStatusRequest listStatusRequest = 74;
 }

 message OMResponse {

@ -200,6 +202,7 @@ message OMResponse {
   optional CreateDirectoryResponse createDirectoryResponse = 71;
   optional CreateFileResponse createFileResponse = 72;
   optional LookupFileResponse lookupFileResponse = 73;
+  optional ListStatusResponse listStatusResponse = 74;
 }

 enum Status {

@ -448,15 +451,24 @@ message OzoneAclInfo {
     USER = 1;
     GROUP = 2;
     WORLD = 3;
+    ANONYMOUS = 4;
+    CLIENT_IP = 5;
   }

   enum OzoneAclRights {
-    READ = 1;
-    WRITE = 2;
-    READ_WRITE = 3;
+    CREATE = 1;
+    LIST = 2;
+    DELETE = 3;
+    READ = 4;
+    WRITE = 5;
+    READ_ACL = 6;
+    WRITE_ACL = 7;
+    ALL = 8;
+    NONE = 9;
   }
   required OzoneAclType type = 1;
   required string name = 2;
-  required OzoneAclRights rights = 3;
+  repeated OzoneAclRights rights = 3;
 }

 message CreateBucketRequest {

@ -561,12 +573,16 @@ message KeyInfo {
   optional FileEncryptionInfoProto fileEncryptionInfo = 12;
 }

+message OzoneFileStatusProto {
+  required hadoop.fs.FileStatusProto status = 1;
+}
+
 message GetFileStatusRequest {
   required KeyArgs keyArgs = 1;
 }

 message GetFileStatusResponse {
-  required hadoop.fs.FileStatusProto status = 1;
+  required OzoneFileStatusProto status = 1;
 }

 message CreateDirectoryRequest {

@ -599,6 +615,17 @@ message LookupFileResponse {
   optional KeyInfo keyInfo = 1;
 }

+message ListStatusRequest {
+  required KeyArgs keyArgs = 1;
+  required bool recursive = 2;
+  required string startKey = 3;
+  required uint64 numEntries = 4;
+}
+
+message ListStatusResponse {
+  repeated OzoneFileStatusProto statuses = 1;
+}
+
 message CreateKeyRequest {
   required KeyArgs keyArgs = 1;
 }
@ -18,16 +18,20 @@

 package org.apache.hadoop.ozone;

+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
+
 import org.junit.Test;

+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Set;

+import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;

 /**
- * This class is to test acl stoarge and retreival in ozone store.
+ * This class is to test acl storage and retrieval in ozone store.
  */
 public class TestOzoneAcls {

@ -39,8 +43,8 @@ public class TestOzoneAcls {
     testMatrix.put("user:bilbo:r", Boolean.TRUE);
     testMatrix.put("user:bilbo:w", Boolean.TRUE);
     testMatrix.put("user:bilbo:rw", Boolean.TRUE);
-    testMatrix.put("user:bilbo:wr", Boolean.TRUE);
-    testMatrix.put(" user:bilbo:wr ", Boolean.TRUE);
+    testMatrix.put("user:bilbo:a", Boolean.TRUE);
+    testMatrix.put(" user:bilbo:a ", Boolean.TRUE);

     // ACLs makes no judgement on the quality of

@ -53,7 +57,16 @@ public class TestOzoneAcls {
     testMatrix.put("", Boolean.FALSE);
     testMatrix.put(null, Boolean.FALSE);
     testMatrix.put(" user:bilbo:", Boolean.FALSE);
-    testMatrix.put(" user:bilbo:rx", Boolean.FALSE);
+    testMatrix.put(" user:bilbo:rx", Boolean.TRUE);
+    testMatrix.put(" user:bilbo:rwdlncxy", Boolean.TRUE);
+    testMatrix.put(" group:bilbo:rwdlncxy", Boolean.TRUE);
+    testMatrix.put(" world::rwdlncxy", Boolean.TRUE);
+    testMatrix.put(" user:bilbo:rncxy", Boolean.TRUE);
+    testMatrix.put(" group:bilbo:ncxy", Boolean.TRUE);
+    testMatrix.put(" world::ncxy", Boolean.TRUE);
+    testMatrix.put(" user:bilbo:rwcxy", Boolean.TRUE);
+    testMatrix.put(" group:bilbo:rwcxy", Boolean.TRUE);
+    testMatrix.put(" world::rwcxy", Boolean.TRUE);
     testMatrix.put(" user:bilbo:mk", Boolean.FALSE);
     testMatrix.put(" user::rw", Boolean.FALSE);
     testMatrix.put("user11:bilbo:rw", Boolean.FALSE);

@ -62,12 +75,12 @@ public class TestOzoneAcls {
     testMatrix.put(" group:hobbit:r", Boolean.TRUE);
     testMatrix.put(" group:hobbit:w", Boolean.TRUE);
     testMatrix.put(" group:hobbit:rw", Boolean.TRUE);
-    testMatrix.put(" group:hobbit:wr", Boolean.TRUE);
+    testMatrix.put(" group:hobbit:a", Boolean.TRUE);
     testMatrix.put(" group:*:rw", Boolean.TRUE);
     testMatrix.put(" group:~!:rw", Boolean.TRUE);

     testMatrix.put(" group:hobbit:", Boolean.FALSE);
-    testMatrix.put(" group:hobbit:rx", Boolean.FALSE);
+    testMatrix.put(" group:hobbit:rx", Boolean.TRUE);
     testMatrix.put(" group:hobbit:mk", Boolean.FALSE);
     testMatrix.put(" group::", Boolean.FALSE);
     testMatrix.put(" group::rw", Boolean.FALSE);

@ -77,14 +90,14 @@ public class TestOzoneAcls {
     testMatrix.put("JUNK group:hobbit:r", Boolean.FALSE);
     testMatrix.put("JUNK group:hobbit:w", Boolean.FALSE);
     testMatrix.put("JUNK group:hobbit:rw", Boolean.FALSE);
-    testMatrix.put("JUNK group:hobbit:wr", Boolean.FALSE);
+    testMatrix.put("JUNK group:hobbit:a", Boolean.FALSE);
     testMatrix.put("JUNK group:*:rw", Boolean.FALSE);
     testMatrix.put("JUNK group:~!:rw", Boolean.FALSE);

     testMatrix.put(" world::r", Boolean.TRUE);
     testMatrix.put(" world::w", Boolean.TRUE);
     testMatrix.put(" world::rw", Boolean.TRUE);
-    testMatrix.put(" world::wr", Boolean.TRUE);
+    testMatrix.put(" world::a", Boolean.TRUE);

     testMatrix.put(" world:bilbo:w", Boolean.FALSE);
     testMatrix.put(" world:bilbo:rw", Boolean.FALSE);

@ -97,7 +110,7 @@ public class TestOzoneAcls {
         try {
           OzoneAcl.parseAcl(key);
           // should never get here since parseAcl will throw
-          fail("An exception was expected but did not happen.");
+          fail("An exception was expected but did not happen. Key: " + key);
         } catch (IllegalArgumentException e) {
           // nothing to do
         }

@ -109,33 +122,51 @@ public class TestOzoneAcls {
   public void testAclValues() {
     OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw");
     assertEquals(acl.getName(), "bilbo");
-    assertEquals(OzoneAcl.OzoneACLRights.READ_WRITE, acl.getRights());
-    assertEquals(OzoneAcl.OzoneACLType.USER, acl.getType());
+    assertEquals(Arrays.asList(READ, WRITE), acl.getRights());
+    assertEquals(ACLIdentityType.USER, acl.getType());

-    acl = OzoneAcl.parseAcl("user:bilbo:wr");
+    acl = OzoneAcl.parseAcl("user:bilbo:a");
     assertEquals("bilbo", acl.getName());
-    assertEquals(OzoneAcl.OzoneACLRights.READ_WRITE, acl.getRights());
-    assertEquals(OzoneAcl.OzoneACLType.USER, acl.getType());
+    assertEquals(Arrays.asList(ALL), acl.getRights());
+    assertEquals(ACLIdentityType.USER, acl.getType());

     acl = OzoneAcl.parseAcl("user:bilbo:r");
     assertEquals("bilbo", acl.getName());
-    assertEquals(OzoneAcl.OzoneACLRights.READ, acl.getRights());
-    assertEquals(OzoneAcl.OzoneACLType.USER, acl.getType());
+    assertEquals(Arrays.asList(READ), acl.getRights());
+    assertEquals(ACLIdentityType.USER, acl.getType());

     acl = OzoneAcl.parseAcl("user:bilbo:w");
     assertEquals("bilbo", acl.getName());
-    assertEquals(OzoneAcl.OzoneACLRights.WRITE, acl.getRights());
-    assertEquals(OzoneAcl.OzoneACLType.USER, acl.getType());
+    assertEquals(Arrays.asList(WRITE), acl.getRights());
+    assertEquals(ACLIdentityType.USER, acl.getType());

-    acl = OzoneAcl.parseAcl("group:hobbit:wr");
+    acl = OzoneAcl.parseAcl("group:hobbit:a");
     assertEquals(acl.getName(), "hobbit");
-    assertEquals(OzoneAcl.OzoneACLRights.READ_WRITE, acl.getRights());
-    assertEquals(OzoneAcl.OzoneACLType.GROUP, acl.getType());
+    assertEquals(Arrays.asList(ALL), acl.getRights());
+    assertEquals(ACLIdentityType.GROUP, acl.getType());

-    acl = OzoneAcl.parseAcl("world::wr");
+    acl = OzoneAcl.parseAcl("world::a");
     assertEquals(acl.getName(), "");
-    assertEquals(OzoneAcl.OzoneACLRights.READ_WRITE, acl.getRights());
-    assertEquals(OzoneAcl.OzoneACLType.WORLD, acl.getType());
+    assertEquals(Arrays.asList(ALL), acl.getRights());
+    assertEquals(ACLIdentityType.WORLD, acl.getType());
+
+    acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy");
+    assertEquals(acl.getName(), "bilbo");
+    assertEquals(Arrays.asList(READ, WRITE, DELETE, LIST, NONE, CREATE,
+        READ_ACL, WRITE_ACL), acl.getRights());
+    assertEquals(ACLIdentityType.USER, acl.getType());
+
+    acl = OzoneAcl.parseAcl("group:hadoop:rwdlncxy");
+    assertEquals(acl.getName(), "hadoop");
+    assertEquals(Arrays.asList(READ, WRITE, DELETE, LIST, NONE, CREATE,
+        READ_ACL, WRITE_ACL), acl.getRights());
+    assertEquals(ACLIdentityType.GROUP, acl.getType());
+
+    acl = OzoneAcl.parseAcl("world::rwdlncxy");
+    assertEquals(acl.getName(), "");
+    assertEquals(Arrays.asList(READ, WRITE, DELETE, LIST, NONE, CREATE,
+        READ_ACL, WRITE_ACL), acl.getRights());
+    assertEquals(ACLIdentityType.WORLD, acl.getType());
   }

 }
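The shorthand letters exercised by these tests map onto the new ACLType values; the mapping below is inferred purely from the assertions above ("rwdlncxy" yields READ, WRITE, DELETE, LIST, NONE, CREATE, READ_ACL, WRITE_ACL, and "a" yields ALL) and is a reading aid, not the parser's implementation.

import java.util.LinkedHashMap;
import java.util.Map;

public final class AclShorthand {
  // inferred letter-to-right mapping; not taken from OzoneConsts
  static final Map<Character, String> LETTER_TO_RIGHT = new LinkedHashMap<>();
  static {
    LETTER_TO_RIGHT.put('r', "READ");
    LETTER_TO_RIGHT.put('w', "WRITE");
    LETTER_TO_RIGHT.put('d', "DELETE");
    LETTER_TO_RIGHT.put('l', "LIST");
    LETTER_TO_RIGHT.put('n', "NONE");
    LETTER_TO_RIGHT.put('c', "CREATE");
    LETTER_TO_RIGHT.put('x', "READ_ACL");
    LETTER_TO_RIGHT.put('y', "WRITE_ACL");
    LETTER_TO_RIGHT.put('a', "ALL");
  }

  public static void main(String[] args) {
    for (char c : "rwdlncxy".toCharArray()) {
      System.out.println(c + " -> " + LETTER_TO_RIGHT.get(c));
    }
  }
}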
@ -0,0 +1,21 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||||
|
# contributor license agreements. See the NOTICE file distributed with
|
||||||
|
# this work for additional information regarding copyright ownership.
|
||||||
|
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||||
|
# (the "License"); you may not use this file except in compliance with
|
||||||
|
# the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
|
||||||
|
SRC_DIR="$SCRIPT_DIR/runConfigurations"
|
||||||
|
DEST_DIR="$SCRIPT_DIR/../../../.idea/runConfigurations/"
|
||||||
|
mkdir -p "$DEST_DIR"
|
||||||
|
#shellcheck disable=SC2010
|
||||||
|
ls -1 "$SRC_DIR" | grep -v ozone-site.xml | xargs -n1 -I FILE cp "$SRC_DIR/FILE" "$DEST_DIR"
|
|
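This script is meant to be run once with bash from wherever it lives under the IntelliJ dev-support directory (its file name is not visible in this diff); it copies every file in the adjacent runConfigurations folder except ozone-site.xml into the repository's .idea/runConfigurations directory, which is where the run configurations defined below end up.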
@ -0,0 +1,18 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# log4j configuration used during build and unit tests
log4j.rootLogger=INFO,stdout
log4j.threshold=ALL
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
log4j.logger.io.jaegertracing=DEBUG
@ -0,0 +1,66 @@
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements.  See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->
<configuration>
  <property>
    <name>hdds.profiler.endpoint.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>ozone.scm.block.client.address</name>
    <value>localhost</value>
  </property>
  <property>
    <name>ozone.enabled</name>
    <value>True</value>
  </property>
  <property>
    <name>ozone.scm.datanode.id</name>
    <value>/tmp/datanode.id</value>
  </property>
  <property>
    <name>ozone.scm.client.address</name>
    <value>localhost</value>
  </property>
  <property>
    <name>ozone.metadata.dirs</name>
    <value>/tmp/metadata</value>
  </property>
  <property>
    <name>ozone.scm.names</name>
    <value>localhost</value>
  </property>
  <property>
    <name>ozone.om.address</name>
    <value>localhost</value>
  </property>
  <property>
    <name>ozone.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>ozone.scm.container.size</name>
    <value>10MB</value>
  </property>
  <property>
    <name>ozone.scm.block.size</name>
    <value>1MB</value>
  </property>
  <property>
    <name>hdds.datanode.storage.utilization.critical.threshold</name>
    <value>0.99</value>
  </property>
</configuration>
@ -0,0 +1,33 @@
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements.  See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="Datanode" type="Application" factoryName="Application">
    <option name="MAIN_CLASS_NAME" value="org.apache.hadoop.ozone.HddsDatanodeService" />
    <module name="hadoop-ozone-datanode" />
    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml" />
    <option name="VM_PARAMETERS" value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties" />
    <extension name="coverage">
      <pattern>
        <option name="PATTERN" value="org.apache.hadoop.ozone.*" />
        <option name="ENABLED" value="true" />
      </pattern>
    </extension>
    <method v="2">
      <option name="Make" enabled="true" />
    </method>
  </configuration>
</component>
@ -0,0 +1,33 @@
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements.  See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="FreonStandalone" type="Application" factoryName="Application">
    <option name="MAIN_CLASS_NAME" value="org.apache.hadoop.ozone.freon.Freon" />
    <module name="hadoop-ozone-tools" />
    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml rk" />
    <option name="VM_PARAMETERS" value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties" />
    <extension name="coverage">
      <pattern>
        <option name="PATTERN" value="org.apache.hadoop.ozone.*" />
        <option name="ENABLED" value="true" />
      </pattern>
    </extension>
    <method v="2">
      <option name="Make" enabled="true" />
    </method>
  </configuration>
</component>
@ -0,0 +1,33 @@
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements.  See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="OzoneManager" type="Application" factoryName="Application">
    <option name="MAIN_CLASS_NAME" value="org.apache.hadoop.ozone.om.OzoneManager" />
    <module name="hadoop-ozone-ozone-manager" />
    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml" />
    <option name="VM_PARAMETERS" value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties" />
    <extension name="coverage">
      <pattern>
        <option name="PATTERN" value="org.apache.hadoop.ozone.*" />
        <option name="ENABLED" value="true" />
      </pattern>
    </extension>
    <method v="2">
      <option name="Make" enabled="true" />
    </method>
  </configuration>
</component>