HDDS-1232. Recon Container DB service definition. Contributed by Aravindan Vijayan.

This commit is contained in:
Yiqun Lin 2019-03-08 16:59:41 +08:00
parent 373705fcea
commit fb851c9481
22 changed files with 785 additions and 24 deletions

View File

@ -61,4 +61,9 @@ public class LevelDBStoreIterator implements MetaStoreIterator<KeyValue> {
public void seekToLast() {
levelDBIterator.seekToLast();
}
/**
 * Positions the iterator by delegating to LevelDB's seek(), which lands on
 * the first key at or after the given prefix bytes. Subsequent keys are not
 * guaranteed to start with the prefix; callers must verify each key.
 *
 * @param prefix raw key-prefix bytes to seek to.
 */
@Override
public void prefixSeek(byte[] prefix) {
levelDBIterator.seek(prefix);
}
}

View File

@ -36,4 +36,9 @@ public interface MetaStoreIterator<T> extends Iterator<T> {
*/
void seekToLast();
/**
 * Position the iterator at the first entry whose key is at or after the
 * given byte prefix. This is only a seek: entries returned afterwards are
 * not guaranteed to start with the prefix, so callers must check each key
 * themselves (see the LevelDB/RocksDB implementations).
 *
 * @param prefix raw key-prefix bytes to seek to.
 */
void prefixSeek(byte[] prefix);
}

View File

@ -63,4 +63,9 @@ public class RocksDBStoreIterator implements MetaStoreIterator<KeyValue> {
rocksDBIterator.seekToLast();
}
/**
 * Positions the iterator by delegating to RocksDB's seek(), which lands on
 * the first key at or after the given prefix bytes. Subsequent keys are not
 * guaranteed to start with the prefix; callers must verify each key.
 *
 * @param prefix raw key-prefix bytes to seek to.
 */
@Override
public void prefixSeek(byte[] prefix) {
rocksDBIterator.seek(prefix);
}
}

View File

@ -2144,4 +2144,98 @@
milliseconds.
</description>
</property>
<property>
<name>ozone.recon.http.enabled</name>
<value>true</value>
<tag>RECON, MANAGEMENT</tag>
<description>
Property to enable or disable Recon web user interface.
</description>
</property>
<property>
<name>ozone.recon.http-address</name>
<value>0.0.0.0:9888</value>
<tag>RECON, MANAGEMENT</tag>
<description>
The address and the base port where the Recon web UI will listen on.
If the port is 0, then the server will start on a free port. However, it
is best to specify a well-known port, so it is easy to connect and see
the Recon management UI.
</description>
</property>
<property>
<name>ozone.recon.http-bind-host</name>
<value>0.0.0.0</value>
<tag>RECON, MANAGEMENT</tag>
<description>
The actual address the Recon server will bind to. If this optional
address is set, it overrides only the hostname portion of
ozone.recon.http-address.
</description>
</property>
<property>
<name>ozone.recon.https-bind-host</name>
<value>0.0.0.0</value>
<tag>RECON, MANAGEMENT, SECURITY</tag>
<description>
The actual address the Recon web server will bind to using HTTPS.
If this optional address is set, it overrides only the hostname portion of
ozone.recon.https-address.
</description>
</property>
<property>
<name>ozone.recon.https-address</name>
<value>0.0.0.0:9889</value>
<tag>RECON, MANAGEMENT, SECURITY</tag>
<description>
The address and the base port where the Recon web UI will listen
on using HTTPS. If the port is 0 then the server will start on a free
port.
</description>
</property>
<property>
<name>ozone.recon.keytab.file</name>
<value/>
<tag>RECON, SECURITY</tag>
<description>
The keytab file for Kerberos authentication in Recon.
</description>
</property>
<property>
<name>ozone.recon.authentication.kerberos.principal</name>
<value/>
<tag>RECON</tag>
<description>The server principal used by Ozone Recon server. This is
typically set to HTTP/_HOST@REALM.TLD. The SPNEGO server principal
begins with the prefix HTTP/ by convention.
</description>
</property>
<property>
<name>ozone.recon.container.db.cache.size.mb</name>
<value>128</value>
<tag>RECON, PERFORMANCE</tag>
<description>
The size of Recon DB cache in MB that is used for caching files.
This value is set to an abnormally low value in the default configuration.
That is to make unit testing easy. Generally, this value should be set to
something like 16GB or more, if you intend to use Recon at scale.
A large value for this key allows a proportionally larger amount of Recon
container DB to be cached in memory. This makes Recon Container-Key
operations faster.
</description>
</property>
<property>
<name>ozone.recon.db.dirs</name>
<value/>
<tag>OZONE, RECON, STORAGE, PERFORMANCE</tag>
<description>
Directory where the Recon Server stores its metadata. This should
be specified as a single directory. If the directory does not
exist then the Recon will attempt to create it.
If undefined, then the Recon will log a warning and fallback to
ozone.metadata.dirs.
</description>
</property>
</configuration>

View File

@ -163,6 +163,58 @@ public class TestMetadataStore {
}
@Test
public void testIteratorPrefixSeek() throws Exception {
  Configuration conf = new OzoneConfiguration();
  conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
  File dbDir = GenericTestUtils.getRandomizedTestDir();
  MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
      .setConf(conf)
      .setCreateIfMissing(true)
      .setDbFile(dbDir)
      .build();

  // Interleave the writes so iteration order depends on the store's key
  // ordering, not insertion order: a0-a4 / b0-b4 / c0-c4, then b5-b9 and
  // a5-a9 out of order.
  for (int idx = 0; idx < 5; idx++) {
    dbStore.put(getBytes("a" + idx), getBytes("a-value" + idx));
  }
  for (int idx = 0; idx < 5; idx++) {
    dbStore.put(getBytes("b" + idx), getBytes("b-value" + idx));
  }
  for (int idx = 0; idx < 5; idx++) {
    dbStore.put(getBytes("c" + idx), getBytes("c-value" + idx));
  }
  for (int idx = 5; idx < 10; idx++) {
    dbStore.put(getBytes("b" + idx), getBytes("b-value" + idx));
  }
  for (int idx = 5; idx < 10; idx++) {
    dbStore.put(getBytes("a" + idx), getBytes("a-value" + idx));
  }

  // Seek to the "b" prefix and walk forward until the first non-"b" key.
  MetaStoreIterator<KeyValue> iter = dbStore.iterator();
  iter.prefixSeek(getBytes("b"));
  int matched = 0;
  while (iter.hasNext()) {
    KeyValue entry = iter.next();
    String key = getString(entry.getKey());
    if (!key.startsWith("b")) {
      break;
    }
    // Keys arrive sorted (b0..b9), so values line up with the counter.
    assertEquals("b-value" + matched, getString(entry.getValue()));
    matched++;
  }
  // All ten "b" entries must have been visited.
  assertTrue(matched == 10);

  dbStore.close();
  dbStore.destroy();
  FileUtils.deleteDirectory(dbDir);
}
@Test
public void testMetaStoreConfigDifferentFromType() throws IOException {

View File

@ -125,13 +125,29 @@ public final class ServerUtils {
* @return
*/
public static File getScmDbDir(Configuration conf) {
final Collection<String> metadirs = conf.getTrimmedStringCollection(
ScmConfigKeys.OZONE_SCM_DB_DIRS);
File metadataDir = getDirWithFallBackToOzoneMetadata(conf, ScmConfigKeys
.OZONE_SCM_DB_DIRS, "SCM");
if (metadataDir != null) {
return metadataDir;
}
LOG.warn("{} is not configured. We recommend adding this setting. " +
"Falling back to {} instead.",
ScmConfigKeys.OZONE_SCM_DB_DIRS, HddsConfigKeys.OZONE_METADATA_DIRS);
return getOzoneMetaDirPath(conf);
}
public static File getDirWithFallBackToOzoneMetadata(Configuration conf,
String key,
String componentName) {
final Collection<String> metadirs = conf.getTrimmedStringCollection(key);
if (metadirs.size() > 1) {
throw new IllegalArgumentException(
"Bad config setting " + ScmConfigKeys.OZONE_SCM_DB_DIRS +
". SCM does not support multiple metadata dirs currently");
"Bad config setting " + key +
". " + componentName +
" does not support multiple metadata dirs currently");
}
if (metadirs.size() == 1) {
@ -143,11 +159,7 @@ public final class ServerUtils {
}
return dbDirPath;
}
LOG.warn("{} is not configured. We recommend adding this setting. " +
"Falling back to {} instead.",
ScmConfigKeys.OZONE_SCM_DB_DIRS, HddsConfigKeys.OZONE_METADATA_DIRS);
return getOzoneMetaDirPath(conf);
return null;
}
/**

View File

@ -103,6 +103,13 @@
<classifier>classpath</classifier>
<destFileName>hadoop-ozone-datanode.classpath</destFileName>
</artifactItem>
<artifactItem>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-recon</artifactId>
<version>${ozone.version}</version>
<classifier>classpath</classifier>
<destFileName>hadoop-ozone-recon.classpath</destFileName>
</artifactItem>
</artifactItems>
</configuration>
</execution>

View File

@ -52,6 +52,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-s3gateway</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-recon</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-client</artifactId>

View File

@ -21,6 +21,7 @@ import org.apache.hadoop.conf.TestConfigurationFieldsBase;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.recon.ReconServerConfigKeys;
import org.apache.hadoop.ozone.s3.S3GatewayConfigKeys;
/**
@ -34,6 +35,7 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase {
configurationClasses =
new Class[] {OzoneConfigKeys.class, ScmConfigKeys.class,
OMConfigKeys.class, HddsConfigKeys.class,
ReconServerConfigKeys.class,
S3GatewayConfigKeys.class};
errorIfMissingConfigProps = true;
errorIfMissingXmlProps = true;

View File

@ -22,7 +22,7 @@
</parent>
<name>Apache Hadoop Ozone Recon</name>
<modelVersion>4.0.0</modelVersion>
<artifactId>ozone-recon</artifactId>
<artifactId>hadoop-ozone-recon</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
@ -50,5 +50,16 @@
<artifactId>guice-assistedinject</artifactId>
<version>4.1.0</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
<version>1.10.19</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>

View File

@ -0,0 +1,34 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.recon;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX;
/**
 * Recon Server constants file.
 */
public final class ReconConstants {
// Utility holder for constants; never instantiated.
private ReconConstants() {
// Never Constructed
}
// Name of the Recon container DB: "recon-" followed by the shared
// container DB suffix defined in OzoneConsts.CONTAINER_DB_SUFFIX.
public static final String RECON_CONTAINER_DB = "recon-" +
CONTAINER_DB_SUFFIX;
}

View File

@ -18,6 +18,10 @@
package org.apache.hadoop.ozone.recon;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.recon.spi.ReconContainerDBProvider;
import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
import org.apache.hadoop.ozone.recon.spi.impl.ContainerDBServiceProviderImpl;
import org.apache.hadoop.utils.MetadataStore;
import com.google.inject.AbstractModule;
import com.google.inject.Singleton;
@ -30,6 +34,9 @@ public class ReconControllerModule extends AbstractModule {
protected void configure() {
bind(OzoneConfiguration.class).toProvider(OzoneConfigurationProvider.class);
bind(ReconHttpServer.class).in(Singleton.class);
bind(MetadataStore.class).toProvider(ReconContainerDBProvider.class);
bind(ContainerDBServiceProvider.class)
.to(ContainerDBServiceProviderImpl.class);
}

View File

@ -37,52 +37,52 @@ public class ReconHttpServer extends BaseHttpServer {
@Override
protected String getHttpAddressKey() {
return ReconServerConfiguration.OZONE_RECON_HTTP_ADDRESS_KEY;
return ReconServerConfigKeys.OZONE_RECON_HTTP_ADDRESS_KEY;
}
@Override
protected String getHttpsAddressKey() {
return ReconServerConfiguration.OZONE_RECON_HTTPS_ADDRESS_KEY;
return ReconServerConfigKeys.OZONE_RECON_HTTPS_ADDRESS_KEY;
}
@Override
protected String getHttpBindHostKey() {
return ReconServerConfiguration.OZONE_RECON_HTTP_BIND_HOST_KEY;
return ReconServerConfigKeys.OZONE_RECON_HTTP_BIND_HOST_KEY;
}
@Override
protected String getHttpsBindHostKey() {
return ReconServerConfiguration.OZONE_RECON_HTTPS_BIND_HOST_KEY;
return ReconServerConfigKeys.OZONE_RECON_HTTPS_BIND_HOST_KEY;
}
@Override
protected String getBindHostDefault() {
return ReconServerConfiguration.OZONE_RECON_HTTP_BIND_HOST_DEFAULT;
return ReconServerConfigKeys.OZONE_RECON_HTTP_BIND_HOST_DEFAULT;
}
@Override
protected int getHttpBindPortDefault() {
return ReconServerConfiguration.OZONE_RECON_HTTP_BIND_PORT_DEFAULT;
return ReconServerConfigKeys.OZONE_RECON_HTTP_BIND_PORT_DEFAULT;
}
@Override
protected int getHttpsBindPortDefault() {
return ReconServerConfiguration.OZONE_RECON_HTTPS_BIND_PORT_DEFAULT;
return ReconServerConfigKeys.OZONE_RECON_HTTPS_BIND_PORT_DEFAULT;
}
@Override
protected String getKeytabFile() {
return ReconServerConfiguration.OZONE_RECON_KEYTAB_FILE;
return ReconServerConfigKeys.OZONE_RECON_KEYTAB_FILE;
}
@Override
protected String getSpnegoPrincipal() {
return ReconServerConfiguration
return ReconServerConfigKeys
.OZONE_RECON_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
}
@Override
protected String getEnabledKey() {
return ReconServerConfiguration.OZONE_RECON_HTTP_ENABLED_KEY;
return ReconServerConfigKeys.OZONE_RECON_HTTP_ENABLED_KEY;
}
}

View File

@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability;
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public final class ReconServerConfiguration {
public final class ReconServerConfigKeys {
public static final String OZONE_RECON_HTTP_ENABLED_KEY =
"ozone.recon.http.enabled";
@ -45,12 +45,16 @@ public final class ReconServerConfiguration {
public static final int OZONE_RECON_HTTPS_BIND_PORT_DEFAULT = 9889;
public static final String OZONE_RECON_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
"ozone.recon.authentication.kerberos.principal";
public static final String OZONE_RECON_DOMAIN_NAME =
"ozone.recon.domain.name";
public static final String OZONE_RECON_CONTAINER_DB_CACHE_SIZE_MB =
"ozone.recon.container.db.cache.size.mb";
public static final int OZONE_RECON_CONTAINER_DB_CACHE_SIZE_DEFAULT = 128;
public static final String OZONE_RECON_DB_DIRS = "ozone.recon.db.dirs";
/**
* Private constructor for utility class.
*/
private ReconServerConfiguration() {
private ReconServerConfigKeys() {
}
}

View File

@ -0,0 +1,50 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.recon.api.types;
/**
* Class to encapsulate the Key information needed for the Recon container DB.
* Currently, it is containerId and key prefix.
*/
public class ContainerKeyPrefix {
private long containerId;
private String keyPrefix;
public ContainerKeyPrefix(long containerId, String keyPrefix) {
this.containerId = containerId;
this.keyPrefix = keyPrefix;
}
public long getContainerId() {
return containerId;
}
public void setContainerId(long containerId) {
this.containerId = containerId;
}
public String getKeyPrefix() {
return keyPrefix;
}
public void setKeyPrefix(String keyPrefix) {
this.keyPrefix = keyPrefix;
}
}

View File

@ -0,0 +1,58 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.recon.spi;
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
/**
 * The Recon Container DB Service interface. Maps (containerId, key-prefix)
 * tuples to the count of keys carrying that prefix in the container.
 */
@InterfaceStability.Unstable
public interface ContainerDBServiceProvider {
/**
* Store the container to Key prefix mapping into the Recon Container DB.
*
* @param containerKeyPrefix the containerId, key-prefix tuple.
* @param count Count of Keys with that prefix.
* @throws IOException on failure to write the mapping to the DB.
*/
void storeContainerKeyMapping(ContainerKeyPrefix containerKeyPrefix,
Integer count) throws IOException;
/**
* Get the stored key prefix count for the given containerId, key prefix.
*
* NOTE(review): method name contains a duplicated "For"
* ("getCountForFor..."); kept as-is for compatibility with existing
* implementations and callers.
*
* @param containerKeyPrefix the containerId, key-prefix tuple.
* @return count of keys with that prefix.
* @throws IOException on failure to read the mapping from the DB.
*/
Integer getCountForForContainerKeyPrefix(
ContainerKeyPrefix containerKeyPrefix) throws IOException;
/**
* Get the stored key prefixes for the given containerId.
*
* @param containerId the given containerId.
* @return Map of Key prefix -> count.
*/
Map<String, Integer> getKeyPrefixesForContainer(long containerId);
}

View File

@ -0,0 +1,77 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.recon.spi;
import static org.apache.hadoop.ozone.recon.ReconConstants.
RECON_CONTAINER_DB;
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.
OZONE_RECON_CONTAINER_DB_CACHE_SIZE_DEFAULT;
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.
OZONE_RECON_CONTAINER_DB_CACHE_SIZE_MB;
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.
OZONE_RECON_DB_DIRS;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.utils.MetadataStore;
import org.apache.hadoop.utils.MetadataStoreBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.inject.Inject;
import com.google.inject.Provider;
/**
 * Provider for the Recon container DB (Metadata store).
 */
public class ReconContainerDBProvider implements
    Provider<MetadataStore> {

  @VisibleForTesting
  private static final Logger LOG =
      LoggerFactory.getLogger(ReconContainerDBProvider.class);

  @Inject
  private OzoneConfiguration configuration;

  /**
   * Builds the Recon container metadata store under the configured Recon DB
   * directory (falling back to the Ozone metadata dir).
   *
   * @return the MetadataStore, or null if initialization failed.
   */
  @Override
  public MetadataStore get() {
    final File reconDbDir = ServerUtils.getDirWithFallBackToOzoneMetadata(
        configuration, OZONE_RECON_DB_DIRS, "Recon");
    final int cacheSizeMb = configuration.getInt(
        OZONE_RECON_CONTAINER_DB_CACHE_SIZE_MB,
        OZONE_RECON_CONTAINER_DB_CACHE_SIZE_DEFAULT);
    try {
      return MetadataStoreBuilder.newBuilder()
          .setConf(configuration)
          .setDbFile(new File(reconDbDir, RECON_CONTAINER_DB))
          .setCacheSize(cacheSizeMb * OzoneConsts.MB)
          .build();
    } catch (IOException ioEx) {
      LOG.error("Unable to initialize Recon container metadata store.", ioEx);
    }
    // Preserve existing contract: signal failure to the injector with null.
    return null;
  }
}

View File

@ -0,0 +1,138 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.recon.spi.impl;
import static org.apache.commons.compress.utils.CharsetNames.UTF_8;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
import org.apache.hadoop.utils.MetaStoreIterator;
import org.apache.hadoop.utils.MetadataStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.primitives.Longs;
/**
 * Implementation of the Recon Container DB Service.
 *
 * DB layout: each entry maps the raw byte key
 * (8 big-endian containerId bytes + "_" + keyPrefix in UTF-8) to a 4-byte
 * big-endian count of keys carrying that prefix in the container.
 */
@Singleton
public class ContainerDBServiceProviderImpl
    implements ContainerDBServiceProvider {

  private static final Logger LOG =
      LoggerFactory.getLogger(ContainerDBServiceProviderImpl.class);

  private final static String KEY_DELIMITER = "_";

  @Inject
  private MetadataStore containerDBStore;

  /**
   * Build the raw DB key for the given containerId, key-prefix tuple:
   * 8 big-endian containerId bytes followed by "_" + keyPrefix (UTF-8).
   * Shared by the store and lookup paths so the encoding cannot drift.
   *
   * @param containerKeyPrefix the containerId, key-prefix tuple.
   * @return raw DB key bytes.
   * @throws IOException if the UTF-8 charset lookup fails (effectively never).
   */
  private static byte[] getDBKey(ContainerKeyPrefix containerKeyPrefix)
      throws IOException {
    byte[] containerIdBytes = Longs.toByteArray(containerKeyPrefix
        .getContainerId());
    byte[] keyPrefixBytes = (KEY_DELIMITER + containerKeyPrefix
        .getKeyPrefix()).getBytes(UTF_8);
    return ArrayUtils.addAll(containerIdBytes, keyPrefixBytes);
  }

  /**
   * Concatenate the containerId and Key Prefix using a delimiter and store the
   * count into the container DB store.
   *
   * @param containerKeyPrefix the containerId, key-prefix tuple.
   * @param count Count of the keys matching that prefix.
   * @throws IOException on failure to write to the DB.
   */
  @Override
  public void storeContainerKeyMapping(ContainerKeyPrefix containerKeyPrefix,
                                       Integer count)
      throws IOException {
    byte[] dbValue = ByteBuffer.allocate(Integer.BYTES).putInt(count).array();
    containerDBStore.put(getDBKey(containerKeyPrefix), dbValue);
  }

  /**
   * Put together the key from the passed in object and get the count from
   * the container DB store.
   *
   * @param containerKeyPrefix the containerId, key-prefix tuple.
   * @return count of keys matching the containerId, key-prefix, or null if
   *         no mapping has been stored for that tuple.
   * @throws IOException on failure to read from the DB.
   */
  @Override
  public Integer getCountForForContainerKeyPrefix(
      ContainerKeyPrefix containerKeyPrefix) throws IOException {
    byte[] dbValue = containerDBStore.get(getDBKey(containerKeyPrefix));
    // Guard against an absent mapping: the store presumably returns null for
    // a missing key (TODO confirm against MetadataStore#get contract), and
    // ByteBuffer.wrap(null) would throw an NPE here.
    if (dbValue == null) {
      return null;
    }
    return ByteBuffer.wrap(dbValue).getInt();
  }

  /**
   * Use the DB's prefix seek iterator to start the scan from the given
   * container ID prefix.
   *
   * @param containerId the given containerId.
   * @return Map of (Key-Prefix,Count of Keys).
   */
  @Override
  public Map<String, Integer> getKeyPrefixesForContainer(long containerId) {
    Map<String, Integer> prefixes = new HashMap<>();
    MetaStoreIterator<MetadataStore.KeyValue> containerIterator =
        containerDBStore.iterator();
    byte[] containerIdPrefixBytes = Longs.toByteArray(containerId);
    containerIterator.prefixSeek(containerIdPrefixBytes);
    while (containerIterator.hasNext()) {
      MetadataStore.KeyValue keyValue = containerIterator.next();
      byte[] containerKey = keyValue.getKey();
      long containerIdFromDB = ByteBuffer.wrap(ArrayUtils.subarray(
          containerKey, 0, Long.BYTES)).getLong();

      //The prefix seek only guarantees that the iterator's head will be
      // positioned at the first prefix match. We still have to check the key
      // prefix.
      if (containerIdFromDB == containerId) {
        // Skip the 8 containerId bytes plus the 1-char "_" delimiter.
        byte[] keyPrefix = ArrayUtils.subarray(containerKey,
            containerIdPrefixBytes.length + 1,
            containerKey.length);
        try {
          prefixes.put(new String(keyPrefix, UTF_8),
              ByteBuffer.wrap(keyValue.getValue()).getInt());
        } catch (UnsupportedEncodingException e) {
          LOG.warn("Unable to read key prefix from container DB.", e);
        }
      } else {
        break; //Break when the first mismatch occurs.
      }
    }
    return prefixes;
  }
}

View File

@ -0,0 +1,22 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The classes in this package define the Service Provider implementations for
* Recon. This provides connectivity to underlying Ozone subsystems.
*/
package org.apache.hadoop.ozone.recon.spi.impl;

View File

@ -0,0 +1,148 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.recon.spi.impl;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
import org.apache.hadoop.utils.MetaStoreIterator;
import org.apache.hadoop.utils.MetadataStore;
import org.apache.hadoop.utils.MetadataStoreBuilder;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.junit.runner.RunWith;
import org.mockito.runners.MockitoJUnitRunner;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
/**
 * Unit Tests for ContainerDBServiceProviderImpl.
 */
@RunWith(MockitoJUnitRunner.class)
public class TestContainerDBServiceProviderImpl {

  @Rule
  public TemporaryFolder tempFolder = new TemporaryFolder();

  private MetadataStore containerDBStore;
  private ContainerDBServiceProvider containerDbServiceProvider
      = new ContainerDBServiceProviderImpl();
  private Injector injector;

  @Before
  public void setUp() throws IOException {
    tempFolder.create();
    File dbDir = tempFolder.getRoot();
    containerDBStore = MetadataStoreBuilder.newBuilder()
        .setConf(new OzoneConfiguration())
        .setCreateIfMissing(true)
        .setDbFile(dbDir)
        .build();
    // Creating the injector performs member injection on the
    // toInstance-bound service provider, wiring in containerDBStore.
    injector = Guice.createInjector(new AbstractModule() {
      @Override
      protected void configure() {
        bind(MetadataStore.class).toInstance(containerDBStore);
        bind(ContainerDBServiceProvider.class)
            .toInstance(containerDbServiceProvider);
      }
    });
  }

  @After
  public void tearDown() throws Exception {
    tempFolder.delete();
  }

  @Test
  public void testStoreContainerKeyMapping() throws Exception {
    long containerId = System.currentTimeMillis();
    Map<String, Integer> prefixCounts = new HashMap<>();
    prefixCounts.put("V1/B1/K1", 1);
    prefixCounts.put("V1/B1/K2", 2);
    prefixCounts.put("V1/B2/K3", 3);
    for (String prefix : prefixCounts.keySet()) {
      ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix(
          containerId, prefix);
      containerDbServiceProvider.storeContainerKeyMapping(
          containerKeyPrefix, prefixCounts.get(prefix));
    }

    // Each mapping must become exactly one entry in the backing store.
    int count = 0;
    MetaStoreIterator<MetadataStore.KeyValue> iterator =
        containerDBStore.iterator();
    while (iterator.hasNext()) {
      iterator.next();
      count++;
    }
    assertTrue(count == 3);
  }

  @Test
  public void testGetCountForForContainerKeyPrefix() throws Exception {
    long containerId = System.currentTimeMillis();

    containerDbServiceProvider.storeContainerKeyMapping(new
        ContainerKeyPrefix(containerId, "V1/B1/K1"), 2);

    Integer count = containerDbServiceProvider.
        getCountForForContainerKeyPrefix(new ContainerKeyPrefix(containerId,
            "V1/B1/K1"));
    assertTrue(count == 2);
  }

  @Test
  public void testGetKeyPrefixesForContainer() throws Exception {
    long containerId = System.currentTimeMillis();
    containerDbServiceProvider.storeContainerKeyMapping(new
        ContainerKeyPrefix(containerId, "V1/B1/K1"), 1);
    containerDbServiceProvider.storeContainerKeyMapping(new
        ContainerKeyPrefix(containerId, "V1/B1/K2"), 2);

    // Derive the second id from the first instead of calling
    // System.currentTimeMillis() again: two calls can land in the same
    // millisecond, which would merge the containers and make this flaky.
    long nextContainerId = containerId + 1L;
    containerDbServiceProvider.storeContainerKeyMapping(new
        ContainerKeyPrefix(nextContainerId, "V1/B2/K1"), 3);

    Map<String, Integer> keyPrefixMap = containerDbServiceProvider
        .getKeyPrefixesForContainer(containerId);
    assertTrue(keyPrefixMap.size() == 2);
    assertTrue(keyPrefixMap.get("V1/B1/K1") == 1);
    assertTrue(keyPrefixMap.get("V1/B1/K2") == 2);

    keyPrefixMap = containerDbServiceProvider
        .getKeyPrefixesForContainer(nextContainerId);
    assertTrue(keyPrefixMap.size() == 1);
    assertTrue(keyPrefixMap.get("V1/B2/K1") == 3);
  }
}

View File

@ -0,0 +1,21 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Package for recon server impl tests.
*/
package org.apache.hadoop.ozone.recon.spi.impl;

View File

@ -159,6 +159,11 @@
<artifactId>hadoop-hdds-tools</artifactId>
<version>${hdds.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-recon</artifactId>
<version>${ozone.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdds-container-service</artifactId>