HDDS-759. Create config settings for SCM and OM DB directories. Contributed by Arpit Agarwal.

Arpit Agarwal 2018-10-31 11:23:15 -07:00
parent 478b2cba0d
commit 08bb0362e0
36 changed files with 342 additions and 82 deletions

View File

@ -97,4 +97,11 @@ private HddsConfigKeys() {
"hdds.lock.max.concurrency";
public static final int HDDS_LOCK_MAX_CONCURRENCY_DEFAULT = 100;
+ // This configuration setting is used as a fallback location by all
+ // Ozone/HDDS services for their metadata. It is useful as a single
+ // config point for test/PoC clusters.
+ //
+ // In any real cluster where performance matters, the SCM, OM and DN
+ // metadata locations must be configured explicitly.
+ public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs";
}
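The fallback key keeps single-node setups to one setting. A minimal sketch of a test/PoC configuration, mirroring what the test changes later in this commit do (class name and temp path are illustrative, not part of the patch):

import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class MetadataDirsPocExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // One shared location; SCM, OM and DataNode metadata all fall back here.
    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, "/tmp/ozone-meta");
  }
}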

View File

@ -305,7 +305,7 @@ public static boolean isHddsEnabled(Configuration conf) {
public static String getDatanodeIdFilePath(Configuration conf) {
String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID);
if (dataNodeIDPath == null) {
- String metaPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
+ String metaPath = conf.get(HddsConfigKeys.OZONE_METADATA_DIRS);
if (Strings.isNullOrEmpty(metaPath)) {
// this means meta data is not found, in theory should not happen at
// this point because should've failed earlier.

View File

@ -31,6 +31,11 @@
@InterfaceStability.Unstable
public final class ScmConfigKeys {
+ // Location of SCM DB files. For now we just support a single
+ // metadata dir, but in the future we may support multiple for redundancy
+ // or performance.
+ public static final String OZONE_SCM_DB_DIRS = "ozone.scm.db.dirs";
public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY =
"scm.container.client.idle.threshold";
public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT =

View File

@ -74,9 +74,6 @@ public final class OzoneConfigKeys {
"ozone.trace.enabled";
public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;
- public static final String OZONE_METADATA_DIRS =
-     "ozone.metadata.dirs";
public static final String OZONE_METADATA_STORE_IMPL =
"ozone.metastore.impl";
public static final String OZONE_METADATA_STORE_IMPL_LEVELDB =

View File

@ -490,22 +490,31 @@
Default user permissions used in OM.
</description>
</property>
+ <property>
+   <name>ozone.om.db.dirs</name>
+   <value/>
+   <tag>OZONE, OM, STORAGE, PERFORMANCE</tag>
+   <description>
+     Directory where the OzoneManager stores its metadata. This should
+     be specified as a single directory. If the directory does not
+     exist then the OM will attempt to create it.
+     If undefined, the OM will log a warning and fall back to
+     ozone.metadata.dirs.
+   </description>
+ </property>
<property>
<name>ozone.metadata.dirs</name>
<value/>
- <tag>OZONE, OM, SCM, CONTAINER, REQUIRED, STORAGE</tag>
+ <tag>OZONE, OM, SCM, CONTAINER, STORAGE</tag>
<description>
-   Ozone metadata is shared among OM, which acts as the namespace
-   manager for ozone, SCM which acts as the block manager and data nodes
-   which maintain the name of the key(Key Name and BlockIDs). This
-   replicated and distributed metadata store is maintained under the
-   directory pointed by this key. Since metadata can be I/O intensive, at
-   least on OM and SCM we recommend having SSDs. If you have the luxury
-   of mapping this path to SSDs on all machines in the cluster, that will
-   be excellent.
+   This setting is the fallback location for SCM, OM and DataNodes
+   to store their metadata. This setting may be used in test/PoC clusters
+   to simplify configuration.
+   If Ratis metadata directories are not specified, the Ratis server will
+   emit a warning and use this path for storing its metadata too.
+   For production clusters, or any time you care about performance, it is
+   recommended that ozone.om.db.dirs, ozone.scm.db.dirs and
+   dfs.container.ratis.datanode.storage.dir be configured separately.
</description>
</property>
<property>
@ -533,7 +542,19 @@
Check the rocksdb documentation for more details.
</description>
</property>
+ <property>
+   <name>ozone.scm.db.dirs</name>
+   <value/>
+   <tag>OZONE, SCM, STORAGE, PERFORMANCE</tag>
+   <description>
+     Directory where the StorageContainerManager stores its metadata.
+     This should be specified as a single directory. If the directory
+     does not exist then the SCM will attempt to create it.
+     If undefined, the SCM will log a warning and fall back to
+     ozone.metadata.dirs.
+   </description>
+ </property>
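Together with dfs.container.ratis.datanode.storage.dir, these two new properties give the explicit per-service layout that the descriptions above recommend for production. A sketch of such a layout, written as conf.set calls for brevity (in a real cluster the values would normally live in ozone-site.xml; the /ssd paths and class name are illustrative):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class ExplicitDbDirsExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Per-service metadata locations, each ideally mapped to an SSD.
    conf.set("ozone.om.db.dirs", "/ssd1/om-db");
    conf.set("ozone.scm.db.dirs", "/ssd1/scm-db");
    conf.set("dfs.container.ratis.datanode.storage.dir", "/ssd2/ratis");
  }
}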
<property>
<name>ozone.scm.block.client.address</name>
<value/>

View File

@ -18,9 +18,10 @@
package org.apache.hadoop.hdds.scm;
import com.google.common.base.Optional;
- import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
+ import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.slf4j.Logger;
@ -331,19 +332,16 @@ public static int getContainerPort(Configuration conf) {
}
public static String getOzoneDatanodeRatisDirectory(Configuration conf) {
-   final String ratisDir = File.separator + "ratis";
    String storageDir = conf.get(
        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
    if (Strings.isNullOrEmpty(storageDir)) {
-     storageDir = conf.get(OzoneConfigKeys
-         .OZONE_METADATA_DIRS);
-     Preconditions.checkNotNull(storageDir, "ozone.metadata.dirs " +
-         "cannot be null, Please check your configs.");
-     storageDir = storageDir.concat(ratisDir);
      LOG.warn("Storage directory for Ratis is not configured." +
          "Mapping Ratis storage under {}. It is a good idea " +
-         "to map this to an SSD disk.", storageDir);
+         "to map this to an SSD disk. Falling back to {}",
+         storageDir, HddsConfigKeys.OZONE_METADATA_DIRS);
+     File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf);
+     storageDir = (new File(metaDirPath, "ratis")).getPath();
    }
    return storageDir;
}

View File

@ -18,6 +18,7 @@
import com.google.protobuf.BlockingService;
import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto
@ -117,7 +118,7 @@ public static OzoneConfiguration getConf() {
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(HDDS_DATANODE_DIR_KEY, GenericTestUtils
.getRandomizedTempPath());
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, GenericTestUtils
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, GenericTestUtils
.getRandomizedTempPath());
return conf;
}

View File

@ -20,6 +20,7 @@
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ipc.RPC;
@ -113,7 +114,7 @@ public void setUp() throws Exception {
if (!dataDir.mkdirs()) {
LOG.info("Data dir create failed.");
}
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
new File(testRoot, "scm").getAbsolutePath());
path = Paths.get(path.toString(),
TestDatanodeStateMachine.class.getSimpleName() + ".id").toString();

View File

@ -20,10 +20,10 @@
import org.apache.hadoop.conf.StorageUnit;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
- import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
@ -62,7 +62,7 @@ public void setUp() throws Exception {
conf = new OzoneConfiguration();
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.getRoot()
.getAbsolutePath());
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
folder.newFolder().getAbsolutePath());
}

View File

@ -17,17 +17,18 @@
package org.apache.hadoop.hdds.server;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ipc.RPC;
- import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.http.client.methods.HttpRequestBase;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.net.InetSocketAddress;
+ import java.util.Collection;
/**
* Generic utilities for all HDDS/Ozone servers.
@ -116,6 +117,38 @@ public static void releaseConnection(HttpRequestBase request) {
}
}
+ /**
+  * Get the location where SCM should store its metadata directories.
+  * Fall back to OZONE_METADATA_DIRS if not defined.
+  *
+  * @param conf the configuration to read
+  * @return the resolved SCM DB directory
+  */
+ public static File getScmDbDir(Configuration conf) {
+   final Collection<String> metadirs = conf.getTrimmedStringCollection(
+       ScmConfigKeys.OZONE_SCM_DB_DIRS);
+   if (metadirs.size() > 1) {
+     throw new IllegalArgumentException(
+         "Bad config setting " + ScmConfigKeys.OZONE_SCM_DB_DIRS +
+         ". SCM does not support multiple metadata dirs currently");
+   }
+   if (metadirs.size() == 1) {
+     final File dbDirPath = new File(metadirs.iterator().next());
+     if (!dbDirPath.exists() && !dbDirPath.mkdirs()) {
+       throw new IllegalArgumentException("Unable to create directory " +
+           dbDirPath + " specified in configuration setting " +
+           ScmConfigKeys.OZONE_SCM_DB_DIRS);
+     }
+     return dbDirPath;
+   }
+   LOG.warn("{} is not configured. We recommend adding this setting. " +
+       "Falling back to {} instead.",
+       ScmConfigKeys.OZONE_SCM_DB_DIRS, HddsConfigKeys.OZONE_METADATA_DIRS);
+   return getOzoneMetaDirPath(conf);
+ }
/**
* Checks and creates Ozone Metadir Path if it does not exist.
@ -125,9 +158,13 @@ public static void releaseConnection(HttpRequestBase request) {
* @return File MetaDir
*/
public static File getOzoneMetaDirPath(Configuration conf) {
-   String metaDirPath = conf.getTrimmed(OzoneConfigKeys
-       .OZONE_METADATA_DIRS);
-   Preconditions.checkNotNull(metaDirPath);
+   String metaDirPath = conf.getTrimmed(HddsConfigKeys.OZONE_METADATA_DIRS);
+   if (metaDirPath == null || metaDirPath.isEmpty()) {
+     throw new IllegalArgumentException(
+         HddsConfigKeys.OZONE_METADATA_DIRS + " must be defined.");
+   }
File dirPath = new File(metaDirPath);
if (!dirPath.exists() && !dirPath.mkdirs()) {
throw new IllegalArgumentException("Unable to create paths. Path: " +
@ -138,7 +175,7 @@ public static File getOzoneMetaDirPath(Configuration conf) {
public static void setOzoneMetaDirPath(OzoneConfiguration conf,
String path) {
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path);
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, path);
}
}
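Callers resolve their concrete DB paths against the directory this helper returns; the SCMContainerManager and SCMPipelineManager hunks below do exactly that. A minimal sketch of the pattern, assuming ozone.scm.db.dirs or ozone.metadata.dirs is set (the DB file name and class name are illustrative):

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.server.ServerUtils;

public class ScmDbDirUsage {
  public static void main(String[] args) {
    Configuration conf = new OzoneConfiguration();
    // Resolves ozone.scm.db.dirs, falling back to ozone.metadata.dirs;
    // throws IllegalArgumentException if neither is configured.
    File scmDbDir = ServerUtils.getScmDbDir(conf);
    System.out.println(new File(scmDbDir, "scm-container.db"));
  }
}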

View File

@ -32,6 +32,7 @@
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+ import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.hdds.server.events.EventHandler;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdfs.DFSUtil;
@ -69,7 +70,6 @@
.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_DB_CACHE_SIZE_MB;
- import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
import static org.apache.hadoop.ozone.OzoneConsts.DELETED_BLOCK_DB;
/**
@ -105,10 +105,10 @@ public DeletedBlockLogImpl(Configuration conf,
maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY,
OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT);
-   File metaDir = getOzoneMetaDirPath(conf);
-   String scmMetaDataDir = metaDir.getPath();
-   File deletedLogDbPath = new File(scmMetaDataDir, DELETED_BLOCK_DB);
-   int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
+   final File metaDir = ServerUtils.getScmDbDir(conf);
+   final String scmMetaDataDir = metaDir.getPath();
+   final File deletedLogDbPath = new File(scmMetaDataDir, DELETED_BLOCK_DB);
+   final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
// Load store of all transactions.
deletedStore = MetadataStoreBuilder.newBuilder()

View File

@ -41,6 +41,7 @@
.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+ import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.lease.Lease;
@ -73,7 +74,6 @@
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
.FAILED_TO_CHANGE_CONTAINER_STATE;
- import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB;
/**
@ -111,7 +111,7 @@ public SCMContainerManager(final Configuration conf,
final NodeManager nodeManager, PipelineManager pipelineManager,
final EventPublisher eventPublisher) throws IOException {
- final File metaDir = getOzoneMetaDirPath(conf);
+ final File metaDir = ServerUtils.getScmDbDir(conf);
final File containerDBPath = new File(metaDir, SCM_CONTAINER_DB);
final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
OZONE_SCM_DB_CACHE_SIZE_DEFAULT);

View File

@ -27,6 +27,7 @@
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.node.NodeManager;
+ import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.utils.MetadataStore;
@ -46,7 +47,6 @@
.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
import static org.apache.hadoop.hdds.scm
.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
- import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
import static org.apache.hadoop.ozone.OzoneConsts.SCM_PIPELINE_DB;
/**
@ -74,8 +74,8 @@ public SCMPipelineManager(Configuration conf, NodeManager nodeManager,
this.pipelineFactory = new PipelineFactory(nodeManager, stateManager, conf);
int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
- File metaDir = getOzoneMetaDirPath(conf);
- File pipelineDBPath = new File(metaDir, SCM_PIPELINE_DB);
+ final File metaDir = ServerUtils.getScmDbDir(conf);
+ final File pipelineDBPath = new File(metaDir, SCM_PIPELINE_DB);
this.pipelineStore =
MetadataStoreBuilder.newBuilder()
.setCreateIfMissing(true)

View File

@ -19,13 +19,13 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+ import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.ozone.common.Storage;
import java.io.IOException;
import java.util.Properties;
import java.util.UUID;
- import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID;
import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR;
@ -40,7 +40,7 @@ public class SCMStorage extends Storage {
* @throws IOException if any directories are inaccessible.
*/
public SCMStorage(OzoneConfiguration conf) throws IOException {
- super(NodeType.SCM, getOzoneMetaDirPath(conf), STORAGE_DIR);
+ super(NodeType.SCM, ServerUtils.getScmDbDir(conf), STORAGE_DIR);
}
public void setScmId(String scmId) throws IOException {

View File

@ -18,8 +18,12 @@
package org.apache.hadoop.hdds.scm;
+ import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+ import org.apache.hadoop.hdds.server.ServerUtils;
+ import org.apache.hadoop.test.PathUtils;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
@ -27,6 +31,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+ import java.io.File;
import java.net.InetSocketAddress;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
@ -34,6 +39,7 @@
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
import static org.junit.Assert.assertEquals;
+ import static org.junit.Assert.assertTrue;
/**
* Unit tests for {@link HddsServerUtil}
@ -150,4 +156,48 @@ public void testClientFailsWithMultipleScmNames() {
thrown.expect(IllegalArgumentException.class);
HddsServerUtil.getScmAddressForDataNodes(conf);
}
+ /**
+  * Test {@link ServerUtils#getScmDbDir}.
+  */
+ @Test
+ public void testGetScmDbDir() {
+   final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class);
+   final File dbDir = new File(testDir, "scmDbDir");
+   final File metaDir = new File(testDir, "metaDir"); // should be ignored.
+   final Configuration conf = new OzoneConfiguration();
+   conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dbDir.getPath());
+   conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
+   try {
+     assertEquals(dbDir, ServerUtils.getScmDbDir(conf));
+     assertTrue(dbDir.exists()); // should have been created.
+   } finally {
+     FileUtils.deleteQuietly(dbDir);
+   }
+ }
+
+ /**
+  * Test {@link ServerUtils#getScmDbDir} with fallback to OZONE_METADATA_DIRS
+  * when OZONE_SCM_DB_DIRS is undefined.
+  */
+ @Test
+ public void testGetScmDbDirWithFallback() {
+   final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class);
+   final File metaDir = new File(testDir, "metaDir");
+   final Configuration conf = new OzoneConfiguration();
+   conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
+   try {
+     assertEquals(metaDir, ServerUtils.getScmDbDir(conf));
+     assertTrue(metaDir.exists()); // should have been created.
+   } finally {
+     FileUtils.deleteQuietly(metaDir);
+   }
+ }
+
+ @Test
+ public void testNoScmDbDirConfigured() {
+   thrown.expect(IllegalArgumentException.class);
+   ServerUtils.getScmDbDir(new OzoneConfiguration());
+ }
}

View File

@ -20,6 +20,7 @@
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
@ -34,7 +35,6 @@
import org.apache.hadoop.hdds.server.events.EventHandler;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdds.server.events.EventQueue;
- import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.common.Storage.StorageState;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
import org.apache.hadoop.test.GenericTestUtils;
@ -79,7 +79,7 @@ public void setUp() throws Exception {
String path = GenericTestUtils
.getTempPath(TestBlockManager.class.getSimpleName());
testDir = Paths.get(path).toFile();
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path);
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, path);
eventQueue = new EventQueue();
boolean folderExisted = testDir.exists() || testDir.mkdirs();
if (!folderExisted) {

View File

@ -19,6 +19,7 @@
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomUtils;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
@ -60,7 +61,6 @@
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
- import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.when;
@ -81,7 +81,7 @@ public void setup() throws Exception {
TestDeletedBlockLog.class.getSimpleName());
conf = new OzoneConfiguration();
conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
- conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
containerManager = Mockito.mock(SCMContainerManager.class);
deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager);
dnList = new ArrayList<>(3);

View File

@ -21,6 +21,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.fs.FileUtil;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers
@ -28,7 +29,6 @@
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
import org.apache.hadoop.hdds.server.events.EventQueue;
- import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.AfterClass;
@ -65,7 +65,7 @@ public static void setUp() throws Exception {
testDir = GenericTestUtils
.getTestDir(TestCloseContainerEventHandler.class.getSimpleName());
configuration
-     .set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
+     .set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
nodeManager = new MockNodeManager(true, 10);
PipelineManager pipelineManager =
new SCMPipelineManager(configuration, nodeManager, eventQueue);

View File

@ -21,6 +21,7 @@
import java.util.HashSet;
import java.util.List;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@ -40,7 +41,6 @@
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdds.server.events.EventQueue;
- import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Before;
@ -74,7 +74,7 @@ public void test() throws IOException {
this.getClass().getSimpleName());
//GIVEN
OzoneConfiguration conf = new OzoneConfiguration();
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir);
EventQueue eventQueue = new EventQueue();
PipelineManager pipelineManager =
new SCMPipelineManager(conf, nodeManager, eventQueue);

View File

@ -18,6 +18,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@ -34,7 +35,6 @@
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
import org.apache.hadoop.hdds.server.events.EventQueue;
- import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.AfterClass;
@ -77,7 +77,7 @@ public static void setUp() throws Exception {
testDir = GenericTestUtils
.getTestDir(TestSCMContainerManager.class.getSimpleName());
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
testDir.getAbsolutePath());
conf.setTimeDuration(
ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT,

View File

@ -21,6 +21,7 @@
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
@ -36,7 +37,6 @@
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
import org.apache.hadoop.hdds.server.events.EventQueue;
- import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.test.PathUtils;
import org.junit.Ignore;
@ -130,7 +130,7 @@ public void testContainerPlacementCapacity() throws IOException,
final File testDir = PathUtils.getTestDir(
TestContainerPlacement.class);
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
testDir.getAbsolutePath());
conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);

View File

@ -26,6 +26,7 @@
import java.util.stream.Collectors;
import org.apache.hadoop.fs.FileUtil;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@ -48,7 +49,6 @@
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdds.server.events.EventQueue;
- import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Assert;
@ -74,7 +74,7 @@ public void setup() throws IOException {
OzoneConfiguration conf = new OzoneConfiguration();
storageDir = GenericTestUtils.getTempPath(
TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID());
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, storageDir);
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
eventQueue = new EventQueue();
nodeManager = new SCMNodeManager(conf, "cluster1", null, eventQueue);
PipelineManager pipelineManager =

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.hdds.scm.node;
import org.apache.hadoop.fs.FileUtil;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.TestUtils;
@ -30,7 +31,6 @@
.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.server.events.EventQueue;
- import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
@ -104,7 +104,7 @@ public void cleanup() {
*/
OzoneConfiguration getConf() {
OzoneConfiguration conf = new OzoneConfiguration();
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
testDir.getAbsolutePath());
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
TimeUnit.MILLISECONDS);

View File

@ -75,6 +75,8 @@
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
+ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
import static org.mockito.Mockito.mock;
import java.io.File;
@ -82,7 +84,6 @@
import java.util.UUID;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
- import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
import static org.apache.hadoop.ozone.container.common.ContainerTestUtils
.createEndpoint;
import static org.hamcrest.Matchers.lessThanOrEqualTo;

View File

@ -17,12 +17,20 @@
package org.apache.hadoop.ozone;
+ import java.io.File;
import java.net.InetSocketAddress;
+ import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
+ import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.net.NetUtils;
import com.google.common.base.Optional;
+ import org.apache.hadoop.ozone.om.OMConfigKeys;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
@ -36,6 +44,7 @@
* communication.
*/
public final class OmUtils {
+ private static final Logger LOG = LoggerFactory.getLogger(OmUtils.class);
private OmUtils() {
}
@ -91,4 +100,37 @@ public static int getOmRestPort(Configuration conf) {
getPortNumberFromConfigKeys(conf, OZONE_OM_HTTP_ADDRESS_KEY);
return port.or(OZONE_OM_HTTP_BIND_PORT_DEFAULT);
}
+ /**
+  * Get the location where OM should store its metadata directories.
+  * Fall back to OZONE_METADATA_DIRS if not defined.
+  *
+  * @param conf the configuration to read
+  * @return the resolved OM DB directory
+  */
+ public static File getOmDbDir(Configuration conf) {
+   final Collection<String> dbDirs = conf.getTrimmedStringCollection(
+       OMConfigKeys.OZONE_OM_DB_DIRS);
+   if (dbDirs.size() > 1) {
+     throw new IllegalArgumentException(
+         "Bad configuration setting " + OMConfigKeys.OZONE_OM_DB_DIRS +
+         ". OM does not support multiple metadata dirs currently.");
+   }
+   if (dbDirs.size() == 1) {
+     final File dbDirPath = new File(dbDirs.iterator().next());
+     if (!dbDirPath.exists() && !dbDirPath.mkdirs()) {
+       throw new IllegalArgumentException("Unable to create directory " +
+           dbDirPath + " specified in configuration setting " +
+           OMConfigKeys.OZONE_OM_DB_DIRS);
+     }
+     return dbDirPath;
+   }
+   LOG.warn("{} is not configured. We recommend adding this setting. " +
+       "Falling back to {} instead.",
+       OMConfigKeys.OZONE_OM_DB_DIRS, HddsConfigKeys.OZONE_METADATA_DIRS);
+   return ServerUtils.getOzoneMetaDirPath(conf);
+ }
}
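Note the dbDirs.size() > 1 guard: like getScmDbDir, this helper deliberately rejects a comma-separated list until multi-directory support exists. A small sketch of that failure mode (paths and class name are illustrative):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMConfigKeys;

public class MultiDbDirRejection {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Two comma-separated directories are rejected outright.
    conf.set(OMConfigKeys.OZONE_OM_DB_DIRS, "/data1/om-db,/data2/om-db");
    try {
      OmUtils.getOmDbDir(conf);
    } catch (IllegalArgumentException e) {
      System.out.println("Rejected as expected: " + e.getMessage());
    }
  }
}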

View File

@ -28,6 +28,9 @@ public final class OMConfigKeys {
private OMConfigKeys() {
}
+ // Location where the OM stores its DB files. In the future we may support
+ // multiple entries for performance (sharding).
+ public static final String OZONE_OM_DB_DIRS = "ozone.om.db.dirs";
public static final String OZONE_OM_HANDLER_COUNT_KEY =
"ozone.om.handler.count.key";

View File

@ -0,0 +1,91 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.rules.Timeout;
import java.io.File;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
* Unit tests for {@link OmUtils}.
*/
public class TestOmUtils {
@Rule
public Timeout timeout = new Timeout(60_000);
@Rule
public ExpectedException thrown = ExpectedException.none();
/**
* Test {@link OmUtils#getOmDbDir}.
*/
@Test
public void testGetOmDbDir() {
final File testDir = PathUtils.getTestDir(TestOmUtils.class);
final File dbDir = new File(testDir, "omDbDir");
final File metaDir = new File(testDir, "metaDir"); // should be ignored.
final Configuration conf = new OzoneConfiguration();
conf.set(OMConfigKeys.OZONE_OM_DB_DIRS, dbDir.getPath());
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
try {
assertEquals(dbDir, OmUtils.getOmDbDir(conf));
assertTrue(dbDir.exists()); // should have been created.
} finally {
FileUtils.deleteQuietly(dbDir);
}
}
/**
* Test {@link OmUtils#getOmDbDir} with fallback to OZONE_METADATA_DIRS
* when OZONE_OM_DB_DIRS is undefined.
*/
@Test
public void testGetOmDbDirWithFallback() {
final File testDir = PathUtils.getTestDir(TestOmUtils.class);
final File metaDir = new File(testDir, "metaDir");
final Configuration conf = new OzoneConfiguration();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
try {
assertEquals(metaDir, OmUtils.getOmDbDir(conf));
assertTrue(metaDir.exists()); // should have been created.
} finally {
FileUtils.deleteQuietly(metaDir);
}
}
@Test
public void testNoOmDbDirConfigured() {
thrown.expect(IllegalArgumentException.class);
OmUtils.getOmDbDir(new OzoneConfiguration());
}
}

View File

@ -20,6 +20,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@ -28,7 +29,6 @@
import org.apache.hadoop.hdds.scm.container.MockNodeManager;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
import org.apache.hadoop.hdds.server.events.EventQueue;
- import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.AfterClass;
import org.junit.Assert;
@ -54,7 +54,7 @@ public static void setUp() throws Exception {
conf = new OzoneConfiguration();
testDir = GenericTestUtils
.getTestDir(TestSCMPipelineManager.class.getSimpleName());
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
boolean folderExisted = testDir.exists() || testDir.mkdirs();
if (!folderExisted) {
throw new IOException("Unable to create test directory path");

View File

@ -24,6 +24,7 @@
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@ -389,7 +390,7 @@ private void initializeConfiguration() throws IOException {
conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, ozoneEnabled);
Path metaDir = Paths.get(path, "ozone-meta");
Files.createDirectories(metaDir);
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, metaDir.toString());
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString());
configureTrace();
}
@ -466,7 +467,7 @@ private List<HddsDatanodeService> createHddsDatanodes(
Files.createDirectories(metaDir);
Files.createDirectories(dataDir);
Files.createDirectories(ratisDir);
- dnConf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, metaDir.toString());
+ dnConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString());
dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.toString());
dnConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
ratisDir.toString());

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.ozone;
import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
@ -47,7 +48,6 @@
import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT;
- import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
import static org.junit.Assert.*;
/**
@ -65,8 +65,7 @@ public class TestMiniOzoneCluster {
@BeforeClass
public static void setup() {
conf = new OzoneConfiguration();
- conf.set(OZONE_METADATA_DIRS,
-     TEST_ROOT.toString());
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, TEST_ROOT.toString());
conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
WRITE_TMP.mkdirs();
READ_TMP.mkdirs();
@ -156,7 +155,7 @@ public void testContainerRandomPort() throws IOException {
Configuration ozoneConf = SCMTestUtils.getConf();
File testDir = PathUtils.getTestDir(TestOzoneContainer.class);
ozoneConf.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath());
- ozoneConf.set(OZONE_METADATA_DIRS,
+ ozoneConf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
TEST_ROOT.toString());
// Each instance of SM will create an ozone container

View File

@ -34,6 +34,7 @@
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.RandomUtils;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
@ -388,7 +389,7 @@ public void testSCMInitialization() throws Exception {
final String path = GenericTestUtils.getTempPath(
UUID.randomUUID().toString());
Path scmPath = Paths.get(path, "scm-meta");
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
StartupOption.INIT.setClusterId("testClusterId");
// This will initialize SCM
@ -410,7 +411,7 @@ public void testSCMReinitialization() throws Exception {
final String path = GenericTestUtils.getTempPath(
UUID.randomUUID().toString());
Path scmPath = Paths.get(path, "scm-meta");
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
//This will set the cluster id in the version file
MiniOzoneCluster cluster =
MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
@ -430,7 +431,7 @@ public void testSCMInitializationFailure() throws IOException {
final String path =
GenericTestUtils.getTempPath(UUID.randomUUID().toString());
Path scmPath = Paths.get(path, "scm-meta");
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
exception.expect(SCMException.class);
exception.expectMessage("SCM not initialized.");
@ -455,7 +456,7 @@ public void testScmInfo() throws Exception {
final String path =
GenericTestUtils.getTempPath(UUID.randomUUID().toString());
Path scmPath = Paths.get(path, "scm-meta");
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
SCMStorage scmStore = new SCMStorage(conf);
String clusterId = UUID.randomUUID().toString();

View File

@ -18,6 +18,7 @@
import org.apache.commons.lang3.RandomStringUtils;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdfs.DFSUtil;
@ -1323,7 +1324,7 @@ public void testOmInitializationFailure() throws Exception {
final String path =
GenericTestUtils.getTempPath(UUID.randomUUID().toString());
Path metaDirPath = Paths.get(path, "om-meta");
- config.set(OzoneConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
+ config.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
config.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
config.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
config.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,

View File

@ -22,11 +22,12 @@
import java.util.UUID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.server.ServerUtils;
+ import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.common.Storage;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID;
- import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
/**
* OMStorage is responsible for management of the StorageDirectories used by
@ -42,7 +43,7 @@ public class OMStorage extends Storage {
* @throws IOException if any directories are inaccessible.
*/
public OMStorage(OzoneConfiguration conf) throws IOException {
- super(NodeType.OM, getOzoneMetaDirPath(conf), STORAGE_DIR);
+ super(NodeType.OM, OmUtils.getOmDbDir(conf), STORAGE_DIR);
}
public void setScmId(String scmId) throws IOException {

View File

@ -22,7 +22,9 @@
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+ import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.hdfs.DFSUtil;
+ import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.om.exceptions.OMException;
@ -54,7 +56,6 @@
import java.util.Map;
import java.util.stream.Collectors;
- import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
@ -114,7 +115,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
private final Table s3Table;
public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException {
- File metaDir = getOzoneMetaDirPath(conf);
+ File metaDir = OmUtils.getOmDbDir(conf);
this.lock = new OzoneManagerLock(conf);
this.openKeyExpireThresholdMS = 1000 * conf.getInt(
OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS,

View File

@ -16,6 +16,7 @@
*/
package org.apache.hadoop.ozone.om;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
@ -171,7 +172,7 @@ public void testOmDB() throws Exception {
String dbOutPath = GenericTestUtils.getTempPath(
UUID.randomUUID() + "/out_sql.db");
- String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
+ String dbRootPath = conf.get(HddsConfigKeys.OZONE_METADATA_DIRS);
String dbPath = dbRootPath + "/" + OM_DB_NAME;
String[] args = {"-p", dbPath, "-o", dbOutPath};

View File

@ -17,6 +17,7 @@
*/
package org.apache.hadoop.ozone.scm;
+ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
@ -192,7 +193,7 @@ public void testConvertContainerDB() throws Exception {
UUID.randomUUID() + "/out_sql.db");
// TODO : the following will fail due to empty Datanode list, need to fix.
//String dnUUID = cluster.getDataNodes().get(0).getUuid();
- String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
+ String dbRootPath = conf.get(HddsConfigKeys.OZONE_METADATA_DIRS);
String dbPath = dbRootPath + "/" + SCM_CONTAINER_DB;
String[] args = {"-p", dbPath, "-o", dbOutPath};
Connection conn;