From a1a3ba652995bc3559cfd6ba5070db241eb1cb07 Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Tue, 26 Sep 2017 18:09:32 -0700 Subject: [PATCH] HDFS-12454. Ozone : the sample ozone-site.xml in OzoneGettingStarted does not work. Contributed by Chen Liang. --- .../apache/hadoop/ozone/OzoneConfigKeys.java | 4 +- .../org/apache/hadoop/scm/ScmConfigKeys.java | 2 + .../states/datanode/InitDatanodeState.java | 3 +- .../states/datanode/RunningDatanodeState.java | 3 +- .../server/ratis/XceiverServerRatis.java | 4 +- .../container/ozoneimpl/OzoneContainer.java | 2 +- .../hadoop/ozone/web/utils/OzoneUtils.java | 27 +++++++++- .../src/main/resources/ozone-default.xml | 2 +- .../site/markdown/OzoneGettingStarted.md.vm | 51 ++++++++++--------- .../apache/hadoop/ozone/MiniOzoneCluster.java | 8 +-- .../hadoop/ozone/TestMiniOzoneCluster.java | 6 +-- .../common/TestDatanodeStateMachine.java | 2 +- .../ozone/container/common/TestEndPoint.java | 4 +- .../hadoop/ozone/ksm/TestKSMSQLCli.java | 2 +- .../hadoop/ozone/scm/TestContainerSQLCli.java | 8 +-- .../ozone/scm/block/TestBlockManager.java | 2 +- .../ozone/scm/block/TestDeletedBlockLog.java | 4 +- .../scm/container/TestContainerMapping.java | 2 +- .../scm/node/TestContainerPlacement.java | 2 +- .../ozone/scm/node/TestNodeManager.java | 2 +- .../scm/node/TestSCMNodePoolManager.java | 2 +- 21 files changed, 88 insertions(+), 54 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index b133f11a470..d51c533327d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -77,8 +77,8 @@ public final class OzoneConfigKeys { "ozone.trace.enabled"; public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false; - 
public static final String OZONE_CONTAINER_METADATA_DIRS = - "ozone.container.metadata.dirs"; + public static final String OZONE_METADATA_DIRS = + "ozone.metadata.dirs"; public static final String OZONE_METADATA_STORE_IMPL = "ozone.metastore.impl"; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java index 0a7b777db0f..83345176d70 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java @@ -171,6 +171,8 @@ public final class ScmConfigKeys { // if this value is not set then container startup will fail. public static final String OZONE_SCM_DATANODE_ID = "ozone.scm.datanode.id"; + public static final String OZONE_SCM_DATANODE_ID_PATH_DEFAULT = "datanode.id"; + public static final String OZONE_SCM_DB_CACHE_SIZE_MB = "ozone.scm.db.cache.size.mb"; public static final int OZONE_SCM_DB_CACHE_SIZE_DEFAULT = 128; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java index 0552a2f5913..6ca94bfc3ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java @@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachin import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import 
org.apache.hadoop.ozone.container.common.states.DatanodeState; +import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.scm.ScmConfigKeys; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -104,7 +105,7 @@ public class InitDatanodeState implements DatanodeState, * and persist the ID to a local file. */ private void persistContainerDatanodeID() throws IOException { - String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID); + String dataNodeIDPath = OzoneUtils.getDatanodeIDPath(conf); if (Strings.isNullOrEmpty(dataNodeIDPath)) { LOG.error("A valid file path is needed for config setting {}", ScmConfigKeys.OZONE_SCM_DATANODE_ID); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java index 590df2df914..fc9af5b0933 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.container.common.states.endpoint.HeartbeatEndpoin import org.apache.hadoop.ozone.container.common.states.endpoint.RegisterEndpointTask; import org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask; import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.scm.ScmConfigKeys; import org.apache.hadoop.util.Time; import org.slf4j.Logger; @@ -112,7 +113,7 @@ public class RunningDatanodeState implements DatanodeState { */ private StorageContainerDatanodeProtocolProtos.ContainerNodeIDProto getContainerNodeID() { - String dataNodeIDPath = 
conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID); + String dataNodeIDPath = OzoneUtils.getDatanodeIDPath(conf); if (dataNodeIDPath == null || dataNodeIDPath.isEmpty()) { LOG.error("A valid file path is needed for config setting {}", ScmConfigKeys.OZONE_SCM_DATANODE_ID); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index 5bd12115678..8803058615b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -97,8 +97,8 @@ public final class XceiverServerRatis implements XceiverServerSpi { if (Strings.isNullOrEmpty(storageDir)) { storageDir = ozoneConf.get(OzoneConfigKeys - .OZONE_CONTAINER_METADATA_DIRS); - Preconditions.checkNotNull(storageDir, "ozone.container.metadata.dirs " + + .OZONE_METADATA_DIRS); + Preconditions.checkNotNull(storageDir, "ozone.metadata.dirs " + "cannot be null, Please check your configs."); storageDir = storageDir.concat(ratisDir); LOG.warn("Storage directory for Ratis is not configured. 
Mapping Ratis " + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index a4904b2eddb..717a3bf65c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -85,7 +85,7 @@ public class OzoneContainer { this.ozoneConfig = ozoneConfig; List locations = new LinkedList<>(); String[] paths = ozoneConfig.getStrings( - OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS); + OzoneConfigKeys.OZONE_METADATA_DIRS); if (paths != null && paths.length > 0) { for (String p : paths) { locations.add(StorageLocation.parse( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java index 2fe64d50b64..64596264ef8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.web.utils; import com.google.common.base.Preconditions; +import com.google.common.base.Strings; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -28,6 +29,7 @@ import org.apache.hadoop.ozone.web.exceptions.ErrorTable; import org.apache.hadoop.ozone.web.exceptions.OzoneException; import org.apache.hadoop.ozone.web.handlers.UserArgs; import org.apache.hadoop.ozone.client.rest.headers.Header; +import org.apache.hadoop.scm.ScmConfigKeys; import org.apache.hadoop.util.Time; import javax.ws.rs.core.HttpHeaders; @@ -38,6 
+40,7 @@ import java.io.File; import java.net.InetAddress; import java.net.UnknownHostException; import java.nio.charset.Charset; +import java.nio.file.Paths; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; @@ -316,7 +319,7 @@ public final class OzoneUtils { */ public static File getScmMetadirPath(Configuration conf) { String metaDirPath = conf.getTrimmed(OzoneConfigKeys - .OZONE_CONTAINER_METADATA_DIRS); + .OZONE_METADATA_DIRS); Preconditions.checkNotNull(metaDirPath); File dirPath = new File(metaDirPath); if (!dirPath.exists() && !dirPath.mkdirs()) { @@ -326,6 +329,28 @@ public final class OzoneUtils { return dirPath; } + /** + * Get the path for datanode id file. + * + * @param conf - Configuration + * @return the path of datanode id as string + */ + public static String getDatanodeIDPath(Configuration conf) { + String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID); + if (Strings.isNullOrEmpty(dataNodeIDPath)) { + String metaPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); + if (Strings.isNullOrEmpty(metaPath)) { + // this means meta data is not found, in theory should not happen at + // this point because should've failed earlier. + throw new IllegalArgumentException("Unable to locate meta data " + + "directory when getting datanode id path"); + } + dataNodeIDPath = Paths.get(metaPath, + ScmConfigKeys.OZONE_SCM_DATANODE_ID_PATH_DEFAULT).toString(); + } + return dataNodeIDPath; + } + /** + * Convert time in millisecond to a human readable format required in ozone. 
* @return a human readable string for the input time diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml index f98285a1ea6..0997f2a5e7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml @@ -59,7 +59,7 @@ - ozone.container.metadata.dirs + ozone.metadata.dirs Ozone metadata dir path. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneGettingStarted.md.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneGettingStarted.md.vm index 456dfed71a8..b9abd8484e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneGettingStarted.md.vm +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneGettingStarted.md.vm @@ -63,17 +63,17 @@ place and not mingled with HDFS settings. True ``` - * _*ozone.container.metadata.dirs*_ Ozone is designed with modern hardware + * _*ozone.metadata.dirs*_ Ozone is designed with modern hardware in mind. It tries to use SSDs effectively. So users can specify where the - datanode metadata must reside. Usually you pick your fastest disk (SSD if - you have them on your datanodes). Datanodes will write the container metadata - to these disks. This is a required setting, if this is missing datanodes will + metadata must reside. Usually you pick your fastest disk (SSD if + you have them on your nodes). KSM, SCM and datanode will write the metadata + to these disks. This is a required setting, if this is missing Ozone will fail to come up. Here is an example, ``` - ozone.container.metadata.dirs - /data/disk1/container/meta + ozone.metadata.dirs + /data/disk1/meta ``` @@ -135,10 +135,11 @@ Here is a quick summary of settings needed by Ozone. 
| Setting | Value | Comment | |--------------------------------|------------------------------|------------------------------------------------------------------| | ozone.enabled | True | This enables SCM and containers in HDFS cluster. | -| ozone.container.metadata.dirs | file path | The container metadata will be stored here in the datanode. | +| ozone.metadata.dirs | file path | The metadata will be stored here. | | ozone.scm.names | SCM server name | Hostname:port or or IP:port address of SCM. | -| ozone.scm.datanode.id | file path | Data node ID is the location of datanode's ID file | -| ozone.scm.block.client.address | SCM server name | Used by services like KSM | +| ozone.scm.block.client.address | SCM server name and port | Used by services like KSM | +| ozone.scm.client.address | SCM server name and port | Used by client side | +| ozone.scm.datanode.address | SCM server name and port | Used by datanode to talk to SCM | | ozone.ksm.address | KSM server name | Used by Ozone handler and Ozone file system. | Here is a working example of`ozone-site.xml`. @@ -153,30 +154,34 @@ Here is a quick summary of settings needed by Ozone. 
- ozone.container.metadata.dirs - /data/disk1/scm/meta + ozone.metadata.dirs + /data/disk1/ozone/meta - ozone.scm.names - scm.hadoop.apache.org + 127.0.0.1 - ozone.scm.datanode.id - /data/disk1/scm/meta/node/datanode.id - - - - ozone.scm.block.client.address - scm.hadoop.apache.org + ozone.scm.client.address + 127.0.0.1:9860 - ozone.ksm.address - ksm.hadoop.apache.org - + ozone.scm.block.client.address + 127.0.0.1:9863 + + + + ozone.scm.datanode.address + 127.0.0.1:9861 + + + + ozone.ksm.address + 127.0.0.1:9874 + ``` diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index 3e90a343f16..d43fa52d61e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -113,9 +113,9 @@ public final class MiniOzoneCluster extends MiniDFSCluster setConf(i, dnConf, OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, getInstanceStorageDir(i, -1).getCanonicalPath()); String containerMetaDirs = dnConf.get( - OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS) + "-dn-" + i; + OzoneConfigKeys.OZONE_METADATA_DIRS) + "-dn-" + i; Path containerMetaDirPath = Paths.get(containerMetaDirs); - setConf(i, dnConf, OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS, + setConf(i, dnConf, OzoneConfigKeys.OZONE_METADATA_DIRS, containerMetaDirs); Path containerRootPath = containerMetaDirPath.resolve(OzoneConsts.CONTAINER_ROOT_PREFIX); @@ -476,7 +476,7 @@ public final class MiniOzoneCluster extends MiniDFSCluster if (scmMetadataDir.isPresent()) { // if user specifies a path in the test, it is assumed that user takes // care of creating and cleaning up that directory after the tests. 
- conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS, + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmMetadataDir.get()); return; } @@ -487,7 +487,7 @@ public final class MiniOzoneCluster extends MiniDFSCluster Files.createDirectories(scmPath); Path containerPath = scmPath.resolve(OzoneConsts.CONTAINER_ROOT_PREFIX); Files.createDirectories(containerPath); - conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS, scmPath + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath .toString()); // TODO : Fix this, we need a more generic mechanism to map diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index b0fb8fb3f2c..39db79857c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -44,7 +44,7 @@ import java.util.List; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; import static org.junit.Assert.*; /** @@ -62,7 +62,7 @@ public class TestMiniOzoneCluster { @BeforeClass public static void setup() { conf = new OzoneConfiguration(); - conf.set(OZONE_CONTAINER_METADATA_DIRS, + conf.set(OZONE_METADATA_DIRS, TEST_ROOT.toString()); conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); WRITE_TMP.mkdirs(); @@ -183,7 +183,7 @@ public class TestMiniOzoneCluster { Configuration ozoneConf = SCMTestUtils.getConf(); File testDir = PathUtils.getTestDir(TestOzoneContainer.class); ozoneConf.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath()); - 
ozoneConf.set(OZONE_CONTAINER_METADATA_DIRS, + ozoneConf.set(OZONE_METADATA_DIRS, TEST_ROOT.toString()); // Each instance of SM will create an ozone container diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index 10280a2a8db..d976aad4bb7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -102,7 +102,7 @@ public class TestDatanodeStateMachine { } conf.set(DFS_DATANODE_DATA_DIR_KEY, new File(testRoot, "data").getAbsolutePath()); - conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS, + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, new File(testRoot, "scm").getAbsolutePath()); path = Paths.get(path.toString(), TestDatanodeStateMachine.class.getSimpleName() + ".id").toString(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index 65cf654b036..3310801730c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -64,7 +64,7 @@ import java.util.UUID; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_CONTAINER_METADATA_DIRS; + .OZONE_METADATA_DIRS; import static org.apache.hadoop.ozone.container.common.SCMTestUtils .getDatanodeID; import static org.apache.hadoop.ozone.protocol.proto @@ -298,7 +298,7 @@ public class TestEndPoint { int 
rpcTimeout) throws Exception { Configuration conf = SCMTestUtils.getConf(); conf.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath()); - conf.set(OZONE_CONTAINER_METADATA_DIRS, testDir.getAbsolutePath()); + conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); // Mini Ozone cluster will not come up if the port is not true, since // Ratis will exit if the server port cannot be bound. We can remove this // hard coding once we fix the Ratis default behaviour. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java index 4ca981f5016..07fbcbab8a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java @@ -159,7 +159,7 @@ public class TestKSMSQLCli { public void testKSMDB() throws Exception { String dbOutPath = cluster.getDataDirectory() + "/out_sql.db"; - String dbRootPath = conf.get(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS); + String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); String dbPath = dbRootPath + "/" + KSM_DB_NAME; String[] args = {"-p", dbPath, "-o", dbOutPath}; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java index 6ffbb4ba1b6..1101f39da64 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java @@ -171,7 +171,7 @@ public class TestContainerSQLCli { @Test public void testConvertBlockDB() throws Exception { String dbOutPath = cluster.getDataDirectory() + "/out_sql.db"; - String dbRootPath = 
conf.get(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS); + String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); String dbPath = dbRootPath + "/" + BLOCK_DB; String[] args = {"-p", dbPath, "-o", dbOutPath}; @@ -193,7 +193,7 @@ public class TestContainerSQLCli { @Test public void testConvertNodepoolDB() throws Exception { String dbOutPath = cluster.getDataDirectory() + "/out_sql.db"; - String dbRootPath = conf.get(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS); + String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); String dbPath = dbRootPath + "/" + NODEPOOL_DB; String[] args = {"-p", dbPath, "-o", dbOutPath}; @@ -220,7 +220,7 @@ public class TestContainerSQLCli { @Test public void testConvertOpenContainerDB() throws Exception { String dbOutPath = cluster.getDataDirectory() + "/out_sql.db"; - String dbRootPath = conf.get(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS); + String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); String dbPath = dbRootPath + "/" + OPEN_CONTAINERS_DB; String[] args = {"-p", dbPath, "-o", dbOutPath}; @@ -254,7 +254,7 @@ public class TestContainerSQLCli { String dbOutPath = cluster.getDataDirectory() + "/out_sql.db"; // TODO : the following will fail due to empty Datanode list, need to fix. 
//String dnUUID = cluster.getDataNodes().get(0).getDatanodeUuid(); - String dbRootPath = conf.get(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS); + String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); String dbPath = dbRootPath + "/" + CONTAINER_DB; String[] args = {"-p", dbPath, "-o", dbOutPath}; Connection conn; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/block/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/block/TestBlockManager.java index 483b0c125fa..fa42f2180ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/block/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/block/TestBlockManager.java @@ -63,7 +63,7 @@ public class TestBlockManager { String path = GenericTestUtils .getTempPath(TestBlockManager.class.getSimpleName()); - conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS, path); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path); testDir = Paths.get(path).toFile(); boolean folderExisted = testDir.exists() || testDir.mkdirs(); if (!folderExisted) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/block/TestDeletedBlockLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/block/TestDeletedBlockLog.java index c1c87ab02c1..aef4f39e9e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/block/TestDeletedBlockLog.java @@ -40,7 +40,7 @@ import java.util.Random; import java.util.UUID; import java.util.stream.Collectors; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; import static 
org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY; /** @@ -58,7 +58,7 @@ public class TestDeletedBlockLog { TestDeletedBlockLog.class.getSimpleName()); conf = new OzoneConfiguration(); conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); - conf.set(OZONE_CONTAINER_METADATA_DIRS, testDir.getAbsolutePath()); + conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); deletedBlockLog = new DeletedBlockLogImpl(conf); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java index 79e6af6e28e..df13dec2c30 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java @@ -55,7 +55,7 @@ public class TestContainerMapping { testDir = GenericTestUtils .getTestDir(TestContainerMapping.class.getSimpleName()); - conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS, + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); boolean folderExisted = testDir.exists() || testDir.mkdirs(); if (!folderExisted) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerPlacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerPlacement.java index 430d34b6025..7a433f924a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerPlacement.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerPlacement.java @@ -115,7 +115,7 @@ public class TestContainerPlacement { final File testDir = PathUtils.getTestDir( TestContainerPlacement.class); - conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS, + 
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java index 30bc5a80657..c29616932be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java @@ -102,7 +102,7 @@ public class TestNodeManager { */ OzoneConfiguration getConf() { OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS, + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); conf.setLong(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100); return conf; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodePoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodePoolManager.java index 2fd2c8303bd..0debb1271fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodePoolManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodePoolManager.java @@ -57,7 +57,7 @@ public class TestSCMNodePoolManager { SCMNodePoolManager createNodePoolManager(OzoneConfiguration conf) throws IOException { - conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS, + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);