HDFS-13424. Ozone: Refactor MiniOzoneClassicCluster. Contributed by Nanda Kumar.

Authored by Mukul Kumar Singh on 2018-04-16 20:18:27 +05:30; committed by Owen O'Malley
parent 025058f251
commit ae8ac7f082
53 changed files with 1140 additions and 1145 deletions

View File: HddsDatanodeService.java

@@ -51,18 +51,42 @@ public class HddsDatanodeService implements ServicePlugin {
       HddsDatanodeService.class);
 
-  private Configuration conf;
+  private OzoneConfiguration conf;
   private DatanodeDetails datanodeDetails;
   private DatanodeStateMachine datanodeStateMachine;
   private List<ServicePlugin> plugins;
 
+  /**
+   * Default constructor.
+   */
+  public HddsDatanodeService() {
+    this(null);
+  }
+
+  /**
+   * Constructs {@link HddsDatanodeService} using the provided {@code conf}
+   * value.
+   *
+   * @param conf OzoneConfiguration
+   */
+  public HddsDatanodeService(Configuration conf) {
+    if (conf == null) {
+      this.conf = new OzoneConfiguration();
+    } else {
+      this.conf = new OzoneConfiguration(conf);
+    }
+  }
+
+  /**
+   * Starts HddsDatanode services.
+   *
+   * @param service The service instance invoking this method
+   */
   @Override
   public void start(Object service) {
     OzoneConfiguration.activate();
     if (service instanceof Configurable) {
       conf = new OzoneConfiguration(((Configurable) service).getConf());
-    } else {
-      conf = new OzoneConfiguration();
     }
     if (HddsUtils.isHddsEnabled(conf)) {
       try {
@@ -109,6 +133,11 @@ private DatanodeDetails initializeDatanodeDetails()
       return DatanodeDetails.newBuilder().setUuid(datanodeUuid).build();
     }
   }
 
+  /**
+   * Starts all the service plugins which are configured using
+   * OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY.
+   */
   private void startPlugins() {
     try {
       plugins = conf.getInstances(HDDS_DATANODE_PLUGINS_KEY,
@@ -130,7 +159,12 @@ private void startPlugins() {
     }
   }
 
-  public Configuration getConf() {
+  /**
+   * Returns the OzoneConfiguration used by this HddsDatanodeService.
+   *
+   * @return OzoneConfiguration
+   */
+  public OzoneConfiguration getConf() {
     return conf;
   }
 
   /**
@@ -149,8 +183,13 @@ public DatanodeStateMachine getDatanodeStateMachine() {
     return datanodeStateMachine;
   }
 
-  public void join() throws InterruptedException {
-    datanodeStateMachine.join();
+  public void join() {
+    try {
+      datanodeStateMachine.join();
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      LOG.info("Interrupted during StorageContainerManager join.");
+    }
   }
 
   @Override
@@ -172,20 +211,31 @@ public void stop() {
   @Override
   public void close() throws IOException {
+    if (plugins != null) {
+      for (ServicePlugin plugin : plugins) {
+        try {
+          plugin.close();
+        } catch (Throwable t) {
+          LOG.warn("ServicePlugin {} could not be closed", plugin, t);
+        }
+      }
+    }
   }
 
-  public static HddsDatanodeService createHddsDatanodeService(String args[]) {
-    StringUtils.startupShutdownMessage(HddsDatanodeService.class, args, LOG);
-    return new HddsDatanodeService();
+  public static HddsDatanodeService createHddsDatanodeService(
+      Configuration conf) {
+    return new HddsDatanodeService(conf);
  }
 
  public static void main(String args[]) {
    try {
-      HddsDatanodeService hddsDatanodeService = createHddsDatanodeService(args);
+      StringUtils.startupShutdownMessage(HddsDatanodeService.class, args, LOG);
+      HddsDatanodeService hddsDatanodeService =
+          createHddsDatanodeService(new OzoneConfiguration());
      hddsDatanodeService.start(null);
      hddsDatanodeService.join();
    } catch (Throwable e) {
-      LOG.error("Exception in while starting HddsDatanodeService.", e);
+      LOG.error("Exception in HddsDatanodeService.", e);
      terminate(1, e);
    }
  }
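
The net effect of the hunks above is that HddsDatanodeService now takes its configuration through the constructor instead of deriving it inside start(). A minimal sketch of driving the refactored service directly, using only names that appear in this diff (the wrapper class itself is illustrative, not part of the commit):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.HddsDatanodeService;

// Illustrative driver; not part of the commit.
public class HddsDatanodeSketch {
  public static void main(String[] args) {
    // The new constructor copies the supplied conf into an
    // OzoneConfiguration, so callers control the datanode's settings.
    OzoneConfiguration conf = new OzoneConfiguration();
    HddsDatanodeService datanode =
        HddsDatanodeService.createHddsDatanodeService(conf);

    // start(null): the argument is not Configurable, so the
    // constructor-supplied configuration is used as-is.
    datanode.start(null);

    // join() no longer throws InterruptedException; it restores the
    // thread's interrupt flag internally instead.
    datanode.join();
  }
}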

View File: pom.xml

@@ -61,7 +61,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>test</scope>
       <type>test-jar</type>
     </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.openjdk.jmh</groupId>
       <artifactId>jmh-core</artifactId>

View File: TestContainerStateManager.java

@@ -18,15 +18,10 @@
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
-import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
-import org.apache.hadoop.hdds.scm.container.Mapping;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.StorageContainerManager;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.junit.After;
@@ -54,10 +49,10 @@ public class TestContainerStateManager {
 
   @Before
-  public void setup() throws IOException {
+  public void setup() throws Exception {
     conf = new OzoneConfiguration();
-    cluster = new MiniOzoneClassicCluster.Builder(conf).numDataNodes(1)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
+    cluster.waitForClusterToBeReady();
     xceiverClientManager = new XceiverClientManager(conf);
     scm = cluster.getStorageContainerManager();
     scmContainerMapping = scm.getScmContainerManager();
@@ -68,7 +63,6 @@ public void setup() throws IOException {
   public void cleanUp() {
     if (cluster != null) {
       cluster.shutdown();
-      cluster.close();
     }
   }
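
The same setup/teardown migration repeats across the test files below. Written out in one place as a sketch (the old API shown in comments is MiniOzoneClassicCluster, which this commit deletes):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;

// Illustrative helper; not part of the commit.
final class ClusterMigrationSketch {
  static MiniOzoneCluster startOneNodeCluster() throws Exception {
    // Old API (deleted by this commit):
    //   new MiniOzoneClassicCluster.Builder(conf).numDataNodes(1)
    //       .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
    // New API: no handler type, readiness is an explicit call, and
    // shutdown() (without close()) is the single teardown step.
    OzoneConfiguration conf = new OzoneConfiguration();
    MiniOzoneCluster cluster =
        MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
    cluster.waitForClusterToBeReady();
    return cluster;
  }
}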

View File: MiniOzoneClassicCluster.java (deleted)

@@ -1,616 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone;
import java.io.File;
import java.util.Optional;
import com.google.common.base.Preconditions;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.container.common
.statemachine.DatanodeStateMachine;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
import org.apache.hadoop.ozone.ksm.KeySpaceManager;
import org.apache.hadoop.hdds.scm.SCMStorage;
import org.apache.hadoop.ozone.ksm.KSMStorage;
import org.apache.hadoop.ozone.web.client.OzoneRestClient;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
import org.apache.hadoop.hdds.scm.StorageContainerManager;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Random;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.ozone.MiniOzoneTestHelper.*;
import static org.apache.hadoop.ozone.OzoneConfigKeys
.DFS_CONTAINER_IPC_PORT;
import static org.apache.hadoop.ozone.OzoneConfigKeys
.DFS_CONTAINER_IPC_RANDOM_PORT;
import static org.apache.hadoop.ozone.OzoneConfigKeys
.DFS_CONTAINER_RATIS_IPC_PORT;
import static org.apache.hadoop.ozone.OzoneConfigKeys
.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
.HEALTHY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
import static org.junit.Assert.assertFalse;
/**
* MiniOzoneCluster creates a complete in-process Ozone cluster suitable for
* running tests. The cluster consists of a StorageContainerManager, Namenode
* and multiple DataNodes. This class subclasses {@link MiniDFSCluster} for
* convenient reuse of logic for starting DataNodes.
*/
@InterfaceAudience.Private
public final class MiniOzoneClassicCluster extends MiniDFSCluster
implements MiniOzoneCluster {
private static final Logger LOG =
LoggerFactory.getLogger(MiniOzoneClassicCluster.class);
private static final String USER_AUTH = "hdfs";
private final OzoneConfiguration conf;
private final StorageContainerManager scm;
private KeySpaceManager ksm;
private final Path tempPath;
/**
* Creates a new MiniOzoneCluster.
*
* @param builder cluster builder
* @param scm StorageContainerManager, already running
* @throws IOException if there is an I/O error
*/
private MiniOzoneClassicCluster(Builder builder, StorageContainerManager scm,
KeySpaceManager ksm)
throws IOException {
super(builder);
this.conf = builder.conf;
this.scm = scm;
this.ksm = ksm;
tempPath = Paths.get(builder.getPath(), builder.getRunID());
}
@Override
protected void setupDatanodeAddress(
int i, Configuration dnConf, boolean setupHostsFile,
boolean checkDnAddrConf) throws IOException {
super.setupDatanodeAddress(i, dnConf, setupHostsFile, checkDnAddrConf);
String path = GenericTestUtils.getTempPath(
MiniOzoneClassicCluster.class.getSimpleName() + "datanode");
dnConf.setStrings(ScmConfigKeys.OZONE_SCM_DATANODE_ID,
path + "/" + i + "-datanode.id");
setConf(i, dnConf, OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
getInstanceStorageDir(i, -1).getCanonicalPath());
String containerMetaDirs = dnConf.get(
OzoneConfigKeys.OZONE_METADATA_DIRS) + "-dn-" + i;
Path containerMetaDirPath = Paths.get(containerMetaDirs);
setConf(i, dnConf, OzoneConfigKeys.OZONE_METADATA_DIRS,
containerMetaDirs);
Path containerRootPath =
containerMetaDirPath.resolve(OzoneConsts.CONTAINER_ROOT_PREFIX);
Files.createDirectories(containerRootPath);
}
static void setConf(int i, Configuration conf, String key, String value) {
conf.set(key, value);
LOG.info("dn{}: set {} = {}", i, key, value);
}
@Override
public void close() {
shutdown();
try {
FileUtils.deleteDirectory(tempPath.toFile());
} catch (IOException e) {
String errorMessage = "Cleaning up metadata directories failed." + e;
assertFalse(errorMessage, true);
}
try {
final String localStorage =
conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
FileUtils.deleteDirectory(new File(localStorage));
} catch (IOException e) {
LOG.error("Cleaning up local storage failed", e);
}
}
@Override
public boolean restartDataNode(int i) throws IOException {
return restartDataNode(i, true);
}
/*
* Restart a particular datanode, wait for it to become active
*/
@Override
public boolean restartDataNode(int i, boolean keepPort) throws IOException {
LOG.info("restarting datanode:{} keepPort:{}", i, keepPort);
if (keepPort) {
DataNodeProperties dnProp = dataNodes.get(i);
OzoneContainer container = getOzoneContainer(dnProp
.getDatanode());
Configuration config = dnProp.getConf();
int currentPort = container.getContainerServerPort();
config.setInt(DFS_CONTAINER_IPC_PORT, currentPort);
config.setBoolean(DFS_CONTAINER_IPC_RANDOM_PORT, false);
int ratisPort = container.getRatisContainerServerPort();
config.setInt(DFS_CONTAINER_RATIS_IPC_PORT, ratisPort);
config.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, false);
}
boolean status = super.restartDataNode(i, keepPort);
try {
this.waitActive();
this.waitFirstBRCompleted(0, 3000);
waitDatanodeOzoneReady(i);
} catch (TimeoutException | InterruptedException e) {
Thread.interrupted();
}
return status;
}
@Override
public void shutdown() {
super.shutdown();
LOG.info("Shutting down the Mini Ozone Cluster");
if (ksm != null) {
LOG.info("Shutting down the keySpaceManager");
ksm.stop();
ksm.join();
}
if (scm != null) {
LOG.info("Shutting down the StorageContainerManager");
scm.stop();
scm.join();
}
}
@Override
public StorageContainerManager getStorageContainerManager() {
return this.scm;
}
public OzoneConfiguration getConf() {
return conf;
}
@Override
public KeySpaceManager getKeySpaceManager() {
return this.ksm;
}
/**
* Creates an {@link OzoneRestClient} connected to this cluster's REST
* service. Callers take ownership of the client and must close it when done.
*
* @return OzoneRestClient connected to this cluster's REST service
* @throws OzoneException if Ozone encounters an error creating the client
*/
@Override
public OzoneRestClient createOzoneRestClient() throws OzoneException {
Preconditions.checkState(!getDataNodes().isEmpty(),
"Cannot create OzoneRestClient if the cluster has no DataNodes.");
// An Ozone request may originate at any DataNode, so pick one at random.
int dnIndex = new Random().nextInt(getDataNodes().size());
String uri = String.format("http://127.0.0.1:%d",
MiniOzoneTestHelper.getOzoneRestPort(getDataNodes().get(dnIndex)));
LOG.info("Creating Ozone client to DataNode {} with URI {} and user {}",
dnIndex, uri, USER_AUTH);
try {
return new OzoneRestClient(uri, USER_AUTH);
} catch (URISyntaxException e) {
// We control the REST service URI, so it should never be invalid.
throw new IllegalStateException("Unexpected URISyntaxException", e);
}
}
/**
* Creates an RPC proxy connected to this cluster's StorageContainerManager
* for accessing container location information. Callers take ownership of
* the proxy and must close it when done.
*
* @return RPC proxy for accessing container location information
* @throws IOException if there is an I/O error
*/
@Override
public StorageContainerLocationProtocolClientSideTranslatorPB
createStorageContainerLocationClient() throws IOException {
long version = RPC.getProtocolVersion(
StorageContainerLocationProtocolPB.class);
InetSocketAddress address = scm.getClientRpcAddress();
LOG.info(
"Creating StorageContainerLocationProtocol RPC client with address {}",
address);
return new StorageContainerLocationProtocolClientSideTranslatorPB(
RPC.getProxy(StorageContainerLocationProtocolPB.class, version,
address, UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf),
Client.getRpcTimeout(conf)));
}
/**
* Waits for the Ozone cluster to be ready for processing requests.
*/
@Override
public void waitOzoneReady() throws TimeoutException, InterruptedException {
GenericTestUtils.waitFor(() -> {
final int healthy = scm.getNodeCount(HEALTHY);
final boolean isReady = healthy >= numDataNodes;
LOG.info("{}. Got {} of {} DN Heartbeats.",
isReady? "Cluster is ready" : "Waiting for cluster to be ready",
healthy, numDataNodes);
return isReady;
}, 1000, 60 * 1000); //wait for 1 min.
}
/**
* Waits for a particular Datanode to be ready for processing ozone requests.
*/
@Override
public void waitDatanodeOzoneReady(int dnIndex)
throws TimeoutException, InterruptedException {
GenericTestUtils.waitFor(() -> {
DatanodeStateMachine.DatanodeStates state =
MiniOzoneTestHelper.getStateMachine(dataNodes.get(dnIndex)
.getDatanode()).getContext().getState();
final boolean rebootComplete =
(state == DatanodeStateMachine.DatanodeStates.RUNNING);
LOG.info("{} Current state:{}", rebootComplete, state);
return rebootComplete;
}, 1000, 60 * 1000); //wait for 1 min.
}
/**
* Waits for SCM to be out of Chill Mode. Many tests can be run iff we are out
* of Chill mode.
*
* @throws TimeoutException
* @throws InterruptedException
*/
@Override
public void waitTobeOutOfChillMode() throws TimeoutException,
InterruptedException {
GenericTestUtils.waitFor(() -> {
if (scm.getScmNodeManager().isOutOfChillMode()) {
return true;
}
LOG.info("Waiting for cluster to be ready. No datanodes found");
return false;
}, 100, 45000);
}
@Override
public void waitForHeartbeatProcessed() throws TimeoutException,
InterruptedException {
GenericTestUtils.waitFor(() ->
scm.getScmNodeManager().waitForHeartbeatProcessed(), 100,
4 * 1000);
GenericTestUtils.waitFor(() ->
scm.getScmNodeManager().getStats().getCapacity().get() > 0, 100,
4 * 1000);
}
/**
* Builder for configuring the MiniOzoneCluster to run.
*/
public static class Builder
extends MiniDFSCluster.Builder {
private final OzoneConfiguration conf;
private static final int DEFAULT_HB_SECONDS = 1;
private static final int DEFAULT_PROCESSOR_MS = 100;
private final String path;
private final UUID runID;
private Optional<String> ozoneHandlerType = java.util.Optional.empty();
private Optional<Boolean> enableTrace = Optional.of(false);
private Optional<Integer> hbSeconds = Optional.empty();
private Optional<Integer> hbProcessorInterval = Optional.empty();
private Optional<String> scmMetadataDir = Optional.empty();
private Optional<String> clusterId = Optional.empty();
private Optional<String> scmId = Optional.empty();
private Optional<String> ksmId = Optional.empty();
private Boolean ozoneEnabled = true;
private Boolean waitForChillModeFinish = true;
private Boolean randomContainerPort = true;
// Use relative smaller number of handlers for testing
private int numOfKsmHandlers = 20;
private int numOfScmHandlers = 20;
/**
* Creates a new Builder.
*
* @param conf configuration
*/
public Builder(OzoneConfiguration conf) {
super(conf);
// Mini Ozone cluster will not come up if the port is not true, since
// Ratis will exit if the server port cannot be bound. We can remove this
// hard coding once we fix the Ratis default behaviour.
conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
true);
this.conf = conf;
path = GenericTestUtils.getTempPath(
MiniOzoneClassicCluster.class.getSimpleName() +
UUID.randomUUID().toString());
runID = UUID.randomUUID();
}
public Builder setRandomContainerPort(boolean randomPort) {
this.randomContainerPort = randomPort;
return this;
}
@Override
public Builder numDataNodes(int val) {
super.numDataNodes(val);
return this;
}
@Override
public Builder storageCapacities(long[] capacities) {
super.storageCapacities(capacities);
return this;
}
public Builder setHandlerType(String handler) {
ozoneHandlerType = Optional.of(handler);
return this;
}
public Builder setTrace(Boolean trace) {
enableTrace = Optional.of(trace);
return this;
}
public Builder setSCMHBInterval(int seconds) {
hbSeconds = Optional.of(seconds);
return this;
}
public Builder setSCMHeartbeatProcessingInterval(int milliseconds) {
hbProcessorInterval = Optional.of(milliseconds);
return this;
}
public Builder setSCMMetadataDir(String scmMetadataDirPath) {
scmMetadataDir = Optional.of(scmMetadataDirPath);
return this;
}
public Builder disableOzone() {
ozoneEnabled = false;
return this;
}
public Builder doNotwaitTobeOutofChillMode() {
waitForChillModeFinish = false;
return this;
}
public Builder setNumOfKSMHandlers(int numOfHandlers) {
numOfKsmHandlers = numOfHandlers;
return this;
}
public Builder setNumOfSCMHandlers(int numOfHandlers) {
numOfScmHandlers = numOfHandlers;
return this;
}
public Builder setClusterId(String cId) {
clusterId = Optional.of(cId);
return this;
}
public Builder setScmId(String sId) {
scmId = Optional.of(sId);
return this;
}
public Builder setKsmId(String kId) {
ksmId = Optional.of(kId);
return this;
}
public String getPath() {
return path;
}
public String getRunID() {
return runID.toString();
}
@Override
public MiniOzoneClassicCluster build() throws IOException {
configureHandler();
configureTrace();
configureSCMheartbeat();
configScmMetadata();
initializeScm();
initializeKSM();
conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
conf.set(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY, "127.0.0.1:0");
conf.set(KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
conf.set(ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY, "127.0.0.1:0");
conf.set(DFS_DATANODE_PLUGINS_KEY,
"org.apache.hadoop.ozone.HddsDatanodeService");
conf.set(HDDS_DATANODE_PLUGINS_KEY,
"org.apache.hadoop.ozone.web.OzoneHddsDatanodeService");
// Configure KSM and SCM handlers
conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numOfScmHandlers);
conf.setInt(KSMConfigKeys.OZONE_KSM_HANDLER_COUNT_KEY, numOfKsmHandlers);
// Use random ports for ozone containers in mini cluster,
// in order to launch multiple container servers per node.
conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
randomContainerPort);
StorageContainerManager scm = StorageContainerManager.createSCM(
null, conf);
scm.start();
KeySpaceManager ksm = KeySpaceManager.createKSM(null, conf);
ksm.start();
String addressString = scm.getDatanodeRpcAddress().getHostString() +
":" + scm.getDatanodeRpcAddress().getPort();
conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, addressString);
MiniOzoneClassicCluster cluster =
new MiniOzoneClassicCluster(this, scm, ksm);
try {
cluster.waitOzoneReady();
if (waitForChillModeFinish) {
cluster.waitTobeOutOfChillMode();
}
cluster.waitForHeartbeatProcessed();
} catch (Exception e) {
// A workaround to propagate MiniOzoneCluster failures without
// changing the method signature (which would require cascading
// changes to hundreds of unrelated HDFS tests).
throw new IOException("Failed to start MiniOzoneCluster", e);
}
return cluster;
}
private void configScmMetadata() throws IOException {
if (scmMetadataDir.isPresent()) {
// if user specifies a path in the test, it is assumed that user takes
// care of creating and cleaning up that directory after the tests.
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
scmMetadataDir.get());
return;
}
// If user has not specified a path, create a UUID for this miniCluster
// and create SCM under that directory.
Path scmPath = Paths.get(path, runID.toString(), "cont-meta");
Files.createDirectories(scmPath);
Path containerPath = scmPath.resolve(OzoneConsts.CONTAINER_ROOT_PREFIX);
Files.createDirectories(containerPath);
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath
.toString());
}
private void initializeScm() throws IOException {
SCMStorage scmStore = new SCMStorage(conf);
if (!clusterId.isPresent()) {
clusterId = Optional.of(runID.toString());
}
scmStore.setClusterId(clusterId.get());
if (!scmId.isPresent()) {
scmId = Optional.of(UUID.randomUUID().toString());
}
scmStore.setScmId(scmId.get());
scmStore.initialize();
}
private void initializeKSM() throws IOException {
KSMStorage ksmStore = new KSMStorage(conf);
ksmStore.setClusterId(clusterId.get());
ksmStore.setScmId(scmId.get());
ksmStore.setKsmId(ksmId.orElse(UUID.randomUUID().toString()));
ksmStore.initialize();
}
private void configureHandler() {
conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, this.ozoneEnabled);
if (!ozoneHandlerType.isPresent()) {
throw new IllegalArgumentException(
"The Ozone handler type must be specified.");
} else {
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
ozoneHandlerType.get());
}
}
private void configureTrace() {
if (enableTrace.isPresent()) {
conf.setBoolean(OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY,
enableTrace.get());
GenericTestUtils.setRootLogLevel(Level.TRACE);
}
GenericTestUtils.setRootLogLevel(Level.INFO);
}
private void configureSCMheartbeat() {
if (hbSeconds.isPresent()) {
conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
hbSeconds.get(), TimeUnit.SECONDS);
} else {
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
DEFAULT_HB_SECONDS,
TimeUnit.SECONDS);
}
if (hbProcessorInterval.isPresent()) {
conf.setTimeDuration(
ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
hbProcessorInterval.get(),
TimeUnit.MILLISECONDS);
} else {
conf.setTimeDuration(
ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
DEFAULT_PROCESSOR_MS,
TimeUnit.MILLISECONDS);
}
}
}
}

View File: MiniOzoneCluster.java

@@ -17,46 +17,303 @@
  */
 package org.apache.hadoop.ozone;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.StorageContainerManager;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.ksm.KeySpaceManager;
-import org.apache.hadoop.ozone.web.client.OzoneRestClient;
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.test.GenericTestUtils;
 
-import java.io.Closeable;
 import java.io.IOException;
+import java.util.List;
+import java.util.Optional;
+import java.util.UUID;
 import java.util.concurrent.TimeoutException;
 
 /**
  * Interface used for MiniOzoneClusters.
  */
-public interface MiniOzoneCluster extends AutoCloseable, Closeable {
-
-  void close();
+public interface MiniOzoneCluster {
 
-  boolean restartDataNode(int i) throws IOException;
+  /**
+   * Returns the configuration object associated with the MiniOzoneCluster.
+   *
+   * @return Configuration
+   */
+  Configuration getConf();
 
-  boolean restartDataNode(int i, boolean keepPort) throws IOException;
+  /**
+   * Waits for the cluster to be ready, this call blocks till all the
+   * configured {@link HddsDatanodeService} registers with
+   * {@link StorageContainerManager}.
+   *
+   * @throws TimeoutException In case of timeout
+   * @throws InterruptedException In case of interrupt while waiting
+   */
+  void waitForClusterToBeReady() throws TimeoutException, InterruptedException;
 
-  void shutdown();
+  /**
+   * Waits/blocks till the cluster is out of chill mode.
+   *
+   * @throws TimeoutException TimeoutException In case of timeout
+   * @throws InterruptedException In case of interrupt while waiting
+   */
+  void waitTobeOutOfChillMode() throws TimeoutException, InterruptedException;
 
+  /**
+   * Returns {@link StorageContainerManager} associated with this
+   * {@link MiniOzoneCluster} instance.
+   *
+   * @return {@link StorageContainerManager} instance
+   */
   StorageContainerManager getStorageContainerManager();
 
+  /**
+   * Returns {@link KeySpaceManager} associated with this
+   * {@link MiniOzoneCluster} instance.
+   *
+   * @return {@link KeySpaceManager} instance
+   */
   KeySpaceManager getKeySpaceManager();
 
-  OzoneRestClient createOzoneRestClient() throws OzoneException;
+  /**
+   * Returns the list of {@link HddsDatanodeService} which are part of this
+   * {@link MiniOzoneCluster} instance.
+   *
+   * @return List of {@link HddsDatanodeService}
+   */
+  List<HddsDatanodeService> getHddsDatanodes();
+
+  /**
+   * Returns an {@link OzoneClient} to access the {@link MiniOzoneCluster}.
+   *
+   * @return {@link OzoneClient}
+   * @throws IOException
+   */
+  OzoneClient getClient() throws IOException;
+
+  /**
+   * Returns an RPC based {@link OzoneClient} to access the
+   * {@link MiniOzoneCluster}.
+   *
+   * @return {@link OzoneClient}
+   * @throws IOException
+   */
+  OzoneClient getRpcClient() throws IOException;
+
+  /**
+   * Returns an REST based {@link OzoneClient} to access the
+   * {@link MiniOzoneCluster}.
+   *
+   * @return {@link OzoneClient}
+   * @throws IOException
+   */
+  OzoneClient getRestClient() throws IOException;
 
+  /**
+   * Returns StorageContainerLocationClient to communicate with
+   * {@link StorageContainerManager} associated with the MiniOzoneCluster.
+   *
+   * @return StorageContainerLocation Client
+   * @throws IOException
+   */
   StorageContainerLocationProtocolClientSideTranslatorPB
-      createStorageContainerLocationClient() throws IOException;
+      getStorageContainerLocationClient() throws IOException;
 
-  void waitOzoneReady() throws TimeoutException, InterruptedException;
+  /**
+   * Restarts StorageContainerManager instance.
+   *
+   * @throws IOException
+   */
+  void restartStorageContainerManager() throws IOException;
 
-  void waitDatanodeOzoneReady(int dnIndex)
-      throws TimeoutException, InterruptedException;
+  /**
+   * Restarts KeySpaceManager instance.
+   *
+   * @throws IOException
+   */
+  void restartKeySpaceManager() throws IOException;
 
-  void waitTobeOutOfChillMode() throws TimeoutException,
-      InterruptedException;
+  /**
+   * Restart a particular HddsDatanode.
+   *
+   * @param i index of HddsDatanode in the MiniOzoneCluster
+   */
+  void restartHddsDatanode(int i);
 
-  void waitForHeartbeatProcessed() throws TimeoutException,
-      InterruptedException;
+  /**
+   * Shutdown a particular HddsDatanode.
+   *
+   * @param i index of HddsDatanode in the MiniOzoneCluster
+   */
+  void shutdownHddsDatanode(int i);
+
+  /**
+   * Shutdown the MiniOzoneCluster.
+   */
+  void shutdown();
+
+  /**
+   * Returns the Builder to construct MiniOzoneCluster.
+   *
+   * @param conf OzoneConfiguration
+   *
+   * @return MiniOzoneCluster builder
+   */
+  static Builder newBuilder(OzoneConfiguration conf) {
+    return new MiniOzoneClusterImpl.Builder(conf);
+  }
+
+  abstract class Builder {
+
+    protected static final int DEFAULT_HB_INTERVAL_MS = 1000;
+    protected static final int DEFAULT_HB_PROCESSOR_INTERVAL_MS = 100;
+
+    protected final OzoneConfiguration conf;
+    protected final String path;
+
+    protected String clusterId;
+
+    protected Optional<Boolean> enableTrace = Optional.of(false);
+    protected Optional<Integer> hbInterval = Optional.empty();
+    protected Optional<Integer> hbProcessorInterval = Optional.empty();
+    protected Optional<String> scmId = Optional.empty();
+    protected Optional<String> ksmId = Optional.empty();
+
+    protected Boolean ozoneEnabled = true;
+    protected Boolean randomContainerPort = true;
+
+    // Use relative smaller number of handlers for testing
+    protected int numOfKsmHandlers = 20;
+    protected int numOfScmHandlers = 20;
+    protected int numOfDatanodes = 1;
+
+    protected Builder(OzoneConfiguration conf) {
+      this.conf = conf;
+      this.clusterId = UUID.randomUUID().toString();
+      this.path = GenericTestUtils.getTempPath(
+          MiniOzoneClusterImpl.class.getSimpleName() + "-" + clusterId);
+    }
+
+    /**
+     * Sets the cluster Id.
+     *
+     * @param id cluster Id
+     *
+     * @return MiniOzoneCluster.Builder
+     */
+    public Builder setClusterId(String id) {
+      clusterId = id;
+      return this;
+    }
+
+    /**
+     * Sets the SCM id.
+     *
+     * @param id SCM Id
+     *
+     * @return MiniOzoneCluster.Builder
+     */
+    public Builder setScmId(String id) {
+      scmId = Optional.of(id);
+      return this;
+    }
+
+    /**
+     * Sets the KSM id.
+     *
+     * @param id KSM Id
+     *
+     * @return MiniOzoneCluster.Builder
+     */
+    public Builder setKsmId(String id) {
+      ksmId = Optional.of(id);
+      return this;
+    }
+
+    /**
+     * If set to true container service will be started in a random port.
+     *
+     * @param randomPort enable random port
+     *
+     * @return MiniOzoneCluster.Builder
+     */
+    public Builder setRandomContainerPort(boolean randomPort) {
+      randomContainerPort = randomPort;
+      return this;
+    }
+
+    /**
+     * Sets the number of HddsDatanodes to be started as part of
+     * MiniOzoneCluster.
+     *
+     * @param val number of datanodes
+     *
+     * @return MiniOzoneCluster.Builder
+     */
+    public Builder setNumDatanodes(int val) {
+      numOfDatanodes = val;
+      return this;
+    }
+
+    /**
+     * Sets the number of HeartBeat Interval of Datanodes, the value should be
+     * in MilliSeconds.
+     *
+     * @param val HeartBeat interval in milliseconds
+     *
+     * @return MiniOzoneCluster.Builder
+     */
+    public Builder setHbInterval(int val) {
+      hbInterval = Optional.of(val);
+      return this;
+    }
+
+    /**
+     * Sets the number of HeartBeat Processor Interval of Datanodes,
+     * the value should be in MilliSeconds.
+     *
+     * @param val HeartBeat Processor interval in milliseconds
+     *
+     * @return MiniOzoneCluster.Builder
+     */
+    public Builder setHbProcessorInterval(int val) {
+      hbProcessorInterval = Optional.of(val);
+      return this;
+    }
+
+    /**
+     * When set to true, enables trace level logging.
+     *
+     * @param trace true or false
+     *
+     * @return MiniOzoneCluster.Builder
+     */
+    public Builder setTrace(Boolean trace) {
+      enableTrace = Optional.of(trace);
+      return this;
+    }
+
+    /**
+     * Modifies the configuration such that Ozone will be disabled.
+     *
+     * @return MiniOzoneCluster.Builder
+     */
+    public Builder disableOzone() {
+      ozoneEnabled = false;
+      return this;
+    }
+
+    /**
+     * Constructs and returns MiniOzoneCluster.
+     *
+     * @return {@link MiniOzoneCluster}
+     *
+     * @throws IOException
+     */
+    public abstract MiniOzoneCluster build() throws IOException;
+  }
 }
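
For a reader migrating tests, a hypothetical JUnit 4 test showing how the new interface and builder fit together. Only methods declared above are used; the class and test names are invented for illustration:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;

// Hypothetical test class illustrating the new builder; not in the commit.
public class ExampleMiniOzoneTest {

  private MiniOzoneCluster cluster;

  @Before
  public void setUp() throws Exception {
    cluster = MiniOzoneCluster.newBuilder(new OzoneConfiguration())
        .setNumDatanodes(3)          // default is 1
        .setHbInterval(1000)         // heartbeat interval, in milliseconds
        .setHbProcessorInterval(100) // SCM heartbeat processor interval, ms
        .build();
    // Blocks until all three datanodes have registered with SCM.
    cluster.waitForClusterToBeReady();
  }

  @After
  public void tearDown() {
    if (cluster != null) {
      cluster.shutdown(); // the interface is no longer Closeable
    }
  }

  @Test
  public void clusterExposesItsDaemons() {
    assertEquals(3, cluster.getHddsDatanodes().size());
  }
}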

View File: MiniOzoneClusterImpl.java (new file)

@@ -0,0 +1,425 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
import org.apache.hadoop.ozone.ksm.KeySpaceManager;
import org.apache.hadoop.hdds.scm.SCMStorage;
import org.apache.hadoop.ozone.ksm.KSMStorage;
import org.apache.hadoop.ozone.web.client.OzoneRestClient;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
import org.apache.hadoop.hdds.scm.StorageContainerManager;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
.HEALTHY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
/**
* MiniOzoneCluster creates a complete in-process Ozone cluster suitable for
* running tests. The cluster consists of a KeySpaceManager,
* StorageContainerManager and multiple DataNodes.
*/
@InterfaceAudience.Private
public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
private static final Logger LOG =
LoggerFactory.getLogger(MiniOzoneClusterImpl.class);
private final OzoneConfiguration conf;
private final StorageContainerManager scm;
private final KeySpaceManager ksm;
private final List<HddsDatanodeService> hddsDatanodes;
/**
* Creates a new MiniOzoneCluster.
*
* @throws IOException if there is an I/O error
*/
private MiniOzoneClusterImpl(OzoneConfiguration conf,
KeySpaceManager ksm,
StorageContainerManager scm,
List<HddsDatanodeService> hddsDatanodes) {
this.conf = conf;
this.ksm = ksm;
this.scm = scm;
this.hddsDatanodes = hddsDatanodes;
}
public OzoneConfiguration getConf() {
return conf;
}
/**
* Waits for the Ozone cluster to be ready for processing requests.
*/
@Override
public void waitForClusterToBeReady()
throws TimeoutException, InterruptedException {
GenericTestUtils.waitFor(() -> {
final int healthy = scm.getNodeCount(HEALTHY);
final boolean isReady = healthy == hddsDatanodes.size();
LOG.info("{}. Got {} of {} DN Heartbeats.",
isReady? "Cluster is ready" : "Waiting for cluster to be ready",
healthy, hddsDatanodes.size());
return isReady;
}, 1000, 60 * 1000); //wait for 1 min.
}
/**
* Waits for SCM to be out of Chill Mode. Many tests can be run iff we are out
* of Chill mode.
*
* @throws TimeoutException
* @throws InterruptedException
*/
@Override
public void waitTobeOutOfChillMode()
throws TimeoutException, InterruptedException {
GenericTestUtils.waitFor(() -> {
if (scm.getScmNodeManager().isOutOfChillMode()) {
return true;
}
LOG.info("Waiting for cluster to be ready. No datanodes found");
return false;
}, 100, 45000);
}
@Override
public StorageContainerManager getStorageContainerManager() {
return this.scm;
}
@Override
public KeySpaceManager getKeySpaceManager() {
return this.ksm;
}
@Override
public List<HddsDatanodeService> getHddsDatanodes() {
return hddsDatanodes;
}
@Override
public OzoneClient getClient() throws IOException {
return OzoneClientFactory.getClient(conf);
}
@Override
public OzoneClient getRpcClient() throws IOException {
return OzoneClientFactory.getRpcClient(conf);
}
/**
* Creates an {@link OzoneRestClient} connected to this cluster's REST
* service. Callers take ownership of the client and must close it when done.
*
* @return OzoneRestClient connected to this cluster's REST service
* @throws OzoneException if Ozone encounters an error creating the client
*/
@Override
public OzoneClient getRestClient() throws IOException {
return OzoneClientFactory.getRestClient(conf);
}
/**
* Returns an RPC proxy connected to this cluster's StorageContainerManager
* for accessing container location information. Callers take ownership of
* the proxy and must close it when done.
*
* @return RPC proxy for accessing container location information
* @throws IOException if there is an I/O error
*/
@Override
public StorageContainerLocationProtocolClientSideTranslatorPB
getStorageContainerLocationClient() throws IOException {
long version = RPC.getProtocolVersion(
StorageContainerLocationProtocolPB.class);
InetSocketAddress address = scm.getClientRpcAddress();
LOG.info(
"Creating StorageContainerLocationProtocol RPC client with address {}",
address);
return new StorageContainerLocationProtocolClientSideTranslatorPB(
RPC.getProxy(StorageContainerLocationProtocolPB.class, version,
address, UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf),
Client.getRpcTimeout(conf)));
}
@Override
public void restartStorageContainerManager() throws IOException {
scm.stop();
scm.start();
}
@Override
public void restartKeySpaceManager() throws IOException {
ksm.stop();
ksm.start();
}
@Override
public void restartHddsDatanode(int i) {
HddsDatanodeService datanodeService = hddsDatanodes.get(i);
datanodeService.stop();
datanodeService.join();
datanodeService.start(null);
}
@Override
public void shutdownHddsDatanode(int i) {
hddsDatanodes.get(i).stop();
}
@Override
public void shutdown() {
try {
LOG.info("Shutting down the Mini Ozone Cluster");
File baseDir = new File(GenericTestUtils.getTempPath(
MiniOzoneClusterImpl.class.getSimpleName() + "-" +
scm.getScmInfo().getClusterId()));
FileUtils.deleteDirectory(baseDir);
if (ksm != null) {
LOG.info("Shutting down the keySpaceManager");
ksm.stop();
ksm.join();
}
if (scm != null) {
LOG.info("Shutting down the StorageContainerManager");
scm.stop();
scm.join();
}
if (!hddsDatanodes.isEmpty()) {
LOG.info("Shutting down the HddsDatanodes");
for (HddsDatanodeService hddsDatanode : hddsDatanodes) {
hddsDatanode.stop();
hddsDatanode.join();
}
}
} catch (IOException e) {
LOG.error("Exception while shutting down the cluster.", e);
}
}
/**
* Builder for configuring the MiniOzoneCluster to run.
*/
public static class Builder extends MiniOzoneCluster.Builder {
/**
* Creates a new Builder.
*
* @param conf configuration
*/
public Builder(OzoneConfiguration conf) {
super(conf);
}
@Override
public MiniOzoneCluster build() throws IOException {
DefaultMetricsSystem.setMiniClusterMode(true);
initializeConfiguration();
StorageContainerManager scm = createSCM();
scm.start();
KeySpaceManager ksm = createKSM();
ksm.start();
List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(scm);
hddsDatanodes.forEach((datanode) -> datanode.start(null));
return new MiniOzoneClusterImpl(conf, ksm, scm, hddsDatanodes);
}
/**
* Initializes the configuration required for starting MiniOzoneCluster.
*
* @throws IOException
*/
private void initializeConfiguration() throws IOException {
conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, ozoneEnabled);
Path metaDir = Paths.get(path, "ozone-meta");
Files.createDirectories(metaDir);
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, metaDir.toString());
configureTrace();
}
/**
* Creates a new StorageContainerManager instance.
*
* @return {@link StorageContainerManager}
*
* @throws IOException
*/
private StorageContainerManager createSCM() throws IOException {
configureSCM();
SCMStorage scmStore = new SCMStorage(conf);
scmStore.setClusterId(clusterId);
if (!scmId.isPresent()) {
scmId = Optional.of(UUID.randomUUID().toString());
}
scmStore.setScmId(scmId.get());
scmStore.initialize();
return StorageContainerManager.createSCM(null, conf);
}
/**
* Creates a new KeySpaceManager instance.
*
* @return {@link KeySpaceManager}
*
* @throws IOException
*/
private KeySpaceManager createKSM() throws IOException {
configureKSM();
KSMStorage ksmStore = new KSMStorage(conf);
ksmStore.setClusterId(clusterId);
ksmStore.setScmId(scmId.get());
ksmStore.setKsmId(ksmId.orElse(UUID.randomUUID().toString()));
ksmStore.initialize();
return KeySpaceManager.createKSM(null, conf);
}
/**
* Creates HddsDatanodeService(s) instance.
*
* @return List of HddsDatanodeService
*
* @throws IOException
*/
private List<HddsDatanodeService> createHddsDatanodes(
StorageContainerManager scm) throws IOException {
configureHddsDatanodes();
String scmAddress = scm.getDatanodeRpcAddress().getHostString() +
":" + scm.getDatanodeRpcAddress().getPort();
conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, scmAddress);
List<HddsDatanodeService> hddsDatanodes = new ArrayList<>();
for (int i = 0; i < numOfDatanodes; i++) {
Configuration dnConf = new OzoneConfiguration(conf);
String datanodeBaseDir = path + "/datanode-" + Integer.toString(i);
Path metaDir = Paths.get(datanodeBaseDir, "meta");
Path dataDir = Paths.get(datanodeBaseDir, "data", "containers");
Path ratisDir = Paths.get(datanodeBaseDir, "data", "ratis");
Files.createDirectories(metaDir);
Files.createDirectories(dataDir);
Files.createDirectories(ratisDir);
dnConf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, metaDir.toString());
dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.toString());
dnConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
ratisDir.toString());
hddsDatanodes.add(
HddsDatanodeService.createHddsDatanodeService(dnConf));
}
return hddsDatanodes;
}
private void configureSCM() {
conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numOfScmHandlers);
configureSCMheartbeat();
}
private void configureSCMheartbeat() {
if (hbInterval.isPresent()) {
conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
hbInterval.get(), TimeUnit.MILLISECONDS);
} else {
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
DEFAULT_HB_INTERVAL_MS,
TimeUnit.MILLISECONDS);
}
if (hbProcessorInterval.isPresent()) {
conf.setTimeDuration(
ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
hbProcessorInterval.get(),
TimeUnit.MILLISECONDS);
} else {
conf.setTimeDuration(
ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
DEFAULT_HB_PROCESSOR_INTERVAL_MS,
TimeUnit.MILLISECONDS);
}
}
private void configureKSM() {
conf.set(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY, "127.0.0.1:0");
conf.set(KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
conf.setInt(KSMConfigKeys.OZONE_KSM_HANDLER_COUNT_KEY, numOfKsmHandlers);
}
private void configureHddsDatanodes() {
conf.set(ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY, "0.0.0.0:0");
conf.set(HDDS_DATANODE_PLUGINS_KEY,
"org.apache.hadoop.ozone.web.OzoneHddsDatanodeService");
conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
randomContainerPort);
conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
randomContainerPort);
}
private void configureTrace() {
if (enableTrace.isPresent()) {
conf.setBoolean(OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY,
enableTrace.get());
GenericTestUtils.setRootLogLevel(Level.TRACE);
}
GenericTestUtils.setRootLogLevel(Level.INFO);
}
}
}
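
A short sketch of exercising the restart and shutdown surface implemented above (hypothetical usage, not code from this commit):

import org.apache.hadoop.ozone.MiniOzoneCluster;

// Hypothetical helper showing the restart surface; not part of the commit.
final class RestartSketch {
  static void bounceEverything(MiniOzoneCluster cluster) throws Exception {
    // SCM and KSM restart by stop()/start() on the same instance.
    cluster.restartStorageContainerManager();
    cluster.restartKeySpaceManager();
    // A datanode restart is stop(), join(), then start(null), which
    // reuses the configuration held by the HddsDatanodeService.
    cluster.restartHddsDatanode(0);
    // shutdownHddsDatanode() stops the daemon without removing it from
    // the cluster's datanode list.
    cluster.shutdownHddsDatanode(0);
  }
}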

View File: MiniOzoneTestHelper.java (deleted)

@@ -1,81 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
import org.apache.hadoop.ozone.container.common.statemachine
.DatanodeStateMachine;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
import org.apache.hadoop.util.ServicePlugin;
import java.lang.reflect.Field;
import java.util.List;
/**
* Stateless helper functions for MiniOzone based tests.
*/
public class MiniOzoneTestHelper {
private MiniOzoneTestHelper() {
}
public static DatanodeDetails getDatanodeDetails(DataNode dataNode) {
return findHddsPlugin(dataNode).getDatanodeDetails();
}
public static int getOzoneRestPort(DataNode dataNode) {
return MiniOzoneTestHelper.getDatanodeDetails(dataNode).getOzoneRestPort();
}
public static OzoneContainer getOzoneContainer(DataNode dataNode) {
return findHddsPlugin(dataNode).getDatanodeStateMachine()
.getContainer();
}
public static ContainerManager getOzoneContainerManager(DataNode dataNode) {
return findHddsPlugin(dataNode).getDatanodeStateMachine()
.getContainer().getContainerManager();
}
public static DatanodeStateMachine getStateMachine(DataNode dataNode) {
return findHddsPlugin(dataNode).getDatanodeStateMachine();
}
private static HddsDatanodeService findHddsPlugin(DataNode dataNode) {
try {
Field pluginsField = DataNode.class.getDeclaredField("plugins");
pluginsField.setAccessible(true);
List<ServicePlugin> plugins =
(List<ServicePlugin>) pluginsField.get(dataNode);
for (ServicePlugin plugin : plugins) {
if (plugin instanceof HddsDatanodeService) {
return (HddsDatanodeService) plugin;
}
}
} catch (NoSuchFieldException | IllegalAccessException e) {
e.printStackTrace();
}
throw new IllegalStateException("Can't find the Hdds server plugin in the"
+ " plugin collection of datanode");
}
}
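
With this reflective helper deleted, tests reach datanode internals directly through HddsDatanodeService, as the RatisTestHelper hunk below shows. The equivalent lookups, sketched (method names as they appear in the diffs above; the wrapper class is illustrative):

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;

// Illustrative replacement for the deleted helper; not part of the commit.
final class DatanodeAccessSketch {

  // Was MiniOzoneTestHelper.getOzoneRestPort(dataNode).
  static int ozoneRestPort(MiniOzoneCluster cluster, int i) {
    DatanodeDetails details =
        cluster.getHddsDatanodes().get(i).getDatanodeDetails();
    return details.getOzoneRestPort();
  }

  // Was MiniOzoneTestHelper.getStateMachine(dataNode), which had to
  // reflect over DataNode's private plugin list.
  static DatanodeStateMachine stateMachine(HddsDatanodeService dn) {
    return dn.getDatanodeStateMachine();
  }
}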

View File: RatisTestHelper.java

@@ -44,10 +44,10 @@ class RatisTestSuite implements Closeable {
     static final int NUM_DATANODES = 3;
 
     private final OzoneConfiguration conf;
-    private final MiniOzoneClassicCluster cluster;
+    private final MiniOzoneCluster cluster;
 
     /**
-     * Create a {@link MiniOzoneClassicCluster} for testing by setting
+     * Create a {@link MiniOzoneCluster} for testing by setting
      * OZONE_ENABLED = true,
      * RATIS_ENABLED = true, and
      * OZONE_HANDLER_TYPE_KEY = "distributed".
@@ -61,14 +61,10 @@ public OzoneConfiguration getConf() {
       return conf;
     }
 
-    public MiniOzoneClassicCluster getCluster() {
+    public MiniOzoneCluster getCluster() {
       return cluster;
     }
 
-    public int getDatanodeInfoPort() {
-      return cluster.getDataNodes().get(0).getInfoPort();
-    }
-
     public OzoneRestClient newOzoneRestClient()
         throws OzoneException, URISyntaxException {
       return RatisTestHelper.newOzoneRestClient(getDatanodeOzoneRestPort());
@@ -76,12 +72,12 @@ public OzoneRestClient newOzoneRestClient()
     @Override
     public void close() {
-      cluster.close();
+      cluster.shutdown();
     }
 
     public int getDatanodeOzoneRestPort() {
-      return MiniOzoneTestHelper.getOzoneRestPort(
-          cluster.getDataNodes().get(0));
+      return cluster.getHddsDatanodes().get(0).getDatanodeDetails()
+          .getOzoneRestPort();
     }
   }
@@ -100,12 +96,10 @@ static void initRatisConf(RpcType rpc, Configuration conf) {
         + " = " + rpc.name());
   }
 
-  static MiniOzoneClassicCluster newMiniOzoneCluster(
+  static MiniOzoneCluster newMiniOzoneCluster(
       int numDatanodes, OzoneConfiguration conf) throws IOException {
-    final MiniOzoneClassicCluster cluster =
-        new MiniOzoneClassicCluster.Builder(conf)
-        .numDataNodes(numDatanodes)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+    final MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(numDatanodes).build();
     return cluster;
   }

View File: TestContainerOperations.java

@@ -49,22 +49,19 @@ public class TestContainerOperations {
   @BeforeClass
   public static void setup() throws Exception {
     int containerSizeGB = 5;
-    long datanodeCapacities = 3 * OzoneConsts.TB;
     ContainerOperationClient.setContainerSizeB(
         containerSizeGB * OzoneConsts.GB);
     ozoneConf = new OzoneConfiguration();
     ozoneConf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
         SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
-    cluster = new MiniOzoneClassicCluster.Builder(ozoneConf).numDataNodes(1)
-        .storageCapacities(new long[] {datanodeCapacities, datanodeCapacities})
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+    cluster = MiniOzoneCluster.newBuilder(ozoneConf).setNumDatanodes(1).build();
     StorageContainerLocationProtocolClientSideTranslatorPB client =
-        cluster.createStorageContainerLocationClient();
+        cluster.getStorageContainerLocationClient();
     RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class,
         ProtobufRpcEngine.class);
     storageClient = new ContainerOperationClient(
         client, new XceiverClientManager(ozoneConf));
-    cluster.waitForHeartbeatProcessed();
+    cluster.waitForClusterToBeReady();
   }
 
   @AfterClass
View File: TestMiniOzoneCluster.java

@@ -20,7 +20,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
@@ -34,7 +33,6 @@
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.test.TestGenericTestUtils;
-import org.apache.hadoop.util.ServicePlugin;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -56,7 +54,7 @@
  */
 public class TestMiniOzoneCluster {
-  private static MiniOzoneClassicCluster cluster;
+  private static MiniOzoneCluster cluster;
   private static OzoneConfiguration conf;
   private final static File TEST_ROOT = TestGenericTestUtils.getTestDir();
@@ -79,24 +77,22 @@ public static void setup() {
   public static void cleanup() {
     if (cluster != null) {
       cluster.shutdown();
-      cluster.close();
     }
   }

   @Test(timeout = 30000)
   public void testStartMultipleDatanodes() throws Exception {
     final int numberOfNodes = 3;
-    cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .numDataNodes(numberOfNodes)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(numberOfNodes)
         .build();
-    List<DataNode> datanodes = cluster.getDataNodes();
+    cluster.waitForClusterToBeReady();
+    List<HddsDatanodeService> datanodes = cluster.getHddsDatanodes();
     assertEquals(numberOfNodes, datanodes.size());
-    for(DataNode dn : datanodes) {
+    for(HddsDatanodeService dn : datanodes) {
       // Create a single member pipe line
       String containerName = OzoneUtils.getRequestID();
-      DatanodeDetails datanodeDetails =
-          MiniOzoneTestHelper.getDatanodeDetails(dn);
+      DatanodeDetails datanodeDetails = dn.getDatanodeDetails();
       final PipelineChannel pipelineChannel =
           new PipelineChannel(datanodeDetails.getUuidString(),
               HddsProtos.LifeCycleState.OPEN,
@@ -133,15 +129,6 @@ public void testDatanodeIDPersistent() throws Exception {
     assertEquals(id1, validId);
     assertEquals(id1.getProtoBufMessage(), validId.getProtoBufMessage());

-    // Write should fail if unable to create file or directory
-    File invalidPath = new File(WRITE_TMP, "an/invalid/path");
-    try {
-      ContainerUtils.writeDatanodeDetailsTo(id1, invalidPath);
-      Assert.fail();
-    } catch (Exception e) {
-      assertTrue(e instanceof IOException);
-    }
-
     // Read should return an empty value if file doesn't exist
     File nonExistFile = new File(READ_TMP, "non_exist.id");
     nonExistFile.delete();
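
The hunk above captures the pattern that repeats through the rest of this patch: MiniOzoneClassicCluster.Builder gives way to the MiniOzoneCluster.newBuilder factory, readiness becomes an explicit waitForClusterToBeReady() call, and tests hold HddsDatanodeService handles instead of HDFS DataNode objects. A minimal sketch of the resulting test skeleton, using only calls visible in this diff (the try/finally framing and println are illustrative):

import java.util.List;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;

OzoneConfiguration conf = new OzoneConfiguration();
// Build through the new interface-level factory instead of
// instantiating MiniOzoneClassicCluster.Builder directly.
MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
    .setNumDatanodes(3)
    .build();
try {
  // Readiness is now an explicit step, not a builder side effect.
  cluster.waitForClusterToBeReady();
  // Datanodes are exposed as HddsDatanodeService instances, so
  // DatanodeDetails is reachable without MiniOzoneTestHelper.
  List<HddsDatanodeService> datanodes = cluster.getHddsDatanodes();
  for (HddsDatanodeService dn : datanodes) {
    System.out.println(dn.getDatanodeDetails().getUuidString());
  }
} finally {
  cluster.shutdown();
}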

View File

@@ -37,7 +37,6 @@
 import org.apache.hadoop.hdds.scm.block.DeletedBlockLog;
 import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.ScmInfo;
@@ -62,7 +61,6 @@
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.io.IOUtils;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -87,7 +85,7 @@ public class TestStorageContainerManager {
   public ExpectedException exception = ExpectedException.none();

   @Test
-  public void testRpcPermission() throws IOException {
+  public void testRpcPermission() throws Exception {
     // Test with default configuration
     OzoneConfiguration defaultConf = new OzoneConfiguration();
     testRpcPermissionWithConf(defaultConf, "unknownUser", true);
@@ -104,11 +102,9 @@ public void testRpcPermission() throws IOException {
   private void testRpcPermissionWithConf(
       OzoneConfiguration ozoneConf, String fakeRemoteUsername,
-      boolean expectPermissionDenied) throws IOException {
-    MiniOzoneCluster cluster =
-        new MiniOzoneClassicCluster.Builder(ozoneConf).numDataNodes(1)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+      boolean expectPermissionDenied) throws Exception {
+    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(ozoneConf).build();
+    cluster.waitForClusterToBeReady();
     try {
       String fakeUser = fakeRemoteUsername;
       StorageContainerManager mockScm = Mockito.spy(
@@ -172,7 +168,7 @@ private void testRpcPermissionWithConf(
         }
       }
     } finally {
-      IOUtils.cleanupWithLogger(null, cluster);
+      cluster.shutdown();
     }
   }
@@ -201,9 +197,8 @@ public void testBlockDeletionTransactions() throws Exception {
     conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE,
         numKeys);
-    MiniOzoneClassicCluster cluster =
-        new MiniOzoneClassicCluster.Builder(conf).numDataNodes(1)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
     try {
       DeletedBlockLog delLog = cluster.getStorageContainerManager()
@@ -269,19 +264,17 @@ public void testBlockDeletionTransactions() throws Exception {
   public void testBlockDeletingThrottling() throws Exception {
     int numKeys = 15;
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL, 5,
-        TimeUnit.SECONDS);
-    conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
-        3000, TimeUnit.MILLISECONDS);
     conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
     conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
         1000, TimeUnit.MILLISECONDS);
     conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE,
         numKeys);
-    MiniOzoneClassicCluster cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .numDataNodes(1).setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
+    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
+        .setHbInterval(5000)
+        .setHbProcessorInterval(3000)
         .build();
+    cluster.waitForClusterToBeReady();

     DeletedBlockLog delLog = cluster.getStorageContainerManager()
         .getScmBlockManager().getDeletedBlockLog();
@@ -402,14 +395,15 @@ public void testSCMReinitialization() throws Exception {
     conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
     //This will set the cluster id in the version file
     MiniOzoneCluster cluster =
-        new MiniOzoneClassicCluster.Builder(conf).numDataNodes(1)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+        MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
+    cluster.waitForClusterToBeReady();
     StartupOption.INIT.setClusterId("testClusterId");
     // This will initialize SCM
     StorageContainerManager.scmInit(conf);
     SCMStorage scmStore = new SCMStorage(conf);
     Assert.assertEquals(NodeType.SCM, scmStore.getNodeType());
     Assert.assertNotEquals("testClusterId", scmStore.getClusterID());
+    cluster.shutdown();
   }

   @Test
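
Two details worth noting in this file: heartbeat tuning moves from raw configuration keys (OZONE_SCM_HEARTBEAT_INTERVAL and friends) into builder options, and teardown changes from IOUtils.cleanupWithLogger to an explicit shutdown(). A hedged sketch of the combined setup, with the interval values taken from testBlockDeletingThrottling above (both builder arguments appear to be milliseconds, matching the 5 s / 3000 ms settings they replace):

OzoneConfiguration conf = new OzoneConfiguration();
// Heartbeat tuning is now a builder concern rather than raw conf keys.
MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
    .setHbInterval(5000)          // replaces OZONE_SCM_HEARTBEAT_INTERVAL = 5 s
    .setHbProcessorInterval(3000) // replaces OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL
    .build();
cluster.waitForClusterToBeReady();
try {
  // ... exercise the StorageContainerManager under test ...
} finally {
  // The cluster is no longer closed through IOUtils.cleanupWithLogger;
  // shutdown() is the single teardown entry point.
  cluster.shutdown();
}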

View File

@@ -23,7 +23,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
@@ -55,11 +54,11 @@
  */
 public class TestStorageContainerManagerHelper {

-  private final MiniOzoneClassicCluster cluster;
+  private final MiniOzoneCluster cluster;
   private final Configuration conf;
   private final StorageHandler storageHandler;

-  public TestStorageContainerManagerHelper(MiniOzoneClassicCluster cluster,
+  public TestStorageContainerManagerHelper(MiniOzoneCluster cluster,
       Configuration conf) throws IOException {
     this.cluster = cluster;
     this.conf = conf;
@@ -169,10 +168,9 @@ private MetadataStore getContainerMetadata(String containerName)
   private OzoneContainer getContainerServerByDatanodeUuid(String dnUUID)
       throws IOException {
-    for (DataNode dn : cluster.getDataNodes()) {
-      if (MiniOzoneTestHelper.getDatanodeDetails(dn).getUuidString()
-          .equals(dnUUID)) {
-        return MiniOzoneTestHelper.getOzoneContainer(dn);
+    for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
+      if (dn.getDatanodeDetails().getUuidString().equals(dnUUID)) {
+        return dn.getDatanodeStateMachine().getContainer();
       }
     }
     throw new IOException("Unable to get the ozone container "
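
With MiniOzoneTestHelper removed, the object graph is walked directly: cluster, then HddsDatanodeService, then DatanodeStateMachine, then OzoneContainer. A sketch of the lookup-by-UUID idiom this file now uses (the method name is illustrative; cluster is assumed to be a ready MiniOzoneCluster):

private OzoneContainer findContainerServer(MiniOzoneCluster cluster,
    String dnUUID) throws IOException {
  for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
    // DatanodeDetails now hangs directly off the service object.
    if (dn.getDatanodeDetails().getUuidString().equals(dnUUID)) {
      // The container server is reached through the state machine.
      return dn.getDatanodeStateMachine().getContainer();
    }
  }
  throw new IOException("Unable to get the ozone container for " + dnUUID);
}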

View File

@@ -20,7 +20,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -58,7 +58,7 @@ public class TestOzoneRestClient {
   @Rule
   public ExpectedException thrown = ExpectedException.none();

-  private static MiniOzoneClassicCluster cluster = null;
+  private static MiniOzoneCluster cluster = null;
   private static OzoneClient ozClient = null;
   private static ObjectStore store = null;
@@ -75,9 +75,8 @@ public static void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
         OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = new MiniOzoneClassicCluster.Builder(conf).numDataNodes(1)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
+    cluster.waitForClusterToBeReady();
     InetSocketAddress ksmHttpAddress = cluster.getKeySpaceManager()
         .getHttpServer().getHttpAddress();
     ozClient = OzoneClientFactory.getRestClient(ksmHttpAddress.getHostName(),

View File

@@ -20,7 +20,7 @@
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -70,7 +70,7 @@ public class TestOzoneRpcClient {
   @Rule
   public ExpectedException thrown = ExpectedException.none();

-  private static MiniOzoneClassicCluster cluster = null;
+  private static MiniOzoneCluster cluster = null;
   private static OzoneClient ozClient = null;
   private static ObjectStore store = null;
   private static KeySpaceManager keySpaceManager;
@@ -91,12 +91,12 @@ public static void init() throws Exception {
     conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
         OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
     conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, 1);
-    cluster = new MiniOzoneClassicCluster.Builder(conf).numDataNodes(10)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10).build();
+    cluster.waitForClusterToBeReady();
     ozClient = OzoneClientFactory.getRpcClient(conf);
     store = ozClient.getObjectStore();
     storageContainerLocationClient =
-        cluster.createStorageContainerLocationClient();
+        cluster.getStorageContainerLocationClient();
     keySpaceManager = cluster.getKeySpaceManager();
   }

View File

@@ -19,9 +19,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
-import org.apache.hadoop.ozone.MiniOzoneTestHelper;
-import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
@@ -55,10 +53,9 @@ public void test() throws IOException, TimeoutException, InterruptedException,
     //setup a cluster (1G free space is enough for a unit test)
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_CONTAINER_SIZE_GB, "1");
-    MiniOzoneClassicCluster cluster =
-        new MiniOzoneClassicCluster.Builder(conf).numDataNodes(1)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
-    cluster.waitOzoneReady();
+    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(1).build();
+    cluster.waitForClusterToBeReady();

     //the easiest way to create an open container is creating a key
     OzoneClient client = OzoneClientFactory.getClient(conf);
@@ -86,8 +83,8 @@ public void test() throws IOException, TimeoutException, InterruptedException,
     Assert.assertFalse(isContainerClosed(cluster, containerName));

-    DatanodeDetails datanodeDetails = MiniOzoneTestHelper
-        .getDatanodeDetails(cluster.getDataNodes().get(0));
+    DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
+        .getDatanodeDetails();
     //send the order to close the container
     cluster.getStorageContainerManager().getScmNodeManager()
         .addDatanodeCommand(datanodeDetails.getUuid(),
@@ -101,12 +98,13 @@ public void test() throws IOException, TimeoutException, InterruptedException,
     Assert.assertTrue(isContainerClosed(cluster, containerName));
   }

-  private Boolean isContainerClosed(MiniOzoneClassicCluster cluster,
+  private Boolean isContainerClosed(MiniOzoneCluster cluster,
       String containerName) {
     ContainerData containerData;
     try {
-      containerData = MiniOzoneTestHelper.getOzoneContainerManager(cluster
-          .getDataNodes().get(0)).readContainer(containerName);
+      containerData = cluster.getHddsDatanodes().get(0)
+          .getDatanodeStateMachine().getContainer().getContainerManager()
+          .readContainer(containerName);
       return !containerData.isOpen();
     } catch (StorageContainerException e) {
       throw new AssertionError(e);

View File

@@ -19,11 +19,9 @@
 package org.apache.hadoop.ozone.container.ozoneimpl;

 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
@@ -59,8 +57,8 @@ public void testCreateOzoneContainer() throws Exception {
     OzoneContainer container = null;
     MiniOzoneCluster cluster = null;
     try {
-      cluster = new MiniOzoneClassicCluster.Builder(conf)
-          .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+      cluster = MiniOzoneCluster.newBuilder(conf).build();
+      cluster.waitForClusterToBeReady();
       // We don't start Ozone Container via data node, we will do it
       // independently in our test path.
       Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline(
@@ -105,9 +103,10 @@ public void testOzoneContainerViaDataNode() throws Exception {
       conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
           pipeline.getLeader().getContainerPort());
-      cluster = new MiniOzoneClassicCluster.Builder(conf)
+      cluster = MiniOzoneCluster.newBuilder(conf)
           .setRandomContainerPort(false)
-          .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+          .build();
+      cluster.waitForClusterToBeReady();

       // This client talks to ozone container via datanode.
       XceiverClient client = new XceiverClient(pipeline, conf);
@@ -208,9 +207,10 @@ public void testBothGetandPutSmallFile() throws Exception {
       OzoneConfiguration conf = newOzoneConfiguration();
       client = createClientForTesting(conf);
-      cluster = new MiniOzoneClassicCluster.Builder(conf)
+      cluster = MiniOzoneCluster.newBuilder(conf)
           .setRandomContainerPort(false)
-          .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+          .build();
+      cluster.waitForClusterToBeReady();
       String containerName = client.getPipeline().getContainerName();
       runTestBothGetandPutSmallFile(containerName, client);
@@ -266,9 +266,10 @@ public void testCloseContainer() throws Exception {
       OzoneConfiguration conf = newOzoneConfiguration();
       client = createClientForTesting(conf);
-      cluster = new MiniOzoneClassicCluster.Builder(conf)
+      cluster = MiniOzoneCluster.newBuilder(conf)
           .setRandomContainerPort(false)
-          .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+          .build();
+      cluster.waitForClusterToBeReady();
       client.connect();
       String containerName = client.getPipeline().getContainerName();
@@ -356,9 +357,10 @@ public void testDeleteContainer() throws Exception {
       OzoneConfiguration conf = newOzoneConfiguration();
       client = createClientForTesting(conf);
-      cluster = new MiniOzoneClassicCluster.Builder(conf)
+      cluster = MiniOzoneCluster.newBuilder(conf)
           .setRandomContainerPort(false)
-          .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+          .build();
+      cluster.waitForClusterToBeReady();
       client.connect();
       String containerName = client.getPipeline().getContainerName();
@@ -471,9 +473,10 @@ public void testXcieverClientAsync() throws Exception {
       OzoneConfiguration conf = newOzoneConfiguration();
       client = createClientForTesting(conf);
-      cluster = new MiniOzoneClassicCluster.Builder(conf)
+      cluster = MiniOzoneCluster.newBuilder(conf)
          .setRandomContainerPort(false)
-          .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+          .build();
+      cluster.waitForClusterToBeReady();
       String containerName = client.getPipeline().getContainerName();
       runAsyncTests(containerName, client);
     } finally {
@@ -492,9 +495,10 @@ public void testInvalidRequest() throws Exception {
       OzoneConfiguration conf = newOzoneConfiguration();
       client = createClientForTesting(conf);
-      cluster = new MiniOzoneClassicCluster.Builder(conf)
+      cluster = MiniOzoneCluster.newBuilder(conf)
          .setRandomContainerPort(false)
-          .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+          .build();
+      cluster.waitForClusterToBeReady();
       client.connect();

       // Send a request without traceId.

View File

@@ -18,11 +18,9 @@
 package org.apache.hadoop.ozone.container.ozoneimpl;

-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.HddsDatanodeService;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneTestHelper;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.RatisTestHelper;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
@@ -76,20 +74,19 @@ private static void runTest(
     // create Ozone clusters
     final OzoneConfiguration conf = newOzoneConfiguration();
     RatisTestHelper.initRatisConf(rpc, conf);
-    final MiniOzoneClassicCluster cluster =
-        new MiniOzoneClassicCluster.Builder(conf)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_LOCAL)
-        .numDataNodes(numNodes)
+    final MiniOzoneCluster cluster =
+        MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(numNodes)
         .build();
     try {
-      cluster.waitOzoneReady();
+      cluster.waitForClusterToBeReady();
       final String containerName = OzoneUtils.getRequestID();
-      final List<DataNode> datanodes = cluster.getDataNodes();
+      final List<HddsDatanodeService> datanodes = cluster.getHddsDatanodes();
       final Pipeline pipeline = ContainerTestHelper.createPipeline(
           containerName,
           CollectionUtils.as(datanodes,
-              MiniOzoneTestHelper::getDatanodeDetails));
+              HddsDatanodeService::getDatanodeDetails));
       LOG.info("pipeline=" + pipeline);

       // Create Ratis cluster

View File

@@ -18,12 +18,10 @@
 package org.apache.hadoop.ozone.container.ozoneimpl;

-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.HddsDatanodeService;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneTestHelper;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.RatisTestHelper;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.ratis.rpc.RpcType;
@@ -75,17 +73,15 @@ private static void runTestRatisManager(RpcType rpc) throws Exception {
     // create Ozone clusters
     final OzoneConfiguration conf = newOzoneConfiguration();
     RatisTestHelper.initRatisConf(rpc, conf);
-    final MiniOzoneClassicCluster cluster =
-        new MiniOzoneClassicCluster.Builder(conf)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_LOCAL)
-        .numDataNodes(5)
+    final MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(5)
         .build();
     try {
-      cluster.waitOzoneReady();
+      cluster.waitForClusterToBeReady();
-      final List<DataNode> datanodes = cluster.getDataNodes();
+      final List<HddsDatanodeService> datanodes = cluster.getHddsDatanodes();
       final List<DatanodeDetails> datanodeDetailsSet = datanodes.stream()
-          .map(MiniOzoneTestHelper::getDatanodeDetails).collect(
+          .map(HddsDatanodeService::getDatanodeDetails).collect(
               Collectors.toList());
       //final RatisManager manager = RatisManager.newRatisManager(conf);

View File

@@ -19,7 +19,6 @@
 package org.apache.hadoop.ozone.freon;

 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -56,9 +55,9 @@ public static void init() throws Exception {
     conf = new OzoneConfiguration();
     conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
         OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
-        .numDataNodes(5).build();
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(5).build();
+    cluster.waitForClusterToBeReady();
   }

   /**

View File

@@ -18,11 +18,8 @@
 package org.apache.hadoop.ozone.freon;

-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -52,11 +49,8 @@ public class TestFreon {
   @BeforeClass
   public static void init() throws Exception {
     conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
-        .numDataNodes(5).build();
+    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
+    cluster.waitForClusterToBeReady();
   }

   /**

View File

@@ -22,8 +22,7 @@
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
-import org.apache.hadoop.ozone.MiniOzoneTestHelper;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.*;
@@ -51,7 +50,7 @@
 public class TestContainerReportWithKeys {
   private static final Logger LOG = LoggerFactory.getLogger(
       TestContainerReportWithKeys.class);
-  private static MiniOzoneClassicCluster cluster = null;
+  private static MiniOzoneCluster cluster = null;
   private static OzoneConfiguration conf;
   private static StorageContainerManager scm;
@@ -71,8 +70,8 @@ public static void init() throws Exception {
     conf = new OzoneConfiguration();
     conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
         OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
     scm = cluster.getStorageContainerManager();
   }
@@ -117,7 +116,7 @@ public void testContainerReportKeyWrite() throws Exception {
         cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions()
             .get(0).getBlocksLatestVersionOnly().get(0);
-    ContainerData cd = getContainerData(cluster, keyInfo.getContainerName());
+    ContainerData cd = getContainerData(keyInfo.getContainerName());
     LOG.info("DN Container Data: keyCount: {} used: {} ",
         cd.getKeyCount(), cd.getBytesUsed());
@@ -129,12 +128,11 @@
   }

-  private static ContainerData getContainerData(MiniOzoneClassicCluster clus,
-      String containerName) {
-    ContainerData containerData = null;
+  private static ContainerData getContainerData(String containerName) {
+    ContainerData containerData;
     try {
-      ContainerManager containerManager = MiniOzoneTestHelper
-          .getOzoneContainerManager(clus.getDataNodes().get(0));
+      ContainerManager containerManager = cluster.getHddsDatanodes().get(0)
+          .getDatanodeStateMachine().getContainer().getContainerManager();
       containerData = containerManager.readContainer(containerName);
     } catch (StorageContainerException e) {
       throw new AssertionError(e);

View File

@@ -22,7 +22,6 @@
 import java.io.IOException;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -55,8 +54,8 @@ public void setup() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
         OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
     ksmManager = cluster.getKeySpaceManager();
   }

View File

@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.ksm;

 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -28,6 +28,8 @@
 import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
 import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -47,6 +49,7 @@
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.UUID;

 import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME;
 import static org.junit.Assert.assertEquals;
@@ -59,7 +62,7 @@
  */
 @RunWith(Parameterized.class)
 public class TestKSMSQLCli {
-  private MiniOzoneClassicCluster cluster = null;
+  private MiniOzoneCluster cluster = null;
   private StorageHandler storageHandler;
   private UserArgs userArgs;
   private OzoneConfiguration conf;
@@ -104,12 +107,12 @@ public void setup() throws Exception {
     conf = new OzoneConfiguration();
     conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
         OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
     storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
     userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
         null, null, null, null);
-    cluster.waitForHeartbeatProcessed();
+    cluster.waitForClusterToBeReady();

     VolumeArgs createVolumeArgs0 = new VolumeArgs(volumeName0, userArgs);
     createVolumeArgs0.setUserName(userName);
@@ -149,15 +152,23 @@ public void setup() throws Exception {
     stream = storageHandler.newKeyWriter(keyArgs3);
     stream.close();

-    cluster.shutdown();
+    cluster.getKeySpaceManager().stop();
+    cluster.getStorageContainerManager().stop();

     conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType);
     cli = new SQLCLI(conf);
   }

+  @After
+  public void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
   @Test
   public void testKSMDB() throws Exception {
-    String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";
+    String dbOutPath = GenericTestUtils.getTempPath(
+        UUID.randomUUID() + "/out_sql.db");
     String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
     String dbPath = dbRootPath + "/" + KSM_DB_NAME;
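
Two smaller shifts show up here: the KSM and SCM are stopped individually (rather than shutting the whole cluster down) so SQLCLI can read their closed database files while the test still holds the cluster handle, and output paths come from GenericTestUtils.getTempPath now that cluster.getDataDirectory() is gone. A sketch of the path idiom, with the file name mirroring the test:

import java.util.UUID;
import org.apache.hadoop.test.GenericTestUtils;

// A per-run temporary path avoids collisions between parameterized runs,
// which the removed cluster.getDataDirectory() provided implicitly.
String dbOutPath = GenericTestUtils.getTempPath(
    UUID.randomUUID() + "/out_sql.db");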

View File

@@ -22,7 +22,6 @@
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -123,12 +122,12 @@ public static void init() throws Exception {
     conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
         OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
     conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
-    cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
+    cluster = MiniOzoneCluster.newBuilder(conf)
         .setClusterId(clusterId)
         .setScmId(scmId)
         .setKsmId(ksmId)
         .build();
+    cluster.waitForClusterToBeReady();
     storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
     userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
         null, null, null, null);

View File

@@ -21,12 +21,9 @@
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.core.type.TypeReference;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.MiniOzoneTestHelper;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
 import org.apache.hadoop.ozone.protocol.proto
     .KeySpaceManagerProtocolProtos.ServicePort;
@@ -45,7 +42,6 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.UUID;

 import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
 import static org.apache.hadoop.ozone.KsmUtils.getKsmAddressForClients;
@@ -61,17 +57,14 @@ public class TestKeySpaceManagerRestInterface {
   @BeforeClass
   public static void setUp() throws Exception {
     conf = new OzoneConfiguration();
-    cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
-        .setClusterId(UUID.randomUUID().toString())
-        .setScmId(UUID.randomUUID().toString())
-        .build();
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
   }

   @AfterClass
   public static void tearDown() throws Exception {
     if (cluster != null) {
-      cluster.close();
+      cluster.shutdown();
     }
   }
@@ -115,9 +108,9 @@ public void testGetServiceList() throws Exception {
         scmInfo.getPort(ServicePort.Type.RPC));

     ServiceInfo datanodeInfo = serviceMap.get(HddsProtos.NodeType.DATANODE);
-    DataNode datanode = ((MiniOzoneClassicCluster) cluster)
-        .getDataNodes().get(0);
-    Assert.assertEquals(datanode.getDatanodeHostname(),
+    DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
+        .getDatanodeDetails();
+    Assert.assertEquals(datanodeDetails.getHostName(),
         datanodeInfo.getHostname());

     Map<ServicePort.Type, Integer> ports = datanodeInfo.getPorts();
@@ -125,7 +118,7 @@ public void testGetServiceList() throws Exception {
       switch (type) {
       case HTTP:
       case HTTPS:
-        Assert.assertEquals(MiniOzoneTestHelper.getOzoneRestPort(datanode),
+        Assert.assertEquals(datanodeDetails.getOzoneRestPort(),
             (int) ports.get(type));
         break;
       default:

View File

@@ -21,7 +21,6 @@
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -77,8 +76,8 @@ public static void init() throws Exception {
     conf = new OzoneConfiguration();
     conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
         OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
     storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
     userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
         null, null, null, null);

View File

@@ -20,7 +20,6 @@
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -76,8 +75,8 @@ public static void init() throws Exception {
     conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, 5);
     conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
         OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
     storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
     userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
         null, null, null, null);

View File

@@ -26,9 +26,7 @@
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
-import java.io.IOException;
 import java.io.PrintStream;
-import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -38,9 +36,7 @@
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
-import org.apache.hadoop.ozone.MiniOzoneTestHelper;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneAcl.OzoneACLRights;
 import org.apache.hadoop.ozone.OzoneAcl.OzoneACLType;
@@ -82,7 +78,7 @@ public class TestOzoneShell {
   private static String url;
   private static File baseDir;
   private static OzoneConfiguration conf = null;
-  private static MiniOzoneClassicCluster cluster = null;
+  private static MiniOzoneCluster cluster = null;
   private static OzoneRestClient client = null;
   private static Shell shell = null;
@@ -95,11 +91,10 @@ public class TestOzoneShell {
    * Create a MiniDFSCluster for testing with using distributed Ozone
    * handler type.
    *
-   * @throws IOException
+   * @throws Exception
    */
   @BeforeClass
-  public static void init()
-      throws IOException, URISyntaxException, OzoneException {
+  public static void init() throws Exception {
     conf = new OzoneConfiguration();

     String path = GenericTestUtils.getTempPath(
@@ -115,10 +110,10 @@ public static void init()
     shell = new Shell();
     shell.setConf(conf);

-    cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
-    DataNode dataNode = cluster.getDataNodes().get(0);
-    final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
+    final int port = cluster.getHddsDatanodes().get(0).getDatanodeDetails()
+        .getOzoneRestPort();

     url = String.format("http://localhost:%d", port);
     client = new OzoneRestClient(String.format("http://localhost:%d", port));
     client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
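
The datanode's REST endpoint is likewise resolved through DatanodeDetails rather than MiniOzoneTestHelper.getOzoneRestPort(DataNode). A sketch of building a REST client URL against the first datanode, as the test above does (format string copied from the test):

final int port = cluster.getHddsDatanodes().get(0)
    .getDatanodeDetails().getOzoneRestPort();
// Point the REST client at the datanode's Ozone REST port.
OzoneRestClient client =
    new OzoneRestClient(String.format("http://localhost:%d", port));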

View File

@@ -19,10 +19,8 @@
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
@@ -51,15 +49,12 @@ public class TestAllocateContainer {

   @BeforeClass
   public static void init() throws Exception {
-    long datanodeCapacities = 3 * OzoneConsts.TB;
     conf = new OzoneConfiguration();
-    cluster = new MiniOzoneClassicCluster.Builder(conf).numDataNodes(3)
-        .storageCapacities(new long[] {datanodeCapacities, datanodeCapacities})
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
+    cluster.waitForClusterToBeReady();
     storageContainerLocationClient =
-        cluster.createStorageContainerLocationClient();
+        cluster.getStorageContainerLocationClient();
     xceiverClientManager = new XceiverClientManager(conf);
-    cluster.waitForHeartbeatProcessed();
   }

   @AfterClass
@@ -67,7 +62,7 @@ public static void shutdown() throws InterruptedException {
     if(cluster != null) {
       cluster.shutdown();
     }
-    IOUtils.cleanupWithLogger(null, storageContainerLocationClient, cluster);
+    IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
   }

   @Test

View File

@ -19,22 +19,19 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.block.BlockManagerImpl; import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
import org.apache.hadoop.hdds.scm.container.ContainerMapping; import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy; import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.ozone.scm.cli.SQLCLI; import org.apache.hadoop.ozone.scm.cli.SQLCLI;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -52,12 +49,12 @@
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.HashMap; import java.util.HashMap;
import java.util.UUID;
import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB; import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB; import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB;
import static org.apache.hadoop.ozone.OzoneConsts.KB; import static org.apache.hadoop.ozone.OzoneConsts.KB;
import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB; import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
//import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
@ -83,10 +80,9 @@ public TestContainerSQLCli(String type) {
private static SQLCLI cli; private static SQLCLI cli;
private MiniOzoneClassicCluster cluster; private MiniOzoneCluster cluster;
private OzoneConfiguration conf; private OzoneConfiguration conf;
private StorageContainerLocationProtocolClientSideTranslatorPB private String datanodeIpAddress;
storageContainerLocationClient;
private ContainerMapping mapping; private ContainerMapping mapping;
private NodeManager nodeManager; private NodeManager nodeManager;
@ -105,7 +101,6 @@ public TestContainerSQLCli(String type) {
@Before @Before
public void setup() throws Exception { public void setup() throws Exception {
long datanodeCapacities = 3 * OzoneConsts.TB;
blockContainerMap = new HashMap<>(); blockContainerMap = new HashMap<>();
conf = new OzoneConfiguration(); conf = new OzoneConfiguration();
@ -120,13 +115,12 @@ public void setup() throws Exception {
factor = HddsProtos.ReplicationFactor.ONE; factor = HddsProtos.ReplicationFactor.ONE;
type = HddsProtos.ReplicationType.STAND_ALONE; type = HddsProtos.ReplicationType.STAND_ALONE;
} }
cluster = new MiniOzoneClassicCluster.Builder(conf).numDataNodes(2) cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2).build();
.storageCapacities(new long[] {datanodeCapacities, datanodeCapacities}) cluster.waitForClusterToBeReady();
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build(); datanodeIpAddress = cluster.getHddsDatanodes().get(0)
storageContainerLocationClient = .getDatanodeDetails().getIpAddress();
cluster.createStorageContainerLocationClient(); cluster.getKeySpaceManager().stop();
cluster.waitForHeartbeatProcessed(); cluster.getStorageContainerManager().stop();
cluster.shutdown();
nodeManager = cluster.getStorageContainerManager().getScmNodeManager(); nodeManager = cluster.getStorageContainerManager().getScmNodeManager();
mapping = new ContainerMapping(conf, nodeManager, 128); mapping = new ContainerMapping(conf, nodeManager, 128);
@ -179,12 +173,15 @@ public void setup() throws Exception {
@After @After
public void shutdown() throws InterruptedException { public void shutdown() throws InterruptedException {
IOUtils.cleanupWithLogger(null, storageContainerLocationClient, cluster); if (cluster != null) {
cluster.shutdown();
}
} }
@Test @Test
public void testConvertBlockDB() throws Exception { public void testConvertBlockDB() throws Exception {
String dbOutPath = cluster.getDataDirectory() + "/out_sql.db"; String dbOutPath = GenericTestUtils.getTempPath(
UUID.randomUUID() + "/out_sql.db");
String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
String dbPath = dbRootPath + "/" + BLOCK_DB; String dbPath = dbRootPath + "/" + BLOCK_DB;
String[] args = {"-p", dbPath, "-o", dbOutPath}; String[] args = {"-p", dbPath, "-o", dbOutPath};
@ -206,7 +203,8 @@ public void testConvertBlockDB() throws Exception {
@Test @Test
public void testConvertNodepoolDB() throws Exception { public void testConvertNodepoolDB() throws Exception {
String dbOutPath = cluster.getDataDirectory() + "/out_sql.db"; String dbOutPath = GenericTestUtils.getTempPath(
UUID.randomUUID() + "/out_sql.db");
String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
String dbPath = dbRootPath + "/" + NODEPOOL_DB; String dbPath = dbRootPath + "/" + NODEPOOL_DB;
String[] args = {"-p", dbPath, "-o", dbOutPath}; String[] args = {"-p", dbPath, "-o", dbOutPath};
@ -233,7 +231,8 @@ public void testConvertNodepoolDB() throws Exception {
@Test @Test
public void testConvertContainerDB() throws Exception { public void testConvertContainerDB() throws Exception {
String dbOutPath = cluster.getDataDirectory() + "/out_sql.db"; String dbOutPath = GenericTestUtils.getTempPath(
UUID.randomUUID() + "/out_sql.db");
// TODO : the following will fail due to empty Datanode list, need to fix. // TODO : the following will fail due to empty Datanode list, need to fix.
//String dnUUID = cluster.getDataNodes().get(0).getUuid(); //String dnUUID = cluster.getDataNodes().get(0).getUuid();
String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
@ -275,7 +274,7 @@ public void testConvertContainerDB() throws Exception {
rs = executeQuery(conn, sql); rs = executeQuery(conn, sql);
int count = 0; int count = 0;
while (rs.next()) { while (rs.next()) {
assertEquals("127.0.0.1", rs.getString("ipAddr")); assertEquals(datanodeIpAddress, rs.getString("ipAddress"));
//assertEquals(dnUUID, rs.getString("datanodeUUID")); //assertEquals(dnUUID, rs.getString("datanodeUUID"));
count += 1; count += 1;
} }
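The recurring migration in this file (and in most tests below) is: MiniOzoneClassicCluster.Builder, with its storageCapacities and setHandlerType knobs, gives way to MiniOzoneCluster.newBuilder, and the explicit heartbeat wait becomes waitForClusterToBeReady(). A minimal sketch of the new lifecycle, assuming only the MiniOzoneCluster API visible in these hunks:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.MiniOzoneCluster;

    public class MiniOzoneClusterLifecycleSketch {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Replaces MiniOzoneClassicCluster.Builder and its handler-type
        // and storage-capacity settings.
        MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
            .setNumDatanodes(2)
            .build();
        try {
          // Blocks until SCM reports the datanodes healthy; replaces
          // waitForHeartbeatProcessed() / waitOzoneReady().
          cluster.waitForClusterToBeReady();
          // ... test body ...
        } finally {
          cluster.shutdown();
        }
      }
    }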


@ -19,10 +19,8 @@
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy; import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
@ -59,17 +57,15 @@ public class TestContainerSmallFile {
@BeforeClass @BeforeClass
public static void init() throws Exception { public static void init() throws Exception {
long datanodeCapacities = 3 * OzoneConsts.TB;
ozoneConfig = new OzoneConfiguration(); ozoneConfig = new OzoneConfiguration();
ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class); SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
cluster = new MiniOzoneClassicCluster.Builder(ozoneConfig).numDataNodes(1) cluster = MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1)
.storageCapacities(new long[] {datanodeCapacities, datanodeCapacities}) .build();
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build(); cluster.waitForClusterToBeReady();
storageContainerLocationClient = cluster storageContainerLocationClient = cluster
.createStorageContainerLocationClient(); .getStorageContainerLocationClient();
xceiverClientManager = new XceiverClientManager(ozoneConfig); xceiverClientManager = new XceiverClientManager(ozoneConfig);
cluster.waitForHeartbeatProcessed();
} }
@AfterClass @AfterClass
@ -77,7 +73,7 @@ public static void shutdown() throws InterruptedException {
if (cluster != null) { if (cluster != null) {
cluster.shutdown(); cluster.shutdown();
} }
IOUtils.cleanupWithLogger(null, storageContainerLocationClient, cluster); IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
} }
@Test @Test
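Teardown follows the same shape throughout the patch: null-check the cluster, call shutdown() explicitly, and keep IOUtils.cleanupWithLogger only for the protocol clients, which are now obtained from the cluster via the getStorageContainerLocationClient() getter rather than a create method. A consolidated sketch of the idiom, under the same API assumptions as above:

    @AfterClass
    public static void shutdown() {
      // The cluster is no longer handed to IOUtils.cleanupWithLogger
      // as a Closeable; it is shut down directly.
      if (cluster != null) {
        cluster.shutdown();
      }
      // Only the RPC client remains a Closeable handled by IOUtils.
      IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
    }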


@ -20,10 +20,8 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.StorageContainerManager; import org.apache.hadoop.hdds.scm.StorageContainerManager;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneTestHelper;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
import org.apache.hadoop.ozone.container.common.helpers.KeyUtils; import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@ -61,7 +59,7 @@
public class TestSCMCli { public class TestSCMCli {
private static SCMCLI cli; private static SCMCLI cli;
private static MiniOzoneClassicCluster cluster; private static MiniOzoneCluster cluster;
private static OzoneConfiguration conf; private static OzoneConfiguration conf;
private static StorageContainerLocationProtocolClientSideTranslatorPB private static StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient; storageContainerLocationClient;
@ -82,11 +80,11 @@ public class TestSCMCli {
@BeforeClass @BeforeClass
public static void setup() throws Exception { public static void setup() throws Exception {
conf = new OzoneConfiguration(); conf = new OzoneConfiguration();
cluster = new MiniOzoneClassicCluster.Builder(conf).numDataNodes(3) cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build(); cluster.waitForClusterToBeReady();
xceiverClientManager = new XceiverClientManager(conf); xceiverClientManager = new XceiverClientManager(conf);
storageContainerLocationClient = storageContainerLocationClient =
cluster.createStorageContainerLocationClient(); cluster.getStorageContainerLocationClient();
containerOperationClient = new ContainerOperationClient( containerOperationClient = new ContainerOperationClient(
storageContainerLocationClient, new XceiverClientManager(conf)); storageContainerLocationClient, new XceiverClientManager(conf));
outContent = new ByteArrayOutputStream(); outContent = new ByteArrayOutputStream();
@ -116,7 +114,10 @@ private int runCommandAndGetOutput(String[] cmd,
@AfterClass @AfterClass
public static void shutdown() throws InterruptedException { public static void shutdown() throws InterruptedException {
IOUtils.cleanupWithLogger(null, storageContainerLocationClient, cluster); if (cluster != null) {
cluster.shutdown();
}
IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
} }
@Test @Test
@ -235,8 +236,8 @@ public void testDeleteContainer() throws Exception {
@Test @Test
public void testInfoContainer() throws Exception { public void testInfoContainer() throws Exception {
// The cluster has one Datanode server. // The cluster has one Datanode server.
DatanodeDetails datanodeDetails = MiniOzoneTestHelper DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
.getDatanodeDetails(cluster.getDataNodes().get(0)); .getDatanodeDetails();
String formatStr = String formatStr =
"Container Name: %s\n" + "Container Name: %s\n" +
"Container State: %s\n" + "Container State: %s\n" +


@ -21,11 +21,8 @@
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdds.scm.StorageContainerManager; import org.apache.hadoop.hdds.scm.StorageContainerManager;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat; import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.junit.BeforeClass; import org.junit.BeforeClass;
@ -65,18 +62,19 @@ public class TestSCMMXBean {
public static void init() throws IOException, TimeoutException, public static void init() throws IOException, TimeoutException,
InterruptedException { InterruptedException {
conf = new OzoneConfiguration(); conf = new OzoneConfiguration();
cluster = new MiniOzoneClassicCluster.Builder(conf) cluster = MiniOzoneCluster.newBuilder(conf)
.numDataNodes(numOfDatanodes) .setNumDatanodes(numOfDatanodes)
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
.build(); .build();
cluster.waitOzoneReady(); cluster.waitForClusterToBeReady();
scm = cluster.getStorageContainerManager(); scm = cluster.getStorageContainerManager();
mbs = ManagementFactory.getPlatformMBeanServer(); mbs = ManagementFactory.getPlatformMBeanServer();
} }
@AfterClass @AfterClass
public static void shutdown() { public static void shutdown() {
IOUtils.cleanupWithLogger(null, cluster); if (cluster != null) {
cluster.shutdown();
}
} }
@Test @Test
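TestSCMMXBean reads SCM state through the platform MBean server obtained above. A hedged sketch of such a read; both the ObjectName string and the attribute name here are hypothetical and not taken from this patch:

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Hypothetical name and attribute; use whatever SCM actually registers.
    ObjectName scmInfo = new ObjectName(
        "Hadoop:service=StorageContainerManager,name=StorageContainerManagerInfo");
    Object nodeCount = mbs.getAttribute(scmInfo, "NodeCount");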


@ -28,11 +28,9 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.StorageContainerManager; import org.apache.hadoop.hdds.scm.StorageContainerManager;
import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneTestHelper;
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.ContainerReport; import org.apache.hadoop.ozone.container.common.helpers.ContainerReport;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
@ -55,7 +53,7 @@ public class TestSCMMetrics {
@Rule @Rule
public Timeout testTimeout = new Timeout(90000); public Timeout testTimeout = new Timeout(90000);
private static MiniOzoneClassicCluster cluster = null; private static MiniOzoneCluster cluster = null;
@Test @Test
public void testContainerMetrics() throws Exception { public void testContainerMetrics() throws Exception {
@ -71,9 +69,9 @@ public void testContainerMetrics() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration(); OzoneConfiguration conf = new OzoneConfiguration();
try { try {
cluster = new MiniOzoneClassicCluster.Builder(conf) cluster = MiniOzoneCluster.newBuilder(conf)
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED) .setNumDatanodes(nodeCount).build();
.numDataNodes(nodeCount).build(); cluster.waitForClusterToBeReady();
ContainerStat stat = new ContainerStat(size, used, keyCount, readBytes, ContainerStat stat = new ContainerStat(size, used, keyCount, readBytes,
writeBytes, readCount, writeCount); writeBytes, readCount, writeCount);
@ -165,17 +163,16 @@ public void testStaleNodeContainerReport() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration(); OzoneConfiguration conf = new OzoneConfiguration();
try { try {
cluster = new MiniOzoneClassicCluster.Builder(conf) cluster = MiniOzoneCluster.newBuilder(conf)
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED) .setNumDatanodes(nodeCount).build();
.numDataNodes(nodeCount).build(); cluster.waitForClusterToBeReady();
ContainerStat stat = new ContainerStat(size, used, keyCount, readBytes, ContainerStat stat = new ContainerStat(size, used, keyCount, readBytes,
writeBytes, readCount, writeCount); writeBytes, readCount, writeCount);
StorageContainerManager scmManager = cluster.getStorageContainerManager(); StorageContainerManager scmManager = cluster.getStorageContainerManager();
DataNode dataNode = cluster.getDataNodes().get(0); String datanodeUuid = cluster.getHddsDatanodes().get(0)
String datanodeUuid = MiniOzoneTestHelper.getDatanodeDetails(dataNode) .getDatanodeDetails().getUuidString();
.getUuidString();
ContainerReportsRequestProto request = createContainerReport(numReport, ContainerReportsRequestProto request = createContainerReport(numReport,
stat, datanodeUuid); stat, datanodeUuid);
scmManager.sendContainerReport(request); scmManager.sendContainerReport(request);


@ -20,10 +20,8 @@
import com.google.common.cache.Cache; import com.google.common.cache.Cache;
import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientManager;
@ -57,18 +55,22 @@ public class TestXceiverClientManager {
public ExpectedException exception = ExpectedException.none(); public ExpectedException exception = ExpectedException.none();
@BeforeClass @BeforeClass
public static void init() throws IOException { public static void init() throws Exception {
config = new OzoneConfiguration(); config = new OzoneConfiguration();
cluster = new MiniOzoneClassicCluster.Builder(config) cluster = MiniOzoneCluster.newBuilder(config)
.numDataNodes(3) .setNumDatanodes(3)
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build(); .build();
cluster.waitForClusterToBeReady();
storageContainerLocationClient = cluster storageContainerLocationClient = cluster
.createStorageContainerLocationClient(); .getStorageContainerLocationClient();
} }
@AfterClass @AfterClass
public static void shutdown() { public static void shutdown() {
IOUtils.cleanupWithLogger(null, cluster, storageContainerLocationClient); if (cluster != null) {
cluster.shutdown();
}
IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
} }
@Test @Test


@ -21,7 +21,6 @@
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import java.io.IOException;
import java.util.List; import java.util.List;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletableFuture;
@ -32,10 +31,8 @@
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientManager;
@ -63,13 +60,12 @@ public class TestXceiverClientMetrics {
private static String containerOwner = "OZONE"; private static String containerOwner = "OZONE";
@BeforeClass @BeforeClass
public static void init() throws IOException { public static void init() throws Exception {
config = new OzoneConfiguration(); config = new OzoneConfiguration();
cluster = new MiniOzoneClassicCluster.Builder(config) cluster = MiniOzoneCluster.newBuilder(config).build();
.numDataNodes(1) cluster.waitForClusterToBeReady();
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
storageContainerLocationClient = cluster storageContainerLocationClient = cluster
.createStorageContainerLocationClient(); .getStorageContainerLocationClient();
} }
@AfterClass @AfterClass


@ -16,10 +16,9 @@
*/ */
package org.apache.hadoop.hdds.scm.node; package org.apache.hadoop.hdds.scm.node;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.client.ContainerOperationClient; import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
@ -34,7 +33,6 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.INVALID;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys import static org.apache.hadoop.hdds.scm.ScmConfigKeys
@ -52,7 +50,7 @@
*/ */
public class TestQueryNode { public class TestQueryNode {
private static int numOfDatanodes = 5; private static int numOfDatanodes = 5;
private MiniOzoneClassicCluster cluster; private MiniOzoneCluster cluster;
private ContainerOperationClient scmClient; private ContainerOperationClient scmClient;
@ -67,13 +65,12 @@ public void setUp() throws Exception {
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
cluster = new MiniOzoneClassicCluster.Builder(conf) cluster = MiniOzoneCluster.newBuilder(conf)
.numDataNodes(numOfDatanodes) .setNumDatanodes(numOfDatanodes)
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
.build(); .build();
cluster.waitOzoneReady(); cluster.waitForClusterToBeReady();
scmClient = new ContainerOperationClient(cluster scmClient = new ContainerOperationClient(cluster
.createStorageContainerLocationClient(), .getStorageContainerLocationClient(),
new XceiverClientManager(conf)); new XceiverClientManager(conf));
} }
@ -95,8 +92,8 @@ public void testHealthyNodesCount() throws Exception {
@Test(timeout = 10 * 1000L) @Test(timeout = 10 * 1000L)
public void testStaleNodesCount() throws Exception { public void testStaleNodesCount() throws Exception {
cluster.shutdownDataNode(0); cluster.shutdownHddsDatanode(0);
cluster.shutdownDataNode(1); cluster.shutdownHddsDatanode(1);
GenericTestUtils.waitFor(() -> GenericTestUtils.waitFor(() ->
cluster.getStorageContainerManager().getNodeCount(STALE) == 2, cluster.getStorageContainerManager().getNodeCount(STALE) == 2,
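The stale-node assertion above relies on GenericTestUtils.waitFor, which polls a boolean supplier at a fixed interval until it returns true or the timeout expires. A sketch with illustrative interval and timeout values (the actual values are cut off by the hunk):

    cluster.shutdownHddsDatanode(0);
    cluster.shutdownHddsDatanode(1);
    // Poll every 100 ms, give up after 4 s; both values are assumptions.
    GenericTestUtils.waitFor(
        () -> cluster.getStorageContainerManager().getNodeCount(STALE) == 2,
        100, 4 * 1000);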


@ -17,9 +17,7 @@
*/ */
package org.apache.hadoop.ozone.web; package org.apache.hadoop.ozone.web;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneTestHelper;
import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneConsts;
@ -49,7 +47,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
@Rule @Rule
public Timeout testTimeout = new Timeout(300000); public Timeout testTimeout = new Timeout(300000);
private static MiniOzoneClassicCluster cluster = null; private static MiniOzoneCluster cluster = null;
private static int port = 0; private static int port = 0;
/** /**
@ -66,10 +64,10 @@ public static void init() throws Exception {
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG); Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED); OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
cluster = new MiniOzoneClassicCluster.Builder(conf) cluster = MiniOzoneCluster.newBuilder(conf).build();
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build(); cluster.waitForClusterToBeReady();
DataNode dataNode = cluster.getDataNodes().get(0); port = cluster.getHddsDatanodes().get(0)
port = MiniOzoneTestHelper.getOzoneRestPort(dataNode); .getDatanodeDetails().getOzoneRestPort();
} }
/** /**
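Several REST tests repeat one replacement: the Ozone REST port is no longer read from a DataNode through MiniOzoneTestHelper but from the first HDDS datanode's DatanodeDetails. Consolidated, using only calls that appear elsewhere in this patch:

    int port = cluster.getHddsDatanodes().get(0)
        .getDatanodeDetails().getOzoneRestPort();
    OzoneRestClient client =
        new OzoneRestClient(String.format("http://localhost:%d", port));
    client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);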


@ -17,18 +17,16 @@
*/ */
package org.apache.hadoop.ozone.web; package org.apache.hadoop.ozone.web;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneTestHelper;
import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestOzoneHelper; import org.apache.hadoop.ozone.TestOzoneHelper;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level; import org.apache.log4j.Level;
import org.apache.log4j.Logger; import org.apache.log4j.Logger;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Rule; import org.junit.Rule;
import org.junit.Test; import org.junit.Test;
import org.junit.rules.Timeout; import org.junit.rules.Timeout;
@ -45,7 +43,7 @@ public class TestLocalOzoneVolumes extends TestOzoneHelper {
@Rule @Rule
public Timeout testTimeout = new Timeout(300000); public Timeout testTimeout = new Timeout(300000);
private static MiniOzoneClassicCluster cluster = null; private static MiniOzoneCluster cluster = null;
private static int port = 0; private static int port = 0;
/** /**
@ -69,10 +67,10 @@ public static void init() throws Exception {
conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path); conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG); Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
cluster = new MiniOzoneClassicCluster.Builder(conf) cluster = MiniOzoneCluster.newBuilder(conf).build();
.setHandlerType(OzoneConsts.OZONE_HANDLER_LOCAL).build(); cluster.waitForClusterToBeReady();
DataNode dataNode = cluster.getDataNodes().get(0); port = cluster.getHddsDatanodes().get(0)
port = MiniOzoneTestHelper.getOzoneRestPort(dataNode); .getDatanodeDetails().getOzoneRestPort();
} }
/** /**
@ -175,7 +173,7 @@ public void testGetVolumesOfAnotherUser() throws IOException {
* *
* @throws IOException * @throws IOException
*/ */
@Test @Test @Ignore
public void testGetVolumesOfAnotherUserShouldFail() throws IOException { public void testGetVolumesOfAnotherUserShouldFail() throws IOException {
super.testGetVolumesOfAnotherUserShouldFail(port); super.testGetVolumesOfAnotherUserShouldFail(port);
} }


@ -24,7 +24,6 @@
import static org.junit.Assert.*; import static org.junit.Assert.*;
import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.web.client.OzoneRestClient; import org.apache.hadoop.ozone.web.client.OzoneRestClient;
import org.junit.AfterClass; import org.junit.AfterClass;
@ -61,15 +60,21 @@ public class TestOzoneRestWithMiniCluster {
@BeforeClass @BeforeClass
public static void init() throws Exception { public static void init() throws Exception {
conf = new OzoneConfiguration(); conf = new OzoneConfiguration();
cluster = new MiniOzoneClassicCluster.Builder(conf).numDataNodes(1) cluster = MiniOzoneCluster.newBuilder(conf).build();
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build(); cluster.waitForClusterToBeReady();
cluster.waitOzoneReady(); int port = cluster.getHddsDatanodes().get(0)
ozoneClient = cluster.createOzoneRestClient(); .getDatanodeDetails().getOzoneRestPort();
ozoneClient = new OzoneRestClient(
String.format("http://localhost:%d", port));
ozoneClient.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
} }
@AfterClass @AfterClass
public static void shutdown() throws InterruptedException { public static void shutdown() throws InterruptedException {
IOUtils.cleanupWithLogger(null, ozoneClient, cluster); if (cluster != null) {
cluster.shutdown();
}
IOUtils.cleanupWithLogger(null, ozoneClient);
} }
@Test @Test


@ -18,9 +18,7 @@
package org.apache.hadoop.ozone.web; package org.apache.hadoop.ozone.web;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneTestHelper;
import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneConsts;
@ -57,7 +55,7 @@ public class TestOzoneWebAccess {
@Rule @Rule
public Timeout testTimeout = new Timeout(300000); public Timeout testTimeout = new Timeout(300000);
private static MiniOzoneClassicCluster cluster; private static MiniOzoneCluster cluster;
private static int port; private static int port;
/** /**
@ -77,10 +75,10 @@ public static void init() throws Exception {
.getTempPath(TestOzoneWebAccess.class.getSimpleName()); .getTempPath(TestOzoneWebAccess.class.getSimpleName());
conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path); conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
cluster = new MiniOzoneClassicCluster.Builder(conf) cluster = MiniOzoneCluster.newBuilder(conf).build();
.setHandlerType(OzoneConsts.OZONE_HANDLER_LOCAL).build(); cluster.waitForClusterToBeReady();
DataNode dataNode = cluster.getDataNodes().get(0); port = cluster.getHddsDatanodes().get(0)
port = MiniOzoneTestHelper.getOzoneRestPort(dataNode); .getDatanodeDetails().getOzoneRestPort();
} }
/** /**


@ -18,12 +18,9 @@
package org.apache.hadoop.ozone.web.client; package org.apache.hadoop.ozone.web.client;
import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneTestHelper;
import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.web.request.OzoneQuota; import org.apache.hadoop.ozone.web.request.OzoneQuota;
import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.ozone.web.utils.OzoneUtils;
@ -55,7 +52,7 @@ public class TestBuckets {
@Rule @Rule
public Timeout testTimeout = new Timeout(300000); public Timeout testTimeout = new Timeout(300000);
private static MiniOzoneClassicCluster cluster = null; private static MiniOzoneCluster cluster = null;
private static OzoneRestClient ozoneRestClient = null; private static OzoneRestClient ozoneRestClient = null;
/** /**
@ -78,10 +75,9 @@ public static void init() throws IOException,
OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT); OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path); conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
cluster = new MiniOzoneClassicCluster.Builder(conf) cluster = MiniOzoneCluster.newBuilder(conf).build();
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build(); final int port = cluster.getHddsDatanodes().get(0).getDatanodeDetails()
DataNode dataNode = cluster.getDataNodes().get(0); .getOzoneRestPort();
final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
ozoneRestClient = new OzoneRestClient( ozoneRestClient = new OzoneRestClient(
String.format("http://localhost:%d", port)); String.format("http://localhost:%d", port));
} }


@ -26,13 +26,10 @@
import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair; import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneTestHelper;
import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.helpers.KeyData; import org.apache.hadoop.ozone.container.common.helpers.KeyData;
@ -87,7 +84,7 @@ public class TestKeys {
@Rule @Rule
public Timeout testTimeout = new Timeout(300000); public Timeout testTimeout = new Timeout(300000);
private static MiniOzoneClassicCluster ozoneCluster = null; private static MiniOzoneCluster ozoneCluster = null;
private static String path; private static String path;
private static OzoneRestClient ozoneRestClient = null; private static OzoneRestClient ozoneRestClient = null;
private static long currentTime; private static long currentTime;
@ -108,10 +105,10 @@ public static void init() throws Exception {
path = GenericTestUtils.getTempPath(TestKeys.class.getSimpleName()); path = GenericTestUtils.getTempPath(TestKeys.class.getSimpleName());
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG); Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
ozoneCluster = new MiniOzoneClassicCluster.Builder(conf) ozoneCluster = MiniOzoneCluster.newBuilder(conf).build();
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build(); ozoneCluster.waitForClusterToBeReady();
DataNode dataNode = ozoneCluster.getDataNodes().get(0); final int port = ozoneCluster.getHddsDatanodes().get(0)
final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode); .getDatanodeDetails().getOzoneRestPort();
ozoneRestClient = new OzoneRestClient( ozoneRestClient = new OzoneRestClient(
String.format("http://localhost:%d", port)); String.format("http://localhost:%d", port));
currentTime = Time.now(); currentTime = Time.now();
@ -277,12 +274,12 @@ static void runTestPutKey(PutHelper helper) throws Exception {
} }
private static void restartDatanode( private static void restartDatanode(
MiniOzoneClassicCluster cluster, int datanodeIdx, OzoneRestClient client) MiniOzoneCluster cluster, int datanodeIdx, OzoneRestClient client)
throws IOException, OzoneException, URISyntaxException { throws OzoneException, URISyntaxException {
cluster.restartDataNode(datanodeIdx); cluster.restartHddsDatanode(datanodeIdx);
// refresh the datanode endpoint uri after datanode restart // refresh the datanode endpoint uri after datanode restart
DataNode dataNode = cluster.getDataNodes().get(datanodeIdx); final int port = cluster.getHddsDatanodes().get(datanodeIdx)
final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode); .getDatanodeDetails().getOzoneRestPort();
client.setEndPoint(String.format("http://localhost:%d", port)); client.setEndPoint(String.format("http://localhost:%d", port));
} }
@ -297,14 +294,13 @@ public void testPutAndGetKeyWithDnRestart() throws Exception {
} }
static void runTestPutAndGetKeyWithDnRestart( static void runTestPutAndGetKeyWithDnRestart(
PutHelper helper, MiniOzoneClassicCluster cluster) throws Exception { PutHelper helper, MiniOzoneCluster cluster) throws Exception {
String keyName = helper.putKey().getKeyName(); String keyName = helper.putKey().getKeyName();
assertNotNull(helper.getBucket()); assertNotNull(helper.getBucket());
assertNotNull(helper.getFile()); assertNotNull(helper.getFile());
// restart the datanode // restart the datanode
restartDatanode(cluster, 0, helper.client); restartDatanode(cluster, 0, helper.client);
// verify getKey after the datanode restart // verify getKey after the datanode restart
String newFileName = helper.dir + "/" String newFileName = helper.dir + "/"
+ OzoneUtils.getRequestID().toLowerCase(); + OzoneUtils.getRequestID().toLowerCase();
@ -609,8 +605,8 @@ public void testDeleteKey() throws Exception {
Assert.assertEquals(20, bucketKeys.totalNumOfKeys()); Assert.assertEquals(20, bucketKeys.totalNumOfKeys());
int numOfCreatedKeys = 0; int numOfCreatedKeys = 0;
OzoneContainer cm = MiniOzoneTestHelper OzoneContainer cm = ozoneCluster.getHddsDatanodes().get(0)
.getOzoneContainer(ozoneCluster.getDataNodes().get(0)); .getDatanodeStateMachine().getContainer();
// Expected to delete chunk file list. // Expected to delete chunk file list.
List<File> expectedChunkFiles = Lists.newArrayList(); List<File> expectedChunkFiles = Lists.newArrayList();
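Direct access to a datanode's container layer likewise moves off MiniOzoneTestHelper: the OzoneContainer now comes from the datanode's state machine, as the hunk above shows:

    OzoneContainer container = ozoneCluster.getHddsDatanodes().get(0)
        .getDatanodeStateMachine().getContainer();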


@ -18,7 +18,7 @@
package org.apache.hadoop.ozone.web.client; package org.apache.hadoop.ozone.web.client;
import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.RatisTestHelper; import org.apache.hadoop.ozone.RatisTestHelper;
import org.junit.AfterClass; import org.junit.AfterClass;
@ -50,7 +50,7 @@ public class TestKeysRatis {
@Rule @Rule
public Timeout testTimeout = new Timeout(300000); public Timeout testTimeout = new Timeout(300000);
private static RatisTestHelper.RatisTestSuite suite; private static RatisTestHelper.RatisTestSuite suite;
private static MiniOzoneClassicCluster ozoneCluster = null; private static MiniOzoneCluster ozoneCluster = null;
static private String path; static private String path;
private static OzoneRestClient ozoneRestClient = null; private static OzoneRestClient ozoneRestClient = null;
@ -59,6 +59,7 @@ public static void init() throws Exception {
suite = new RatisTestHelper.RatisTestSuite(TestBucketsRatis.class); suite = new RatisTestHelper.RatisTestSuite(TestBucketsRatis.class);
path = suite.getConf().get(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT); path = suite.getConf().get(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT);
ozoneCluster = suite.getCluster(); ozoneCluster = suite.getCluster();
ozoneCluster.waitForClusterToBeReady();
ozoneRestClient = suite.newOzoneRestClient(); ozoneRestClient = suite.newOzoneRestClient();
} }


@ -43,9 +43,7 @@
import io.netty.handler.logging.LogLevel; import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler; import io.netty.handler.logging.LoggingHandler;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneTestHelper;
import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneConsts;
@ -86,7 +84,7 @@
public class TestOzoneClient { public class TestOzoneClient {
private static Logger log = Logger.getLogger(TestOzoneClient.class); private static Logger log = Logger.getLogger(TestOzoneClient.class);
private static int testVolumeCount = 5; private static int testVolumeCount = 5;
private static MiniOzoneClassicCluster cluster = null; private static MiniOzoneCluster cluster = null;
private static String endpoint = null; private static String endpoint = null;
@BeforeClass @BeforeClass
@ -95,11 +93,11 @@ public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration(); OzoneConfiguration conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED); OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
cluster = new MiniOzoneClassicCluster.Builder(conf) cluster = MiniOzoneCluster.newBuilder(conf).build();
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build(); cluster.waitForClusterToBeReady();
DataNode dataNode = cluster.getDataNodes().get(0); int port = cluster.getHddsDatanodes().get(0)
endpoint = String.format("http://localhost:%d", .getDatanodeDetails().getOzoneRestPort();
MiniOzoneTestHelper.getOzoneRestPort(dataNode)); endpoint = String.format("http://localhost:%d", port);
} }
@AfterClass @AfterClass


@ -21,10 +21,8 @@
import org.apache.commons.io.FileUtils; import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.RandomStringUtils;
import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneTestHelper;
import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.Status;
@ -63,7 +61,7 @@
* Test Ozone Volumes Lifecycle. * Test Ozone Volumes Lifecycle.
*/ */
public class TestVolume { public class TestVolume {
private static MiniOzoneClassicCluster cluster = null; private static MiniOzoneCluster cluster = null;
private static OzoneRestClient ozoneRestClient = null; private static OzoneRestClient ozoneRestClient = null;
/** /**
@ -88,10 +86,10 @@ public static void init() throws Exception {
conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path); conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG); Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
cluster = new MiniOzoneClassicCluster.Builder(conf) cluster = MiniOzoneCluster.newBuilder(conf).build();
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build(); cluster.waitForClusterToBeReady();
DataNode dataNode = cluster.getDataNodes().get(0); final int port = cluster.getHddsDatanodes().get(0)
final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode); .getDatanodeDetails().getOzoneRestPort();
ozoneRestClient = new OzoneRestClient( ozoneRestClient = new OzoneRestClient(
String.format("http://localhost:%d", port)); String.format("http://localhost:%d", port));

View File

@ -19,11 +19,9 @@
package org.apache.hadoop.ozone.web.client; package org.apache.hadoop.ozone.web.client;
import org.apache.commons.io.FileUtils; import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
@ -41,7 +39,7 @@ public class TestVolumeRatis {
@Rule @Rule
public Timeout testTimeout = new Timeout(300000); public Timeout testTimeout = new Timeout(300000);
private static OzoneRestClient ozoneClient; private static OzoneRestClient ozoneClient;
private static MiniOzoneClassicCluster cluster; private static MiniOzoneCluster cluster;
@BeforeClass @BeforeClass
public static void init() throws Exception { public static void init() throws Exception {
@ -60,10 +58,10 @@ public static void init() throws Exception {
conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path); conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG); Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
cluster = new MiniOzoneClassicCluster.Builder(conf).numDataNodes(3) cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build(); cluster.waitForClusterToBeReady();
DataNode dataNode = cluster.getDataNodes().get(0); final int port = cluster.getHddsDatanodes().get(0)
final int port = dataNode.getInfoPort(); .getDatanodeDetails().getOzoneRestPort();
ozoneClient = new OzoneRestClient( ozoneClient = new OzoneRestClient(
String.format("http://localhost:%d", port)); String.format("http://localhost:%d", port));


@ -0,0 +1,18 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# log4j configuration used during build and unit tests
log4j.rootLogger=info,stdout
log4j.threshold=ALL
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n


@ -104,8 +104,8 @@ public class SQLCLI extends Configured implements Tool {
"VALUES (\"%s\", \"%s\")"; "VALUES (\"%s\", \"%s\")";
private static final String INSERT_DATANODE_INFO = private static final String INSERT_DATANODE_INFO =
"INSERT INTO datanodeInfo (hostname, datanodeUUid, ipAddress, " + "INSERT INTO datanodeInfo (hostname, datanodeUUid, ipAddress, " +
"containerPort,) " + "containerPort) " +
"VALUES (\"%s\", \"%s\", \"%s\", %d"; "VALUES (\"%s\", \"%s\", \"%s\", \"%d\")";
private static final String INSERT_CONTAINER_MEMBERS = private static final String INSERT_CONTAINER_MEMBERS =
"INSERT INTO containerMembers (containerName, datanodeUUID) " + "INSERT INTO containerMembers (containerName, datanodeUUID) " +
"VALUES (\"%s\", \"%s\")"; "VALUES (\"%s\", \"%s\")";
@ -644,7 +644,7 @@ private void insertNodePoolDB(Connection conn, String blockPool,
String insertDatanodeDetails = String String insertDatanodeDetails = String
.format(INSERT_DATANODE_INFO, datanodeDetails.getHostName(), .format(INSERT_DATANODE_INFO, datanodeDetails.getHostName(),
datanodeDetails.getUuid(), datanodeDetails.getIpAddress(), datanodeDetails.getUuidString(), datanodeDetails.getIpAddress(),
datanodeDetails.getContainerPort()); datanodeDetails.getContainerPort());
executeSQL(conn, insertDatanodeDetails); executeSQL(conn, insertDatanodeDetails);
} }
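The INSERT_DATANODE_INFO template was doubly malformed before this fix: a stray comma inside the column list ("containerPort,)") and a VALUES clause that never closed its parenthesis or quoted the port. With the corrected template, formatting a row yields valid SQL; the values below are hypothetical:

    String sql = String.format(
        "INSERT INTO datanodeInfo (hostname, datanodeUUid, ipAddress, " +
            "containerPort) VALUES (\"%s\", \"%s\", \"%s\", \"%d\")",
        "host1", "uuid-1234", "10.0.0.5", 9866);
    // -> INSERT INTO datanodeInfo (hostname, datanodeUUid, ipAddress,
    //    containerPort) VALUES ("host1", "uuid-1234", "10.0.0.5", "9866")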


@ -170,5 +170,30 @@
<artifactId>hadoop-mapreduce-client-jobclient</artifactId> <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
<scope>test</scope> <scope>test</scope>
</dependency> </dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdds-server-framework</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdds-server-scm</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdds-client</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdds-container-service</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-ozone-manager</artifactId>
<scope>test</scope>
</dependency>
</dependencies> </dependencies>
</project> </project>


@ -24,13 +24,12 @@
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.web.handlers.BucketArgs; import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.UserArgs; import org.apache.hadoop.ozone.web.handlers.UserArgs;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler; import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
@ -48,7 +47,7 @@
* Test OzoneFSInputStream by reading through multiple interfaces. * Test OzoneFSInputStream by reading through multiple interfaces.
*/ */
public class TestOzoneFSInputStream { public class TestOzoneFSInputStream {
private static MiniOzoneClassicCluster cluster = null; private static MiniOzoneCluster cluster = null;
private static FileSystem fs; private static FileSystem fs;
private static StorageHandler storageHandler; private static StorageHandler storageHandler;
private static Path filePath = null; private static Path filePath = null;
@ -66,10 +65,10 @@ public class TestOzoneFSInputStream {
public static void init() throws Exception { public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration(); OzoneConfiguration conf = new OzoneConfiguration();
conf.setLong(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB, 10); conf.setLong(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB, 10);
cluster = new MiniOzoneClassicCluster.Builder(conf) cluster = MiniOzoneCluster.newBuilder(conf)
.numDataNodes(10) .setNumDatanodes(10)
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
.build(); .build();
cluster.waitForClusterToBeReady();
storageHandler = storageHandler =
new ObjectStoreHandler(conf).getStorageHandler(); new ObjectStoreHandler(conf).getStorageHandler();
@ -88,9 +87,10 @@ public static void init() throws Exception {
storageHandler.createBucket(bucketArgs); storageHandler.createBucket(bucketArgs);
// Fetch the host and port for File System init // Fetch the host and port for File System init
DataNode dataNode = cluster.getDataNodes().get(0); DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
int port = dataNode.getInfoPort(); .getDatanodeDetails();
String host = dataNode.getDatanodeHostname(); int port = datanodeDetails.getOzoneRestPort();
String host = datanodeDetails.getHostName();
// Set the fs.defaultFS and start the filesystem // Set the fs.defaultFS and start the filesystem
String uri = String.format("%s://%s.%s/", String uri = String.format("%s://%s.%s/",


@ -23,6 +23,7 @@
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import org.junit.runner.RunWith; import org.junit.runner.RunWith;
@ -40,8 +41,6 @@
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.web.handlers.BucketArgs; import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.UserArgs; import org.apache.hadoop.ozone.web.handlers.UserArgs;
import org.apache.hadoop.ozone.web.handlers.VolumeArgs; import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
@ -82,7 +81,7 @@ public static Collection<Object[]> data() {
private boolean useAbsolutePath; private boolean useAbsolutePath;
private static MiniOzoneClassicCluster cluster = null; private static MiniOzoneCluster cluster = null;
private static FileSystem fs; private static FileSystem fs;
@ -97,10 +96,10 @@ public TestOzoneFileInterfaces(boolean setDefaultFs,
@Before @Before
public void init() throws Exception { public void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration(); OzoneConfiguration conf = new OzoneConfiguration();
cluster = new MiniOzoneClassicCluster.Builder(conf) cluster = MiniOzoneCluster.newBuilder(conf)
.numDataNodes(3) .setNumDatanodes(3)
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
.build(); .build();
cluster.waitForClusterToBeReady();
storageHandler = storageHandler =
new ObjectStoreHandler(conf).getStorageHandler(); new ObjectStoreHandler(conf).getStorageHandler();
@ -132,9 +131,11 @@ public void init() throws Exception {
@After @After
public void teardown() throws IOException { public void teardown() throws IOException {
if (cluster != null) {
cluster.shutdown();
}
IOUtils.closeQuietly(fs); IOUtils.closeQuietly(fs);
IOUtils.closeQuietly(storageHandler); IOUtils.closeQuietly(storageHandler);
IOUtils.closeQuietly(cluster);
} }
@Test @Test


@ -26,8 +26,7 @@
import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.ozone.Constants; import org.apache.hadoop.fs.ozone.Constants;
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.web.handlers.BucketArgs; import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.UserArgs; import org.apache.hadoop.ozone.web.handlers.UserArgs;
@ -45,7 +44,7 @@
*/ */
class OzoneContract extends AbstractFSContract { class OzoneContract extends AbstractFSContract {
private static MiniOzoneClassicCluster cluster; private static MiniOzoneCluster cluster;
private static StorageHandler storageHandler; private static StorageHandler storageHandler;
private static final String CONTRACT_XML = "contract/ozone.xml"; private static final String CONTRACT_XML = "contract/ozone.xml";
@ -70,10 +69,12 @@ public static void createCluster() throws IOException {
OzoneConfiguration conf = new OzoneConfiguration(); OzoneConfiguration conf = new OzoneConfiguration();
conf.addResource(CONTRACT_XML); conf.addResource(CONTRACT_XML);
cluster = cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
new MiniOzoneClassicCluster.Builder(conf).numDataNodes(5) try {
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build(); cluster.waitForClusterToBeReady();
cluster.waitClusterUp(); } catch (Exception e) {
throw new IOException(e);
}
storageHandler = new ObjectStoreHandler(conf).getStorageHandler(); storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
} }