HDDS-567. Rename Mapping to ContainerManager in SCM. Contributed by Nanda kumar.

Nanda kumar 2018-10-03 16:00:39 +05:30
parent 7b374482d2
commit 095c269620
36 changed files with 260 additions and 254 deletions
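The rename is mechanical: the Mapping interface becomes ContainerManager, its implementation ContainerMapping becomes SCMContainerManager, the accessor StorageContainerManager#getScmContainerManager() becomes getContainerManager(), and TestContainerMapping becomes TestSCMContainerManager. Throughout the diff, each block of removed lines appears directly above its renamed replacement. A minimal before/after sketch of a typical call site (names taken from the hunks below; the surrounding variables are illustrative only):

// Before this commit:
Mapping containerManager = scm.getScmContainerManager();
ContainerInfo info = containerManager.getContainer(containerID);

// After this commit:
ContainerManager containerManager = scm.getContainerManager();
ContainerInfo info = containerManager.getContainer(containerID);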

BlockManagerImpl.java

@ -21,7 +21,7 @@ import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.ScmUtils;
import org.apache.hadoop.hdds.scm.container.Mapping;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
@ -70,7 +70,7 @@ public class BlockManagerImpl implements EventHandler<Boolean>,
// by itself and does not rely on the Block service offered by SCM.
private final NodeManager nodeManager;
private final Mapping containerManager;
private final ContainerManager containerManager;
private final long containerSize;
@ -92,7 +92,7 @@ public class BlockManagerImpl implements EventHandler<Boolean>,
* @throws IOException
*/
public BlockManagerImpl(final Configuration conf,
final NodeManager nodeManager, final Mapping containerManager,
final NodeManager nodeManager, final ContainerManager containerManager,
EventPublisher eventPublisher)
throws IOException {
this.nodeManager = nodeManager;

DatanodeDeletedBlockTransactions.java

@ -17,7 +17,7 @@
package org.apache.hadoop.hdds.scm.block;
import com.google.common.collect.ArrayListMultimap;
import org.apache.hadoop.hdds.scm.container.Mapping;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
@ -42,15 +42,15 @@ public class DatanodeDeletedBlockTransactions {
private int maximumAllowedTXNum;
// Current counter of inserted TX.
private int currentTXNum;
private Mapping mappingService;
private ContainerManager containerManager;
// A list of TXs mapped to a certain datanode ID.
private final ArrayListMultimap<UUID, DeletedBlocksTransaction>
transactions;
DatanodeDeletedBlockTransactions(Mapping mappingService,
DatanodeDeletedBlockTransactions(ContainerManager containerManager,
int maximumAllowedTXNum, int nodeNum) {
this.transactions = ArrayListMultimap.create();
this.mappingService = mappingService;
this.containerManager = containerManager;
this.maximumAllowedTXNum = maximumAllowedTXNum;
this.nodeNum = nodeNum;
}
@ -60,7 +60,7 @@ public class DatanodeDeletedBlockTransactions {
Pipeline pipeline = null;
try {
ContainerWithPipeline containerWithPipeline =
mappingService.getContainerWithPipeline(tx.getContainerID());
containerManager.getContainerWithPipeline(tx.getContainerID());
if (containerWithPipeline.getContainerInfo().isContainerOpen()
|| containerWithPipeline.getPipeline().isEmpty()) {
return false;

DeletedBlockLogImpl.java

@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.protocol.proto
.DeleteBlockTransactionResult;
import org.apache.hadoop.hdds.scm.command
.CommandStatusReportHandler.DeleteBlockStatus;
import org.apache.hadoop.hdds.scm.container.Mapping;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.server.events.EventHandler;
import org.apache.hadoop.hdds.server.events.EventPublisher;
@ -92,15 +92,15 @@ public class DeletedBlockLogImpl
private final int maxRetry;
private final MetadataStore deletedStore;
private final Mapping containerManager;
private final ContainerManager containerManager;
private final Lock lock;
// The latest id of deleted blocks in the db.
private long lastTxID;
// Maps txId to set of DNs which are successful in committing the transaction
private Map<Long, Set<UUID>> transactionToDNsCommitMap;
public DeletedBlockLogImpl(Configuration conf, Mapping containerManager)
throws IOException {
public DeletedBlockLogImpl(Configuration conf,
ContainerManager containerManager) throws IOException {
maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY,
OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT);

SCMBlockDeletingService.java

@ -19,7 +19,7 @@ package org.apache.hadoop.hdds.scm.block;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.container.Mapping;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@ -63,7 +63,7 @@ public class SCMBlockDeletingService extends BackgroundService {
// ThreadPoolSize=2, 1 for scheduler and the other for the scanner.
private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 2;
private final DeletedBlockLog deletedBlockLog;
private final Mapping mappingService;
private final ContainerManager containerManager;
private final NodeManager nodeManager;
private final EventPublisher eventPublisher;
@ -81,12 +81,13 @@ public class SCMBlockDeletingService extends BackgroundService {
private int blockDeleteLimitSize;
public SCMBlockDeletingService(DeletedBlockLog deletedBlockLog,
Mapping mapper, NodeManager nodeManager, EventPublisher eventPublisher,
long interval, long serviceTimeout, Configuration conf) {
ContainerManager containerManager, NodeManager nodeManager,
EventPublisher eventPublisher, long interval, long serviceTimeout,
Configuration conf) {
super("SCMBlockDeletingService", interval, TimeUnit.MILLISECONDS,
BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout);
this.deletedBlockLog = deletedBlockLog;
this.mappingService = mapper;
this.containerManager = containerManager;
this.nodeManager = nodeManager;
this.eventPublisher = eventPublisher;
@ -139,7 +140,7 @@ public class SCMBlockDeletingService extends BackgroundService {
List<DatanodeDetails> datanodes = nodeManager.getNodes(NodeState.HEALTHY);
Map<Long, Long> transactionMap = null;
if (datanodes != null) {
transactions = new DatanodeDeletedBlockTransactions(mappingService,
transactions = new DatanodeDeletedBlockTransactions(containerManager,
blockDeleteLimitSize, datanodes.size());
try {
transactionMap = deletedBlockLog.getTransactions(transactions);
@ -174,7 +175,7 @@ public class SCMBlockDeletingService extends BackgroundService {
transactions.getTransactionIDList(dnId)));
}
}
mappingService.updateDeleteTransactionId(transactionMap);
containerManager.updateDeleteTransactionId(transactionMap);
}
if (dnTxCount > 0) {

CloseContainerEventHandler.java

@ -46,9 +46,9 @@ public class CloseContainerEventHandler implements EventHandler<ContainerID> {
LoggerFactory.getLogger(CloseContainerEventHandler.class);
private final Mapping containerManager;
private final ContainerManager containerManager;
public CloseContainerEventHandler(Mapping containerManager) {
public CloseContainerEventHandler(ContainerManager containerManager) {
this.containerManager = containerManager;
}

CloseContainerWatcher.java

@ -44,11 +44,11 @@ public class CloseContainerWatcher extends
public static final Logger LOG =
LoggerFactory.getLogger(CloseContainerWatcher.class);
private final Mapping containerManager;
private final ContainerManager containerManager;
public CloseContainerWatcher(Event<CloseContainerRetryableReq> startEvent,
Event<CloseContainerStatus> completionEvent,
LeaseManager<Long> leaseManager, Mapping containerManager) {
LeaseManager<Long> leaseManager, ContainerManager containerManager) {
super(startEvent, completionEvent, leaseManager);
this.containerManager = containerManager;
}

Mapping.java → ContainerManager.java

@ -33,10 +33,11 @@ import java.util.List;
import java.util.Map;
/**
* Mapping class contains the mapping from a name to a pipeline mapping. This is
* used by SCM when allocating new locations and when looking up a key.
* ContainerManager class contains the mapping from a name to a pipeline
* mapping. This is used by SCM when allocating new locations and when
* looking up a key.
*/
public interface Mapping extends Closeable {
public interface ContainerManager extends Closeable {
/**
* Returns the ContainerInfo from the container ID.
*
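Only the first javadoc fragment of the renamed interface is visible in this hunk. As a rough orientation, the sketch below reassembles part of the ContainerManager surface from call sites found elsewhere in this diff; the exact signatures (parameter types and names, return types, exception clauses) are guesses, not the actual file contents:

public interface ContainerManager extends Closeable {
  // Lookup and listing (see SCMClientProtocolServer below).
  ContainerInfo getContainer(long containerID) throws IOException;
  ContainerWithPipeline getContainerWithPipeline(long containerID)
      throws IOException;
  List<ContainerInfo> listContainer(long startContainerID, int count)
      throws IOException;

  // Allocation and lifecycle transitions (see the test classes below).
  ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType type,
      HddsProtos.ReplicationFactor factor, String owner) throws IOException;
  void updateContainerState(long containerID,
      HddsProtos.LifeCycleEvent event) throws IOException;
  void deleteContainer(long containerID) throws IOException;

  // Report and block-deletion bookkeeping (see the block package above).
  void processContainerReports(DatanodeDetails datanodeDetails,
      ContainerReportsProto reports, boolean isRegistration)
      throws IOException;
  void updateDeleteTransactionId(Map<Long, Long> deleteTransactionMap)
      throws IOException;

  // Collaborators handed out to other SCM components.
  ContainerStateManager getStateManager();
  PipelineSelector getPipelineSelector();
}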

ContainerReportHandler.java

@ -50,21 +50,21 @@ public class ContainerReportHandler implements
private final NodeManager nodeManager;
private final Mapping containerMapping;
private final ContainerManager containerManager;
private ContainerStateManager containerStateManager;
private ReplicationActivityStatus replicationStatus;
public ContainerReportHandler(Mapping containerMapping,
public ContainerReportHandler(ContainerManager containerManager,
NodeManager nodeManager,
ReplicationActivityStatus replicationActivityStatus) {
Preconditions.checkNotNull(containerMapping);
Preconditions.checkNotNull(containerManager);
Preconditions.checkNotNull(nodeManager);
Preconditions.checkNotNull(replicationActivityStatus);
this.containerStateManager = containerMapping.getStateManager();
this.containerStateManager = containerManager.getStateManager();
this.nodeManager = nodeManager;
this.containerMapping = containerMapping;
this.containerManager = containerManager;
this.replicationStatus = replicationActivityStatus;
}
@ -80,7 +80,7 @@ public class ContainerReportHandler implements
try {
//update state in container db and trigger close container events
containerMapping
containerManager
.processContainerReports(datanodeOrigin, containerReport, false);
Set<ContainerID> containerIds = containerReport.getReportsList().stream()

ContainerStateManager.java

@ -136,7 +136,7 @@ public class ContainerStateManager implements Closeable {
*/
@SuppressWarnings("unchecked")
public ContainerStateManager(Configuration configuration,
Mapping containerMapping, PipelineSelector pipelineSelector) {
ContainerManager containerManager, PipelineSelector pipelineSelector) {
// Initialize the container state machine.
Set<HddsProtos.LifeCycleState> finalStates = new HashSet();
@ -158,15 +158,15 @@ public class ContainerStateManager implements Closeable {
lastUsedMap = new ConcurrentHashMap<>();
containerCount = new AtomicLong(0);
containers = new ContainerStateMap();
loadExistingContainers(containerMapping, pipelineSelector);
loadExistingContainers(containerManager, pipelineSelector);
}
private void loadExistingContainers(Mapping containerMapping,
private void loadExistingContainers(ContainerManager containerManager,
PipelineSelector pipelineSelector) {
List<ContainerInfo> containerList;
try {
containerList = containerMapping.listContainer(0, Integer.MAX_VALUE);
containerList = containerManager.listContainer(0, Integer.MAX_VALUE);
// if there are no container to load, let us return.
if (containerList == null || containerList.size() == 0) {

ContainerMapping.java → SCMContainerManager.java

@ -74,12 +74,12 @@ import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB;
/**
* Mapping class contains the mapping from a name to a pipeline mapping. This
* is used by SCM when
* allocating new locations and when looking up a key.
* ContainerManager class contains the mapping from a name to a pipeline
* mapping. This is used by SCM when allocating new locations and when
* looking up a key.
*/
public class ContainerMapping implements Mapping {
private static final Logger LOG = LoggerFactory.getLogger(ContainerMapping
public class SCMContainerManager implements ContainerManager {
private static final Logger LOG = LoggerFactory.getLogger(SCMContainerManager
.class);
private final NodeManager nodeManager;
@ -108,7 +108,7 @@ public class ContainerMapping implements Mapping {
* @throws IOException on Failure.
*/
@SuppressWarnings("unchecked")
public ContainerMapping(
public SCMContainerManager(
final Configuration conf, final NodeManager nodeManager, final int
cacheSizeMB, EventPublisher eventPublisher) throws IOException {
this.nodeManager = nodeManager;
@ -653,7 +653,7 @@ public class ContainerMapping implements Mapping {
/**
* Since allocatedBytes of a container is only in memory, stored in
* containerStateManager, when closing ContainerMapping, we need to update
* containerStateManager, when closing SCMContainerManager, we need to update
* this in the container store.
*
* @throws IOException on failure.
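The comment above records why the class must flush on shutdown: allocatedBytes is tracked only in memory by the ContainerStateManager. A condensed sketch of that close-time pattern, assuming the flushContainerInfo() method that the tests later in this diff call; this is an illustration, not the actual close() body:

@Override
public void close() throws IOException {
  // Write the in-memory allocatedBytes of every container back to the
  // container store so the values survive an SCM restart.
  flushContainerInfo();
  // ... then release the container store and other resources ...
}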

SCMClientProtocolServer.java

@ -160,7 +160,7 @@ public class SCMClientProtocolServer implements
String remoteUser = getRpcRemoteUsername();
getScm().checkAdminAccess(remoteUser);
return scm.getScmContainerManager()
return scm.getContainerManager()
.allocateContainer(replicationType, factor, owner);
}
@ -168,7 +168,7 @@ public class SCMClientProtocolServer implements
public ContainerInfo getContainer(long containerID) throws IOException {
String remoteUser = getRpcRemoteUsername();
getScm().checkAdminAccess(remoteUser);
return scm.getScmContainerManager()
return scm.getContainerManager()
.getContainer(containerID);
}
@ -176,7 +176,7 @@ public class SCMClientProtocolServer implements
public ContainerWithPipeline getContainerWithPipeline(long containerID)
throws IOException {
if (chillModePrecheck.isInChillMode()) {
ContainerInfo contInfo = scm.getScmContainerManager()
ContainerInfo contInfo = scm.getContainerManager()
.getContainer(containerID);
if (contInfo.isContainerOpen()) {
if (!hasRequiredReplicas(contInfo)) {
@ -188,7 +188,7 @@ public class SCMClientProtocolServer implements
}
String remoteUser = getRpcRemoteUsername();
getScm().checkAdminAccess(remoteUser);
return scm.getScmContainerManager()
return scm.getContainerManager()
.getContainerWithPipeline(containerID);
}
@ -198,7 +198,7 @@ public class SCMClientProtocolServer implements
*/
private boolean hasRequiredReplicas(ContainerInfo contInfo) {
try{
return getScm().getScmContainerManager().getStateManager()
return getScm().getContainerManager().getStateManager()
.getContainerReplicas(contInfo.containerID())
.size() >= contInfo.getReplicationFactor().getNumber();
} catch (SCMException ex) {
@ -211,7 +211,7 @@ public class SCMClientProtocolServer implements
@Override
public List<ContainerInfo> listContainer(long startContainerID,
int count) throws IOException {
return scm.getScmContainerManager().
return scm.getContainerManager().
listContainer(startContainerID, count);
}
@ -219,7 +219,7 @@ public class SCMClientProtocolServer implements
public void deleteContainer(long containerID) throws IOException {
String remoteUser = getRpcRemoteUsername();
getScm().checkAdminAccess(remoteUser);
scm.getScmContainerManager().deleteContainer(containerID);
scm.getContainerManager().deleteContainer(containerID);
}
@ -257,10 +257,10 @@ public class SCMClientProtocolServer implements
.ObjectStageChangeRequestProto.Op.create) {
if (stage == StorageContainerLocationProtocolProtos
.ObjectStageChangeRequestProto.Stage.begin) {
scm.getScmContainerManager().updateContainerState(id, HddsProtos
scm.getContainerManager().updateContainerState(id, HddsProtos
.LifeCycleEvent.CREATE);
} else {
scm.getScmContainerManager().updateContainerState(id, HddsProtos
scm.getContainerManager().updateContainerState(id, HddsProtos
.LifeCycleEvent.CREATED);
}
} else {
@ -268,10 +268,10 @@ public class SCMClientProtocolServer implements
.ObjectStageChangeRequestProto.Op.close) {
if (stage == StorageContainerLocationProtocolProtos
.ObjectStageChangeRequestProto.Stage.begin) {
scm.getScmContainerManager().updateContainerState(id, HddsProtos
scm.getContainerManager().updateContainerState(id, HddsProtos
.LifeCycleEvent.FINALIZE);
} else {
scm.getScmContainerManager().updateContainerState(id, HddsProtos
scm.getContainerManager().updateContainerState(id, HddsProtos
.LifeCycleEvent.CLOSE);
}
}

SCMDatanodeProtocolServer.java

@ -196,7 +196,7 @@ public class SCMDatanodeProtocolServer implements
.register(datanodeDetails, nodeReport, pipelineReportsProto);
if (registeredCommand.getError()
== SCMRegisteredResponseProto.ErrorCode.success) {
scm.getScmContainerManager().processContainerReports(datanodeDetails,
scm.getContainerManager().processContainerReports(datanodeDetails,
containerReportsProto, true);
eventPublisher.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
new NodeRegistrationContainerReport(datanodeDetails,

StorageContainerManager.java

@ -40,9 +40,9 @@ import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
import org.apache.hadoop.hdds.scm.container.CloseContainerWatcher;
import org.apache.hadoop.hdds.scm.container.ContainerActionsHandler;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
import org.apache.hadoop.hdds.scm.container.Mapping;
import org.apache.hadoop.hdds.scm.container.replication
.ReplicationActivityStatus;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
@ -151,7 +151,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
* State Managers of SCM.
*/
private final NodeManager scmNodeManager;
private final Mapping scmContainerManager;
private final ContainerManager containerManager;
private final BlockManager scmBlockManager;
private final SCMStorage scmStorage;
@ -206,43 +206,43 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
scmNodeManager = new SCMNodeManager(
conf, scmStorage.getClusterID(), this, eventQueue);
scmContainerManager = new ContainerMapping(
containerManager = new SCMContainerManager(
conf, getScmNodeManager(), cacheSize, eventQueue);
scmBlockManager = new BlockManagerImpl(
conf, getScmNodeManager(), scmContainerManager, eventQueue);
conf, getScmNodeManager(), containerManager, eventQueue);
replicationStatus = new ReplicationActivityStatus();
CloseContainerEventHandler closeContainerHandler =
new CloseContainerEventHandler(scmContainerManager);
new CloseContainerEventHandler(containerManager);
NodeReportHandler nodeReportHandler =
new NodeReportHandler(scmNodeManager);
PipelineReportHandler pipelineReportHandler =
new PipelineReportHandler(
scmContainerManager.getPipelineSelector());
containerManager.getPipelineSelector());
CommandStatusReportHandler cmdStatusReportHandler =
new CommandStatusReportHandler();
NewNodeHandler newNodeHandler = new NewNodeHandler(scmNodeManager);
StaleNodeHandler staleNodeHandler =
new StaleNodeHandler(scmContainerManager.getPipelineSelector());
new StaleNodeHandler(containerManager.getPipelineSelector());
DeadNodeHandler deadNodeHandler = new DeadNodeHandler(scmNodeManager,
getScmContainerManager().getStateManager());
getContainerManager().getStateManager());
ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
PendingDeleteHandler pendingDeleteHandler =
new PendingDeleteHandler(scmBlockManager.getSCMBlockDeletingService());
ContainerReportHandler containerReportHandler =
new ContainerReportHandler(scmContainerManager, scmNodeManager,
new ContainerReportHandler(containerManager, scmNodeManager,
replicationStatus);
scmChillModeManager = new SCMChillModeManager(conf,
getScmContainerManager().getStateManager().getAllContainers(),
getContainerManager().getStateManager().getAllContainers(),
eventQueue);
PipelineActionEventHandler pipelineActionEventHandler =
new PipelineActionEventHandler();
PipelineCloseHandler pipelineCloseHandler =
new PipelineCloseHandler(scmContainerManager.getPipelineSelector());
new PipelineCloseHandler(containerManager.getPipelineSelector());
long watcherTimeout =
conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT,
@ -263,14 +263,14 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
new SCMContainerPlacementCapacity(scmNodeManager, conf);
replicationManager = new ReplicationManager(containerPlacementPolicy,
scmContainerManager.getStateManager(), eventQueue,
containerManager.getStateManager(), eventQueue,
commandWatcherLeaseManager);
// setup CloseContainer watcher
CloseContainerWatcher closeContainerWatcher =
new CloseContainerWatcher(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
SCMEvents.CLOSE_CONTAINER_STATUS, commandWatcherLeaseManager,
scmContainerManager);
containerManager);
closeContainerWatcher.start(eventQueue);
scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys
@ -632,7 +632,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
@VisibleForTesting
public ContainerInfo getContainerInfo(long containerID) throws
IOException {
return scmContainerManager.getContainer(containerID);
return containerManager.getContainer(containerID);
}
/**
@ -774,7 +774,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
} catch (Exception ex) {
LOG.error("SCM Event Queue stop failed", ex);
}
IOUtils.cleanupWithLogger(LOG, scmContainerManager);
IOUtils.cleanupWithLogger(LOG, containerManager);
}
/**
@ -805,8 +805,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
* Returns SCM container manager.
*/
@VisibleForTesting
public Mapping getScmContainerManager() {
return scmContainerManager;
public ContainerManager getContainerManager() {
return containerManager;
}
/**
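Condensed from the constructor hunks above, the wiring inside StorageContainerManager after the rename looks like this (calls exactly as they appear in this file's hunks, gathered here for readability):

containerManager = new SCMContainerManager(
    conf, getScmNodeManager(), cacheSize, eventQueue);
scmBlockManager = new BlockManagerImpl(
    conf, getScmNodeManager(), containerManager, eventQueue);
CloseContainerEventHandler closeContainerHandler =
    new CloseContainerEventHandler(containerManager);
StaleNodeHandler staleNodeHandler =
    new StaleNodeHandler(containerManager.getPipelineSelector());
ContainerReportHandler containerReportHandler =
    new ContainerReportHandler(containerManager, scmNodeManager,
        replicationStatus);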

TestBlockManager.java

@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.container.MockNodeManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@ -56,7 +56,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.MB;
* Tests for SCM Block Manager.
*/
public class TestBlockManager implements EventHandler<Boolean> {
private static ContainerMapping mapping;
private static SCMContainerManager mapping;
private static MockNodeManager nodeManager;
private static BlockManagerImpl blockManager;
private static File testDir;
@ -83,7 +83,7 @@ public class TestBlockManager implements EventHandler<Boolean> {
throw new IOException("Unable to create test directory path");
}
nodeManager = new MockNodeManager(true, 10);
mapping = new ContainerMapping(conf, nodeManager, 128, eventQueue);
mapping = new SCMContainerManager(conf, nodeManager, 128, eventQueue);
blockManager = new BlockManagerImpl(conf,
nodeManager, mapping, eventQueue);
eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, blockManager);

TestDeletedBlockLog.java

@ -19,8 +19,8 @@ package org.apache.hadoop.hdds.scm.block;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.Mapping;
import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
@ -72,7 +72,7 @@ public class TestDeletedBlockLog {
private static DeletedBlockLogImpl deletedBlockLog;
private OzoneConfiguration conf;
private File testDir;
private Mapping containerManager;
private ContainerManager containerManager;
private List<DatanodeDetails> dnList;
@Before
@ -82,7 +82,7 @@ public class TestDeletedBlockLog {
conf = new OzoneConfiguration();
conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
containerManager = Mockito.mock(ContainerMapping.class);
containerManager = Mockito.mock(SCMContainerManager.class);
deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager);
dnList = new ArrayList<>(3);
setupContainerManager();

TestCloseContainerEventHandler.java

@ -50,7 +50,7 @@ public class TestCloseContainerEventHandler {
private static Configuration configuration;
private static MockNodeManager nodeManager;
private static ContainerMapping mapping;
private static SCMContainerManager mapping;
private static long size;
private static File testDir;
private static EventQueue eventQueue;
@ -65,7 +65,7 @@ public class TestCloseContainerEventHandler {
configuration
.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
nodeManager = new MockNodeManager(true, 10);
mapping = new ContainerMapping(configuration, nodeManager, 128,
mapping = new SCMContainerManager(configuration, nodeManager, 128,
new EventQueue());
eventQueue = new EventQueue();
eventQueue.addHandler(CLOSE_CONTAINER,

TestContainerReportHandler.java

@ -74,10 +74,10 @@ public class TestContainerReportHandler implements EventPublisher {
public void test() throws IOException {
//GIVEN
OzoneConfiguration conf = new OzoneConfiguration();
Mapping mapping = Mockito.mock(Mapping.class);
ContainerManager containerManager = Mockito.mock(ContainerManager.class);
PipelineSelector selector = Mockito.mock(PipelineSelector.class);
when(mapping.getContainer(anyLong()))
when(containerManager.getContainer(anyLong()))
.thenAnswer(
(Answer<ContainerInfo>) invocation ->
new Builder()
@ -88,15 +88,15 @@ public class TestContainerReportHandler implements EventPublisher {
);
ContainerStateManager containerStateManager =
new ContainerStateManager(conf, mapping, selector);
new ContainerStateManager(conf, containerManager, selector);
when(mapping.getStateManager()).thenReturn(containerStateManager);
when(containerManager.getStateManager()).thenReturn(containerStateManager);
ReplicationActivityStatus replicationActivityStatus =
new ReplicationActivityStatus();
ContainerReportHandler reportHandler =
new ContainerReportHandler(mapping, nodeManager,
new ContainerReportHandler(containerManager, nodeManager,
replicationActivityStatus);
DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();

TestContainerStateManager.java

@ -41,7 +41,7 @@ public class TestContainerStateManager {
@Before
public void init() throws IOException {
OzoneConfiguration conf = new OzoneConfiguration();
Mapping mapping = Mockito.mock(Mapping.class);
ContainerManager mapping = Mockito.mock(ContainerManager.class);
PipelineSelector selector = Mockito.mock(PipelineSelector.class);
containerStateManager = new ContainerStateManager(conf, mapping, selector);

TestContainerMapping.java → TestSCMContainerManager.java

@ -58,10 +58,10 @@ import java.util.UUID;
import java.util.concurrent.TimeUnit;
/**
* Tests for Container Mapping.
* Tests for Container ContainerManager.
*/
public class TestContainerMapping {
private static ContainerMapping mapping;
public class TestSCMContainerManager {
private static SCMContainerManager containerManager;
private static MockNodeManager nodeManager;
private static File testDir;
private static XceiverClientManager xceiverClientManager;
@ -77,7 +77,7 @@ public class TestContainerMapping {
Configuration conf = SCMTestUtils.getConf();
testDir = GenericTestUtils
.getTestDir(TestContainerMapping.class.getSimpleName());
.getTestDir(TestSCMContainerManager.class.getSimpleName());
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
testDir.getAbsolutePath());
conf.setTimeDuration(
@ -89,7 +89,7 @@ public class TestContainerMapping {
throw new IOException("Unable to create test directory path");
}
nodeManager = new MockNodeManager(true, 10);
mapping = new ContainerMapping(conf, nodeManager, 128,
containerManager = new SCMContainerManager(conf, nodeManager, 128,
new EventQueue());
xceiverClientManager = new XceiverClientManager(conf);
random = new Random();
@ -97,8 +97,8 @@ public class TestContainerMapping {
@AfterClass
public static void cleanup() throws IOException {
if(mapping != null) {
mapping.close();
if(containerManager != null) {
containerManager.close();
}
FileUtil.fullyDelete(testDir);
}
@ -110,7 +110,7 @@ public class TestContainerMapping {
@Test
public void testallocateContainer() throws Exception {
ContainerWithPipeline containerInfo = mapping.allocateContainer(
ContainerWithPipeline containerInfo = containerManager.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
@ -127,7 +127,7 @@ public class TestContainerMapping {
*/
Set<UUID> pipelineList = new TreeSet<>();
for (int x = 0; x < 30; x++) {
ContainerWithPipeline containerInfo = mapping.allocateContainer(
ContainerWithPipeline containerInfo = containerManager.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
@ -142,7 +142,7 @@ public class TestContainerMapping {
@Test
public void testGetContainer() throws IOException {
ContainerWithPipeline containerInfo = mapping.allocateContainer(
ContainerWithPipeline containerInfo = containerManager.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
@ -155,10 +155,9 @@ public class TestContainerMapping {
@Test
public void testGetContainerWithPipeline() throws Exception {
ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
ContainerWithPipeline containerWithPipeline = containerManager
.allocateContainer(xceiverClientManager.getType(),
xceiverClientManager.getFactor(), containerOwner);
ContainerInfo contInfo = containerWithPipeline.getContainerInfo();
// Add dummy replicas for container.
DatanodeDetails dn1 = DatanodeDetails.newBuilder()
@ -169,28 +168,28 @@ public class TestContainerMapping {
.setHostName("host2")
.setIpAddress("2.2.2.2")
.setUuid(UUID.randomUUID().toString()).build();
mapping
containerManager
.updateContainerState(contInfo.getContainerID(), LifeCycleEvent.CREATE);
mapping.updateContainerState(contInfo.getContainerID(),
containerManager.updateContainerState(contInfo.getContainerID(),
LifeCycleEvent.CREATED);
mapping.updateContainerState(contInfo.getContainerID(),
containerManager.updateContainerState(contInfo.getContainerID(),
LifeCycleEvent.FINALIZE);
mapping
containerManager
.updateContainerState(contInfo.getContainerID(), LifeCycleEvent.CLOSE);
ContainerInfo finalContInfo = contInfo;
LambdaTestUtils.intercept(SCMException.class, "No entry exist for "
+ "containerId:", () -> mapping.getContainerWithPipeline(
+ "containerId:", () -> containerManager.getContainerWithPipeline(
finalContInfo.getContainerID()));
mapping.getStateManager().getContainerStateMap()
containerManager.getStateManager().getContainerStateMap()
.addContainerReplica(contInfo.containerID(), dn1, dn2);
contInfo = mapping.getContainer(contInfo.getContainerID());
contInfo = containerManager.getContainer(contInfo.getContainerID());
Assert.assertEquals(contInfo.getState(), LifeCycleState.CLOSED);
Pipeline pipeline = containerWithPipeline.getPipeline();
mapping.getPipelineSelector().finalizePipeline(pipeline);
containerManager.getPipelineSelector().finalizePipeline(pipeline);
ContainerWithPipeline containerWithPipeline2 = mapping
ContainerWithPipeline containerWithPipeline2 = containerManager
.getContainerWithPipeline(contInfo.getContainerID());
pipeline = containerWithPipeline2.getPipeline();
Assert.assertNotEquals(containerWithPipeline, containerWithPipeline2);
@ -202,24 +201,23 @@ public class TestContainerMapping {
@Test
public void testgetNoneExistentContainer() throws IOException {
thrown.expectMessage("Specified key does not exist.");
mapping.getContainer(random.nextLong());
containerManager.getContainer(random.nextLong());
}
@Test
public void testContainerCreationLeaseTimeout() throws IOException,
InterruptedException {
nodeManager.setChillmode(false);
ContainerWithPipeline containerInfo = mapping.allocateContainer(
ContainerWithPipeline containerInfo = containerManager.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
mapping.updateContainerState(containerInfo.getContainerInfo()
containerManager.updateContainerState(containerInfo.getContainerInfo()
.getContainerID(), HddsProtos.LifeCycleEvent.CREATE);
Thread.sleep(TIMEOUT + 1000);
NavigableSet<ContainerID> deleteContainers = mapping.getStateManager()
.getMatchingContainerIDs(
"OZONE",
NavigableSet<ContainerID> deleteContainers = containerManager
.getStateManager().getMatchingContainerIDs("OZONE",
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.DELETING);
@ -228,7 +226,7 @@ public class TestContainerMapping {
thrown.expect(IOException.class);
thrown.expectMessage("Lease Exception");
mapping
containerManager
.updateContainerState(containerInfo.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATED);
}
@ -258,25 +256,27 @@ public class TestContainerMapping {
.newBuilder();
crBuilder.addAllReports(reports);
mapping.processContainerReports(datanodeDetails, crBuilder.build(), false);
containerManager.processContainerReports(
datanodeDetails, crBuilder.build(), false);
ContainerInfo updatedContainer =
mapping.getContainer(info.getContainerID());
containerManager.getContainer(info.getContainerID());
Assert.assertEquals(100000000L,
updatedContainer.getNumberOfKeys());
Assert.assertEquals(2000000000L, updatedContainer.getUsedBytes());
for (StorageContainerDatanodeProtocolProtos.ContainerInfo c : reports) {
LambdaTestUtils.intercept(SCMException.class, "No entry "
+ "exist for containerId:", () -> mapping.getStateManager()
+ "exist for containerId:", () -> containerManager.getStateManager()
.getContainerReplicas(ContainerID.valueof(c.getContainerID())));
}
mapping.processContainerReports(TestUtils.randomDatanodeDetails(),
containerManager.processContainerReports(TestUtils.randomDatanodeDetails(),
crBuilder.build(), true);
for (StorageContainerDatanodeProtocolProtos.ContainerInfo c : reports) {
Assert.assertTrue(mapping.getStateManager().getContainerReplicas(
ContainerID.valueof(c.getContainerID())).size() > 0);
Assert.assertTrue(containerManager.getStateManager()
.getContainerReplicas(
ContainerID.valueof(c.getContainerID())).size() > 0);
}
}
@ -313,9 +313,10 @@ public class TestContainerMapping {
.newBuilder();
crBuilder.addAllReports(reports);
mapping.processContainerReports(datanodeDetails, crBuilder.build(), false);
containerManager.processContainerReports(
datanodeDetails, crBuilder.build(), false);
List<ContainerInfo> list = mapping.listContainer(0, 50);
List<ContainerInfo> list = containerManager.listContainer(0, 50);
Assert.assertEquals(2, list.stream().filter(
x -> x.getContainerID() == cID1 || x.getContainerID() == cID2).count());
Assert.assertEquals(300000000L, list.stream().filter(
@ -329,20 +330,18 @@ public class TestContainerMapping {
@Test
public void testCloseContainer() throws IOException {
ContainerInfo info = createContainer();
mapping.updateContainerState(info.getContainerID(),
containerManager.updateContainerState(info.getContainerID(),
HddsProtos.LifeCycleEvent.FINALIZE);
NavigableSet<ContainerID> pendingCloseContainers = mapping.getStateManager()
.getMatchingContainerIDs(
containerOwner,
NavigableSet<ContainerID> pendingCloseContainers = containerManager
.getStateManager().getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.CLOSING);
Assert.assertTrue(pendingCloseContainers.contains(info.containerID()));
mapping.updateContainerState(info.getContainerID(),
containerManager.updateContainerState(info.getContainerID(),
HddsProtos.LifeCycleEvent.CLOSE);
NavigableSet<ContainerID> closeContainers = mapping.getStateManager()
.getMatchingContainerIDs(
containerOwner,
NavigableSet<ContainerID> closeContainers = containerManager
.getStateManager().getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.CLOSED);
@ -350,20 +349,19 @@ public class TestContainerMapping {
}
/**
* Creates a container with the given name in ContainerMapping.
* Creates a container with the given name in SCMContainerManager.
* @throws IOException
*/
private ContainerInfo createContainer()
throws IOException {
nodeManager.setChillmode(false);
ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
ContainerWithPipeline containerWithPipeline = containerManager
.allocateContainer(xceiverClientManager.getType(),
xceiverClientManager.getFactor(), containerOwner);
ContainerInfo containerInfo = containerWithPipeline.getContainerInfo();
mapping.updateContainerState(containerInfo.getContainerID(),
containerManager.updateContainerState(containerInfo.getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
mapping.updateContainerState(containerInfo.getContainerID(),
containerManager.updateContainerState(containerInfo.getContainerID(),
HddsProtos.LifeCycleEvent.CREATED);
return containerInfo;
}
@ -371,10 +369,10 @@ public class TestContainerMapping {
@Test
public void testFlushAllContainers() throws IOException {
ContainerInfo info = createContainer();
List<ContainerInfo> containers = mapping.getStateManager()
List<ContainerInfo> containers = containerManager.getStateManager()
.getAllContainers();
Assert.assertTrue(containers.size() > 0);
mapping.flushContainerInfo();
containerManager.flushContainerInfo();
}
}

TestContainerPlacement.java

@ -24,7 +24,7 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.placement.algorithms
.ContainerPlacementPolicy;
@ -97,12 +97,13 @@ public class TestContainerPlacement {
return nodeManager;
}
ContainerMapping createContainerManager(Configuration config,
SCMContainerManager createContainerManager(Configuration config,
NodeManager scmNodeManager) throws IOException {
EventQueue eventQueue = new EventQueue();
final int cacheSize = config.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
return new ContainerMapping(config, scmNodeManager, cacheSize, eventQueue);
return new SCMContainerManager(config, scmNodeManager, cacheSize,
eventQueue);
}
@ -131,7 +132,7 @@ public class TestContainerPlacement {
SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
SCMNodeManager nodeManager = createNodeManager(conf);
ContainerMapping containerManager =
SCMContainerManager containerManager =
createContainerManager(conf, nodeManager);
List<DatanodeDetails> datanodes =
TestUtils.getListOfRegisteredDatanodeDetails(nodeManager, nodeCount);

TestDeadNodeHandler.java

@ -31,8 +31,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
import org.apache.hadoop.hdds.scm.container.Mapping;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest;
@ -69,7 +69,7 @@ public class TestDeadNodeHandler {
public void setup() throws IOException {
OzoneConfiguration conf = new OzoneConfiguration();
containerStateManager = new ContainerStateManager(conf,
Mockito.mock(Mapping.class),
Mockito.mock(ContainerManager.class),
Mockito.mock(PipelineSelector.class));
eventQueue = new EventQueue();
nodeManager = new SCMNodeManager(conf, "cluster1", null, eventQueue);

TestCloseContainerWatcher.java

@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler
.CloseContainerRetryableReq;
import org.apache.hadoop.hdds.scm.container.CloseContainerWatcher;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.server.events.EventHandler;
@ -61,8 +61,8 @@ public class TestCloseContainerWatcher implements EventHandler<ContainerID> {
private static EventWatcher<CloseContainerRetryableReq, CloseContainerStatus>
watcher;
private static LeaseManager<Long> leaseManager;
private static ContainerMapping containerMapping = Mockito
.mock(ContainerMapping.class);
private static SCMContainerManager containerManager = Mockito
.mock(SCMContainerManager.class);
private static EventQueue queue;
@Rule
public Timeout timeout = new Timeout(1000*15);
@ -230,7 +230,7 @@ public class TestCloseContainerWatcher implements EventHandler<ContainerID> {
time);
leaseManager.start();
watcher = new CloseContainerWatcher(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
SCMEvents.CLOSE_CONTAINER_STATUS, leaseManager, containerMapping);
SCMEvents.CLOSE_CONTAINER_STATUS, leaseManager, containerManager);
queue = new EventQueue();
watcher.start(queue);
}
@ -274,8 +274,8 @@ public class TestCloseContainerWatcher implements EventHandler<ContainerID> {
throws IOException {
ContainerInfo containerInfo = Mockito.mock(ContainerInfo.class);
ContainerInfo containerInfo2 = Mockito.mock(ContainerInfo.class);
when(containerMapping.getContainer(id1)).thenReturn(containerInfo);
when(containerMapping.getContainer(id2)).thenReturn(containerInfo2);
when(containerManager.getContainer(id1)).thenReturn(containerInfo);
when(containerManager.getContainer(id2)).thenReturn(containerInfo2);
when(containerInfo.isContainerOpen()).thenReturn(true);
when(containerInfo2.isContainerOpen()).thenReturn(isOpen);
}

TestContainerStateManagerIntegration.java

@ -55,7 +55,7 @@ public class TestContainerStateManagerIntegration {
private MiniOzoneCluster cluster;
private XceiverClientManager xceiverClientManager;
private StorageContainerManager scm;
private ContainerMapping scmContainerMapping;
private ContainerManager containerManager;
private ContainerStateManager containerStateManager;
private PipelineSelector selector;
private String containerOwner = "OZONE";
@ -69,9 +69,9 @@ public class TestContainerStateManagerIntegration {
cluster.waitTobeOutOfChillMode();
xceiverClientManager = new XceiverClientManager(conf);
scm = cluster.getStorageContainerManager();
scmContainerMapping = (ContainerMapping) scm.getScmContainerManager();
containerStateManager = scmContainerMapping.getStateManager();
selector = scmContainerMapping.getPipelineSelector();
containerManager = scm.getContainerManager();
containerStateManager = containerManager.getStateManager();
selector = containerManager.getPipelineSelector();
}
@After
@ -128,7 +128,7 @@ public class TestContainerStateManagerIntegration {
xceiverClientManager.getFactor(), containerOwner);
containers.add(container.getContainerInfo());
if (i >= 5) {
scm.getScmContainerManager().updateContainerState(container
scm.getContainerManager().updateContainerState(container
.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
}
@ -137,7 +137,7 @@ public class TestContainerStateManagerIntegration {
// New instance of ContainerStateManager should load all the containers in
// container store.
ContainerStateManager stateManager =
new ContainerStateManager(conf, scmContainerMapping, selector);
new ContainerStateManager(conf, containerManager, selector);
int matchCount = stateManager
.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@ -154,10 +154,10 @@ public class TestContainerStateManagerIntegration {
ContainerWithPipeline container1 = scm.getClientProtocolServer().
allocateContainer(xceiverClientManager.getType(),
xceiverClientManager.getFactor(), containerOwner);
scmContainerMapping
containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
scmContainerMapping
containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATED);
@ -179,10 +179,10 @@ public class TestContainerStateManagerIntegration {
Assert.assertEquals(container2.getContainerInfo().getContainerID(),
info.getContainerID());
scmContainerMapping
containerManager
.updateContainerState(container2.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
scmContainerMapping
containerManager
.updateContainerState(container2.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATED);
@ -216,7 +216,7 @@ public class TestContainerStateManagerIntegration {
HddsProtos.LifeCycleState.ALLOCATED).size();
Assert.assertEquals(1, containers);
scmContainerMapping
containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@ -224,7 +224,7 @@ public class TestContainerStateManagerIntegration {
HddsProtos.LifeCycleState.CREATING).size();
Assert.assertEquals(1, containers);
scmContainerMapping
containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATED);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@ -232,7 +232,7 @@ public class TestContainerStateManagerIntegration {
HddsProtos.LifeCycleState.OPEN).size();
Assert.assertEquals(1, containers);
scmContainerMapping
containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.FINALIZE);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@ -240,7 +240,7 @@ public class TestContainerStateManagerIntegration {
HddsProtos.LifeCycleState.CLOSING).size();
Assert.assertEquals(1, containers);
scmContainerMapping
containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CLOSE);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@ -248,7 +248,7 @@ public class TestContainerStateManagerIntegration {
HddsProtos.LifeCycleState.CLOSED).size();
Assert.assertEquals(1, containers);
scmContainerMapping
containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.DELETE);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@ -256,7 +256,7 @@ public class TestContainerStateManagerIntegration {
HddsProtos.LifeCycleState.DELETING).size();
Assert.assertEquals(1, containers);
scmContainerMapping
containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CLEANUP);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@ -270,10 +270,10 @@ public class TestContainerStateManagerIntegration {
.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(), containerOwner);
scmContainerMapping
containerManager
.updateContainerState(container2.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
scmContainerMapping
containerManager
.updateContainerState(container2.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.TIMEOUT);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@ -287,16 +287,16 @@ public class TestContainerStateManagerIntegration {
.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(), containerOwner);
scmContainerMapping
containerManager
.updateContainerState(container3.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
scmContainerMapping
containerManager
.updateContainerState(container3.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATED);
scmContainerMapping
containerManager
.updateContainerState(container3.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.FINALIZE);
scmContainerMapping
containerManager
.updateContainerState(container3.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CLOSE);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@ -310,10 +310,10 @@ public class TestContainerStateManagerIntegration {
ContainerWithPipeline container1 = scm.getClientProtocolServer()
.allocateContainer(xceiverClientManager.getType(),
xceiverClientManager.getFactor(), containerOwner);
scmContainerMapping.updateContainerState(container1
containerManager.updateContainerState(container1
.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
scmContainerMapping.updateContainerState(container1
containerManager.updateContainerState(container1
.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATED);
@ -330,8 +330,8 @@ public class TestContainerStateManagerIntegration {
Assert.assertEquals(container1.getContainerInfo().getContainerID(),
info.getContainerID());
ContainerMapping containerMapping =
(ContainerMapping) scmContainerMapping;
SCMContainerManager containerMapping =
(SCMContainerManager) containerManager;
// manually trigger a flush, this will persist the allocated bytes value
// to disk
containerMapping.flushContainerInfo();

TestNode2PipelineMap.java

@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers
.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
@ -51,7 +51,7 @@ public class TestNode2PipelineMap {
private static StorageContainerManager scm;
private static ContainerWithPipeline ratisContainer;
private static ContainerStateMap stateMap;
private static ContainerMapping mapping;
private static ContainerManager containerManager;
private static PipelineSelector pipelineSelector;
/**
@ -65,10 +65,11 @@ public class TestNode2PipelineMap {
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
cluster.waitForClusterToBeReady();
scm = cluster.getStorageContainerManager();
mapping = (ContainerMapping)scm.getScmContainerManager();
stateMap = mapping.getStateManager().getContainerStateMap();
ratisContainer = mapping.allocateContainer(RATIS, THREE, "testOwner");
pipelineSelector = mapping.getPipelineSelector();
containerManager = scm.getContainerManager();
stateMap = containerManager.getStateManager().getContainerStateMap();
ratisContainer = containerManager.allocateContainer(
RATIS, THREE, "testOwner");
pipelineSelector = containerManager.getPipelineSelector();
}
/**
@ -106,13 +107,13 @@ public class TestNode2PipelineMap {
// Now close the container and it should not show up while fetching
// containers by pipeline
mapping
containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.CREATE);
mapping
containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.CREATED);
mapping
containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.FINALIZE);
mapping
containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.CLOSE);
Set<ContainerID> set2 = pipelineSelector.getOpenContainerIDsByPipeline(
ratisContainer.getPipeline().getId());

TestNodeFailure.java

@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers
.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
@ -52,7 +52,7 @@ public class TestNodeFailure {
private static OzoneConfiguration conf;
private static ContainerWithPipeline ratisContainer1;
private static ContainerWithPipeline ratisContainer2;
private static ContainerMapping mapping;
private static ContainerManager containerManager;
private static long timeForFailure;
/**
@ -75,9 +75,11 @@ public class TestNodeFailure {
.build();
cluster.waitForClusterToBeReady();
StorageContainerManager scm = cluster.getStorageContainerManager();
mapping = (ContainerMapping)scm.getScmContainerManager();
ratisContainer1 = mapping.allocateContainer(RATIS, THREE, "testOwner");
ratisContainer2 = mapping.allocateContainer(RATIS, THREE, "testOwner");
containerManager = scm.getContainerManager();
ratisContainer1 = containerManager.allocateContainer(
RATIS, THREE, "testOwner");
ratisContainer2 = containerManager.allocateContainer(
RATIS, THREE, "testOwner");
// At this stage, there should be 2 pipeline one with 1 open container each.
// Try closing the both the pipelines, one with a closed container and
// the other with an open container.
@ -113,12 +115,12 @@ public class TestNodeFailure {
ratisContainer1.getPipeline().getLifeCycleState());
Assert.assertEquals(HddsProtos.LifeCycleState.OPEN,
ratisContainer2.getPipeline().getLifeCycleState());
Assert.assertNull(
mapping.getPipelineSelector().getPipeline(pipelineToFail.getId()));
Assert.assertNull(containerManager.getPipelineSelector()
.getPipeline(pipelineToFail.getId()));
// Now restart the datanode and make sure that a new pipeline is created.
cluster.restartHddsDatanode(dnToFail);
ContainerWithPipeline ratisContainer3 =
mapping.allocateContainer(RATIS, THREE, "testOwner");
containerManager.allocateContainer(RATIS, THREE, "testOwner");
//Assert that new container is not created from the ratis 2 pipeline
Assert.assertNotEquals(ratisContainer3.getPipeline().getId(),
ratisContainer2.getPipeline().getId());

TestPipelineClose.java

@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers
.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
@ -51,7 +51,7 @@ public class TestPipelineClose {
private static ContainerWithPipeline ratisContainer1;
private static ContainerWithPipeline ratisContainer2;
private static ContainerStateMap stateMap;
private static ContainerMapping mapping;
private static ContainerManager containerManager;
private static PipelineSelector pipelineSelector;
/**
@ -65,11 +65,13 @@ public class TestPipelineClose {
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(6).build();
cluster.waitForClusterToBeReady();
scm = cluster.getStorageContainerManager();
mapping = (ContainerMapping)scm.getScmContainerManager();
stateMap = mapping.getStateManager().getContainerStateMap();
ratisContainer1 = mapping.allocateContainer(RATIS, THREE, "testOwner");
ratisContainer2 = mapping.allocateContainer(RATIS, THREE, "testOwner");
pipelineSelector = mapping.getPipelineSelector();
containerManager = scm.getContainerManager();
stateMap = containerManager.getStateManager().getContainerStateMap();
ratisContainer1 = containerManager
.allocateContainer(RATIS, THREE, "testOwner");
ratisContainer2 = containerManager
.allocateContainer(RATIS, THREE, "testOwner");
pipelineSelector = containerManager.getPipelineSelector();
// At this stage, there should be 2 pipeline one with 1 open container each.
// Try closing the both the pipelines, one with a closed container and
// the other with an open container.
@ -98,13 +100,13 @@ public class TestPipelineClose {
// Now close the container and it should not show up while fetching
// containers by pipeline
mapping
containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.CREATE);
mapping
containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.CREATED);
mapping
containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.FINALIZE);
mapping
containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.CLOSE);
Set<ContainerID> setClosed = pipelineSelector.getOpenContainerIDsByPipeline(
@ -132,9 +134,9 @@ public class TestPipelineClose {
Assert.assertEquals(1, setOpen.size());
long cId2 = ratisContainer2.getContainerInfo().getContainerID();
mapping
containerManager
.updateContainerState(cId2, HddsProtos.LifeCycleEvent.CREATE);
mapping
containerManager
.updateContainerState(cId2, HddsProtos.LifeCycleEvent.CREATED);
pipelineSelector.finalizePipeline(ratisContainer2.getPipeline());
Assert.assertEquals(ratisContainer2.getPipeline().getLifeCycleState(),

TestSCMRestart.java

@ -20,7 +20,7 @@ package org.apache.hadoop.hdds.scm.pipeline;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.ozone.MiniOzoneCluster;
@ -46,8 +46,8 @@ public class TestSCMRestart {
private static OzoneConfiguration conf;
private static Pipeline ratisPipeline1;
private static Pipeline ratisPipeline2;
private static ContainerMapping mapping;
private static ContainerMapping newMapping;
private static ContainerManager containerManager;
private static ContainerManager newContainerManager;
/**
* Create a MiniOzoneCluster for testing.
@ -64,17 +64,17 @@ public class TestSCMRestart {
.build();
cluster.waitForClusterToBeReady();
StorageContainerManager scm = cluster.getStorageContainerManager();
mapping = (ContainerMapping)scm.getScmContainerManager();
ratisPipeline1 =
mapping.allocateContainer(RATIS, THREE, "Owner1").getPipeline();
ratisPipeline2 =
mapping.allocateContainer(RATIS, ONE, "Owner2").getPipeline();
containerManager = scm.getContainerManager();
ratisPipeline1 = containerManager.allocateContainer(
RATIS, THREE, "Owner1").getPipeline();
ratisPipeline2 = containerManager.allocateContainer(
RATIS, ONE, "Owner2").getPipeline();
// At this stage, there should be 2 pipelines, each with 1 open container.
// Try restarting the SCM and then verify that the pipelines are in the
// correct state.
cluster.restartStorageContainerManager();
newMapping = (ContainerMapping)(cluster.getStorageContainerManager()
.getScmContainerManager());
newContainerManager = cluster.getStorageContainerManager()
.getContainerManager();
}
/**
@ -90,10 +90,10 @@ public class TestSCMRestart {
@Test
public void testPipelineWithScmRestart() throws IOException {
// After restart, make sure that the pipelines are still present
Pipeline ratisPipeline1AfterRestart = newMapping.getPipelineSelector()
.getPipeline(ratisPipeline1.getId());
Pipeline ratisPipeline2AfterRestart = newMapping.getPipelineSelector()
.getPipeline(ratisPipeline2.getId());
Pipeline ratisPipeline1AfterRestart = newContainerManager
.getPipelineSelector().getPipeline(ratisPipeline1.getId());
Pipeline ratisPipeline2AfterRestart = newContainerManager
.getPipelineSelector().getPipeline(ratisPipeline2.getId());
Assert.assertNotSame(ratisPipeline1AfterRestart, ratisPipeline1);
Assert.assertNotSame(ratisPipeline2AfterRestart, ratisPipeline2);
Assert.assertEquals(ratisPipeline1AfterRestart, ratisPipeline1);
@ -111,9 +111,8 @@ public class TestSCMRestart {
// Try allocating a new ratis container; it should be placed on the same
// pipeline as before the restart
Pipeline newRatisPipeline =
newMapping.allocateContainer(RATIS, THREE, "Owner1")
.getPipeline();
Pipeline newRatisPipeline = newContainerManager
.allocateContainer(RATIS, THREE, "Owner1").getPipeline();
Assert.assertEquals(newRatisPipeline.getId(), ratisPipeline1.getId());
}
}
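The assertion pair in testPipelineWithScmRestart is the heart of the restart check: assertNotSame proves the pipeline object was rebuilt after restart rather than served from a stale cache, while assertEquals proves the rebuilt state matches what was persisted. A sketch of the check as a reusable helper, assuming JUnit 4's Assert and the Pipeline type used in this test:

// Sketch: verify a pipeline survived an SCM restart.
// `before` was captured pre-restart; `after` is re-fetched post-restart.
private static void assertPipelineRestored(Pipeline before, Pipeline after) {
  // A distinct object proves the state was reloaded, not cached.
  Assert.assertNotSame(before, after);
  // Equal content proves the reload reproduced the persisted pipeline.
  Assert.assertEquals(before, after);
}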

View File

@ -44,13 +44,13 @@ public class OzoneTestUtils {
StorageContainerManager scm) throws IOException {
return performOperationOnKeyContainers((blockID) -> {
try {
scm.getScmContainerManager()
scm.getContainerManager()
.updateContainerState(blockID.getContainerID(),
HddsProtos.LifeCycleEvent.FINALIZE);
scm.getScmContainerManager()
scm.getContainerManager()
.updateContainerState(blockID.getContainerID(),
HddsProtos.LifeCycleEvent.CLOSE);
Assert.assertFalse(scm.getScmContainerManager()
Assert.assertFalse(scm.getContainerManager()
.getContainerWithPipeline(blockID.getContainerID())
.getContainerInfo().isContainerOpen());
} catch (IOException e) {

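Unlike the freshly allocated containers in TestPipelineClose, the containers handled here are already open, so only the FINALIZE and CLOSE events are needed to reach the CLOSED state. A sketch of the per-block step performed inside the lambda above, with a hypothetical helper name:

// Sketch: close the container holding a given block, then verify it closed.
// Assumes `scm` is a live StorageContainerManager, as in the helper above.
private static void closeContainerForBlock(StorageContainerManager scm,
    BlockID blockID) throws IOException {
  long containerID = blockID.getContainerID();
  // An open container moves to CLOSING on FINALIZE, then CLOSED on CLOSE.
  scm.getContainerManager()
      .updateContainerState(containerID, HddsProtos.LifeCycleEvent.FINALIZE);
  scm.getContainerManager()
      .updateContainerState(containerID, HddsProtos.LifeCycleEvent.CLOSE);
  Assert.assertFalse(scm.getContainerManager()
      .getContainerWithPipeline(containerID)
      .getContainerInfo().isContainerOpen());
}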
View File

@ -445,7 +445,7 @@ public class TestOzoneRestClient {
// Sum the data size from chunks in the container via containerID
// and localID, and make sure it equals the actual value size.
Pipeline pipeline = cluster.getStorageContainerManager()
.getScmContainerManager().getContainerWithPipeline(containerID)
.getContainerManager().getContainerWithPipeline(containerID)
.getPipeline();
List<DatanodeDetails> datanodes = pipeline.getMachines();
Assert.assertEquals(datanodes.size(), 1);
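This lookup chain, container ID to ContainerWithPipeline to Pipeline to datanode list, recurs in the next several files of this commit. A sketch of it as a single helper, assuming the types shown above; the helper name is illustrative:

// Sketch: resolve the datanodes hosting a given container.
// Assumes `cluster` is a running MiniOzoneCluster.
private static List<DatanodeDetails> datanodesForContainer(
    MiniOzoneCluster cluster, long containerID) throws IOException {
  Pipeline pipeline = cluster.getStorageContainerManager()
      .getContainerManager()
      .getContainerWithPipeline(containerID)
      .getPipeline();
  // getMachines lists the datanodes participating in the pipeline.
  return pipeline.getMachines();
}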

View File

@ -301,7 +301,7 @@ public class TestCloseContainerHandlingByClient {
Assert.assertTrue(!containerIdList.isEmpty());
for (long containerID : containerIdList) {
Pipeline pipeline =
cluster.getStorageContainerManager().getScmContainerManager()
cluster.getStorageContainerManager().getContainerManager()
.getContainerWithPipeline(containerID).getPipeline();
pipelineList.add(pipeline);
List<DatanodeDetails> datanodes = pipeline.getMachines();
@ -349,7 +349,7 @@ public class TestCloseContainerHandlingByClient {
new ArrayList<>(groupOutputStream.getLocationInfoList());
long containerID = locationInfos.get(0).getContainerID();
List<DatanodeDetails> datanodes =
cluster.getStorageContainerManager().getScmContainerManager()
cluster.getStorageContainerManager().getContainerManager()
.getContainerWithPipeline(containerID).getPipeline().getMachines();
Assert.assertEquals(1, datanodes.size());
waitForContainerClose(keyName, key, HddsProtos.ReplicationType.STAND_ALONE);
@ -451,7 +451,7 @@ public class TestCloseContainerHandlingByClient {
groupOutputStream.getLocationInfoList();
long containerID = locationInfos.get(0).getContainerID();
List<DatanodeDetails> datanodes =
cluster.getStorageContainerManager().getScmContainerManager()
cluster.getStorageContainerManager().getContainerManager()
.getContainerWithPipeline(containerID).getPipeline().getMachines();
Assert.assertEquals(1, datanodes.size());
// move the container on the datanode to Closing state; this will ensure

View File

@ -580,7 +580,7 @@ public class TestOzoneRpcClient {
// Second, sum the data size from chunks in the container via containerID
// and localID, and make sure it equals the size from keyDetails.
Pipeline pipeline = cluster.getStorageContainerManager()
.getScmContainerManager().getContainerWithPipeline(containerID)
.getContainerManager().getContainerWithPipeline(containerID)
.getPipeline();
List<DatanodeDetails> datanodes = pipeline.getMachines();
Assert.assertEquals(datanodes.size(), 1);

View File

@ -72,7 +72,6 @@ import static org.apache.hadoop.hdds
.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL_DEFAULT;
import static org.apache.hadoop.ozone
.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
@ -242,7 +241,7 @@ public class TestBlockDeletion {
ContainerReportsProto dummyReport = dummyReportsBuilder.build();
logCapturer.clearOutput();
scm.getScmContainerManager().processContainerReports(
scm.getContainerManager().processContainerReports(
cluster.getHddsDatanodes().get(0).getDatanodeDetails(), dummyReport,
false);
// wait for event to be handled by event handler

View File

@ -102,7 +102,7 @@ public class TestCloseContainerByPipeline {
long containerID = omKeyLocationInfo.getContainerID();
Pipeline pipeline = cluster.getStorageContainerManager()
.getScmContainerManager().getContainerWithPipeline(containerID)
.getContainerManager().getContainerWithPipeline(containerID)
.getPipeline();
List<DatanodeDetails> datanodes = pipeline.getMachines();
Assert.assertEquals(datanodes.size(), 1);
@ -157,7 +157,7 @@ public class TestCloseContainerByPipeline {
long containerID = omKeyLocationInfo.getContainerID();
Pipeline pipeline = cluster.getStorageContainerManager()
.getScmContainerManager().getContainerWithPipeline(containerID)
.getContainerManager().getContainerWithPipeline(containerID)
.getPipeline();
List<DatanodeDetails> datanodes = pipeline.getMachines();
Assert.assertEquals(datanodes.size(), 1);
@ -214,7 +214,7 @@ public class TestCloseContainerByPipeline {
long containerID = omKeyLocationInfo.getContainerID();
Pipeline pipeline = cluster.getStorageContainerManager()
.getScmContainerManager().getContainerWithPipeline(containerID)
.getContainerManager().getContainerWithPipeline(containerID)
.getPipeline();
List<DatanodeDetails> datanodes = pipeline.getMachines();
Assert.assertEquals(3, datanodes.size());

View File

@ -81,7 +81,7 @@ public class TestCloseContainerHandler {
long containerID = omKeyLocationInfo.getContainerID();
Pipeline pipeline = cluster.getStorageContainerManager()
.getScmContainerManager().getContainerWithPipeline(containerID)
.getContainerManager().getContainerWithPipeline(containerID)
.getPipeline();
Assert.assertFalse(isContainerClosed(cluster, containerID));

View File

@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
@ -128,7 +128,7 @@ public class TestScmChillMode {
Map<String, OmKeyInfo> keyLocations = helper.createKeys(100, 4096);
final List<ContainerInfo> containers = cluster
.getStorageContainerManager()
.getScmContainerManager().getStateManager().getAllContainers();
.getContainerManager().getStateManager().getAllContainers();
GenericTestUtils.waitFor(() -> {
return containers.size() > 10;
}, 100, 1000);
@ -251,7 +251,7 @@ public class TestScmChillMode {
new TestStorageContainerManagerHelper(miniCluster, conf);
Map<String, OmKeyInfo> keyLocations = helper.createKeys(100 * 2, 4096);
final List<ContainerInfo> containers = miniCluster
.getStorageContainerManager().getScmContainerManager()
.getStorageContainerManager().getContainerManager()
.getStateManager().getAllContainers();
GenericTestUtils.waitFor(() -> {
return containers.size() > 10;
@ -264,8 +264,8 @@ public class TestScmChillMode {
containers.remove(3);
// Close remaining containers
ContainerMapping mapping = (ContainerMapping) miniCluster
.getStorageContainerManager().getScmContainerManager();
SCMContainerManager mapping = (SCMContainerManager) miniCluster
.getStorageContainerManager().getContainerManager();
containers.forEach(c -> {
try {
mapping.updateContainerState(c.getContainerID(),
@ -347,7 +347,7 @@ public class TestScmChillMode {
SCMClientProtocolServer clientProtocolServer = cluster
.getStorageContainerManager().getClientProtocolServer();
assertFalse((scm.getClientProtocolServer()).getChillModeStatus());
final List<ContainerInfo> containers = scm.getScmContainerManager()
final List<ContainerInfo> containers = scm.getContainerManager()
.getStateManager().getAllContainers();
scm.getEventQueue().fireEvent(SCMEvents.CHILL_MODE_STATUS, true);
GenericTestUtils.waitFor(() -> {

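One detail worth noting in this file: variables previously declared as the concrete ContainerMapping become SCMContainerManager, while the getter now returns the ContainerManager interface, so the downcast survives the rename. A sketch of that pattern, assuming a running MiniOzoneCluster and a hypothetical helper name:

// Sketch: downcast from the interface to the concrete SCM implementation,
// mirroring the cast in the test above.
private static void finalizeContainer(MiniOzoneCluster miniCluster,
    long containerID) throws IOException {
  SCMContainerManager scmContainerManager = (SCMContainerManager)
      miniCluster.getStorageContainerManager().getContainerManager();
  scmContainerManager.updateContainerState(containerID,
      HddsProtos.LifeCycleEvent.FINALIZE);
}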
View File

@ -17,6 +17,8 @@
*/
package org.apache.hadoop.ozone.scm;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.server.events.EventQueue;
@ -25,7 +27,6 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@ -83,7 +84,7 @@ public class TestContainerSQLCli {
private OzoneConfiguration conf;
private String datanodeIpAddress;
private ContainerMapping mapping;
private ContainerManager containerManager;
private NodeManager nodeManager;
private BlockManagerImpl blockManager;
@ -119,9 +120,10 @@ public class TestContainerSQLCli {
cluster.getStorageContainerManager().stop();
eventQueue = new EventQueue();
nodeManager = cluster.getStorageContainerManager().getScmNodeManager();
mapping = new ContainerMapping(conf, nodeManager, 128,
containerManager = new SCMContainerManager(conf, nodeManager, 128,
eventQueue);
blockManager = new BlockManagerImpl(conf, nodeManager, mapping, eventQueue);
blockManager = new BlockManagerImpl(
conf, nodeManager, containerManager, eventQueue);
eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, blockManager);
eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, false);
GenericTestUtils.waitFor(() -> {
@ -165,7 +167,7 @@ public class TestContainerSQLCli {
}
blockManager.close();
mapping.close();
containerManager.close();
nodeManager.close();
conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType);
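The wiring above is the standalone path used by this test: an SCMContainerManager is constructed directly (the 128 argument is carried over unchanged from the test) and handed to BlockManagerImpl, which after this commit depends only on the ContainerManager interface. A sketch of that wiring under the same assumptions (conf, nodeManager, and the enclosing method's throws IOException come from the test's setup):

// Sketch: standalone wiring of the renamed container manager.
EventQueue eventQueue = new EventQueue();
ContainerManager containerManager =
    new SCMContainerManager(conf, nodeManager, 128, eventQueue);
// BlockManagerImpl now takes the ContainerManager interface.
BlockManagerImpl blockManager =
    new BlockManagerImpl(conf, nodeManager, containerManager, eventQueue);
// Block allocation is gated on chill mode; the test switches it off.
eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, blockManager);
eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, false);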