+ TRACK_REPLICATE_COMMAND =
+ new TypedEvent<>(ReplicationManager.ReplicationRequestToRepeat.class);
+ /**
+ * This event comes from the Heartbeat dispatcher (in fact from the
+ * datanode) to notify the SCM that the replication is done. This is
+ * received by the replicate command watcher to mark the in-progress task
+ * as finished.
+ *
+ * TODO: Temporary event, should be replaced by a specific Heartbeat
+ * ActionRequired event.
+ */
+ public static final TypedEvent<ReplicationCompleted> REPLICATION_COMPLETE =
+ new TypedEvent<>(ReplicationCompleted.class);
+
/**
* Private Ctor. Never Constructed.
*/
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index aba64101741..f4cd448f160 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.block.BlockManager;
import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
@@ -38,7 +39,12 @@ import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
import org.apache.hadoop.hdds.scm.container.Mapping;
+import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms
+ .ContainerPlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms
+ .SCMContainerPlacementCapacity;
import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
@@ -61,9 +67,13 @@ import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.common.Storage.StorageState;
import org.apache.hadoop.ozone.common.StorageInfo;
+import org.apache.hadoop.ozone.lease.LeaseManager;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.StringUtils;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+ .HDDS_SCM_WATCHER_TIMEOUT_DEFAULT;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -153,6 +163,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
* Key = DatanodeUuid, value = ContainerStat.
*/
private Cache<String, ContainerStat> containerReportCache;
+ private final ReplicationManager replicationManager;
+ private final LeaseManager<Long> commandWatcherLeaseManager;
/**
* Creates a new StorageContainerManager. Configuration will be updated
@@ -207,6 +219,20 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
eventQueue.addHandler(SCMEvents.CMD_STATUS_REPORT, cmdStatusReportHandler);
+ long watcherTimeout =
+ conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT,
+ HDDS_SCM_WATCHER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
+
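+ // Lease manager used by the replication command watchers to time out
+ // and retry in-flight replication commands.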
+ commandWatcherLeaseManager = new LeaseManager<>(watcherTimeout);
+
+ //TODO: support configurable containerPlacement policy
+ ContainerPlacementPolicy containerPlacementPolicy =
+ new SCMContainerPlacementCapacity(scmNodeManager, conf);
+
+ replicationManager = new ReplicationManager(containerPlacementPolicy,
+ scmContainerManager.getStateManager(), eventQueue,
+ commandWatcherLeaseManager);
+
scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys
.OZONE_ADMINISTRATORS);
scmUsername = UserGroupInformation.getCurrentUser().getUserName();
@@ -552,7 +578,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
httpServer.start();
scmBlockManager.start();
-
+ replicationManager.start();
setStartTime();
}
@@ -561,6 +587,20 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
*/
public void stop() {
+ try {
+ LOG.info("Stopping Replication Manager Service.");
+ replicationManager.stop();
+ } catch (Exception ex) {
+ LOG.error("Replication manager service stop failed.", ex);
+ }
+
+ try {
+ LOG.info("Stopping Lease Manager of the command watchers");
+ commandWatcherLeaseManager.shutdown();
+ } catch (Exception ex) {
+ LOG.error("Lease Manager of the command watchers stop failed");
+ }
+
try {
LOG.info("Stopping datanode service RPC server");
getDatanodeProtocolServer().stop();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
new file mode 100644
index 00000000000..5966f2a6c1a
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container.placement.algorithms;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+
+import org.junit.Assert;
+import org.junit.Test;
+import static org.mockito.Matchers.anyObject;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.when;
+
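+/**
+ * Test for the capacity-aware container placement policy.
+ */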
+public class TestSCMContainerPlacementCapacity {
+ @Test
+ public void chooseDatanodes() throws SCMException {
+ //given
+ Configuration conf = new OzoneConfiguration();
+
+ List<DatanodeDetails> datanodes = new ArrayList<>();
+ for (int i = 0; i < 7; i++) {
+ datanodes.add(TestUtils.getDatanodeDetails());
+ }
+
+ NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
+ when(mockNodeManager.getNodes(NodeState.HEALTHY))
+ .thenReturn(new ArrayList<>(datanodes));
+
+ when(mockNodeManager.getNodeStat(anyObject()))
+ .thenReturn(new SCMNodeMetric(100L, 0L, 100L));
+ when(mockNodeManager.getNodeStat(datanodes.get(2)))
+ .thenReturn(new SCMNodeMetric(100L, 90L, 10L));
+ when(mockNodeManager.getNodeStat(datanodes.get(3)))
+ .thenReturn(new SCMNodeMetric(100L, 80L, 20L));
+ when(mockNodeManager.getNodeStat(datanodes.get(4)))
+ .thenReturn(new SCMNodeMetric(100L, 70L, 30L));
+
+ SCMContainerPlacementCapacity scmContainerPlacementRandom =
+ new SCMContainerPlacementCapacity(mockNodeManager, conf);
+
+ List<DatanodeDetails> existingNodes = new ArrayList<>();
+ existingNodes.add(datanodes.get(0));
+ existingNodes.add(datanodes.get(1));
+
+ Map<DatanodeDetails, Integer> selectedCount = new HashMap<>();
+ for (DatanodeDetails datanode : datanodes) {
+ selectedCount.put(datanode, 0);
+ }
+
+ for (int i = 0; i < 1000; i++) {
+
+ //when
+ List<DatanodeDetails> datanodeDetails =
+ scmContainerPlacementRandom.chooseDatanodes(existingNodes, 1, 15);
+
+ //then
+ Assert.assertEquals(1, datanodeDetails.size());
+ DatanodeDetails datanode0Details = datanodeDetails.get(0);
+
+ Assert.assertNotEquals(
+ "Datanode 0 should not have been selected: excluded by parameter",
+ datanodes.get(0), datanode0Details);
+ Assert.assertNotEquals(
+ "Datanode 1 should not have been selected: excluded by parameter",
+ datanodes.get(1), datanode0Details);
+ Assert.assertNotEquals(
+ "Datanode 2 should not have been selected: not enough space there",
+ datanodes.get(2), datanode0Details);
+
+ selectedCount
+ .put(datanode0Details, selectedCount.get(datanode0Details) + 1);
+
+ }
+
+ //datanode 4 has less space. It should be selected fewer times.
+ Assert.assertTrue(selectedCount.get(datanodes.get(3)) > selectedCount
+ .get(datanodes.get(6)));
+ Assert.assertTrue(selectedCount.get(datanodes.get(4)) > selectedCount
+ .get(datanodes.get(6)));
+ }
+}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
new file mode 100644
index 00000000000..430c181205f
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container.placement.algorithms;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+
+import org.junit.Assert;
+import org.junit.Test;
+import static org.mockito.Matchers.anyObject;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.when;
+
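+/**
+ * Test for the random container placement policy.
+ */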
+public class TestSCMContainerPlacementRandom {
+
+ @Test
+ public void chooseDatanodes() throws SCMException {
+ //given
+ Configuration conf = new OzoneConfiguration();
+
+ List<DatanodeDetails> datanodes = new ArrayList<>();
+ for (int i = 0; i < 5; i++) {
+ datanodes.add(TestUtils.getDatanodeDetails());
+ }
+
+ NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
+ when(mockNodeManager.getNodes(NodeState.HEALTHY))
+ .thenReturn(new ArrayList<>(datanodes));
+
+ when(mockNodeManager.getNodeStat(anyObject()))
+ .thenReturn(new SCMNodeMetric(100L, 0L, 100L));
+ when(mockNodeManager.getNodeStat(datanodes.get(2)))
+ .thenReturn(new SCMNodeMetric(100L, 90L, 10L));
+
+ SCMContainerPlacementRandom scmContainerPlacementRandom =
+ new SCMContainerPlacementRandom(mockNodeManager, conf);
+
+ List<DatanodeDetails> existingNodes = new ArrayList<>();
+ existingNodes.add(datanodes.get(0));
+ existingNodes.add(datanodes.get(1));
+
+ for (int i = 0; i < 100; i++) {
+ //when
+ List<DatanodeDetails> datanodeDetails =
+ scmContainerPlacementRandom.chooseDatanodes(existingNodes, 1, 15);
+
+ //then
+ Assert.assertEquals(1, datanodeDetails.size());
+ DatanodeDetails datanode0Details = datanodeDetails.get(0);
+
+ Assert.assertNotEquals(
+ "Datanode 0 should not have been selected: excluded by parameter",
+ datanodes.get(0), datanode0Details);
+ Assert.assertNotEquals(
+ "Datanode 1 should not have been selected: excluded by parameter",
+ datanodes.get(1), datanode0Details);
+ Assert.assertNotEquals(
+ "Datanode 2 should not have been selected: not enough space there",
+ datanodes.get(2), datanode0Details);
+
+ }
+ }
+}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
new file mode 100644
index 00000000000..e3e876b5e6b
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Objects;
+import java.util.UUID;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms
+ .ContainerPlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager
+ .ReplicationRequestToRepeat;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.ozone.lease.LeaseManager;
+import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+
+import com.google.common.base.Preconditions;
+import static org.apache.hadoop.hdds.scm.events.SCMEvents
+ .TRACK_REPLICATE_COMMAND;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import static org.mockito.Matchers.anyObject;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test behaviour of the ReplicationManager.
+ */
+public class TestReplicationManager {
+
+ private EventQueue queue;
+
+ private List<ReplicationRequestToRepeat> trackReplicationEvents;
+
+ private List<CommandForDatanode<ReplicateContainerCommandProto>> copyEvents;
+
+ private ContainerStateManager containerStateManager;
+
+ private ContainerPlacementPolicy containerPlacementPolicy;
+ private List<DatanodeDetails> listOfDatanodeDetails;
+
+ @Before
+ public void initReplicationManager() throws IOException {
+
+ listOfDatanodeDetails = TestUtils.getListOfDatanodeDetails(5);
+
+ containerPlacementPolicy =
+ (excludedNodes, nodesRequired, sizeRequired) -> listOfDatanodeDetails
+ .subList(2, 2 + nodesRequired);
+
+ containerStateManager = Mockito.mock(ContainerStateManager.class);
+
+ //container with 2 replicas
+ ContainerInfo containerInfo = new ContainerInfo.Builder()
+ .setState(LifeCycleState.CLOSED)
+ .build();
+
+ when(containerStateManager.getContainer(anyObject()))
+ .thenReturn(containerInfo);
+
+ queue = new EventQueue();
+
+ trackReplicationEvents = new ArrayList<>();
+ queue.addHandler(TRACK_REPLICATE_COMMAND,
+ (event, publisher) -> trackReplicationEvents.add(event));
+
+ copyEvents = new ArrayList<>();
+ queue.addHandler(SCMEvents.DATANODE_COMMAND,
+ (event, publisher) -> copyEvents.add(event));
+
+ }
+
+ @Test
+ public void testEventSending() throws InterruptedException, IOException {
+
+
+ //GIVEN
+
+ LeaseManager<Long> leaseManager = new LeaseManager<>(100000L);
+ try {
+ leaseManager.start();
+
+ ReplicationManager replicationManager =
+ new ReplicationManager(containerPlacementPolicy,
+ containerStateManager,
+ queue, leaseManager) {
+ @Override
+ protected List<DatanodeDetails> getCurrentReplicas(
+ ReplicationRequest request) throws IOException {
+ return listOfDatanodeDetails.subList(0, 2);
+ }
+ };
+ replicationManager.start();
+
+ //WHEN
+
+ queue.fireEvent(SCMEvents.REPLICATE_CONTAINER,
+ new ReplicationRequest(1L, (short) 2, System.currentTimeMillis(),
+ (short) 3));
+
+ Thread.sleep(500L);
+ queue.processAll(1000L);
+
+ //THEN
+
+ Assert.assertEquals(1, trackReplicationEvents.size());
+ Assert.assertEquals(1, copyEvents.size());
+ } finally {
+ if (leaseManager != null) {
+ leaseManager.shutdown();
+ }
+ }
+ }
+
+ @Test
+ public void testCommandWatcher() throws InterruptedException, IOException {
+
+ Logger.getRootLogger().setLevel(Level.DEBUG);
+ LeaseManager<Long> leaseManager = new LeaseManager<>(1000L);
+
+ try {
+ leaseManager.start();
+
+ ReplicationManager replicationManager =
+ new ReplicationManager(containerPlacementPolicy, containerStateManager,
+ queue, leaseManager) {
+ @Override
+ protected List<DatanodeDetails> getCurrentReplicas(
+ ReplicationRequest request) throws IOException {
+ return listOfDatanodeDetails.subList(0, 2);
+ }
+ };
+ replicationManager.start();
+
+ queue.fireEvent(SCMEvents.REPLICATE_CONTAINER,
+ new ReplicationRequest(1L, (short) 2, System.currentTimeMillis(),
+ (short) 3));
+
+ Thread.sleep(500L);
+
+ queue.processAll(1000L);
+
+ Assert.assertEquals(1, trackReplicationEvents.size());
+ Assert.assertEquals(1, copyEvents.size());
+
+ Assert.assertEquals(trackReplicationEvents.get(0).getId(),
+ copyEvents.get(0).getCommand().getId());
+
+ //event is timed out
+ Thread.sleep(1500);
+
+ queue.processAll(1000L);
+
+ //original copy command + retry
+ Assert.assertEquals(2, trackReplicationEvents.size());
+ Assert.assertEquals(2, copyEvents.size());
+
+ } finally {
+ if (leaseManager != null) {
+ leaseManager.shutdown();
+ }
+ }
+ }
+
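+ /**
+ * Creates a standalone test pipeline from the given datanodes; the first
+ * datanode is used as the leader.
+ */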
+ public static Pipeline createPipeline(Iterable<DatanodeDetails> ids)
+ throws IOException {
+ Objects.requireNonNull(ids, "ids == null");
+ final Iterator<DatanodeDetails> i = ids.iterator();
+ Preconditions.checkArgument(i.hasNext());
+ final DatanodeDetails leader = i.next();
+ String pipelineName = "TEST-" + UUID.randomUUID().toString().substring(3);
+ final Pipeline pipeline =
+ new Pipeline(leader.getUuidString(), LifeCycleState.OPEN,
+ ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineName);
+ pipeline.addMember(leader);
+ for (; i.hasNext(); ) {
+ pipeline.addMember(i.next());
+ }
+ return pipeline;
+ }
+
+}
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
similarity index 92%
rename from hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
index 6d74c683eeb..a593718f573 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.container.replication;
+package org.apache.hadoop.hdds.scm.container.replication;
import java.util.Random;
import java.util.UUID;
@@ -39,7 +39,7 @@ public class TestReplicationQueue {
}
@Test
- public void testDuplicateAddOp() {
+ public void testDuplicateAddOp() throws InterruptedException {
long contId = random.nextLong();
String nodeId = UUID.randomUUID().toString();
ReplicationRequest obj1, obj2, obj3;
@@ -53,12 +53,12 @@ public class TestReplicationQueue {
replicationQueue.add(obj3);
Assert.assertEquals("Should add only 1 msg as second one is duplicate",
1, replicationQueue.size());
- ReplicationRequest temp = replicationQueue.poll();
+ ReplicationRequest temp = replicationQueue.take();
Assert.assertEquals(temp, obj3);
}
@Test
- public void testPollOp() {
+ public void testPollOp() throws InterruptedException {
long contId = random.nextLong();
String nodeId = UUID.randomUUID().toString();
ReplicationRequest msg1, msg2, msg3, msg4, msg5;
@@ -82,19 +82,19 @@ public class TestReplicationQueue {
// Since Priority queue orders messages according to replication count,
// message with lowest replication should be first
ReplicationRequest temp;
- temp = replicationQueue.poll();
+ temp = replicationQueue.take();
Assert.assertEquals("Should have 2 objects",
2, replicationQueue.size());
Assert.assertEquals(temp, msg3);
- temp = replicationQueue.poll();
+ temp = replicationQueue.take();
Assert.assertEquals("Should have 1 objects",
1, replicationQueue.size());
Assert.assertEquals(temp, msg5);
// Message 2 should be ordered before message 5 as both have same replication
// number but message 2 has earlier timestamp.
- temp = replicationQueue.poll();
+ temp = replicationQueue.take();
Assert.assertEquals("Should have 0 objects",
replicationQueue.size(), 0);
Assert.assertEquals(temp, msg4);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
similarity index 93%
rename from hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
index 5b1fd0f43a9..1423c999381 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
@@ -19,5 +19,5 @@
/**
* SCM Testing and Mocking Utils.
*/
-package org.apache.hadoop.ozone.container.replication;
+package org.apache.hadoop.hdds.scm.container.replication;
// Test classes for Replication functionality.
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
index 651b77618e4..802f2ef05aa 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.OzoneConsts;
import org.junit.Assert;
import org.junit.Test;
+import java.util.ArrayList;
import java.util.List;
import java.util.Random;
@@ -86,11 +87,11 @@ public class TestContainerPlacement {
for (int x = 0; x < opsCount; x++) {
long containerSize = random.nextInt(100) * OzoneConsts.GB;
List<DatanodeDetails> nodesCapacity =
- capacityPlacer.chooseDatanodes(nodesRequired, containerSize);
+ capacityPlacer.chooseDatanodes(new ArrayList<>(), nodesRequired, containerSize);
assertEquals(nodesRequired, nodesCapacity.size());
List<DatanodeDetails> nodesRandom =
- randomPlacer.chooseDatanodes(nodesRequired, containerSize);
+ randomPlacer.chooseDatanodes(nodesCapacity, nodesRequired, containerSize);
// One fifth of all calls are delete
if (x % 5 == 0) {