From 0927bc4f76c6803be31855835a9191ab7ed47bc7 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Sun, 15 Jul 2018 10:34:00 -0700 Subject: [PATCH] HDDS-251. Integrate BlockDeletingService in KeyValueHandler. Contributed by Lokesh Jain --- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 5 +- .../src/main/resources/ozone-default.xml | 4 +- .../container/common/impl/ContainerSet.java | 16 +++-- .../common/interfaces/Container.java | 2 +- .../ContainerDeletionChoosingPolicy.java | 1 - .../container/keyvalue/KeyValueHandler.java | 24 +++++++ .../background/BlockDeletingService.java | 11 +++- .../common/TestBlockDeletingService.java | 26 ++++++-- .../TestContainerDeletionChoosingPolicy.java | 62 +++++++------------ .../commandhandler/TestBlockDeletion.java | 14 ++--- .../TestCloseContainerByPipeline.java | 18 +++--- .../TestCloseContainerHandler.java | 12 ++-- .../ozone/om/TestContainerReportWithKeys.java | 11 ++-- .../hadoop/ozone/web/client/TestKeys.java | 2 - 14 files changed, 117 insertions(+), 91 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 9725d2c9082..46eb8aaeee4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -231,8 +231,9 @@ public final class ScmConfigKeys { "ozone.scm.container.provision_batch_size"; public static final int OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE_DEFAULT = 20; - public static final String OZONE_SCM_CONTAINER_DELETION_CHOOSING_POLICY = - "ozone.scm.container.deletion-choosing.policy"; + public static final String + OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY = + "ozone.scm.keyvalue.container.deletion-choosing.policy"; public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT = "ozone.scm.container.creation.lease.timeout"; diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 1b6fb336c89..da3870e0234 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -541,13 +541,13 @@ The port number of the Ozone SCM client service. - ozone.scm.container.deletion-choosing.policy + ozone.scm.keyvalue.container.deletion-choosing.policy org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy OZONE, MANAGEMENT - The policy used for choosing desire containers for block deletion. + The policy used for choosing desired keyvalue containers for block deletion. Datanode selects some containers to process block deletion in a certain interval defined by ozone.block.deleting.service.interval. 
The number of containers to process in each interval is defined diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index bcba8c8ab58..7a6cb2db656 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -30,6 +30,8 @@ import org.apache.hadoop.hdds.protocol.proto import org.apache.hadoop.hdds.scm.container.common.helpers .StorageContainerException; import org.apache.hadoop.ozone.container.common.interfaces.Container; +import org.apache.hadoop.ozone.container.common + .interfaces.ContainerDeletionChoosingPolicy; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -247,9 +249,15 @@ public class ContainerSet { return state; } - // TODO: Implement BlockDeletingService - public List chooseContainerForBlockDeletion( - int count) throws StorageContainerException { - return null; + public List chooseContainerForBlockDeletion(int count, + ContainerDeletionChoosingPolicy deletionPolicy) + throws StorageContainerException { + Map containerDataMap = containerMap.entrySet().stream() + .filter(e -> e.getValue().getContainerType() + == ContainerProtos.ContainerType.KeyValueContainer) + .collect(Collectors.toMap(Map.Entry::getKey, + e -> e.getValue().getContainerData())); + return deletionPolicy + .chooseContainerForBlockDeletion(count, containerDataMap); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java index 03ed7b1136e..fe35e1d5374 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java @@ -68,7 +68,7 @@ public interface Container extends RwLock { * @return ContainerData - Container Data. * @throws StorageContainerException */ - ContainerData getContainerData() throws StorageContainerException; + ContainerData getContainerData(); /** * Get the Container Lifecycle state. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java index 25383686eed..dce86e9375e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java @@ -28,7 +28,6 @@ import java.util.Map; * This interface is used for choosing desired containers for * block deletion. 
*/ -// TODO: Fix ContainerDeletionChoosingPolicy to work with new StorageLayer public interface ContainerDeletionChoosingPolicy { /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 3806ed68cf9..84b36449703 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -62,6 +62,8 @@ import org.apache.hadoop.ozone.container.keyvalue.impl.KeyManagerImpl; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.apache.hadoop.ozone.container.keyvalue.interfaces.KeyManager; +import org.apache.hadoop.ozone.container.keyvalue.statemachine + .background.BlockDeletingService; import org.apache.hadoop.util.AutoCloseableLock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -71,6 +73,7 @@ import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantLock; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos @@ -90,6 +93,14 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .Stage; +import static org.apache.hadoop.ozone + .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone + .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; +import static org.apache.hadoop.ozone + .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; +import static org.apache.hadoop.ozone + .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; /** * Handler for KeyValue Container type. @@ -102,6 +113,7 @@ public class KeyValueHandler extends Handler { private final ContainerType containerType; private final KeyManager keyManager; private final ChunkManager chunkManager; + private final BlockDeletingService blockDeletingService; private VolumeChoosingPolicy volumeChoosingPolicy; private final int maxContainerSizeGB; private final AutoCloseableLock handlerLock; @@ -113,6 +125,18 @@ public class KeyValueHandler extends Handler { containerType = ContainerType.KeyValueContainer; keyManager = new KeyManagerImpl(config); chunkManager = new ChunkManagerImpl(); + long svcInterval = config + .getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, + OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT, + TimeUnit.MILLISECONDS); + long serviceTimeout = config + .getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, + OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); + this.blockDeletingService = + new BlockDeletingService(containerSet, svcInterval, serviceTimeout, + config); + blockDeletingService.start(); // TODO: Add supoort for different volumeChoosingPolicies. 
volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy(); maxContainerSizeGB = config.getInt(ScmConfigKeys diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java index 6aa54d15def..151ef9452bb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java @@ -19,10 +19,14 @@ package org.apache.hadoop.ozone.container.keyvalue.statemachine.background; import com.google.common.collect.Lists; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; +import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy; +import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.ratis.shaded.com.google.protobuf .InvalidProtocolBufferException; import org.apache.commons.io.FileUtils; @@ -69,6 +73,7 @@ public class BlockDeletingService extends BackgroundService{ LoggerFactory.getLogger(BlockDeletingService.class); ContainerSet containerSet; + private ContainerDeletionChoosingPolicy containerDeletionPolicy; private final Configuration conf; // Throttle number of blocks to delete per task, @@ -89,6 +94,10 @@ public class BlockDeletingService extends BackgroundService{ TimeUnit.MILLISECONDS, BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout); this.containerSet = containerSet; + containerDeletionPolicy = ReflectionUtils.newInstance(conf.getClass( + ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, + TopNOrderedContainerDeletionChoosingPolicy.class, + ContainerDeletionChoosingPolicy.class), conf); this.conf = conf; this.blockLimitPerTask = conf.getInt( OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, @@ -110,7 +119,7 @@ public class BlockDeletingService extends BackgroundService{ // The chosen result depends on what container deletion policy is // configured. 
containers = containerSet.chooseContainerForBlockDeletion( - containerLimitPerInterval); + containerLimitPerInterval, containerDeletionPolicy); LOG.info("Plan to choose {} containers for block deletion, " + "actually returns {} valid containers.", containerLimitPerInterval, containers.size()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 724a6822203..1ddd39ac208 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -21,17 +21,16 @@ import com.google.common.collect.Lists; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdfs.server.datanode.StorageLocation; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Container; +import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; +import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils; @@ -47,7 +46,6 @@ import org.apache.hadoop.utils.BackgroundService; import org.apache.hadoop.utils.MetadataKeyFilters; import org.apache.hadoop.utils.MetadataStore; import org.junit.Assert; -import org.junit.Ignore; import org.junit.Test; import org.junit.BeforeClass; import org.junit.Before; @@ -58,9 +56,9 @@ import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; import java.nio.charset.Charset; -import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -75,7 +73,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys * Tests to test block deleting service. 
*/ // TODO: Fix BlockDeletingService to work with new StorageLayer -@Ignore public class TestBlockDeletingService { private static final Logger LOG = @@ -120,6 +117,8 @@ public class TestBlockDeletingService { KeyValueContainerData data = new KeyValueContainerData(containerID, ContainerTestHelper.CONTAINER_MAX_SIZE_GB); Container container = new KeyValueContainer(data, conf); + container.create(new VolumeSet(UUID.randomUUID().toString(), conf), + new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString()); containerSet.addContainer(container); data = (KeyValueContainerData) containerSet.getContainer( containerID).getContainerData(); @@ -188,6 +187,9 @@ public class TestBlockDeletingService { @Test public void testBlockDeletion() throws Exception { Configuration conf = new OzoneConfiguration(); + conf.set( + ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, + RandomContainerDeletionChoosingPolicy.class.getName()); conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2); ContainerSet containerSet = new ContainerSet(); @@ -236,6 +238,9 @@ public class TestBlockDeletingService { @Test public void testShutdownService() throws Exception { Configuration conf = new OzoneConfiguration(); + conf.set( + ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, + RandomContainerDeletionChoosingPolicy.class.getName()); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500, TimeUnit.MILLISECONDS); conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); @@ -264,6 +269,9 @@ public class TestBlockDeletingService { @Test public void testBlockDeletionTimeout() throws Exception { Configuration conf = new OzoneConfiguration(); + conf.set( + ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, + RandomContainerDeletionChoosingPolicy.class.getName()); conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2); ContainerSet containerSet = new ContainerSet(); @@ -333,6 +341,9 @@ public class TestBlockDeletingService { // 1 block from 1 container can be deleted. Configuration conf = new OzoneConfiguration(); // Process 1 container per interval + conf.set( + ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, + RandomContainerDeletionChoosingPolicy.class.getName()); conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 1); conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 1); ContainerSet containerSet = new ContainerSet(); @@ -366,6 +377,9 @@ public class TestBlockDeletingService { // per container can be actually deleted. So it requires 2 waves // to cleanup all blocks. 
Configuration conf = new OzoneConfiguration(); + conf.set( + ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, + RandomContainerDeletionChoosingPolicy.class.getName()); conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2); ContainerSet containerSet = new ContainerSet(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java index c1615517ca4..b2e4c9a44d4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java @@ -27,29 +27,22 @@ import java.util.Random; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.container.ContainerTestHelper; +import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.utils.MetadataStore; import org.junit.Assert; import org.junit.Before; -import org.junit.Ignore; import org.junit.Test; /** * The class for testing container deletion choosing policy. 
*/ -@Ignore public class TestContainerDeletionChoosingPolicy { private static String path; private static ContainerSet containerSet; @@ -73,7 +66,8 @@ public class TestContainerDeletionChoosingPolicy { } Assert.assertTrue(containerDir.mkdirs()); - conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_DELETION_CHOOSING_POLICY, + conf.set( + ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, RandomContainerDeletionChoosingPolicy.class.getName()); List pathLists = new LinkedList<>(); pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath())); @@ -89,15 +83,17 @@ public class TestContainerDeletionChoosingPolicy { containerSet.getContainerMap().containsKey(data.getContainerID())); } - List result0 = containerSet - .chooseContainerForBlockDeletion(5); + ContainerDeletionChoosingPolicy deletionPolicy = + new RandomContainerDeletionChoosingPolicy(); + List result0 = + containerSet.chooseContainerForBlockDeletion(5, deletionPolicy); Assert.assertEquals(5, result0.size()); // test random choosing List result1 = containerSet - .chooseContainerForBlockDeletion(numContainers); + .chooseContainerForBlockDeletion(numContainers, deletionPolicy); List result2 = containerSet - .chooseContainerForBlockDeletion(numContainers); + .chooseContainerForBlockDeletion(numContainers, deletionPolicy); boolean hasShuffled = false; for (int i = 0; i < numContainers; i++) { @@ -118,12 +114,12 @@ public class TestContainerDeletionChoosingPolicy { } Assert.assertTrue(containerDir.mkdirs()); - conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_DELETION_CHOOSING_POLICY, + conf.set( + ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, TopNOrderedContainerDeletionChoosingPolicy.class.getName()); List pathLists = new LinkedList<>(); pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath())); containerSet = new ContainerSet(); - DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(); int numContainers = 10; Random random = new Random(); @@ -131,38 +127,28 @@ public class TestContainerDeletionChoosingPolicy { // create [numContainers + 1] containers for (int i = 0; i <= numContainers; i++) { long containerId = RandomUtils.nextLong(); - KeyValueContainerData data = new KeyValueContainerData(new Long(i), - ContainerTestHelper.CONTAINER_MAX_SIZE_GB); + KeyValueContainerData data = + new KeyValueContainerData(new Long(containerId), + ContainerTestHelper.CONTAINER_MAX_SIZE_GB); + if (i != numContainers) { + int deletionBlocks = random.nextInt(numContainers) + 1; + data.incrPendingDeletionBlocks(deletionBlocks); + name2Count.put(containerId, deletionBlocks); + } KeyValueContainer container = new KeyValueContainer(data, conf); containerSet.addContainer(container); Assert.assertTrue( containerSet.getContainerMap().containsKey(containerId)); - - // don't create deletion blocks in the last container. 
- if (i == numContainers) { - break; - } - - // create random number of deletion blocks and write to container db - int deletionBlocks = random.nextInt(numContainers) + 1; - // record value - name2Count.put(containerId, deletionBlocks); - for (int j = 0; j <= deletionBlocks; j++) { - MetadataStore metadata = KeyUtils.getDB(data, conf); - String blk = "blk" + i + "-" + j; - byte[] blkBytes = DFSUtil.string2Bytes(blk); - metadata.put( - DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + blk), - blkBytes); - } } - List result0 = containerSet - .chooseContainerForBlockDeletion(5); + ContainerDeletionChoosingPolicy deletionPolicy = + new TopNOrderedContainerDeletionChoosingPolicy(); + List result0 = + containerSet.chooseContainerForBlockDeletion(5, deletionPolicy); Assert.assertEquals(5, result0.size()); List result1 = containerSet - .chooseContainerForBlockDeletion(numContainers + 1); + .chooseContainerForBlockDeletion(numContainers + 1, deletionPolicy); // the empty deletion blocks container should not be chosen Assert.assertEquals(numContainers, result1.size()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index c60c6c4cee0..4ae827bd412 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -47,7 +47,6 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.utils.MetadataStore; import org.junit.Assert; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Test; import java.io.File; @@ -58,11 +57,10 @@ import java.util.function.Consumer; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -@Ignore("Need to be fixed according to ContainerIO") public class TestBlockDeletion { private static OzoneConfiguration conf = null; private static ObjectStore store; - private static ContainerSet dnContainerManager = null; + private static ContainerSet dnContainerSet = null; private static StorageContainerManager scm = null; private static OzoneManager om = null; private static Set containerIdsWithDeletedBlocks; @@ -88,7 +86,7 @@ public class TestBlockDeletion { MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build(); cluster.waitForClusterToBeReady(); store = OzoneClientFactory.getRpcClient(conf).getObjectStore(); - dnContainerManager = cluster.getHddsDatanodes().get(0) + dnContainerSet = cluster.getHddsDatanodes().get(0) .getDatanodeStateMachine().getContainer().getContainerSet(); om = cluster.getOzoneManager(); scm = cluster.getStorageContainerManager(); @@ -140,7 +138,7 @@ public class TestBlockDeletion { private void matchContainerTransactionIds() throws IOException { List containerDataList = new ArrayList<>(); - dnContainerManager.listContainer(0, 10000, containerDataList); + dnContainerSet.listContainer(0, 10000, containerDataList); for (ContainerData containerData : containerDataList) { long containerId = containerData.getContainerID(); if (containerIdsWithDeletedBlocks.contains(containerId)) { @@ -150,7 +148,7 @@ public class TestBlockDeletion { Assert.assertEquals( scm.getContainerInfo(containerId).getDeleteTransactionId(), 0); } - 
Assert.assertEquals(dnContainerManager.getContainer(containerId) + Assert.assertEquals(dnContainerSet.getContainer(containerId) .getContainerData().getDeleteTransactionId(), scm.getContainerInfo(containerId).getDeleteTransactionId()); } @@ -162,7 +160,7 @@ public class TestBlockDeletion { return performOperationOnKeyContainers((blockID) -> { try { MetadataStore db = KeyUtils.getDB((KeyValueContainerData) - dnContainerManager.getContainer(blockID.getContainerID()) + dnContainerSet.getContainer(blockID.getContainerID()) .getContainerData(), conf); Assert.assertNotNull(db.get(Longs.toByteArray(blockID.getLocalID()))); } catch (IOException e) { @@ -177,7 +175,7 @@ public class TestBlockDeletion { return performOperationOnKeyContainers((blockID) -> { try { MetadataStore db = KeyUtils.getDB((KeyValueContainerData) - dnContainerManager.getContainer(blockID.getContainerID()) + dnContainerSet.getContainer(blockID.getContainerID()) .getContainerData(), conf); Assert.assertNull(db.get(Longs.toByteArray(blockID.getLocalID()))); Assert.assertNull(db.get(DFSUtil.string2Bytes( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index 30b18c23fb9..61bd93557cf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -252,17 +252,13 @@ public class TestCloseContainerByPipeline { private Boolean isContainerClosed(MiniOzoneCluster cluster, long containerID, DatanodeDetails datanode) { ContainerData containerData; - try { - for (HddsDatanodeService datanodeService : cluster.getHddsDatanodes()) - if (datanode.equals(datanodeService.getDatanodeDetails())) { - containerData = - datanodeService.getDatanodeStateMachine().getContainer() - .getContainerSet().getContainer(containerID).getContainerData(); - return !containerData.isOpen(); - } - } catch (StorageContainerException e) { - throw new AssertionError(e); - } + for (HddsDatanodeService datanodeService : cluster.getHddsDatanodes()) + if (datanode.equals(datanodeService.getDatanodeDetails())) { + containerData = + datanodeService.getDatanodeStateMachine().getContainer() + .getContainerSet().getContainer(containerID).getContainerData(); + return !containerData.isOpen(); + } return false; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java index 682bd6358af..c0c9bc41347 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java @@ -102,14 +102,10 @@ public class TestCloseContainerHandler { private Boolean isContainerClosed(MiniOzoneCluster cluster, long containerID) { ContainerData containerData; - try { - containerData = cluster.getHddsDatanodes().get(0) - 
.getDatanodeStateMachine().getContainer().getContainerSet() - .getContainer(containerID).getContainerData(); - return !containerData.isOpen(); - } catch (StorageContainerException e) { - throw new AssertionError(e); - } + containerData = cluster.getHddsDatanodes().get(0) + .getDatanodeStateMachine().getContainer().getContainerSet() + .getContainer(containerID).getContainerData(); + return !containerData.isOpen(); } } \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java index c25b00ec6e9..c66b3de0f5c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java @@ -131,13 +131,10 @@ public class TestContainerReportWithKeys { private static ContainerData getContainerData(long containerID) { ContainerData containerData; - try { - ContainerSet containerManager = cluster.getHddsDatanodes().get(0) - .getDatanodeStateMachine().getContainer().getContainerSet(); - containerData = containerManager.getContainer(containerID).getContainerData(); - } catch (StorageContainerException e) { - throw new AssertionError(e); - } + ContainerSet containerManager = cluster.getHddsDatanodes().get(0) + .getDatanodeStateMachine().getContainer().getContainerSet(); + containerData = + containerManager.getContainer(containerID).getContainerData(); return containerData; } } \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java index c144db20d25..540a56496bb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java @@ -67,7 +67,6 @@ import org.apache.log4j.Logger; import org.junit.After; import org.junit.Assert; import org.junit.Before; -import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; @@ -663,7 +662,6 @@ public class TestKeys { } @Test - @Ignore("Needs to be fixed for new SCM and Storage design") public void testDeleteKey() throws Exception { OzoneManager ozoneManager = ozoneCluster.getOzoneManager(); // To avoid interference from other test cases,
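
For quick local verification of the renamed configuration key, the following is a minimal sketch (not part of the patch) of how ozone.scm.keyvalue.container.deletion-choosing.policy is consumed, pieced together from the BlockDeletingService constructor and test changes above. It assumes the classes referenced in this diff (OzoneConfiguration, ScmConfigKeys, ReflectionUtils, and the two policy implementations) are on the classpath; the class name DeletionPolicySketch is illustrative only.

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.ScmConfigKeys;
    import org.apache.hadoop.ozone.container.common.impl.RandomContainerDeletionChoosingPolicy;
    import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy;
    import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy;
    import org.apache.hadoop.util.ReflectionUtils;

    public class DeletionPolicySketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // The tests in this patch override the policy with the same setting.
        conf.set(
            ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
            RandomContainerDeletionChoosingPolicy.class.getName());

        // BlockDeletingService resolves the configured policy class reflectively,
        // falling back to TopNOrderedContainerDeletionChoosingPolicy.
        ContainerDeletionChoosingPolicy policy = ReflectionUtils.newInstance(
            conf.getClass(
                ScmConfigKeys
                    .OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
                TopNOrderedContainerDeletionChoosingPolicy.class,
                ContainerDeletionChoosingPolicy.class),
            conf);
        System.out.println("Deletion policy in use: "
            + policy.getClass().getName());
      }
    }

This mirrors the resolution logic added to the BlockDeletingService constructor; the datanode-side service started by KeyValueHandler then passes the resolved policy to ContainerSet.chooseContainerForBlockDeletion on each run.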