HDDS-353. Multiple delete Blocks tests are failing consistently. Contributed by Lokesh Jain.

This commit is contained in:
Mukul Kumar Singh 2018-08-20 13:37:58 +05:30
parent 6425ed27ea
commit e3d73bbc24
11 changed files with 50 additions and 29 deletions

View File

@ -180,11 +180,11 @@ public final class HddsServerUtil {
* SCM.
*
* @param conf - Ozone Config
* @return - HB interval in seconds.
* @return - HB interval in milli seconds.
*/
public static long getScmHeartbeatInterval(Configuration conf) {
return conf.getTimeDuration(HDDS_HEARTBEAT_INTERVAL,
HDDS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
HDDS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
}
/**
@ -202,7 +202,7 @@ public final class HddsServerUtil {
long heartbeatThreadFrequencyMs = getScmheartbeatCheckerInterval(conf);
long heartbeatIntervalMs = getScmHeartbeatInterval(conf) * 1000;
long heartbeatIntervalMs = getScmHeartbeatInterval(conf);
// Make sure that StaleNodeInterval is configured way above the frequency

View File

@ -58,7 +58,7 @@ public class CommandStatusReportPublisher extends
getConf());
Preconditions.checkState(
heartbeatFrequency < cmdStatusReportInterval,
heartbeatFrequency <= cmdStatusReportInterval,
HDDS_COMMAND_STATUS_REPORT_INTERVAL +
" cannot be configured lower than heartbeat frequency.");
}

View File

@ -64,7 +64,7 @@ public class ContainerReportPublisher extends
getConf());
Preconditions.checkState(
heartbeatFrequency < containerReportInterval,
heartbeatFrequency <= containerReportInterval,
HDDS_CONTAINER_REPORT_INTERVAL +
" cannot be configured lower than heartbeat frequency.");
}

View File

@ -52,7 +52,7 @@ public class NodeReportPublisher extends ReportPublisher<NodeReportProto> {
getConf());
Preconditions.checkState(
heartbeatFrequency < nodeReportInterval,
heartbeatFrequency <= nodeReportInterval,
HDDS_NODE_REPORT_INTERVAL +
" cannot be configured lower than heartbeat frequency.");
}

View File

@ -86,8 +86,7 @@ public class DatanodeStateMachine implements Closeable {
.setNameFormat("Datanode State Machine Thread - %d").build());
connectionManager = new SCMConnectionManager(conf);
context = new StateContext(this.conf, DatanodeStates.getInitState(), this);
heartbeatFrequency = TimeUnit.SECONDS.toMillis(
getScmHeartbeatInterval(conf));
heartbeatFrequency = getScmHeartbeatInterval(conf);
container = new OzoneContainer(this.datanodeDetails,
new OzoneConfiguration(conf), context);
nextHB = new AtomicLong(Time.monotonicNow());

View File

@ -27,6 +27,7 @@ import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.time.ZonedDateTime;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
@ -203,11 +204,11 @@ public class EndpointStateMachine
this.incMissed();
if (this.getMissedCount() % getLogWarnInterval(conf) ==
0) {
LOG.error("Unable to communicate to SCM server at {}. We have not been " +
"able to communicate to this SCM server for past {} seconds.",
LOG.error(
"Unable to communicate to SCM server at {} for past {} seconds.",
this.getAddress().getHostString() + ":" + this.getAddress().getPort(),
this.getMissedCount() * getScmHeartbeatInterval(
this.conf), ex);
TimeUnit.MILLISECONDS.toSeconds(
this.getMissedCount() * getScmHeartbeatInterval(this.conf)), ex);
}
}

View File

@ -217,9 +217,10 @@ public class ContainerMapping implements Mapping {
// For close containers create pipeline from datanodes with replicas
Set<DatanodeDetails> dnWithReplicas = containerStateManager
.getContainerReplicas(contInfo.containerID());
pipeline = new Pipeline(dnWithReplicas.iterator().next().getHostName(),
contInfo.getState(), ReplicationType.STAND_ALONE,
contInfo.getReplicationFactor(), PipelineID.randomId());
pipeline =
new Pipeline(dnWithReplicas.iterator().next().getUuidString(),
contInfo.getState(), ReplicationType.STAND_ALONE,
contInfo.getReplicationFactor(), PipelineID.randomId());
dnWithReplicas.forEach(pipeline::addMember);
}
return new ContainerWithPipeline(contInfo, pipeline);

View File

@ -77,7 +77,7 @@ public class ContainerCloser {
this.isRunning = new AtomicBoolean(false);
this.reportInterval = this.configuration.getTimeDuration(
HDDS_CONTAINER_REPORT_INTERVAL,
HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
Preconditions.checkState(this.reportInterval > 0,
"report interval has to be greater than 0");
}
@ -100,7 +100,7 @@ public class ContainerCloser {
if (commandIssued.containsKey(info.getContainerID())) {
// We check if we issued a close command in last 3 * reportInterval secs.
long commandQueueTime = commandIssued.get(info.getContainerID());
long currentTime = TimeUnit.MILLISECONDS.toSeconds(Time.monotonicNow());
long currentTime = Time.monotonicNow();
if (currentTime > commandQueueTime + (MULTIPLIER * reportInterval)) {
commandIssued.remove(info.getContainerID());
mapCount.decrementAndGet();
@ -137,8 +137,7 @@ public class ContainerCloser {
PipelineID.getFromProtobuf(info.getPipelineID())));
}
if (!commandIssued.containsKey(info.getContainerID())) {
commandIssued.put(info.getContainerID(),
TimeUnit.MILLISECONDS.toSeconds(Time.monotonicNow()));
commandIssued.put(info.getContainerID(), Time.monotonicNow());
mapCount.incrementAndGet();
}
// run the hash map cleaner thread if needed, non-blocking call.
@ -156,7 +155,7 @@ public class ContainerCloser {
for (Map.Entry<Long, Long> entry : commandIssued.entrySet()) {
long commandQueueTime = entry.getValue();
if (commandQueueTime + (MULTIPLIER * reportInterval) >
TimeUnit.MILLISECONDS.toSeconds(Time.monotonicNow())) {
Time.monotonicNow()) {
// It is possible for this remove to fail due to race conditions.
// No big deal we will cleanup next time.

View File

@ -67,7 +67,8 @@ import org.junit.rules.Timeout;
import org.mockito.Mockito;
import org.apache.hadoop.test.GenericTestUtils;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds
.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.junit.Assert.fail;
/**
@ -188,7 +189,8 @@ public class TestStorageContainerManager {
public void testBlockDeletionTransactions() throws Exception {
int numKeys = 5;
OzoneConfiguration conf = new OzoneConfiguration();
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 5, TimeUnit.SECONDS);
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 100,
TimeUnit.MILLISECONDS);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
3000,
TimeUnit.MILLISECONDS);
@ -200,7 +202,9 @@ public class TestStorageContainerManager {
conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE,
numKeys);
MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf).build();
MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
.setHbInterval(100)
.build();
cluster.waitForClusterToBeReady();
try {
@ -212,6 +216,8 @@ public class TestStorageContainerManager {
TestStorageContainerManagerHelper helper =
new TestStorageContainerManagerHelper(cluster, conf);
Map<String, OmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
// Wait for container report
Thread.sleep(1000);
for (OmKeyInfo keyInfo : keyLocations.values()) {
OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(),
cluster.getStorageContainerManager());
@ -271,14 +277,15 @@ public class TestStorageContainerManager {
public void testBlockDeletingThrottling() throws Exception {
int numKeys = 15;
OzoneConfiguration conf = new OzoneConfiguration();
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
1000, TimeUnit.MILLISECONDS);
100, TimeUnit.MILLISECONDS);
conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE,
numKeys);
MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
.setHbInterval(5000)
.setHbInterval(1000)
.setHbProcessorInterval(3000)
.build();
cluster.waitForClusterToBeReady();
@ -298,6 +305,8 @@ public class TestStorageContainerManager {
TestStorageContainerManagerHelper helper =
new TestStorageContainerManagerHelper(cluster, conf);
Map<String, OmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
// Wait for container report
Thread.sleep(5000);
for (OmKeyInfo keyInfo : keyLocations.values()) {
OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(),
cluster.getStorageContainerManager());

View File

@ -60,7 +60,8 @@ import java.io.IOException;
import java.util.*;
import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds
.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.ozone
.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
@ -89,10 +90,13 @@ public class TestBlockDeletion {
conf.setQuietMode(false);
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
TimeUnit.MILLISECONDS);
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200,
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
TimeUnit.MILLISECONDS);
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(1)
.setHbInterval(200)
.build();
cluster.waitForClusterToBeReady();
store = OzoneClientFactory.getRpcClient(conf).getObjectStore();
dnContainerSet = cluster.getHddsDatanodes().get(0)

View File

@ -68,6 +68,7 @@ import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
@ -91,6 +92,8 @@ import java.util.Random;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import static org.apache.hadoop.hdds
.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
@ -137,13 +140,17 @@ public class TestKeys {
// Set short block deleting service interval to speed up deletions.
conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
1000, TimeUnit.MILLISECONDS);
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_GRPC_ENABLED_KEY,
shouldUseGrpc);
path = GenericTestUtils.getTempPath(TestKeys.class.getSimpleName());
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
ozoneCluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
ozoneCluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(1)
.setHbInterval(1000)
.build();
ozoneCluster.waitForClusterToBeReady();
client = new RpcClient(conf);
currentTime = Time.now();
@ -663,6 +670,7 @@ public class TestKeys {
}
@Test
@Ignore("Until delete background service is fixed.")
public void testDeleteKey() throws Exception {
OzoneManager ozoneManager = ozoneCluster.getOzoneManager();
// To avoid interference from other test cases,