HDDS-259. Implement ContainerReportPublisher and NodeReportPublisher. Contributed by Nanda kumar.

This commit is contained in:
Xiaoyu Yao 2018-07-20 09:07:58 -07:00
parent e9c44ecfc6
commit 68b57ad32c
16 changed files with 162 additions and 74 deletions

View File

@ -17,15 +17,35 @@
*/
package org.apache.hadoop.hdds;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
/**
* Config class for HDDS.
* This class contains constants for configuration keys and default values
* used in hdds.
*/
public final class HddsConfigKeys {
/**
* Do not instantiate.
*/
private HddsConfigKeys() {
}
public static final String HDDS_HEARTBEAT_INTERVAL =
"hdds.heartbeat.interval";
public static final String HDDS_HEARTBEAT_INTERVAL_DEFAULT =
"30s";
public static final String HDDS_NODE_REPORT_INTERVAL =
"hdds.node.report.interval";
public static final String HDDS_NODE_REPORT_INTERVAL_DEFAULT =
"60s";
public static final String HDDS_CONTAINER_REPORT_INTERVAL =
"hdds.container.report.interval";
public static final String HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT =
"60s";
public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL =
"hdds.command.status.report.interval";
public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT =
ScmConfigKeys.OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT;
"60s";
}
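For context, a minimal sketch of how these keys are consumed, assuming Hadoop's standard Configuration.getTimeDuration, which accepts a suffixed string default such as "30s" and converts it to the requested unit (the class name below is illustrative, not part of this commit):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsConfigKeys;

public final class HddsIntervalExample {
  private HddsIntervalExample() {
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Unset keys fall back to the suffixed default declared above ("30s"),
    // converted here to seconds -> 30.
    long heartbeatSeconds = conf.getTimeDuration(
        HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL,
        HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL_DEFAULT,
        TimeUnit.SECONDS);
    System.out.println("Heartbeat interval: " + heartbeatSeconds + " s");
  }
}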

View File

@ -156,11 +156,6 @@ public final class ScmConfigKeys {
"ozone.scm.handler.count.key";
public static final int OZONE_SCM_HANDLER_COUNT_DEFAULT = 10;
public static final String OZONE_SCM_HEARTBEAT_INTERVAL =
"ozone.scm.heartbeat.interval";
public static final String OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT =
"30s";
public static final String OZONE_SCM_DEADNODE_INTERVAL =
"ozone.scm.dead.node.interval";
public static final String OZONE_SCM_DEADNODE_INTERVAL_DEFAULT =

View File

@ -200,11 +200,6 @@ public final class OzoneConfigKeys {
public static final int
OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10;
public static final String OZONE_CONTAINER_REPORT_INTERVAL =
"ozone.container.report.interval";
public static final String OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT =
"60s";
public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
= ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT

View File

@ -153,13 +153,29 @@
<description>The timeout duration for ratis server request.</description>
</property>
<property>
<name>ozone.container.report.interval</name>
<name>hdds.node.report.interval</name>
<value>60000ms</value>
<tag>OZONE, CONTAINER, MANAGEMENT</tag>
<description>Time interval for the datanode to send its node report. Each
datanode periodically sends a node report to SCM. The unit can be
specified with a postfix (ns,ms,s,m,h,d).</description>
</property>
<property>
<name>hdds.container.report.interval</name>
<value>60000ms</value>
<tag>OZONE, CONTAINER, MANAGEMENT</tag>
<description>Time interval for the datanode to send its container report. Each
datanode periodically sends a container report upon receiving
sendContainerReport from SCM. The unit can be specified with a
postfix (ns,ms,s,m,h,d).</description>
datanode periodically sends a container report to SCM. The unit can be
specified with a postfix (ns,ms,s,m,h,d).</description>
</property>
<property>
<name>hdds.command.status.report.interval</name>
<value>60000ms</value>
<tag>OZONE, CONTAINER, MANAGEMENT</tag>
<description>Time interval for the datanode to send the status of command
execution. Each datanode periodically reports the execution status of
commands received from SCM back to SCM. The unit can be specified with a
postfix (ns,ms,s,m,h,d).</description>
</property>
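The same intervals can also be overridden programmatically before the datanode service reads them; a small sketch, equivalent to setting the properties above in ozone-site.xml (the class name is an assumption, not part of this commit):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsConfigKeys;

public final class ReportIntervalOverrideExample {
  private ReportIntervalOverrideExample() {
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Keep report intervals >= hdds.heartbeat.interval (30s by default);
    // the publishers added in this commit enforce that with checkState.
    conf.setTimeDuration(HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL,
        90, TimeUnit.SECONDS);
    conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL,
        90, TimeUnit.SECONDS);
    conf.setTimeDuration(HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL,
        90, TimeUnit.SECONDS);
    System.out.println(conf.get(HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL));
  }
}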
<!--Ozone Settings-->
<property>
@ -677,7 +693,7 @@
</description>
</property>
<property>
<name>ozone.scm.heartbeat.interval</name>
<name>hdds.heartbeat.interval</name>
<value>30s</value>
<tag>OZONE, MANAGEMENT</tag>
<description>

View File

@ -29,12 +29,14 @@ import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.hdds.HddsConfigKeys
.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.HddsConfigKeys
.HDDS_HEARTBEAT_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_DEADNODE_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_DEADNODE_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
@ -181,9 +183,8 @@ public final class HddsServerUtil {
* @return - HB interval in seconds.
*/
public static long getScmHeartbeatInterval(Configuration conf) {
return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL,
ScmConfigKeys.OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT,
TimeUnit.SECONDS);
return conf.getTimeDuration(HDDS_HEARTBEAT_INTERVAL,
HDDS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
}
/**
@ -225,7 +226,7 @@ public final class HddsServerUtil {
sanitizeUserArgs(staleNodeIntervalMs, heartbeatIntervalMs, 3, 1000);
} catch (IllegalArgumentException ex) {
LOG.error("Stale Node Interval MS is cannot be honored due to " +
"mis-configured {}. ex: {}", OZONE_SCM_HEARTBEAT_INTERVAL, ex);
"mis-configured {}. ex: {}", HDDS_HEARTBEAT_INTERVAL, ex);
throw ex;
}
return staleNodeIntervalMs;
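The sanitizeUserArgs(staleNodeIntervalMs, heartbeatIntervalMs, 3, 1000) call above is not shown in this hunk; a simplified, self-contained sketch of the bounds check it presumably performs (the implementation and message below are assumptions):

public final class IntervalBoundsExample {
  private IntervalBoundsExample() {
  }

  // Simplified stand-in for HddsServerUtil.sanitizeUserArgs (assumption):
  // the stale-node interval must be at least 3x and at most 1000x the
  // heartbeat interval, otherwise the configuration cannot be honored.
  static long sanitizeUserArgs(long value, long base, long minFactor,
      long maxFactor) {
    if (value < base * minFactor || value > base * maxFactor) {
      throw new IllegalArgumentException("Value " + value
          + " is outside the allowed range [" + (base * minFactor)
          + ", " + (base * maxFactor) + "]");
    }
    return value;
  }

  public static void main(String[] args) {
    long heartbeatMs = 30_000L;
    // A 90s stale-node interval is valid: between 3x and 1000x the heartbeat.
    System.out.println(sanitizeUserArgs(90_000L, heartbeatMs, 3, 1000));
  }
}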

View File

@ -19,12 +19,20 @@ package org.apache.hadoop.ozone.container.common.report;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
import org.apache.hadoop.hdds.protocol.proto.
StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
import org.apache.hadoop.hdds.scm.HddsServerUtil;
import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
import static org.apache.hadoop.hdds.HddsConfigKeys
.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.HddsConfigKeys
.HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT;
/**
* Publishes CommandStatusReport which will be sent to SCM as part of
* heartbeat. CommandStatusReport consists of the following information:
@ -42,9 +50,17 @@ public class CommandStatusReportPublisher extends
protected long getReportFrequency() {
if (cmdStatusReportInterval == -1) {
cmdStatusReportInterval = getConf().getTimeDuration(
HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL,
HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT,
HDDS_COMMAND_STATUS_REPORT_INTERVAL,
HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval(
getConf());
Preconditions.checkState(
heartbeatFrequency < cmdStatusReportInterval,
HDDS_COMMAND_STATUS_REPORT_INTERVAL +
" cannot be configured lower than heartbeat frequency.");
}
return cmdStatusReportInterval;
}
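A minimal sketch of the guard introduced here, using the same Preconditions.checkState call; the standalone class and the concrete values are illustrative only:

import java.util.concurrent.TimeUnit;
import com.google.common.base.Preconditions;

public final class ReportIntervalGuardExample {
  private ReportIntervalGuardExample() {
  }

  public static void main(String[] args) {
    long heartbeatMs = TimeUnit.SECONDS.toMillis(30);
    long reportMs = TimeUnit.SECONDS.toMillis(60);
    // Reports only leave the datanode as part of a heartbeat, so a report
    // interval shorter than the heartbeat interval makes no sense; checkState
    // throws IllegalStateException with the given message if violated.
    Preconditions.checkState(heartbeatMs < reportMs,
        "hdds.command.status.report.interval"
            + " cannot be configured lower than heartbeat frequency.");
    System.out.println("Report interval accepted: " + reportMs + " ms");
  }
}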

View File

@ -17,13 +17,20 @@
package org.apache.hadoop.ozone.container.common.report;
import com.google.common.base.Preconditions;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.scm.HddsServerUtil;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.hdds.HddsConfigKeys
.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.HddsConfigKeys
.HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT;
/**
* Publishes ContainerReport which will be sent to SCM as part of heartbeat.
@ -49,9 +56,17 @@ public class ContainerReportPublisher extends
protected long getReportFrequency() {
if (containerReportInterval == null) {
containerReportInterval = getConf().getTimeDuration(
OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL,
OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT,
HDDS_CONTAINER_REPORT_INTERVAL,
HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval(
getConf());
Preconditions.checkState(
heartbeatFrequency < containerReportInterval,
HDDS_CONTAINER_REPORT_INTERVAL +
" cannot be configured lower than heartbeat frequency.");
}
// Add a random delay (0~30s) on top of the container report
// interval (60s) so that the SCM is not overwhelmed by the container reports
@ -64,7 +79,7 @@ public class ContainerReportPublisher extends
}
@Override
protected ContainerReportsProto getReport() {
return ContainerReportsProto.getDefaultInstance();
protected ContainerReportsProto getReport() throws IOException {
return getContext().getParent().getContainer().getContainerReport();
}
}
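The random delay mentioned in the comment is not shown in this hunk; a sketch of the idea, assuming commons-lang3 RandomUtils (which this file imports) supplies the jitter — the helper and class names are illustrative:

import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.RandomUtils;

public final class ReportJitterExample {
  private ReportJitterExample() {
  }

  static long nextReportDelay(long reportIntervalMs, long maxJitterMs) {
    // Spreading reports over [interval, interval + maxJitter) keeps all
    // datanodes from hitting SCM at the same instant.
    return reportIntervalMs + RandomUtils.nextLong(0, maxJitterMs);
  }

  public static void main(String[] args) {
    long interval = TimeUnit.SECONDS.toMillis(60);
    long maxJitter = TimeUnit.SECONDS.toMillis(30);
    System.out.println("Next container report in "
        + nextReportDelay(interval, maxJitter) + " ms");
  }
}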

View File

@ -17,8 +17,18 @@
package org.apache.hadoop.ozone.container.common.report;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.scm.HddsServerUtil;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.hdds.HddsConfigKeys
.HDDS_NODE_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.HddsConfigKeys
.HDDS_NODE_REPORT_INTERVAL_DEFAULT;
/**
* Publishes NodeReport which will be sent to SCM as part of heartbeat.
@ -28,13 +38,29 @@ import org.apache.hadoop.hdds.protocol.proto
*/
public class NodeReportPublisher extends ReportPublisher<NodeReportProto> {
private Long nodeReportInterval;
@Override
protected long getReportFrequency() {
return 90000L;
if (nodeReportInterval == null) {
nodeReportInterval = getConf().getTimeDuration(
HDDS_NODE_REPORT_INTERVAL,
HDDS_NODE_REPORT_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval(
getConf());
Preconditions.checkState(
heartbeatFrequency < nodeReportInterval,
HDDS_NODE_REPORT_INTERVAL +
" cannot be configured lower than heartbeat frequency.");
}
return nodeReportInterval;
}
@Override
protected NodeReportProto getReport() {
return NodeReportProto.getDefaultInstance();
protected NodeReportProto getReport() throws IOException {
return getContext().getParent().getContainer().getNodeReport();
}
}

View File

@ -23,7 +23,10 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.container.common.statemachine
.DatanodeStateMachine.DatanodeStates;
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
@ -34,6 +37,9 @@ import java.util.concurrent.TimeUnit;
public abstract class ReportPublisher<T extends GeneratedMessage>
implements Configurable, Runnable {
private static final Logger LOG = LoggerFactory.getLogger(
ReportPublisher.class);
private Configuration config;
private StateContext context;
private ScheduledExecutorService executor;
@ -76,7 +82,11 @@ public abstract class ReportPublisher<T extends GeneratedMessage>
* Generates and publishes the report to datanode state context.
*/
private void publishReport() {
context.addReport(getReport());
try {
context.addReport(getReport());
} catch (IOException e) {
LOG.error("Exception while publishing report.", e);
}
}
/**
@ -91,7 +101,7 @@ public abstract class ReportPublisher<T extends GeneratedMessage>
*
* @return datanode report
*/
protected abstract T getReport();
protected abstract T getReport() throws IOException;
/**
* Returns {@link StateContext}.
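The scheduling loop itself lies outside this hunk; a simplified, self-contained sketch of the pattern a ReportPublisher follows — publish once, then reschedule itself on the shared executor — with the class below being illustrative, not the committed code:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class SelfSchedulingReporter implements Runnable {
  private final ScheduledExecutorService executor =
      Executors.newSingleThreadScheduledExecutor();
  private volatile boolean stopped = false;

  long getReportFrequency() {
    // Stand-in for the configured interval (e.g. hdds.node.report.interval).
    return TimeUnit.SECONDS.toMillis(60);
  }

  void publishReport() {
    System.out.println("report published at " + System.currentTimeMillis());
  }

  public void start() {
    executor.schedule(this, getReportFrequency(), TimeUnit.MILLISECONDS);
  }

  @Override
  public void run() {
    publishReport();
    if (!stopped && !executor.isShutdown()) {
      // Re-arm for the next cycle; failures inside publishReport are logged,
      // not allowed to kill the schedule (mirroring the try/catch added above).
      executor.schedule(this, getReportFrequency(), TimeUnit.MILLISECONDS);
    }
  }

  public void stop() {
    stopped = true;
    executor.shutdown();
  }
}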

View File

@ -180,14 +180,9 @@ public class TestReportPublisher {
@Test
public void testAddingReportToHeartbeat() {
Configuration conf = new OzoneConfiguration();
ReportPublisherFactory factory = new ReportPublisherFactory(conf);
ReportPublisher nodeReportPublisher = factory.getPublisherFor(
NodeReportProto.class);
ReportPublisher containerReportPubliser = factory.getPublisherFor(
ContainerReportsProto.class);
GeneratedMessage nodeReport = nodeReportPublisher.getReport();
GeneratedMessage containerReport = containerReportPubliser.getReport();
GeneratedMessage nodeReport = NodeReportProto.getDefaultInstance();
GeneratedMessage containerReport = ContainerReportsProto
.getDefaultInstance();
SCMHeartbeatRequestProto.Builder heartbeatBuilder =
SCMHeartbeatRequestProto.newBuilder();
heartbeatBuilder.setDatanodeDetails(

View File

@ -37,10 +37,10 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdds.HddsConfigKeys
.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.HddsConfigKeys
.HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT;
/**
* A class that manages closing of containers. This allows transition from a
@ -75,8 +75,8 @@ public class ContainerCloser {
this.threadRunCount = new AtomicInteger(0);
this.isRunning = new AtomicBoolean(false);
this.reportInterval = this.configuration.getTimeDuration(
OZONE_CONTAINER_REPORT_INTERVAL,
OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
HDDS_CONTAINER_REPORT_INTERVAL,
HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
Preconditions.checkState(this.reportInterval > 0,
"report interval has to be greater than 0");
}
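Note that ContainerCloser reads the report interval in SECONDS while the publishers read the same key in MILLISECONDS; getTimeDuration converts the suffixed default either way. A small illustrative sketch (the class name is an assumption):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsConfigKeys;

public final class ReportIntervalUnitsExample {
  private ReportIntervalUnitsExample() {
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Default "60s" -> 60 when requested in seconds ...
    long seconds = conf.getTimeDuration(
        HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL,
        HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT,
        TimeUnit.SECONDS);
    // ... and 60000 when requested in milliseconds.
    long millis = conf.getTimeDuration(
        HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL,
        HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT,
        TimeUnit.MILLISECONDS);
    System.out.println(seconds + " s == " + millis + " ms");
  }
}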

View File

@ -44,6 +44,8 @@ import java.io.File;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.hdds.HddsConfigKeys
.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_CONTAINER_SIZE_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
@ -52,8 +54,6 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent
.CREATE;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent
.CREATED;
import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_CONTAINER_REPORT_INTERVAL;
/**
* Test class for Closing Container.
@ -72,7 +72,7 @@ public class TestContainerCloser {
configuration = SCMTestUtils.getConf();
size = configuration.getLong(OZONE_SCM_CONTAINER_SIZE_GB,
OZONE_SCM_CONTAINER_SIZE_DEFAULT) * 1024 * 1024 * 1024;
configuration.setTimeDuration(OZONE_CONTAINER_REPORT_INTERVAL,
configuration.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL,
1, TimeUnit.SECONDS);
testDir = GenericTestUtils
.getTestDir(TestContainerMapping.class.getSimpleName());
@ -137,7 +137,7 @@ public class TestContainerCloser {
// second report is discarded by the system if it lands in the 3 * report
// frequency window.
configuration.setTimeDuration(OZONE_CONTAINER_REPORT_INTERVAL, 1,
configuration.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1,
TimeUnit.SECONDS);
ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(

View File

@ -58,10 +58,9 @@ import java.util.concurrent.TimeoutException;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_DEADNODE_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
@ -359,7 +358,7 @@ public class TestNodeManager {
final int interval = 100;
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
// This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL
// and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL
@ -388,7 +387,7 @@ public class TestNodeManager {
final int interval = 100;
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
TimeUnit.MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, TimeUnit.SECONDS);
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, TimeUnit.SECONDS);
// This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL
// and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL
@ -413,7 +412,7 @@ public class TestNodeManager {
OzoneConfiguration conf = getConf();
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
@ -551,7 +550,7 @@ public class TestNodeManager {
OzoneConfiguration conf = getConf();
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
@ -729,7 +728,7 @@ public class TestNodeManager {
OzoneConfiguration conf = getConf();
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
@ -820,7 +819,7 @@ public class TestNodeManager {
OzoneConfiguration conf = getConf();
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1,
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1,
SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3 * 1000,
MILLISECONDS);
@ -985,7 +984,7 @@ public class TestNodeManager {
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
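These tests repeatedly use the same 1s/3s/6s combination; a sketch of that relationship as a helper, using the keys the tests set (the class and method names are illustrative):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public final class FastNodeTimeoutsExample {
  private FastNodeTimeoutsExample() {
  }

  // heartbeat < stale-node < dead-node; the stale interval stays >= 3x the
  // heartbeat, matching the sanitizeUserArgs bound noted earlier in this commit.
  static void setFastTimeouts(Configuration conf) {
    conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL,
        1, TimeUnit.SECONDS);
    conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL,
        3, TimeUnit.SECONDS);
    conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL,
        6, TimeUnit.SECONDS);
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    setFastTimeouts(conf);
    System.out.println(conf.get(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL));
  }
}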

View File

@ -59,6 +59,7 @@ import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
.HEALTHY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
@ -392,11 +393,11 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
private void configureSCMheartbeat() {
if (hbInterval.isPresent()) {
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL,
hbInterval.get(), TimeUnit.MILLISECONDS);
} else {
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL,
DEFAULT_HB_INTERVAL_MS,
TimeUnit.MILLISECONDS);
}

View File

@ -17,7 +17,6 @@
*/
package org.apache.hadoop.ozone;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.commons.lang3.RandomUtils;
@ -68,6 +67,9 @@ import org.junit.rules.Timeout;
import org.mockito.Mockito;
import org.apache.hadoop.test.GenericTestUtils;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.junit.Assert.fail;
/**
* Test class that exercises the StorageContainerManager.
*/
@ -186,9 +188,7 @@ public class TestStorageContainerManager {
public void testBlockDeletionTransactions() throws Exception {
int numKeys = 5;
OzoneConfiguration conf = new OzoneConfiguration();
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
5,
TimeUnit.SECONDS);
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 5, TimeUnit.SECONDS);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
3000,
TimeUnit.MILLISECONDS);

View File

@ -31,14 +31,13 @@ import java.util.concurrent.TimeUnit;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_DEADNODE_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
@ -61,7 +60,7 @@ public class TestQueryNode {
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
interval, TimeUnit.MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);