HDFS-12698. Ozone: Use time units in the Ozone configuration values. Contributed by Elek, Marton.

Anu Engineer 2017-12-18 12:39:03 -08:00 committed by Owen O'Malley
parent 8ff98e2af3
commit 2f3068bc72
31 changed files with 346 additions and 279 deletions
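
The change follows one pattern throughout: duration-typed keys drop their ".ms" / ".seconds" suffixes, their defaults become strings carrying a time-unit postfix (for example "30s" or "10m"), and callers read them with Configuration#getTimeDuration instead of getInt/getLong. A minimal sketch of that pattern, using a hypothetical key name for illustration (the postfix units listed in ozone-default.xml are ns, ms, s, m, h, d):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class TimeDurationSketch {
  // Hypothetical key and default, mirroring constants such as
  // "ozone.scm.heartbeat.interval" with its "30s" default in this commit.
  private static final String KEY = "example.heartbeat.interval";
  private static final String DEFAULT = "30s";

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set(KEY, "2m");               // any supported unit postfix is accepted
    // Old style: conf.getInt("example.heartbeat.interval.ms", 30000);
    // New style: parse the suffixed value into whatever unit the caller needs.
    long millis = conf.getTimeDuration(KEY, DEFAULT, TimeUnit.MILLISECONDS);
    System.out.println(millis);        // prints 120000
  }
}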


@ -103,9 +103,9 @@ public final class CBlockConfigKeys {
/**
* Time to keep the thread alive when it is idle.
*/
public static final String DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS =
"dfs.cblock.cache.keep.alive.seconds";
public static final long DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS_DEFAULT = 60;
public static final String DFS_CBLOCK_CACHE_KEEP_ALIVE =
"dfs.cblock.cache.keep.alive";
public static final String DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT = "60s";
/**
* Priority of cache flusher thread, affecting the relative performance of
@ -123,10 +123,10 @@ public final class CBlockConfigKeys {
"dfs.cblock.cache.block.buffer.size";
public static final int DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE_DEFAULT = 512;
public static final String DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS =
"dfs.cblock.block.buffer.flush.interval.seconds";
public static final int
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS_DEFAULT = 60;
public static final String DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL =
"dfs.cblock.block.buffer.flush.interval";
public static final String DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_DEFAULT =
"60s";
// jscsi server settings
public static final String DFS_CBLOCK_JSCSI_SERVER_ADDRESS_KEY =
@ -183,9 +183,9 @@ public final class CBlockConfigKeys {
* Currently the largest supported volume is about 8TB, which might take
> 20 seconds to finish creating containers. Thus the timeout is set to 300 sec.
*/
public static final String DFS_CBLOCK_RPC_TIMEOUT_SECONDS =
"dfs.cblock.rpc.timeout.seconds";
public static final int DFS_CBLOCK_RPC_TIMEOUT_SECONDS_DEFAULT = 300;
public static final String DFS_CBLOCK_RPC_TIMEOUT =
"dfs.cblock.rpc.timeout";
public static final String DFS_CBLOCK_RPC_TIMEOUT_DEFAULT = "300s";
private CBlockConfigKeys() {


@ -141,12 +141,12 @@ public final class OzoneConfigKeys {
public static final int
OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT = 20;
public static final String OZONE_CLIENT_SOCKET_TIMEOUT_MS =
"ozone.client.socket.timeout.ms";
public static final int OZONE_CLIENT_SOCKET_TIMEOUT_MS_DEFAULT = 5000;
public static final String OZONE_CLIENT_CONNECTION_TIMEOUT_MS =
"ozone.client.connection.timeout.ms";
public static final int OZONE_CLIENT_CONNECTION_TIMEOUT_MS_DEFAULT = 5000;
public static final String OZONE_CLIENT_SOCKET_TIMEOUT =
"ozone.client.socket.timeout";
public static final int OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT = 5000;
public static final String OZONE_CLIENT_CONNECTION_TIMEOUT =
"ozone.client.connection.timeout";
public static final int OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT = 5000;
public static final String OZONE_REPLICATION = "ozone.replication";
public static final int OZONE_REPLICATION_DEFAULT =
@ -166,10 +166,10 @@ public final class OzoneConfigKeys {
/**
* Configuration properties for Ozone Block Deleting Service.
*/
public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS =
"ozone.block.deleting.service.interval.ms";
public static final int OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS_DEFAULT
= 60000;
public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL =
"ozone.block.deleting.service.interval";
public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT
= "60s";
/**
* The interval of open key clean service.
@ -190,8 +190,8 @@ public final class OzoneConfigKeys {
public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT =
"ozone.block.deleting.service.timeout";
public static final int OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT
= 300000; // 300s for default
public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT
= "300s"; // 300s for default
public static final String OZONE_KEY_PREALLOCATION_MAXSIZE =
"ozone.key.preallocation.maxsize";
@ -208,10 +208,10 @@ public final class OzoneConfigKeys {
public static final int
OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10;
public static final String OZONE_CONTAINER_REPORT_INTERVAL_MS =
"ozone.container.report.interval.ms";
public static final int OZONE_CONTAINER_REPORT_INTERVAL_MS_DEFAULT =
60000;
public static final String OZONE_CONTAINER_REPORT_INTERVAL =
"ozone.container.report.interval";
public static final String OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT =
"60s";
public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
= ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;


@ -69,16 +69,16 @@
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_DEADNODE_INTERVAL_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_DEADNODE_INTERVAL_MS;
.OZONE_SCM_DEADNODE_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS;
.OZONE_SCM_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS;
.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_RPC_TIMEOUT;
@ -87,7 +87,7 @@
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_STALENODE_INTERVAL_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_STALENODE_INTERVAL_MS;
.OZONE_SCM_STALENODE_INTERVAL;
/**
* Utility methods for Ozone and Container Clients.
@ -541,8 +541,9 @@ private static long sanitizeUserArgs(long valueTocheck, long baseValue,
* @return long in Milliseconds.
*/
public static long getScmheartbeatCheckerInterval(Configuration conf) {
return conf.getLong(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS,
ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS_DEFAULT);
return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
}
/**
@ -553,9 +554,8 @@ public static long getScmheartbeatCheckerInterval(Configuration conf) {
* @return - HB interval in seconds.
*/
public static long getScmHeartbeatInterval(Configuration conf) {
return conf.getTimeDuration(
OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS,
ScmConfigKeys.OZONE_SCM_HEARBEAT_INTERVAL_SECONDS_DEFAULT,
return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL,
ScmConfigKeys.OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT,
TimeUnit.SECONDS);
}
@ -568,8 +568,9 @@ public static long getScmHeartbeatInterval(Configuration conf) {
*/
public static long getStaleNodeInterval(Configuration conf) {
long staleNodeIntevalMs = conf.getLong(OZONE_SCM_STALENODE_INTERVAL_MS,
OZONE_SCM_STALENODE_INTERVAL_DEFAULT);
long staleNodeIntervalMs =
conf.getTimeDuration(OZONE_SCM_STALENODE_INTERVAL,
OZONE_SCM_STALENODE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
long heartbeatThreadFrequencyMs = getScmheartbeatCheckerInterval(conf);
@ -582,25 +583,25 @@ public static long getStaleNodeInterval(Configuration conf) {
// Here we check that staleNodeInterval is at least five times more than the
// frequency at which the accounting thread is going to run.
try {
sanitizeUserArgs(staleNodeIntevalMs, heartbeatThreadFrequencyMs, 5, 1000);
sanitizeUserArgs(staleNodeIntervalMs, heartbeatThreadFrequencyMs,
5, 1000);
} catch (IllegalArgumentException ex) {
LOG.error("Stale Node Interval MS is cannot be honored due to " +
LOG.error("Stale Node Interval is cannot be honored due to " +
"mis-configured {}. ex: {}",
OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, ex);
OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, ex);
throw ex;
}
// Make sure that the stale node interval is greater than the configured
// interval at which datanodes send HBs.
try {
sanitizeUserArgs(staleNodeIntevalMs, heartbeatIntervalMs, 3, 1000);
sanitizeUserArgs(staleNodeIntervalMs, heartbeatIntervalMs, 3, 1000);
} catch (IllegalArgumentException ex) {
LOG.error("Stale Node Interval MS is cannot be honored due to " +
"mis-configured {}. ex: {}",
OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, ex);
"mis-configured {}. ex: {}", OZONE_SCM_HEARTBEAT_INTERVAL, ex);
throw ex;
}
return staleNodeIntevalMs;
return staleNodeIntervalMs;
}
/**
@ -614,8 +615,9 @@ public static long getStaleNodeInterval(Configuration conf) {
*/
public static long getDeadNodeInterval(Configuration conf) {
long staleNodeIntervalMs = getStaleNodeInterval(conf);
long deadNodeIntervalMs = conf.getLong(
OZONE_SCM_DEADNODE_INTERVAL_MS, OZONE_SCM_DEADNODE_INTERVAL_DEFAULT);
long deadNodeIntervalMs = conf.getTimeDuration(OZONE_SCM_DEADNODE_INTERVAL,
OZONE_SCM_DEADNODE_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
try {
// Make sure that dead nodes Ms is at least twice the time for staleNodes
@ -623,8 +625,7 @@ public static long getDeadNodeInterval(Configuration conf) {
sanitizeUserArgs(deadNodeIntervalMs, staleNodeIntervalMs, 2, 1000);
} catch (IllegalArgumentException ex) {
LOG.error("Dead Node Interval MS is cannot be honored due to " +
"mis-configured {}. ex: {}",
OZONE_SCM_STALENODE_INTERVAL_MS, ex);
"mis-configured {}. ex: {}", OZONE_SCM_STALENODE_INTERVAL, ex);
throw ex;
}
return deadNodeIntervalMs;
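// Illustrative sketch, not part of this commit: the body of sanitizeUserArgs
// is elided above. Judging from its callers, e.g.
// sanitizeUserArgs(staleNodeIntervalMs, heartbeatThreadFrequencyMs, 5, 1000),
// and the surrounding comments, it presumably checks that the value stays
// within a multiplier range of the base value and throws otherwise. The name
// and parameter names below are assumptions for illustration only.
private static long sanitizeUserArgsSketch(long valueTocheck, long baseValue,
    long minMultiplier, long maxMultiplier) {
  if (valueTocheck >= baseValue * minMultiplier
      && valueTocheck <= baseValue * maxMultiplier) {
    return valueTocheck;
  }
  throw new IllegalArgumentException("Value " + valueTocheck
      + " is outside the allowed range relative to " + baseValue);
}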
@ -737,24 +738,26 @@ public static CloseableHttpClient newHttpClient() {
* @return a {@link CloseableHttpClient} instance.
*/
public static CloseableHttpClient newHttpClient(Configuration conf) {
int socketTimeout = OzoneConfigKeys
.OZONE_CLIENT_SOCKET_TIMEOUT_MS_DEFAULT;
int connectionTimeout = OzoneConfigKeys
.OZONE_CLIENT_CONNECTION_TIMEOUT_MS_DEFAULT;
long socketTimeout = OzoneConfigKeys
.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT;
long connectionTimeout = OzoneConfigKeys
.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT;
if (conf != null) {
socketTimeout = conf.getInt(
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_MS,
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_MS_DEFAULT);
connectionTimeout = conf.getInt(
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_MS,
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_MS_DEFAULT);
socketTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
connectionTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
}
CloseableHttpClient client = HttpClients.custom()
.setDefaultRequestConfig(
RequestConfig.custom()
.setSocketTimeout(socketTimeout)
.setConnectTimeout(connectionTimeout)
.setSocketTimeout(Math.toIntExact(socketTimeout))
.setConnectTimeout(Math.toIntExact(connectionTimeout))
.build())
.build();
return client;


@ -76,6 +76,7 @@
import java.util.Random;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
import static java.net.HttpURLConnection.HTTP_CREATED;
import static java.net.HttpURLConnection.HTTP_OK;
@ -114,12 +115,14 @@ public RestClient(Configuration conf)
.setHost(getOzoneRestHandlerHost())
.setPort(port);
this.ozoneRestUri = uriBuilder.build();
int socketTimeout = conf.getInt(
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_MS,
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_MS_DEFAULT);
int connectionTimeout = conf.getInt(
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_MS,
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_MS_DEFAULT);
long socketTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
long connectionTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
int maxConnection = conf.getInt(
OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_MAX,
OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT);
@ -142,8 +145,8 @@ public RestClient(Configuration conf)
.setConnectionManager(connManager)
.setDefaultRequestConfig(
RequestConfig.custom()
.setSocketTimeout(socketTimeout)
.setConnectTimeout(connectionTimeout)
.setSocketTimeout(Math.toIntExact(socketTimeout))
.setConnectTimeout(Math.toIntExact(connectionTimeout))
.build())
.build();
this.ugi = UserGroupInformation.getCurrentUser();


@ -29,8 +29,8 @@ public final class ScmConfigKeys {
public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY =
"scm.container.client.idle.threshold";
public static final int SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT =
10000;
public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT =
"10s";
public static final String SCM_CONTAINER_CLIENT_MAX_SIZE_KEY =
"scm.container.client.max.size";
@ -114,29 +114,29 @@ public final class ScmConfigKeys {
"ozone.scm.handler.count.key";
public static final int OZONE_SCM_HANDLER_COUNT_DEFAULT = 10;
public static final String OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS =
"ozone.scm.heartbeat.interval.seconds";
public static final int OZONE_SCM_HEARBEAT_INTERVAL_SECONDS_DEFAULT =
30;
public static final String OZONE_SCM_HEARTBEAT_INTERVAL =
"ozone.scm.heartbeat.interval";
public static final String OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT =
"30s";
public static final String OZONE_SCM_DEADNODE_INTERVAL_MS =
"ozone.scm.dead.node.interval.ms";
public static final long OZONE_SCM_DEADNODE_INTERVAL_DEFAULT =
OZONE_SCM_HEARBEAT_INTERVAL_SECONDS_DEFAULT * 1000L * 20L;
public static final String OZONE_SCM_DEADNODE_INTERVAL =
"ozone.scm.dead.node.interval";
public static final String OZONE_SCM_DEADNODE_INTERVAL_DEFAULT =
"10m";
public static final String OZONE_SCM_MAX_HB_COUNT_TO_PROCESS =
"ozone.scm.max.hb.count.to.process";
public static final int OZONE_SCM_MAX_HB_COUNT_TO_PROCESS_DEFAULT = 5000;
public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS =
"ozone.scm.heartbeat.thread.interval.ms";
public static final long OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS_DEFAULT =
3000;
public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL =
"ozone.scm.heartbeat.thread.interval";
public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT =
"3s";
public static final String OZONE_SCM_STALENODE_INTERVAL_MS =
"ozone.scm.stale.node.interval.ms";
public static final long OZONE_SCM_STALENODE_INTERVAL_DEFAULT =
OZONE_SCM_HEARBEAT_INTERVAL_SECONDS_DEFAULT * 1000L * 3L;
public static final String OZONE_SCM_STALENODE_INTERVAL =
"ozone.scm.stale.node.interval";
public static final String OZONE_SCM_STALENODE_INTERVAL_DEFAULT =
"90s";
public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT =
"ozone.scm.heartbeat.rpc-timeout";
@ -199,18 +199,17 @@ public final class ScmConfigKeys {
public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT =
"ozone.scm.container.creation.lease.timeout";
public static final long OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT =
60000;
public static final String
OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
/**
* Don't start processing a pool if we have not had a minimum number of
* seconds from the last processing.
*/
public static final String OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL =
"ozone.scm.container.report.processing.interval";
public static final String
OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_SECONDS =
"ozone.scm.container.report.processing.interval.seconds";
public static final int
OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = 60;
OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = "60s";
/**
* These 2 settings control the number of threads in executor pool and time
@ -219,10 +218,10 @@ public final class ScmConfigKeys {
public static final String OZONE_SCM_MAX_CONTAINER_REPORT_THREADS =
"ozone.scm.max.container.report.threads";
public static final int OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT = 100;
public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_SECONDS =
"ozone.scm.container.reports.wait.timeout.seconds";
public static final int OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT =
300; // Default 5 minute wait.
public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT =
"ozone.scm.container.reports.wait.timeout";
public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT =
"5m";
public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
"ozone.scm.block.deletion.max.retry";


@ -47,9 +47,10 @@ public CBlockVolumeClient(OzoneConfiguration conf,
InetSocketAddress address = serverAddress != null ? serverAddress :
OzoneClientUtils.getCblockServiceRpcAddr(conf);
long version = RPC.getProtocolVersion(CBlockServiceProtocolPB.class);
int rpcTimeout =
conf.getInt(CBlockConfigKeys.DFS_CBLOCK_RPC_TIMEOUT_SECONDS,
CBlockConfigKeys.DFS_CBLOCK_RPC_TIMEOUT_SECONDS_DEFAULT) * 1000;
int rpcTimeout = Math.toIntExact(
conf.getTimeDuration(CBlockConfigKeys.DFS_CBLOCK_RPC_TIMEOUT,
CBlockConfigKeys.DFS_CBLOCK_RPC_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS));
cblockClient = new CBlockServiceProtocolClientSideTranslatorPB(
RPC.getProtocolProxy(CBlockServiceProtocolPB.class, version,
address, UserGroupInformation.getCurrentUser(), conf,


@ -69,9 +69,9 @@
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_CORE_MIN_POOL_SIZE_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS;
.DFS_CBLOCK_CACHE_KEEP_ALIVE;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS_DEFAULT;
.DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_MAX_POOL_SIZE;
import static org.apache.hadoop.cblock.CBlockConfigKeys
@ -123,8 +123,8 @@ public ContainerCacheFlusher(Configuration config,
DFS_CBLOCK_CACHE_CORE_MIN_POOL_SIZE_DEFAULT);
int maxPoolSize = config.getInt(DFS_CBLOCK_CACHE_MAX_POOL_SIZE,
DFS_CBLOCK_CACHE_MAX_POOL_SIZE_DEFAULT);
long keepAlive = config.getLong(DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS,
DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS_DEFAULT);
long keepAlive = config.getTimeDuration(DFS_CBLOCK_CACHE_KEEP_ALIVE,
DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT, TimeUnit.SECONDS);
int threadPri = config.getInt(DFS_CBLOCK_CACHE_THREAD_PRIORITY,
DFS_CBLOCK_CACHE_THREAD_PRIORITY_DEFAULT);
int blockBufferSize = config.getInt(DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE,


@ -31,17 +31,17 @@
import java.util.concurrent.ThreadFactory;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS;
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS_DEFAULT;
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS;
DFS_CBLOCK_CACHE_KEEP_ALIVE;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS_DEFAULT;
DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_THREAD_PRIORITY;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
@ -76,7 +76,7 @@ private enum FlushReason {
private final CBlockLocalCache parentCache;
private final ScheduledThreadPoolExecutor scheduledExecutor;
private final ThreadPoolExecutor threadPoolExecutor;
private final int intervalSeconds;
private final long intervalSeconds;
private final ArrayBlockingQueue<ByteBuffer> acquireQueue;
private final ArrayBlockingQueue<Runnable> workQueue;
private ByteBuffer currentBuffer;
@ -86,11 +86,13 @@ private enum FlushReason {
this.scheduledExecutor = new ScheduledThreadPoolExecutor(1);
this.intervalSeconds =
config.getInt(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS,
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS_DEFAULT);
config.getTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL,
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_DEFAULT,
TimeUnit.SECONDS);
long keepAlive = config.getLong(DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS,
DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS_DEFAULT);
long keepAlive = config.getTimeDuration(DFS_CBLOCK_CACHE_KEEP_ALIVE,
DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT,
TimeUnit.SECONDS);
this.workQueue = new ArrayBlockingQueue<>(2, true);
int threadPri = config.getInt(DFS_CBLOCK_CACHE_THREAD_PRIORITY,
DFS_CBLOCK_CACHE_THREAD_PRIORITY_DEFAULT);


@ -25,6 +25,7 @@
import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState;
import org.apache.hadoop.util.Time;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
/**
@ -46,9 +47,10 @@ public ContainerReportManagerImpl(Configuration config) {
this.config = config;
this.lastContainerReportTime = -1;
this.reportCount = new AtomicLong(0L);
this.containerReportInterval = config.getLong(
OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL_MS,
OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL_MS_DEFAULT);
this.containerReportInterval = config.getTimeDuration(
OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL,
OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
}
public ReportState getContainerReportState() {


@ -80,7 +80,7 @@ public class BlockDeletingService extends BackgroundService{
private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 10;
public BlockDeletingService(ContainerManager containerManager,
int serviceInterval, long serviceTimeout, Configuration conf) {
long serviceInterval, long serviceTimeout, Configuration conf) {
super("BlockDeletingService", serviceInterval,
TimeUnit.MILLISECONDS, BLOCK_DELETING_SERVICE_CORE_POOL_SIZE,
serviceTimeout);


@ -52,9 +52,9 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX;
import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS;
.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS_DEFAULT;
.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
@ -104,9 +104,9 @@ public OzoneContainer(DatanodeID datanodeID, Configuration ozoneConfig) throws
this.keyManager = new KeyManagerImpl(manager, ozoneConfig);
manager.setKeyManager(this.keyManager);
int svcInterval = ozoneConfig.getInt(
OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS,
OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS_DEFAULT);
long svcInterval =
ozoneConfig.getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
long serviceTimeout = ozoneConfig.getTimeDuration(
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);


@ -57,7 +57,7 @@ public class KeyDeletingService extends BackgroundService {
private final int keyLimitPerTask;
public KeyDeletingService(ScmBlockLocationProtocol scmClient,
KeyManager manager, int serviceInterval,
KeyManager manager, long serviceInterval,
long serviceTimeout, Configuration conf) {
super("KeyDeletingService", serviceInterval, TimeUnit.MILLISECONDS,
KEY_DELETING_CORE_POOL_SIZE, serviceTimeout);


@ -51,9 +51,9 @@
import static org.apache.hadoop.ozone
.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
import static org.apache.hadoop.ozone
.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS;
.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone
.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS_DEFAULT;
.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
import static org.apache.hadoop.ozone
.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
import static org.apache.hadoop.ozone
@ -104,12 +104,14 @@ public KeyManagerImpl(ScmBlockLocationProtocol scmBlockClient,
OZONE_SCM_BLOCK_SIZE_DEFAULT) * OzoneConsts.MB;
this.useRatis = conf.getBoolean(DFS_CONTAINER_RATIS_ENABLED_KEY,
DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
int blockDeleteInterval = conf.getInt(
OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS,
OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS_DEFAULT);
long blockDeleteInterval = conf.getTimeDuration(
OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
long serviceTimeout = conf.getTimeDuration(
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
this.preallocateMax = conf.getLong(
OZONE_KEY_PREALLOCATION_MAXSIZE,
OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT);


@ -54,9 +54,9 @@
import java.util.concurrent.locks.ReentrantLock;
import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS;
.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS_DEFAULT;
.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
import static org.apache.hadoop.ozone.OzoneConfigKeys
@ -135,10 +135,10 @@ public BlockManagerImpl(final Configuration conf,
// SCM block deleting transaction log and deleting service.
deletedBlockLog = new DeletedBlockLogImpl(conf);
int svcInterval =
conf.getInt(
OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS,
OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS_DEFAULT);
long svcInterval =
conf.getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
long serviceTimeout =
conf.getTimeDuration(
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,


@ -74,7 +74,7 @@ public class SCMBlockDeletingService extends BackgroundService {
public SCMBlockDeletingService(DeletedBlockLog deletedBlockLog,
Mapping mapper, NodeManager nodeManager,
int interval, long serviceTimeout, Configuration conf) {
long interval, long serviceTimeout, Configuration conf) {
super("SCMBlockDeletingService", interval, TimeUnit.MILLISECONDS,
BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout);
this.deletedBlockLog = deletedBlockLog;


@ -50,6 +50,7 @@
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
@ -117,9 +118,10 @@ public ContainerMapping(
ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT);
LOG.trace("Container State Manager created.");
long containerCreationLeaseTimeout = conf.getLong(
long containerCreationLeaseTimeout = conf.getTimeDuration(
ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT,
ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT);
ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
LOG.trace("Starting Container Lease Manager.");
containerLeaseManager = new LeaseManager<>(containerCreationLeaseTimeout);
containerLeaseManager.start();


@ -49,11 +49,11 @@
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_SECONDS;
.OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_SECONDS;
.OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_MAX_CONTAINER_REPORT_THREADS;
import static org.apache.hadoop.scm.ScmConfigKeys
@ -72,10 +72,10 @@ public class ContainerReplicationManager implements Closeable {
private final HashSet<String> poolNames;
private final PriorityQueue<PeriodicPool> poolQueue;
private final NodeManager nodeManager;
private final int containerProcessingLag;
private final long containerProcessingLag;
private final AtomicBoolean runnable;
private final ExecutorService executorService;
private final int maxPoolWait;
private final long maxPoolWait;
private long poolProcessCount;
private final List<InProgressPool> inProgressPoolList;
private final AtomicInteger threadFaultCount;
@ -104,17 +104,18 @@ public ContainerReplicationManager(OzoneConfiguration conf,
Preconditions.checkNotNull(commandQueue);
Preconditions.checkNotNull(nodeManager);
this.containerProcessingLag =
conf.getInt(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_SECONDS,
OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT
conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL,
OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT,
TimeUnit.SECONDS
) * 1000;
int maxContainerReportThreads =
conf.getInt(OZONE_SCM_MAX_CONTAINER_REPORT_THREADS,
OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT
);
this.maxPoolWait =
conf.getInt(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_SECONDS,
OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT) * 1000;
conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT,
OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
this.poolManager = poolManager;
this.commandQueue = commandQueue;
this.nodeManager = nodeManager;


@ -63,18 +63,17 @@ public final class InProgressPool {
private AtomicInteger nodeCount;
private AtomicInteger nodeProcessed;
private AtomicInteger containerProcessedCount;
private int maxWaitTime;
private long maxWaitTime;
/**
* Constructs a pool that is being processed.
*
* @param maxWaitTime - Maximum wait time in milliseconds.
* @param maxWaitTime - Maximum wait time in milliseconds.
* @param pool - Pool that we are working against
* @param nodeManager - Nodemanager
* @param poolManager - pool manager
* @param commandQueue - Command queue
* @param executorService - Shared Executor service.
*/
InProgressPool(int maxWaitTime, PeriodicPool pool,
InProgressPool(long maxWaitTime, PeriodicPool pool,
NodeManager nodeManager, NodePoolManager poolManager,
CommandQueue commandQueue, ExecutorService executorService) {
Preconditions.checkNotNull(pool);


@ -52,11 +52,11 @@ public abstract class BackgroundService {
private final ThreadGroup threadGroup;
private final ThreadFactory threadFactory;
private final String serviceName;
private final int interval;
private final long interval;
private final long serviceTimeout;
private final TimeUnit unit;
public BackgroundService(String serviceName, int interval,
public BackgroundService(String serviceName, long interval,
TimeUnit unit, int threadPoolSize, long serviceTimeout) {
this.interval = interval;
this.unit = unit;


@ -27,8 +27,8 @@
<configuration>
<!--CBlock Settings-->
<property>
<name>dfs.cblock.block.buffer.flush.interval.seconds</name>
<value>60</value>
<name>dfs.cblock.block.buffer.flush.interval</name>
<value>60s</value>
<tag>CBLOCK, PERFORMANCE</tag>
<description>
Controls the frequency at which the local cache flushes the
@ -63,12 +63,12 @@
</description>
</property>
<property>
<name>dfs.cblock.cache.keep.alive.seconds</name>
<value>60</value>
<name>dfs.cblock.cache.keep.alive</name>
<value>60s</value>
<tag>CBLOCK, PERFORMANCE</tag>
<description>
If the cblock cache has no I/O, then the threads in the cache
pool are kept idle for this many seconds before shutting down.
pool are kept idle for this amount of time before shutting down.
</description>
</property>
<property>
@ -191,11 +191,11 @@
</description>
</property>
<property>
<name>dfs.cblock.rpc.timeout.seconds</name>
<value>300</value>
<name>dfs.cblock.rpc.timeout</name>
<value>300s</value>
<tag>CBLOCK, MANAGEMENT</tag>
<description>
RPC timeout in seconds used for cblock CLI operations. When you
RPC timeout used for cblock CLI operations. When you
create very large disks, like 5TB, etc. The number of containers
allocated in the system is huge. It will be 5TB/5GB, which is 1000
containers. The client CLI might time out even though the cblock manager
@ -347,12 +347,13 @@
</description>
</property>
<property>
<name>ozone.container.report.interval.ms</name>
<value>60000</value>
<name>ozone.container.report.interval</name>
<value>60000ms</value>
<tag>OZONE, CONTAINER, MANAGEMENT</tag>
<description>Time interval in milliseconds of the datanode to send container
report. Each datanode periodically send container report upon receive
sendContainerReport from SCM.</description>
<description>Time interval for the datanode to send its container report. Each
datanode periodically sends a container report upon receiving
sendContainerReport from SCM. The unit can be specified with a
postfix (ns, ms, s, m, h, d).</description>
</property>
<!--Ozone Settings-->
<property>
@ -388,12 +389,13 @@
</description>
</property>
<property>
<name>ozone.block.deleting.service.interval.ms</name>
<value>60000</value>
<name>ozone.block.deleting.service.interval</name>
<value>1m</value>
<tag>OZONE, PERFORMANCE, SCM</tag>
<description>Time interval in milliseconds of the block deleting service.
<description>Time interval of the block deleting service.
The block deleting service runs on each datanode periodically and
deletes blocks queued for deletion.
deletes blocks queued for deletion. The unit can be specified with a
postfix (ns, ms, s, m, h, d).
</description>
</property>
<property>
@ -411,8 +413,8 @@
</description>
</property>
<property>
<name>ozone.client.connection.timeout.ms</name>
<value>5000</value>
<name>ozone.client.connection.timeout</name>
<value>5000ms</value>
<tag>OZONE, PERFORMANCE, CLIENT</tag>
<description>Connection timeout for Ozone client. The unit can be specified with a postfix (ns, ms, s, m, h, d).
</description>
@ -431,10 +433,11 @@
</description>
</property>
<property>
<name>ozone.client.socket.timeout.ms</name>
<value>5000</value>
<name>ozone.client.socket.timeout</name>
<value>5000ms</value>
<tag>OZONE, CLIENT</tag>
<description>Socket timeout for Ozone client in milliseconds.</description>
<description>Socket timeout for Ozone client. The unit can be specified with a
postfix (ns, ms, s, m, h, d).</description>
</property>
<property>
<name>ozone.enabled</name>
@ -748,7 +751,7 @@
<description>
The policy used for choosing desired containers for block deletion.
Datanode selects some containers to process block deletion
in a certain interval defined by ozone.block.deleting.service.interval.ms.
in a certain interval defined by ozone.block.deleting.service.interval.
The number of containers to process in each interval is defined
by ozone.block.deleting.container.limit.per.interval. This property is
used to configure the policy applied while selecting containers.
@ -783,17 +786,17 @@
</description>
</property>
<property>
<name>ozone.scm.container.report.processing.interval.seconds</name>
<value>60</value>
<name>ozone.scm.container.report.processing.interval</name>
<value>60s</value>
<tag>OZONE, PERFORMANCE</tag>
<description>Time interval in seconds for scm to process container reports
<description>Time interval for SCM to process container reports
for a node pool. SCM handles node pool reports in a cyclic clock
manner; it fetches pools periodically with this time interval.
</description>
</property>
<property>
<name>ozone.scm.container.reports.wait.timeout.seconds</name>
<value>300</value>
<name>ozone.scm.container.reports.wait.timeout</name>
<value>300s</value>
<tag>OZONE, PERFORMANCE, MANAGEMENT</tag>
<description>Maximum time to wait for processing all container
reports from
@ -871,12 +874,11 @@
</description>
</property>
<property>
<name>ozone.scm.dead.node.interval.ms</name>
<value>600000</value>
<name>ozone.scm.dead.node.interval</name>
<value>10m</value>
<tag>OZONE, MANAGEMENT</tag>
<description>
The interval between heartbeats before a node is tagged as dead. This
value is in milliseconds.
The interval between heartbeats before a node is tagged as dead.
</description>
</property>
<property>
@ -896,11 +898,11 @@
</description>
</property>
<property>
<name>ozone.scm.heartbeat.interval.seconds</name>
<value>30</value>
<name>ozone.scm.heartbeat.interval</name>
<value>30s</value>
<tag>OZONE, MANAGEMENT</tag>
<description>
The heartbeat interval from a data node to SCM in seconds. Yes,
The heartbeat interval from a data node to SCM. Yes,
it is not three but 30, since most data nodes will be heartbeating via Ratis
heartbeats. If a client is not able to talk to a data node, it will notify
KSM/SCM eventually. So a 30 second HB seems to work. This assumes that
@ -929,8 +931,8 @@
</description>
</property>
<property>
<name>ozone.scm.heartbeat.thread.interval.ms</name>
<value>3000</value>
<name>ozone.scm.heartbeat.thread.interval</name>
<value>3s</value>
<tag>OZONE, MANAGEMENT</tag>
<description>
When a heartbeat from the data node arrives on SCM, it is queued for
@ -1022,7 +1024,7 @@
<description>
The maximum number of heartbeats to process per loop of the
heartbeat process thread. Please see
ozone.scm.heartbeat.thread.interval.ms
ozone.scm.heartbeat.thread.interval
for more info.
</description>
</property>
@ -1039,12 +1041,12 @@
</description>
</property>
<property>
<name>ozone.scm.stale.node.interval.ms</name>
<value>90000</value>
<name>ozone.scm.stale.node.interval</name>
<value>90s</value>
<tag>OZONE, MANAGEMENT</tag>
<description>
The interval in milliseconds for stale node flagging. Please
see ozone.scm.heartbeat.thread.interval.ms before changing this value.
The interval for stale node flagging. Please
see ozone.scm.heartbeat.thread.interval before changing this value.
</description>
</property>
<property>
@ -1074,7 +1076,7 @@
<!--Client Settings-->
<property>
<name>scm.container.client.idle.threshold</name>
<value>10000</value>
<value>10s</value>
<tag>OZONE, PERFORMANCE</tag>
<description>
In the standalone pipelines, the SCM clients use netty to
@ -1096,7 +1098,7 @@
<property>
<name>ozone.scm.container.creation.lease.timeout</name>
<value>60000</value>
<value>60s</value>
<tag>OZONE, SCM</tag>
<description>
Container creation timeout to be used by SCM. When


@ -18,6 +18,7 @@
package org.apache.hadoop.cblock;
import com.google.common.primitives.Longs;
import static java.util.concurrent.TimeUnit.SECONDS;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics;
import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
@ -50,7 +51,7 @@
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS;
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL;
/**
* Tests for Local Cache Buffer Manager.
@ -218,7 +219,7 @@ public void testPeriodicFlush() throws IOException,
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
flushTestConfig
.setInt(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS, 5);
.setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL, 5, SECONDS);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
@ -312,7 +313,7 @@ public void testMultipleBuffersFlush() throws IOException,
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
flushTestConfig
.setInt(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS, 120);
.setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL, 120, SECONDS);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
@ -361,7 +362,8 @@ public void testSingleBlockFlush() throws IOException,
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
flushTestConfig
.setInt(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS, 5);
.setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL,
5, SECONDS);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);


@ -45,6 +45,7 @@
import java.nio.charset.StandardCharsets;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.cblock.CBlockConfigKeys
@ -54,7 +55,7 @@
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS;
.DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE;
@ -198,7 +199,8 @@ public void testContainerWrites() throws IOException,
flushTestConfig.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path);
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
flushTestConfig.setInt(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS, 3);
flushTestConfig.setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL, 3,
TimeUnit.SECONDS);
XceiverClientManager xcm = new XceiverClientManager(flushTestConfig);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
@ -287,7 +289,9 @@ public void testRetryLog() throws IOException,
flushTestConfig.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path);
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
flushTestConfig.setInt(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS, 3);
flushTestConfig.setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL,
3,
TimeUnit.SECONDS);
int numblocks = 10;
flushTestConfig.setInt(DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE, numblocks);


@ -55,6 +55,7 @@
import java.nio.file.Paths;
import java.util.Random;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.ozone.OzoneConfigKeys
@ -553,20 +554,25 @@ private void configureTrace() {
private void configureSCMheartbeat() {
if (hbSeconds.isPresent()) {
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS,
hbSeconds.get());
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
hbSeconds.get(), TimeUnit.SECONDS);
} else {
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS,
DEFAULT_HB_SECONDS);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
DEFAULT_HB_SECONDS,
TimeUnit.SECONDS);
}
if (hbProcessorInterval.isPresent()) {
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS,
hbProcessorInterval.get());
conf.setTimeDuration(
ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
hbProcessorInterval.get(),
TimeUnit.MILLISECONDS);
} else {
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS,
DEFAULT_PROCESSOR_MS);
conf.setTimeDuration(
ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
DEFAULT_PROCESSOR_MS,
TimeUnit.MILLISECONDS);
}
}


@ -53,6 +53,7 @@
import java.util.Map;
import java.util.Collections;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
@ -185,10 +186,15 @@ private void verifyPermissionDeniedException(Exception e, String userName) {
public void testBlockDeletionTransactions() throws Exception {
int numKeys = 5;
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 5);
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 3000);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
5,
TimeUnit.SECONDS);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
3000,
TimeUnit.MILLISECONDS);
conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
conf.setInt(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS, 1000);
conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
1, TimeUnit.SECONDS);
// Reset container provision size, otherwise only one container
// is created by default.
conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE,
@ -262,10 +268,13 @@ public void testBlockDeletionTransactions() throws Exception {
public void testBlockDeletingThrottling() throws Exception {
int numKeys = 15;
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 5);
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 3000);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL, 5,
TimeUnit.SECONDS);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
3000, TimeUnit.MILLISECONDS);
conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
conf.setInt(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS, 1000);
conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
1000, TimeUnit.MILLISECONDS);
conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE,
numKeys);


@ -55,10 +55,11 @@
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS;
.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
import static org.apache.hadoop.ozone.OzoneConfigKeys
@ -220,7 +221,8 @@ public void testBlockDeletion() throws Exception {
@Test
public void testShutdownService() throws Exception {
Configuration conf = new OzoneConfiguration();
conf.setInt(OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS, 500);
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500,
TimeUnit.MILLISECONDS);
conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 10);
ContainerManager containerManager = createContainerManager(conf);


@ -75,7 +75,8 @@ public class TestDatanodeStateMachine {
@Before
public void setUp() throws Exception {
conf = SCMTestUtils.getConf();
conf.setInt(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500,
TimeUnit.MILLISECONDS);
conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
serverAddresses = new LinkedList<>();
scmServers = new LinkedList<>();


@ -52,7 +52,7 @@
import static org.apache.hadoop.ozone.protocol.proto.OzoneProtos.NodeState.HEALTHY;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_SECONDS;
.OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT;
import static org.apache.ratis.shaded.com.google.common.util.concurrent
.Uninterruptibles.sleepUninterruptibly;
@ -108,7 +108,8 @@ public void setUp() throws Exception {
}
}
OzoneConfiguration config = SCMTestUtils.getOzoneConf();
config.setInt(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_SECONDS, 1);
config.setTimeDuration(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT, 1,
TimeUnit.SECONDS);
replicationManager = new ContainerReplicationManager(config,
nodeManager, poolManager, commandQueue);
datanodeStateManager = new ReplicationDatanodeStateManager(nodeManager,


@ -46,6 +46,7 @@
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
@ -69,8 +70,10 @@ public static void setUp() throws Exception {
.getTestDir(TestContainerMapping.class.getSimpleName());
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
testDir.getAbsolutePath());
conf.setLong(ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT,
TIMEOUT);
conf.setTimeDuration(
ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT,
TIMEOUT,
TimeUnit.MILLISECONDS);
boolean folderExisted = testDir.exists() || testDir.mkdirs();
if (!folderExisted) {
throw new IOException("Unable to create test directory path");


@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.scm.node;
import com.google.common.base.Supplier;
import static java.util.concurrent.TimeUnit.*;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.ozone.OzoneConfigKeys;
@ -49,6 +50,7 @@
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static java.util.concurrent.TimeUnit.SECONDS;
@ -60,11 +62,12 @@
import static org.apache.hadoop.ozone.protocol.proto
.StorageContainerDatanodeProtocolProtos.Type;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL_MS;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_MAX_HB_COUNT_TO_PROCESS;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL_MS;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.StringStartsWith.startsWith;
@ -110,7 +113,8 @@ OzoneConfiguration getConf() {
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
testDir.getAbsolutePath());
conf.setLong(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
TimeUnit.MILLISECONDS);
return conf;
}
@ -243,7 +247,8 @@ public void testScmSameNodeHeartbeats() throws IOException,
public void testScmShutdown() throws IOException, InterruptedException,
TimeoutException {
OzoneConfiguration conf = getConf();
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
100, TimeUnit.MILLISECONDS);
SCMNodeManager nodeManager = createNodeManager(conf);
DatanodeID datanodeID = SCMTestUtils.getDatanodeID(nodeManager);
nodeManager.close();
@ -267,7 +272,8 @@ public void testScmShutdown() throws IOException, InterruptedException,
@Test
public void testScmHeartbeatAfterRestart() throws Exception {
OzoneConfiguration conf = getConf();
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
100, TimeUnit.MILLISECONDS);
DatanodeID datanodeID = SCMTestUtils.getDatanodeID();
try (SCMNodeManager nodemanager = createNodeManager(conf)) {
nodemanager.register(datanodeID);
@ -344,12 +350,13 @@ public void testScmSanityOfUserConfig1() throws IOException,
InterruptedException, TimeoutException {
OzoneConfiguration conf = getConf();
final int interval = 100;
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, interval);
conf.setInt(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
// This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS
// and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, interval);
// This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL
// and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, interval, MILLISECONDS);
thrown.expect(IllegalArgumentException.class);
@ -372,12 +379,13 @@ public void testScmSanityOfUserConfig2() throws IOException,
InterruptedException, TimeoutException {
OzoneConfiguration conf = getConf();
final int interval = 100;
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, interval);
conf.setInt(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
TimeUnit.MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, TimeUnit.SECONDS);
// This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS
// and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, 3 * 1000);
// This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL
// and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3 * 1000, MILLISECONDS);
createNodeManager(conf).close();
}
@ -396,10 +404,11 @@ public void testScmDetectStaleAndDeadNode() throws IOException,
final int nodeCount = 10;
OzoneConfiguration conf = getConf();
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, interval);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1, SECONDS);
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, 3 * 1000);
conf.setInt(OZONE_SCM_DEADNODE_INTERVAL_MS, 6 * 1000);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
@ -505,25 +514,25 @@ public void testScmClusterIsInExpectedState1() throws IOException,
* These values are very important. Here is what they mean so you don't
* have to look it up while reading this code.
*
* OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS - This the frequency of the
* OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - This is the frequency of the
* HB processing thread that is running in the SCM. This thread must run
* for the SCM to process the Heartbeats.
*
* OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS - This is the frequency at which
* OZONE_SCM_HEARTBEAT_INTERVAL - This is the frequency at which
* datanodes will send heartbeats to SCM. Please note: This is the only
* config value for node manager that is specified in seconds. We don't
* want SCM heartbeat resolution to be more than in seconds.
* In this test it is not used, but we are forced to set it because we
* have validation code that checks Stale Node interval and Dead Node
* interval is larger than the value of
* OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS.
* OZONE_SCM_HEARTBEAT_INTERVAL.
*
* OZONE_SCM_STALENODE_INTERVAL_MS - This is the time that must elapse
* OZONE_SCM_STALENODE_INTERVAL - This is the time that must elapse
* from the last heartbeat for us to mark a node as stale. In this test
* we set that to 3. That is, if a node has not sent a heartbeat to SCM for 3
* seconds we will mark it as stale.
*
* OZONE_SCM_DEADNODE_INTERVAL_MS - This is the time that must elapse
* OZONE_SCM_DEADNODE_INTERVAL - This is the time that must elapse
* from the last heartbeat for a node to be marked dead. We have an
* additional constraint that this must be at least 2 times bigger than
* Stale node Interval.
@ -535,10 +544,11 @@ public void testScmClusterIsInExpectedState1() throws IOException,
*/
OzoneConfiguration conf = getConf();
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1, SECONDS);
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, 3 * 1000);
conf.setInt(OZONE_SCM_DEADNODE_INTERVAL_MS, 6 * 1000);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
/**
@ -712,10 +722,11 @@ public void testScmClusterIsInExpectedState2() throws IOException,
final int deadCount = 10;
OzoneConfiguration conf = getConf();
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1, SECONDS);
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, 3 * 1000);
conf.setInt(OZONE_SCM_DEADNODE_INTERVAL_MS, 6 * 1000);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
conf.setInt(OZONE_SCM_MAX_HB_COUNT_TO_PROCESS, 7000);
@ -801,10 +812,14 @@ public void testScmCanHandleScale() throws IOException,
final int healthyCount = 3000;
final int staleCount = 3000;
OzoneConfiguration conf = getConf();
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1, SECONDS);
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, 3 * 1000);
conf.setInt(OZONE_SCM_DEADNODE_INTERVAL_MS, 6 * 1000);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1,
SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3 * 1000,
MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6 * 1000,
MILLISECONDS);
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
List<DatanodeID> healthyList = createNodeSet(nodeManager,
@ -862,8 +877,9 @@ public void testScmLogsHeartbeatFlooding() throws IOException,
// Make the HB process thread run slower.
OzoneConfiguration conf = getConf();
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 500);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1, SECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 500,
TimeUnit.MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setInt(OZONE_SCM_MAX_HB_COUNT_TO_PROCESS, 500);
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
@ -897,7 +913,8 @@ public void testScmLogsHeartbeatFlooding() throws IOException,
public void testScmEnterAndExitChillMode() throws IOException,
InterruptedException {
OzoneConfiguration conf = getConf();
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
MILLISECONDS);
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
nodeManager.setMinimumChillModeNodes(10);
@ -956,7 +973,8 @@ public void testScmEnterAndExitChillMode() throws IOException,
public void testScmStatsFromNodeReport() throws IOException,
InterruptedException, TimeoutException {
OzoneConfiguration conf = getConf();
conf.setLong(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 1000);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000,
MILLISECONDS);
final int nodeCount = 10;
final long capacity = 2000;
final long used = 100;
@ -1001,10 +1019,11 @@ public void testScmNodeReportUpdate() throws IOException,
final int nodeCount = 1;
final int interval = 100;
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, interval);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1, SECONDS);
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, 3 * 1000);
conf.setInt(OZONE_SCM_DEADNODE_INTERVAL_MS, 6 * 1000);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
DatanodeID datanodeID = SCMTestUtils.getDatanodeID(nodeManager);


@ -28,6 +28,7 @@
import org.junit.Test;
import java.util.EnumSet;
import java.util.concurrent.TimeUnit;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.hadoop.ozone.protocol.proto.OzoneProtos.NodeState.DEAD;
@ -36,13 +37,13 @@
import static org.apache.hadoop.ozone.protocol.proto.OzoneProtos.NodeState
.STALE;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_DEADNODE_INTERVAL_MS;
.OZONE_SCM_DEADNODE_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS;
.OZONE_SCM_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS;
.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_STALENODE_INTERVAL_MS;
.OZONE_SCM_STALENODE_INTERVAL;
import static org.junit.Assert.assertEquals;
/**
@ -59,10 +60,11 @@ public void setUp() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
final int interval = 100;
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, interval);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1, SECONDS);
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, 3 * 1000);
conf.setInt(OZONE_SCM_DEADNODE_INTERVAL_MS, 6 * 1000);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
interval, TimeUnit.MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
cluster = new MiniOzoneClassicCluster.Builder(conf)
.numDataNodes(numOfDatanodes)


@ -68,6 +68,7 @@
import java.util.Map;
import java.util.Set;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
@ -98,7 +99,8 @@ public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
// Set short block deleting service interval to speed up deletions.
conf.setInt(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS, 1000);
conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
1000, TimeUnit.MILLISECONDS);
path = GenericTestUtils.getTempPath(TestKeys.class.getSimpleName());
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);