HDFS-12698. Ozone: Use time units in the Ozone configuration values. Contributed by Elek, Marton.

Authored by Anu Engineer on 2017-12-18 12:39:03 -08:00; committed by Owen O'Malley.
parent 8ff98e2af3
commit 2f3068bc72
31 changed files with 346 additions and 279 deletions


@ -103,9 +103,9 @@ public final class CBlockConfigKeys {
/** /**
* Number of seconds to keep the Thread alive when it is idle. * Number of seconds to keep the Thread alive when it is idle.
*/ */
public static final String DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS = public static final String DFS_CBLOCK_CACHE_KEEP_ALIVE =
"dfs.cblock.cache.keep.alive.seconds"; "dfs.cblock.cache.keep.alive";
public static final long DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS_DEFAULT = 60; public static final String DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT = "60s";
/** /**
* Priority of cache flusher thread, affecting the relative performance of * Priority of cache flusher thread, affecting the relative performance of
@ -123,10 +123,10 @@ public final class CBlockConfigKeys {
"dfs.cblock.cache.block.buffer.size"; "dfs.cblock.cache.block.buffer.size";
public static final int DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE_DEFAULT = 512; public static final int DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE_DEFAULT = 512;
public static final String DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS = public static final String DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL =
"dfs.cblock.block.buffer.flush.interval.seconds"; "dfs.cblock.block.buffer.flush.interval";
public static final int public static final String DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_DEFAULT =
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS_DEFAULT = 60; "60s";
// jscsi server settings // jscsi server settings
public static final String DFS_CBLOCK_JSCSI_SERVER_ADDRESS_KEY = public static final String DFS_CBLOCK_JSCSI_SERVER_ADDRESS_KEY =
@ -183,9 +183,9 @@ public final class CBlockConfigKeys {
* currently the largest supported volume is about 8TB, which might take * currently the largest supported volume is about 8TB, which might take
* > 20 seconds to finish creating containers. thus set timeout to 30 sec. * > 20 seconds to finish creating containers. thus set timeout to 30 sec.
*/ */
public static final String DFS_CBLOCK_RPC_TIMEOUT_SECONDS = public static final String DFS_CBLOCK_RPC_TIMEOUT =
"dfs.cblock.rpc.timeout.seconds"; "dfs.cblock.rpc.timeout";
public static final int DFS_CBLOCK_RPC_TIMEOUT_SECONDS_DEFAULT = 300; public static final String DFS_CBLOCK_RPC_TIMEOUT_DEFAULT = "300s";
private CBlockConfigKeys() { private CBlockConfigKeys() {
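This is the pattern repeated throughout the commit: a key loses its ".seconds" or ".ms" suffix, and its default becomes a string that carries its own unit, which Configuration.getTimeDuration converts into whatever unit the caller asks for. Below is a minimal sketch of reading one of the renamed cblock keys; the key name and default come from the diff above, while the wrapper class and main method exist only for illustration.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public final class KeepAliveExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Nothing configured: the "60s" default is parsed and converted to
        // the unit requested by the caller.
        long keepAliveSeconds = conf.getTimeDuration(
            "dfs.cblock.cache.keep.alive", "60s", TimeUnit.SECONDS);       // 60
        // Overrides may use any suffix from the documented set (ns, ms, s, m, h, d).
        conf.set("dfs.cblock.cache.keep.alive", "2m");
        long keepAliveMillis = conf.getTimeDuration(
            "dfs.cblock.cache.keep.alive", "60s", TimeUnit.MILLISECONDS);  // 120000
        System.out.println(keepAliveSeconds + " " + keepAliveMillis);
      }
    }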


@ -141,12 +141,12 @@ public final class OzoneConfigKeys {
public static final int public static final int
OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT = 20; OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT = 20;
public static final String OZONE_CLIENT_SOCKET_TIMEOUT_MS = public static final String OZONE_CLIENT_SOCKET_TIMEOUT =
"ozone.client.socket.timeout.ms"; "ozone.client.socket.timeout";
public static final int OZONE_CLIENT_SOCKET_TIMEOUT_MS_DEFAULT = 5000; public static final int OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT = 5000;
public static final String OZONE_CLIENT_CONNECTION_TIMEOUT_MS = public static final String OZONE_CLIENT_CONNECTION_TIMEOUT =
"ozone.client.connection.timeout.ms"; "ozone.client.connection.timeout";
public static final int OZONE_CLIENT_CONNECTION_TIMEOUT_MS_DEFAULT = 5000; public static final int OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT = 5000;
public static final String OZONE_REPLICATION = "ozone.replication"; public static final String OZONE_REPLICATION = "ozone.replication";
public static final int OZONE_REPLICATION_DEFAULT = public static final int OZONE_REPLICATION_DEFAULT =
@ -166,10 +166,10 @@ public final class OzoneConfigKeys {
/** /**
* Configuration properties for Ozone Block Deleting Service. * Configuration properties for Ozone Block Deleting Service.
*/ */
public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS = public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL =
"ozone.block.deleting.service.interval.ms"; "ozone.block.deleting.service.interval";
public static final int OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS_DEFAULT public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT
= 60000; = "60s";
/** /**
* The interval of open key clean service. * The interval of open key clean service.
@ -190,8 +190,8 @@ public final class OzoneConfigKeys {
public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT = public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT =
"ozone.block.deleting.service.timeout"; "ozone.block.deleting.service.timeout";
public static final int OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT
= 300000; // 300s for default = "300s"; // 300s for default
public static final String OZONE_KEY_PREALLOCATION_MAXSIZE = public static final String OZONE_KEY_PREALLOCATION_MAXSIZE =
"ozone.key.preallocation.maxsize"; "ozone.key.preallocation.maxsize";
@ -208,10 +208,10 @@ public final class OzoneConfigKeys {
public static final int public static final int
OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10; OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10;
public static final String OZONE_CONTAINER_REPORT_INTERVAL_MS = public static final String OZONE_CONTAINER_REPORT_INTERVAL =
"ozone.container.report.interval.ms"; "ozone.container.report.interval";
public static final int OZONE_CONTAINER_REPORT_INTERVAL_MS_DEFAULT = public static final String OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT =
60000; "60s";
public static final String DFS_CONTAINER_RATIS_ENABLED_KEY public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
= ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY; = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
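Not every default becomes a string in this file: OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT and OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT stay plain ints. getTimeDuration also has an overload that takes a numeric default, interpreted in the unit supplied by the caller, so both styles coexist. A small sketch of the two overloads side by side; the wrapper class is illustrative only.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public final class TwoOverloads {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // String default that carries its own unit.
        long reportIntervalMs = conf.getTimeDuration(
            "ozone.container.report.interval", "60s", TimeUnit.MILLISECONDS);  // 60000
        // Numeric default, interpreted in the unit given to the call.
        long socketTimeoutMs = conf.getTimeDuration(
            "ozone.client.socket.timeout", 5000, TimeUnit.MILLISECONDS);        // 5000
        System.out.println(reportIntervalMs + " " + socketTimeoutMs);
      }
    }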


@ -69,16 +69,16 @@ import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_DEADNODE_INTERVAL_DEFAULT; .OZONE_SCM_DEADNODE_INTERVAL_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_DEADNODE_INTERVAL_MS; .OZONE_SCM_DEADNODE_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS; .OZONE_SCM_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT; .OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT; .OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS; .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_RPC_TIMEOUT; .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT;
@ -87,7 +87,7 @@ import static org.apache.hadoop.scm.ScmConfigKeys
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_STALENODE_INTERVAL_DEFAULT; .OZONE_SCM_STALENODE_INTERVAL_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_STALENODE_INTERVAL_MS; .OZONE_SCM_STALENODE_INTERVAL;
/** /**
* Utility methods for Ozone and Container Clients. * Utility methods for Ozone and Container Clients.
@ -541,8 +541,9 @@ public final class OzoneClientUtils {
* @return long in Milliseconds. * @return long in Milliseconds.
*/ */
public static long getScmheartbeatCheckerInterval(Configuration conf) { public static long getScmheartbeatCheckerInterval(Configuration conf) {
return conf.getLong(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS_DEFAULT); ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
} }
/** /**
@ -553,9 +554,8 @@ public final class OzoneClientUtils {
* @return - HB interval in seconds. * @return - HB interval in seconds.
*/ */
public static long getScmHeartbeatInterval(Configuration conf) { public static long getScmHeartbeatInterval(Configuration conf) {
return conf.getTimeDuration( return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL,
OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, ScmConfigKeys.OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT,
ScmConfigKeys.OZONE_SCM_HEARBEAT_INTERVAL_SECONDS_DEFAULT,
TimeUnit.SECONDS); TimeUnit.SECONDS);
} }
@ -568,8 +568,9 @@ public final class OzoneClientUtils {
*/ */
public static long getStaleNodeInterval(Configuration conf) { public static long getStaleNodeInterval(Configuration conf) {
long staleNodeIntevalMs = conf.getLong(OZONE_SCM_STALENODE_INTERVAL_MS, long staleNodeIntervalMs =
OZONE_SCM_STALENODE_INTERVAL_DEFAULT); conf.getTimeDuration(OZONE_SCM_STALENODE_INTERVAL,
OZONE_SCM_STALENODE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
long heartbeatThreadFrequencyMs = getScmheartbeatCheckerInterval(conf); long heartbeatThreadFrequencyMs = getScmheartbeatCheckerInterval(conf);
@ -582,25 +583,25 @@ public final class OzoneClientUtils {
// Here we check that staleNodeInterval is at least five times more than the // Here we check that staleNodeInterval is at least five times more than the
// frequency at which the accounting thread is going to run. // frequency at which the accounting thread is going to run.
try { try {
sanitizeUserArgs(staleNodeIntevalMs, heartbeatThreadFrequencyMs, 5, 1000); sanitizeUserArgs(staleNodeIntervalMs, heartbeatThreadFrequencyMs,
5, 1000);
} catch (IllegalArgumentException ex) { } catch (IllegalArgumentException ex) {
LOG.error("Stale Node Interval MS is cannot be honored due to " + LOG.error("Stale Node Interval is cannot be honored due to " +
"mis-configured {}. ex: {}", "mis-configured {}. ex: {}",
OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, ex); OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, ex);
throw ex; throw ex;
} }
// Make sure that stale node value is greater than configured value that // Make sure that stale node value is greater than configured value that
// datanodes are going to send HBs. // datanodes are going to send HBs.
try { try {
sanitizeUserArgs(staleNodeIntevalMs, heartbeatIntervalMs, 3, 1000); sanitizeUserArgs(staleNodeIntervalMs, heartbeatIntervalMs, 3, 1000);
} catch (IllegalArgumentException ex) { } catch (IllegalArgumentException ex) {
LOG.error("Stale Node Interval MS is cannot be honored due to " + LOG.error("Stale Node Interval MS is cannot be honored due to " +
"mis-configured {}. ex: {}", "mis-configured {}. ex: {}", OZONE_SCM_HEARTBEAT_INTERVAL, ex);
OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, ex);
throw ex; throw ex;
} }
return staleNodeIntevalMs; return staleNodeIntervalMs;
} }
/** /**
@ -614,8 +615,9 @@ public final class OzoneClientUtils {
*/ */
public static long getDeadNodeInterval(Configuration conf) { public static long getDeadNodeInterval(Configuration conf) {
long staleNodeIntervalMs = getStaleNodeInterval(conf); long staleNodeIntervalMs = getStaleNodeInterval(conf);
long deadNodeIntervalMs = conf.getLong( long deadNodeIntervalMs = conf.getTimeDuration(OZONE_SCM_DEADNODE_INTERVAL,
OZONE_SCM_DEADNODE_INTERVAL_MS, OZONE_SCM_DEADNODE_INTERVAL_DEFAULT); OZONE_SCM_DEADNODE_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
try { try {
// Make sure that dead nodes Ms is at least twice the time for staleNodes // Make sure that dead nodes Ms is at least twice the time for staleNodes
@ -623,8 +625,7 @@ public final class OzoneClientUtils {
sanitizeUserArgs(deadNodeIntervalMs, staleNodeIntervalMs, 2, 1000); sanitizeUserArgs(deadNodeIntervalMs, staleNodeIntervalMs, 2, 1000);
} catch (IllegalArgumentException ex) { } catch (IllegalArgumentException ex) {
LOG.error("Dead Node Interval MS is cannot be honored due to " + LOG.error("Dead Node Interval MS is cannot be honored due to " +
"mis-configured {}. ex: {}", "mis-configured {}. ex: {}", OZONE_SCM_STALENODE_INTERVAL, ex);
OZONE_SCM_STALENODE_INTERVAL_MS, ex);
throw ex; throw ex;
} }
return deadNodeIntervalMs; return deadNodeIntervalMs;
@ -737,24 +738,26 @@ public final class OzoneClientUtils {
* @return a {@link CloseableHttpClient} instance. * @return a {@link CloseableHttpClient} instance.
*/ */
public static CloseableHttpClient newHttpClient(Configuration conf) { public static CloseableHttpClient newHttpClient(Configuration conf) {
int socketTimeout = OzoneConfigKeys long socketTimeout = OzoneConfigKeys
.OZONE_CLIENT_SOCKET_TIMEOUT_MS_DEFAULT; .OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT;
int connectionTimeout = OzoneConfigKeys long connectionTimeout = OzoneConfigKeys
.OZONE_CLIENT_CONNECTION_TIMEOUT_MS_DEFAULT; .OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT;
if (conf != null) { if (conf != null) {
socketTimeout = conf.getInt( socketTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_MS, OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_MS_DEFAULT); OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
connectionTimeout = conf.getInt( TimeUnit.MILLISECONDS);
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_MS, connectionTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_MS_DEFAULT); OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
} }
CloseableHttpClient client = HttpClients.custom() CloseableHttpClient client = HttpClients.custom()
.setDefaultRequestConfig( .setDefaultRequestConfig(
RequestConfig.custom() RequestConfig.custom()
.setSocketTimeout(socketTimeout) .setSocketTimeout(Math.toIntExact(socketTimeout))
.setConnectTimeout(connectionTimeout) .setConnectTimeout(Math.toIntExact(connectionTimeout))
.build()) .build())
.build(); .build();
return client; return client;
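getTimeDuration returns a long, while HttpClient's RequestConfig setters take int milliseconds; that is the reason for the Math.toIntExact calls above, which fail loudly on overflow instead of silently truncating. A stand-alone illustration follows: the key name comes from OzoneConfigKeys, but the surrounding class does not exist in the codebase.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.http.client.config.RequestConfig;

    public final class TimeoutNarrowing {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        long socketTimeoutMs = conf.getTimeDuration(
            "ozone.client.socket.timeout", 5000, TimeUnit.MILLISECONDS);
        // Math.toIntExact throws ArithmeticException if the configured value
        // does not fit in an int, rather than wrapping like a plain cast.
        RequestConfig requestConfig = RequestConfig.custom()
            .setSocketTimeout(Math.toIntExact(socketTimeoutMs))
            .build();
        System.out.println(requestConfig.getSocketTimeout());
      }
    }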


@ -76,6 +76,7 @@ import java.util.List;
import java.util.Random; import java.util.Random;
import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask; import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
import static java.net.HttpURLConnection.HTTP_CREATED; import static java.net.HttpURLConnection.HTTP_CREATED;
import static java.net.HttpURLConnection.HTTP_OK; import static java.net.HttpURLConnection.HTTP_OK;
@ -114,12 +115,14 @@ public class RestClient implements ClientProtocol {
.setHost(getOzoneRestHandlerHost()) .setHost(getOzoneRestHandlerHost())
.setPort(port); .setPort(port);
this.ozoneRestUri = uriBuilder.build(); this.ozoneRestUri = uriBuilder.build();
int socketTimeout = conf.getInt( long socketTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_MS, OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_MS_DEFAULT); OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
int connectionTimeout = conf.getInt( TimeUnit.MILLISECONDS);
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_MS, long connectionTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_MS_DEFAULT); OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
int maxConnection = conf.getInt( int maxConnection = conf.getInt(
OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_MAX, OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_MAX,
OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT); OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT);
@ -142,8 +145,8 @@ public class RestClient implements ClientProtocol {
.setConnectionManager(connManager) .setConnectionManager(connManager)
.setDefaultRequestConfig( .setDefaultRequestConfig(
RequestConfig.custom() RequestConfig.custom()
.setSocketTimeout(socketTimeout) .setSocketTimeout(Math.toIntExact(socketTimeout))
.setConnectTimeout(connectionTimeout) .setConnectTimeout(Math.toIntExact(connectionTimeout))
.build()) .build())
.build(); .build();
this.ugi = UserGroupInformation.getCurrentUser(); this.ugi = UserGroupInformation.getCurrentUser();


@ -29,8 +29,8 @@ public final class ScmConfigKeys {
public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY = public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY =
"scm.container.client.idle.threshold"; "scm.container.client.idle.threshold";
public static final int SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT = public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT =
10000; "10s";
public static final String SCM_CONTAINER_CLIENT_MAX_SIZE_KEY = public static final String SCM_CONTAINER_CLIENT_MAX_SIZE_KEY =
"scm.container.client.max.size"; "scm.container.client.max.size";
@ -114,29 +114,29 @@ public final class ScmConfigKeys {
"ozone.scm.handler.count.key"; "ozone.scm.handler.count.key";
public static final int OZONE_SCM_HANDLER_COUNT_DEFAULT = 10; public static final int OZONE_SCM_HANDLER_COUNT_DEFAULT = 10;
public static final String OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS = public static final String OZONE_SCM_HEARTBEAT_INTERVAL =
"ozone.scm.heartbeat.interval.seconds"; "ozone.scm.heartbeat.interval";
public static final int OZONE_SCM_HEARBEAT_INTERVAL_SECONDS_DEFAULT = public static final String OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT =
30; "30s";
public static final String OZONE_SCM_DEADNODE_INTERVAL_MS = public static final String OZONE_SCM_DEADNODE_INTERVAL =
"ozone.scm.dead.node.interval.ms"; "ozone.scm.dead.node.interval";
public static final long OZONE_SCM_DEADNODE_INTERVAL_DEFAULT = public static final String OZONE_SCM_DEADNODE_INTERVAL_DEFAULT =
OZONE_SCM_HEARBEAT_INTERVAL_SECONDS_DEFAULT * 1000L * 20L; "10m";
public static final String OZONE_SCM_MAX_HB_COUNT_TO_PROCESS = public static final String OZONE_SCM_MAX_HB_COUNT_TO_PROCESS =
"ozone.scm.max.hb.count.to.process"; "ozone.scm.max.hb.count.to.process";
public static final int OZONE_SCM_MAX_HB_COUNT_TO_PROCESS_DEFAULT = 5000; public static final int OZONE_SCM_MAX_HB_COUNT_TO_PROCESS_DEFAULT = 5000;
public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS = public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL =
"ozone.scm.heartbeat.thread.interval.ms"; "ozone.scm.heartbeat.thread.interval";
public static final long OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS_DEFAULT = public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT =
3000; "3s";
public static final String OZONE_SCM_STALENODE_INTERVAL_MS = public static final String OZONE_SCM_STALENODE_INTERVAL =
"ozone.scm.stale.node.interval.ms"; "ozone.scm.stale.node.interval";
public static final long OZONE_SCM_STALENODE_INTERVAL_DEFAULT = public static final String OZONE_SCM_STALENODE_INTERVAL_DEFAULT =
OZONE_SCM_HEARBEAT_INTERVAL_SECONDS_DEFAULT * 1000L * 3L; "90s";
public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT = public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT =
"ozone.scm.heartbeat.rpc-timeout"; "ozone.scm.heartbeat.rpc-timeout";
@ -199,18 +199,17 @@ public final class ScmConfigKeys {
public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT = public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT =
"ozone.scm.container.creation.lease.timeout"; "ozone.scm.container.creation.lease.timeout";
public static final long OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = public static final String
60000; OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
/** /**
* Don't start processing a pool if we have not had a minimum number of * Don't start processing a pool if we have not had a minimum number of
* seconds from the last processing. * seconds from the last processing.
*/ */
public static final String OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL =
"ozone.scm.container.report.processing.interval";
public static final String public static final String
OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_SECONDS = OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = "60s";
"ozone.scm.container.report.processing.interval.seconds";
public static final int
OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = 60;
/** /**
* These 2 settings control the number of threads in executor pool and time * These 2 settings control the number of threads in executor pool and time
@ -219,10 +218,10 @@ public final class ScmConfigKeys {
public static final String OZONE_SCM_MAX_CONTAINER_REPORT_THREADS = public static final String OZONE_SCM_MAX_CONTAINER_REPORT_THREADS =
"ozone.scm.max.container.report.threads"; "ozone.scm.max.container.report.threads";
public static final int OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT = 100; public static final int OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT = 100;
public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_SECONDS = public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT =
"ozone.scm.container.reports.wait.timeout.seconds"; "ozone.scm.container.reports.wait.timeout";
public static final int OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT = public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT =
300; // Default 5 minute wait. "5m";
public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY = public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
"ozone.scm.block.deletion.max.retry"; "ozone.scm.block.deletion.max.retry";


@ -47,9 +47,10 @@ public class CBlockVolumeClient {
InetSocketAddress address = serverAddress != null ? serverAddress : InetSocketAddress address = serverAddress != null ? serverAddress :
OzoneClientUtils.getCblockServiceRpcAddr(conf); OzoneClientUtils.getCblockServiceRpcAddr(conf);
long version = RPC.getProtocolVersion(CBlockServiceProtocolPB.class); long version = RPC.getProtocolVersion(CBlockServiceProtocolPB.class);
int rpcTimeout = int rpcTimeout = Math.toIntExact(
conf.getInt(CBlockConfigKeys.DFS_CBLOCK_RPC_TIMEOUT_SECONDS, conf.getTimeDuration(CBlockConfigKeys.DFS_CBLOCK_RPC_TIMEOUT,
CBlockConfigKeys.DFS_CBLOCK_RPC_TIMEOUT_SECONDS_DEFAULT) * 1000; CBlockConfigKeys.DFS_CBLOCK_RPC_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS));
cblockClient = new CBlockServiceProtocolClientSideTranslatorPB( cblockClient = new CBlockServiceProtocolClientSideTranslatorPB(
RPC.getProtocolProxy(CBlockServiceProtocolPB.class, version, RPC.getProtocolProxy(CBlockServiceProtocolPB.class, version,
address, UserGroupInformation.getCurrentUser(), conf, address, UserGroupInformation.getCurrentUser(), conf,


@ -69,9 +69,9 @@ import static org.apache.hadoop.cblock.CBlockConfigKeys
import static org.apache.hadoop.cblock.CBlockConfigKeys import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_CORE_MIN_POOL_SIZE_DEFAULT; .DFS_CBLOCK_CACHE_CORE_MIN_POOL_SIZE_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS; .DFS_CBLOCK_CACHE_KEEP_ALIVE;
import static org.apache.hadoop.cblock.CBlockConfigKeys import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS_DEFAULT; .DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_MAX_POOL_SIZE; .DFS_CBLOCK_CACHE_MAX_POOL_SIZE;
import static org.apache.hadoop.cblock.CBlockConfigKeys import static org.apache.hadoop.cblock.CBlockConfigKeys
@ -123,8 +123,8 @@ public class ContainerCacheFlusher implements Runnable {
DFS_CBLOCK_CACHE_CORE_MIN_POOL_SIZE_DEFAULT); DFS_CBLOCK_CACHE_CORE_MIN_POOL_SIZE_DEFAULT);
int maxPoolSize = config.getInt(DFS_CBLOCK_CACHE_MAX_POOL_SIZE, int maxPoolSize = config.getInt(DFS_CBLOCK_CACHE_MAX_POOL_SIZE,
DFS_CBLOCK_CACHE_MAX_POOL_SIZE_DEFAULT); DFS_CBLOCK_CACHE_MAX_POOL_SIZE_DEFAULT);
long keepAlive = config.getLong(DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS, long keepAlive = config.getTimeDuration(DFS_CBLOCK_CACHE_KEEP_ALIVE,
DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS_DEFAULT); DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT, TimeUnit.SECONDS);
int threadPri = config.getInt(DFS_CBLOCK_CACHE_THREAD_PRIORITY, int threadPri = config.getInt(DFS_CBLOCK_CACHE_THREAD_PRIORITY,
DFS_CBLOCK_CACHE_THREAD_PRIORITY_DEFAULT); DFS_CBLOCK_CACHE_THREAD_PRIORITY_DEFAULT);
int blockBufferSize = config.getInt(DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE, int blockBufferSize = config.getInt(DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE,
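The flusher asks for the keep-alive in SECONDS because that is the unit it hands to its thread pool; the caller of getTimeDuration simply picks whatever unit the consuming API expects. A rough sketch of that wiring is below; the pool sizes and the queue are invented for the example and are not the flusher's real values.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public final class FlusherPoolSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        long keepAliveSeconds = conf.getTimeDuration(
            "dfs.cblock.cache.keep.alive", "60s", TimeUnit.SECONDS);
        // The duration and its unit travel together into the executor.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            16, 256, keepAliveSeconds, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>());
        System.out.println(pool.getKeepAliveTime(TimeUnit.SECONDS));   // 60
        pool.shutdown();
      }
    }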


@ -31,17 +31,17 @@ import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadFactory;
import static org.apache.hadoop.cblock.CBlockConfigKeys. import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS; DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL;
import static org.apache.hadoop.cblock.CBlockConfigKeys. import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS_DEFAULT; DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys. import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE; DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE;
import static org.apache.hadoop.cblock.CBlockConfigKeys. import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE_DEFAULT; DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys. import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS; DFS_CBLOCK_CACHE_KEEP_ALIVE;
import static org.apache.hadoop.cblock.CBlockConfigKeys. import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS_DEFAULT; DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys. import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_THREAD_PRIORITY; DFS_CBLOCK_CACHE_THREAD_PRIORITY;
import static org.apache.hadoop.cblock.CBlockConfigKeys. import static org.apache.hadoop.cblock.CBlockConfigKeys.
@ -76,7 +76,7 @@ public class BlockBufferManager {
private final CBlockLocalCache parentCache; private final CBlockLocalCache parentCache;
private final ScheduledThreadPoolExecutor scheduledExecutor; private final ScheduledThreadPoolExecutor scheduledExecutor;
private final ThreadPoolExecutor threadPoolExecutor; private final ThreadPoolExecutor threadPoolExecutor;
private final int intervalSeconds; private final long intervalSeconds;
private final ArrayBlockingQueue<ByteBuffer> acquireQueue; private final ArrayBlockingQueue<ByteBuffer> acquireQueue;
private final ArrayBlockingQueue<Runnable> workQueue; private final ArrayBlockingQueue<Runnable> workQueue;
private ByteBuffer currentBuffer; private ByteBuffer currentBuffer;
@ -86,11 +86,13 @@ public class BlockBufferManager {
this.scheduledExecutor = new ScheduledThreadPoolExecutor(1); this.scheduledExecutor = new ScheduledThreadPoolExecutor(1);
this.intervalSeconds = this.intervalSeconds =
config.getInt(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS, config.getTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL,
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS_DEFAULT); DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_DEFAULT,
TimeUnit.SECONDS);
long keepAlive = config.getLong(DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS, long keepAlive = config.getTimeDuration(DFS_CBLOCK_CACHE_KEEP_ALIVE,
DFS_CBLOCK_CACHE_KEEP_ALIVE_SECONDS_DEFAULT); DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT,
TimeUnit.SECONDS);
this.workQueue = new ArrayBlockingQueue<>(2, true); this.workQueue = new ArrayBlockingQueue<>(2, true);
int threadPri = config.getInt(DFS_CBLOCK_CACHE_THREAD_PRIORITY, int threadPri = config.getInt(DFS_CBLOCK_CACHE_THREAD_PRIORITY,
DFS_CBLOCK_CACHE_THREAD_PRIORITY_DEFAULT); DFS_CBLOCK_CACHE_THREAD_PRIORITY_DEFAULT);


@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.container.common.interfaces.ContainerReportManage
import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLong;
/** /**
@ -46,9 +47,10 @@ public class ContainerReportManagerImpl implements ContainerReportManager {
this.config = config; this.config = config;
this.lastContainerReportTime = -1; this.lastContainerReportTime = -1;
this.reportCount = new AtomicLong(0L); this.reportCount = new AtomicLong(0L);
this.containerReportInterval = config.getLong( this.containerReportInterval = config.getTimeDuration(
OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL_MS, OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL,
OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL_MS_DEFAULT); OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
} }
public ReportState getContainerReportState() { public ReportState getContainerReportState() {
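The report interval is resolved to milliseconds because it is compared against monotonic-clock deltas when deciding whether another container report is due. The following is a hedged sketch of that kind of gate: the field names mirror the ones above, but the method body is illustrative rather than the class's actual logic.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.Time;

    public final class ReportGateSketch {
      private final long containerReportInterval;
      private long lastContainerReportTime = -1;

      ReportGateSketch(Configuration conf) {
        this.containerReportInterval = conf.getTimeDuration(
            "ozone.container.report.interval", "60s", TimeUnit.MILLISECONDS);
      }

      boolean shouldSendReport() {
        long now = Time.monotonicNow();
        if (lastContainerReportTime < 0
            || now - lastContainerReportTime >= containerReportInterval) {
          lastContainerReportTime = now;
          return true;
        }
        return false;
      }

      public static void main(String[] args) {
        System.out.println(new ReportGateSketch(new Configuration()).shouldSendReport());
      }
    }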


@ -80,7 +80,7 @@ public class BlockDeletingService extends BackgroundService{
private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 10; private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 10;
public BlockDeletingService(ContainerManager containerManager, public BlockDeletingService(ContainerManager containerManager,
int serviceInterval, long serviceTimeout, Configuration conf) { long serviceInterval, long serviceTimeout, Configuration conf) {
super("BlockDeletingService", serviceInterval, super("BlockDeletingService", serviceInterval,
TimeUnit.MILLISECONDS, BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, TimeUnit.MILLISECONDS, BLOCK_DELETING_SERVICE_CORE_POOL_SIZE,
serviceTimeout); serviceTimeout);


@ -52,9 +52,9 @@ import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX;
import static org.apache.hadoop.ozone.OzoneConfigKeys import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS; .OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS_DEFAULT; .OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT; import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
@ -104,9 +104,9 @@ public class OzoneContainer {
this.keyManager = new KeyManagerImpl(manager, ozoneConfig); this.keyManager = new KeyManagerImpl(manager, ozoneConfig);
manager.setKeyManager(this.keyManager); manager.setKeyManager(this.keyManager);
int svcInterval = ozoneConfig.getInt( long svcInterval =
OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS, ozoneConfig.getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS_DEFAULT); OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
long serviceTimeout = ozoneConfig.getTimeDuration( long serviceTimeout = ozoneConfig.getTimeDuration(
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);


@ -57,7 +57,7 @@ public class KeyDeletingService extends BackgroundService {
private final int keyLimitPerTask; private final int keyLimitPerTask;
public KeyDeletingService(ScmBlockLocationProtocol scmClient, public KeyDeletingService(ScmBlockLocationProtocol scmClient,
KeyManager manager, int serviceInterval, KeyManager manager, long serviceInterval,
long serviceTimeout, Configuration conf) { long serviceTimeout, Configuration conf) {
super("KeyDeletingService", serviceInterval, TimeUnit.MILLISECONDS, super("KeyDeletingService", serviceInterval, TimeUnit.MILLISECONDS,
KEY_DELETING_CORE_POOL_SIZE, serviceTimeout); KEY_DELETING_CORE_POOL_SIZE, serviceTimeout);


@ -51,9 +51,9 @@ import static org.apache.hadoop.ozone
import static org.apache.hadoop.ozone import static org.apache.hadoop.ozone
.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY; .OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
import static org.apache.hadoop.ozone import static org.apache.hadoop.ozone
.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS; .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone import static org.apache.hadoop.ozone
.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS_DEFAULT; .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
import static org.apache.hadoop.ozone import static org.apache.hadoop.ozone
.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
import static org.apache.hadoop.ozone import static org.apache.hadoop.ozone
@ -104,12 +104,14 @@ public class KeyManagerImpl implements KeyManager {
OZONE_SCM_BLOCK_SIZE_DEFAULT) * OzoneConsts.MB; OZONE_SCM_BLOCK_SIZE_DEFAULT) * OzoneConsts.MB;
this.useRatis = conf.getBoolean(DFS_CONTAINER_RATIS_ENABLED_KEY, this.useRatis = conf.getBoolean(DFS_CONTAINER_RATIS_ENABLED_KEY,
DFS_CONTAINER_RATIS_ENABLED_DEFAULT); DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
int blockDeleteInterval = conf.getInt( long blockDeleteInterval = conf.getTimeDuration(
OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS, OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS_DEFAULT); OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
long serviceTimeout = conf.getTimeDuration( long serviceTimeout = conf.getTimeDuration(
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
this.preallocateMax = conf.getLong( this.preallocateMax = conf.getLong(
OZONE_KEY_PREALLOCATION_MAXSIZE, OZONE_KEY_PREALLOCATION_MAXSIZE,
OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT); OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT);


@ -54,9 +54,9 @@ import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantLock;
import static org.apache.hadoop.ozone.OzoneConfigKeys import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS; .OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS_DEFAULT; .OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
import static org.apache.hadoop.ozone.OzoneConfigKeys import static org.apache.hadoop.ozone.OzoneConfigKeys
@ -135,10 +135,10 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
// SCM block deleting transaction log and deleting service. // SCM block deleting transaction log and deleting service.
deletedBlockLog = new DeletedBlockLogImpl(conf); deletedBlockLog = new DeletedBlockLogImpl(conf);
int svcInterval = long svcInterval =
conf.getInt( conf.getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS, OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT,
OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS_DEFAULT); TimeUnit.MILLISECONDS);
long serviceTimeout = long serviceTimeout =
conf.getTimeDuration( conf.getTimeDuration(
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,


@ -74,7 +74,7 @@ public class SCMBlockDeletingService extends BackgroundService {
public SCMBlockDeletingService(DeletedBlockLog deletedBlockLog, public SCMBlockDeletingService(DeletedBlockLog deletedBlockLog,
Mapping mapper, NodeManager nodeManager, Mapping mapper, NodeManager nodeManager,
int interval, long serviceTimeout, Configuration conf) { long interval, long serviceTimeout, Configuration conf) {
super("SCMBlockDeletingService", interval, TimeUnit.MILLISECONDS, super("SCMBlockDeletingService", interval, TimeUnit.MILLISECONDS,
BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout); BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout);
this.deletedBlockLog = deletedBlockLog; this.deletedBlockLog = deletedBlockLog;


@ -50,6 +50,7 @@ import java.nio.charset.Charset;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantLock;
@ -117,9 +118,10 @@ public class ContainerMapping implements Mapping {
ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT); ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT);
LOG.trace("Container State Manager created."); LOG.trace("Container State Manager created.");
long containerCreationLeaseTimeout = conf.getLong( long containerCreationLeaseTimeout = conf.getTimeDuration(
ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT, ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT,
ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT); ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
LOG.trace("Starting Container Lease Manager."); LOG.trace("Starting Container Lease Manager.");
containerLeaseManager = new LeaseManager<>(containerCreationLeaseTimeout); containerLeaseManager = new LeaseManager<>(containerCreationLeaseTimeout);
containerLeaseManager.start(); containerLeaseManager.start();


@ -49,11 +49,11 @@ import static com.google.common.util.concurrent.Uninterruptibles
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT; .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_SECONDS; .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT; .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_SECONDS; .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_MAX_CONTAINER_REPORT_THREADS; .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
@ -72,10 +72,10 @@ public class ContainerReplicationManager implements Closeable {
private final HashSet<String> poolNames; private final HashSet<String> poolNames;
private final PriorityQueue<PeriodicPool> poolQueue; private final PriorityQueue<PeriodicPool> poolQueue;
private final NodeManager nodeManager; private final NodeManager nodeManager;
private final int containerProcessingLag; private final long containerProcessingLag;
private final AtomicBoolean runnable; private final AtomicBoolean runnable;
private final ExecutorService executorService; private final ExecutorService executorService;
private final int maxPoolWait; private final long maxPoolWait;
private long poolProcessCount; private long poolProcessCount;
private final List<InProgressPool> inProgressPoolList; private final List<InProgressPool> inProgressPoolList;
private final AtomicInteger threadFaultCount; private final AtomicInteger threadFaultCount;
@ -104,17 +104,18 @@ public class ContainerReplicationManager implements Closeable {
Preconditions.checkNotNull(commandQueue); Preconditions.checkNotNull(commandQueue);
Preconditions.checkNotNull(nodeManager); Preconditions.checkNotNull(nodeManager);
this.containerProcessingLag = this.containerProcessingLag =
conf.getInt(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_SECONDS, conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL,
OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT,
TimeUnit.SECONDS
) * 1000; ) * 1000;
int maxContainerReportThreads = int maxContainerReportThreads =
conf.getInt(OZONE_SCM_MAX_CONTAINER_REPORT_THREADS, conf.getInt(OZONE_SCM_MAX_CONTAINER_REPORT_THREADS,
OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT
); );
this.maxPoolWait = this.maxPoolWait =
conf.getInt(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_SECONDS, conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT,
OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT) * 1000; OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
this.poolManager = poolManager; this.poolManager = poolManager;
this.commandQueue = commandQueue; this.commandQueue = commandQueue;
this.nodeManager = nodeManager; this.nodeManager = nodeManager;
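Two styles appear in this constructor: containerProcessingLag is read in SECONDS and then multiplied by 1000, while maxPoolWait is read directly in MILLISECONDS. Both end up as milliseconds, and since getTimeDuration already performs the unit conversion the second form needs no manual scaling. For example, with the shipped defaults (illustrative class only):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public final class IntervalStyles {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        long lagMs = conf.getTimeDuration(
            "ozone.scm.container.report.processing.interval", "60s",
            TimeUnit.SECONDS) * 1000;             // 60 * 1000 = 60000
        long waitMs = conf.getTimeDuration(
            "ozone.scm.container.reports.wait.timeout", "5m",
            TimeUnit.MILLISECONDS);               // 300000, converted for us
        System.out.println(lagMs + " " + waitMs);
      }
    }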


@ -63,18 +63,17 @@ public final class InProgressPool {
private AtomicInteger nodeCount; private AtomicInteger nodeCount;
private AtomicInteger nodeProcessed; private AtomicInteger nodeProcessed;
private AtomicInteger containerProcessedCount; private AtomicInteger containerProcessedCount;
private int maxWaitTime; private long maxWaitTime;
/** /**
* Constructs an pool that is being processed. * Constructs an pool that is being processed.
* * @param maxWaitTime - Maximum wait time in milliseconds.
* @param maxWaitTime - Maximum wait time in milliseconds.
* @param pool - Pool that we are working against * @param pool - Pool that we are working against
* @param nodeManager - Nodemanager * @param nodeManager - Nodemanager
* @param poolManager - pool manager * @param poolManager - pool manager
* @param commandQueue - Command queue * @param commandQueue - Command queue
* @param executorService - Shared Executor service. * @param executorService - Shared Executor service.
*/ */
InProgressPool(int maxWaitTime, PeriodicPool pool, InProgressPool(long maxWaitTime, PeriodicPool pool,
NodeManager nodeManager, NodePoolManager poolManager, NodeManager nodeManager, NodePoolManager poolManager,
CommandQueue commandQueue, ExecutorService executorService) { CommandQueue commandQueue, ExecutorService executorService) {
Preconditions.checkNotNull(pool); Preconditions.checkNotNull(pool);


@ -52,11 +52,11 @@ public abstract class BackgroundService {
private final ThreadGroup threadGroup; private final ThreadGroup threadGroup;
private final ThreadFactory threadFactory; private final ThreadFactory threadFactory;
private final String serviceName; private final String serviceName;
private final int interval; private final long interval;
private final long serviceTimeout; private final long serviceTimeout;
private final TimeUnit unit; private final TimeUnit unit;
public BackgroundService(String serviceName, int interval, public BackgroundService(String serviceName, long interval,
TimeUnit unit, int threadPoolSize, long serviceTimeout) { TimeUnit unit, int threadPoolSize, long serviceTimeout) {
this.interval = interval; this.interval = interval;
this.unit = unit; this.unit = unit;
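BackgroundService now takes its interval as a long (still paired with a TimeUnit), so subclasses such as BlockDeletingService and KeyDeletingService can pass the result of getTimeDuration straight through without narrowing. Here is the essence of the pattern, sketched with a plain ScheduledExecutorService instead of the real BackgroundService machinery:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public final class PeriodicServiceSketch {
      public static void main(String[] args) throws InterruptedException {
        Configuration conf = new Configuration();
        long intervalMs = conf.getTimeDuration(
            "ozone.block.deleting.service.interval", "60s", TimeUnit.MILLISECONDS);
        ScheduledExecutorService executor =
            Executors.newSingleThreadScheduledExecutor();
        // The interval and its unit are handed to the scheduler together,
        // just as BackgroundService receives (interval, unit) in its constructor.
        executor.scheduleWithFixedDelay(
            () -> System.out.println("run one deletion pass"),
            0, intervalMs, TimeUnit.MILLISECONDS);
        TimeUnit.SECONDS.sleep(1);
        executor.shutdownNow();
      }
    }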


@ -27,8 +27,8 @@
<configuration> <configuration>
<!--CBlock Settings--> <!--CBlock Settings-->
<property> <property>
<name>dfs.cblock.block.buffer.flush.interval.seconds</name> <name>dfs.cblock.block.buffer.flush.interval</name>
<value>60</value> <value>60s</value>
<tag>CBLOCK, PERFORMANCE</tag> <tag>CBLOCK, PERFORMANCE</tag>
<description> <description>
Controls the frequency at this the local cache flushes the Controls the frequency at this the local cache flushes the
@ -63,12 +63,12 @@
</description> </description>
</property> </property>
<property> <property>
<name>dfs.cblock.cache.keep.alive.seconds</name> <name>dfs.cblock.cache.keep.alive</name>
<value>60</value> <value>60s</value>
<tag>CBLOCK, PERFORMANCE</tag> <tag>CBLOCK, PERFORMANCE</tag>
<description> <description>
If the cblock cache has no I/O, then the threads in the cache If the cblock cache has no I/O, then the threads in the cache
pool are kept idle for this many seconds before shutting down. pool are kept idle for this amount of time before shutting down.
</description> </description>
</property> </property>
<property> <property>
@ -191,11 +191,11 @@
</description> </description>
</property> </property>
<property> <property>
<name>dfs.cblock.rpc.timeout.seconds</name> <name>dfs.cblock.rpc.timeout</name>
<value>300</value> <value>300s</value>
<tag>CBLOCK, MANAGEMENT</tag> <tag>CBLOCK, MANAGEMENT</tag>
<description> <description>
RPC timeout in seconds used for cblock CLI operations. When you RPC timeout used for cblock CLI operations. When you
create very large disks, like 5TB, etc. The number of containers create very large disks, like 5TB, etc. The number of containers
allocated in the system is huge. It is will 5TB/5GB, which is 1000 allocated in the system is huge. It is will 5TB/5GB, which is 1000
containers. The client CLI might timeout even though the cblock manager containers. The client CLI might timeout even though the cblock manager
@ -347,12 +347,13 @@
</description> </description>
</property> </property>
<property> <property>
<name>ozone.container.report.interval.ms</name> <name>ozone.container.report.interval</name>
<value>60000</value> <value>60000ms</value>
<tag>OZONE, CONTAINER, MANAGEMENT</tag> <tag>OZONE, CONTAINER, MANAGEMENT</tag>
<description>Time interval in milliseconds of the datanode to send container <description>Time interval of the datanode to send container report. Each
report. Each datanode periodically send container report upon receive datanode periodically send container report upon receive
sendContainerReport from SCM.</description> sendContainerReport from SCM. Unit could be defined with
postfix (ns,ms,s,m,h,d)</description>
</property> </property>
<!--Ozone Settings--> <!--Ozone Settings-->
<property> <property>
@ -388,12 +389,13 @@
</description> </description>
</property> </property>
<property> <property>
<name>ozone.block.deleting.service.interval.ms</name> <name>ozone.block.deleting.service.interval</name>
<value>60000</value> <value>1m</value>
<tag>OZONE, PERFORMANCE, SCM</tag> <tag>OZONE, PERFORMANCE, SCM</tag>
<description>Time interval in milliseconds of the block deleting service. <description>Time interval of the block deleting service.
The block deleting service runs on each datanode periodically and The block deleting service runs on each datanode periodically and
deletes blocks queued for deletion. deletes blocks queued for deletion. Unit could be defined with
postfix (ns,ms,s,m,h,d)
</description> </description>
</property> </property>
<property> <property>
@ -411,8 +413,8 @@
</description> </description>
</property> </property>
<property> <property>
<name>ozone.client.connection.timeout.ms</name> <name>ozone.client.connection.timeout</name>
<value>5000</value> <value>5000ms</value>
<tag>OZONE, PERFORMANCE, CLIENT</tag> <tag>OZONE, PERFORMANCE, CLIENT</tag>
<description>Connection timeout for Ozone client in milliseconds. <description>Connection timeout for Ozone client in milliseconds.
</description> </description>
@ -431,10 +433,11 @@
</description> </description>
</property> </property>
<property> <property>
<name>ozone.client.socket.timeout.ms</name> <name>ozone.client.socket.timeout</name>
<value>5000</value> <value>5000ms</value>
<tag>OZONE, CLIENT</tag> <tag>OZONE, CLIENT</tag>
<description>Socket timeout for Ozone client in milliseconds.</description> <description>Socket timeout for Ozone client. Unit could be defined with
postfix (ns,ms,s,m,h,d)</description>
</property> </property>
<property> <property>
<name>ozone.enabled</name> <name>ozone.enabled</name>
@ -748,7 +751,7 @@
<description> <description>
The policy used for choosing desire containers for block deletion. The policy used for choosing desire containers for block deletion.
Datanode selects some containers to process block deletion Datanode selects some containers to process block deletion
in a certain interval defined by ozone.block.deleting.service.interval.ms. in a certain interval defined by ozone.block.deleting.service.interval.
The number of containers to process in each interval is defined The number of containers to process in each interval is defined
by ozone.block.deleting.container.limit.per.interval. This property is by ozone.block.deleting.container.limit.per.interval. This property is
used to configure the policy applied while selecting containers. used to configure the policy applied while selecting containers.
@ -783,17 +786,17 @@
</description> </description>
</property> </property>
<property> <property>
<name>ozone.scm.container.report.processing.interval.seconds</name> <name>ozone.scm.container.report.processing.interval</name>
<value>60</value> <value>60s</value>
<tag>OZONE, PERFORMANCE</tag> <tag>OZONE, PERFORMANCE</tag>
<description>Time interval in seconds for scm to process container reports <description>Time interval for scm to process container reports
for a node pool. Scm handles node pool reports in a cyclic clock for a node pool. Scm handles node pool reports in a cyclic clock
manner, it fetches pools periodically with this time interval. manner, it fetches pools periodically with this time interval.
</description> </description>
</property> </property>
<property> <property>
<name>ozone.scm.container.reports.wait.timeout.seconds</name> <name>ozone.scm.container.reports.wait.timeout</name>
<value>300</value> <value>300s</value>
<tag>OZONE, PERFORMANCE, MANAGEMENT</tag> <tag>OZONE, PERFORMANCE, MANAGEMENT</tag>
<description>Maximum time to wait in seconds for processing all container <description>Maximum time to wait in seconds for processing all container
reports from reports from
@ -871,12 +874,11 @@
</description> </description>
</property> </property>
<property> <property>
<name>ozone.scm.dead.node.interval.ms</name> <name>ozone.scm.dead.node.interval</name>
<value>600000</value> <value>10m</value>
<tag>OZONE, MANAGEMENT</tag> <tag>OZONE, MANAGEMENT</tag>
<description> <description>
The interval between heartbeats before a node is tagged as dead. This The interval between heartbeats before a node is tagged as dead.
value is in milliseconds.
</description> </description>
</property> </property>
<property> <property>
@ -896,11 +898,11 @@
</description> </description>
</property> </property>
<property> <property>
<name>ozone.scm.heartbeat.interval.seconds</name> <name>ozone.scm.heartbeat.interval</name>
<value>30</value> <value>30s</value>
<tag>OZONE, MANAGEMENT</tag> <tag>OZONE, MANAGEMENT</tag>
<description> <description>
The heartbeat interval from a data node to SCM in seconds. Yes, The heartbeat interval from a data node to SCM. Yes,
it is not three but 30, since most data nodes will heart beating via Ratis it is not three but 30, since most data nodes will heart beating via Ratis
heartbeats. If a client is not able to talk to a data node, it will notify heartbeats. If a client is not able to talk to a data node, it will notify
KSM/SCM eventually. So a 30 second HB seems to work. This assumes that KSM/SCM eventually. So a 30 second HB seems to work. This assumes that
@ -929,8 +931,8 @@
</description> </description>
</property> </property>
<property> <property>
<name>ozone.scm.heartbeat.thread.interval.ms</name> <name>ozone.scm.heartbeat.thread.interval</name>
<value>3000</value> <value>3s</value>
<tag>OZONE, MANAGEMENT</tag> <tag>OZONE, MANAGEMENT</tag>
<description> <description>
When a heartbeat from the data node arrives on SCM, It is queued for When a heartbeat from the data node arrives on SCM, It is queued for
@ -1022,7 +1024,7 @@
<description> <description>
The maximum number of heartbeat to process per loop of the The maximum number of heartbeat to process per loop of the
heartbeat process thread. Please see heartbeat process thread. Please see
ozone.scm.heartbeat.thread.interval.ms ozone.scm.heartbeat.thread.interval
for more info. for more info.
</description> </description>
</property> </property>
@ -1039,12 +1041,12 @@
</description> </description>
</property> </property>
<property> <property>
<name>ozone.scm.stale.node.interval.ms</name> <name>ozone.scm.stale.node.interval</name>
<value>90000</value> <value>90s</value>
<tag>OZONE, MANAGEMENT</tag> <tag>OZONE, MANAGEMENT</tag>
<description> <description>
The interval in milliseconds for stale node flagging. Please The interval for stale node flagging. Please
see ozone.scm.heartbeat.thread.interval.ms before changing this value. see ozone.scm.heartbeat.thread.interval before changing this value.
</description> </description>
</property> </property>
<property> <property>
@ -1074,7 +1076,7 @@
<!--Client Settings--> <!--Client Settings-->
<property> <property>
<name>scm.container.client.idle.threshold</name> <name>scm.container.client.idle.threshold</name>
<value>10000</value> <value>10s</value>
<tag>OZONE, PERFORMANCE</tag> <tag>OZONE, PERFORMANCE</tag>
<description> <description>
In the standalone pipelines, the SCM clients use netty to In the standalone pipelines, the SCM clients use netty to
@ -1096,7 +1098,7 @@
<property> <property>
<name>ozone.scm.container.creation.lease.timeout</name> <name>ozone.scm.container.creation.lease.timeout</name>
<value>60000</value> <value>60s</value>
<tag>OZONE, SCM</tag> <tag>OZONE, SCM</tag>
<description> <description>
Container creation timeout in milliseconds to be used by SCM. When Container creation timeout in milliseconds to be used by SCM. When


@ -18,6 +18,7 @@
package org.apache.hadoop.cblock; package org.apache.hadoop.cblock;
import com.google.common.primitives.Longs; import com.google.common.primitives.Longs;
import static java.util.concurrent.TimeUnit.SECONDS;
import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics; import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics;
import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher; import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
@ -50,7 +51,7 @@ import static org.apache.hadoop.cblock.CBlockConfigKeys.
import static org.apache.hadoop.cblock.CBlockConfigKeys. import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO; DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO;
import static org.apache.hadoop.cblock.CBlockConfigKeys. import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS; DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL;
/** /**
* Tests for Local Cache Buffer Manager. * Tests for Local Cache Buffer Manager.
@ -218,7 +219,7 @@ public class TestBufferManager {
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true); flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true); flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
flushTestConfig flushTestConfig
.setInt(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS, 5); .setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL, 5, SECONDS);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4); String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4); String userName = "user" + RandomStringUtils.randomNumeric(4);
@ -312,7 +313,7 @@ public class TestBufferManager {
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true); flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true); flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
flushTestConfig flushTestConfig
.setInt(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS, 120); .setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL, 120, SECONDS);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4); String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4); String userName = "user" + RandomStringUtils.randomNumeric(4);
@ -361,7 +362,8 @@ public class TestBufferManager {
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true); flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true); flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
flushTestConfig flushTestConfig
.setInt(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS, 5); .setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL,
5, SECONDS);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4); String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4); String userName = "user" + RandomStringUtils.randomNumeric(4);
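Every hunk in this test makes the same substitution: the old integer setter on a *.seconds key becomes a setTimeDuration call, so the value and its unit travel together. A hedged before/after sketch (the helper class is illustrative; the key constant is the one imported above):

import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL;
import org.apache.hadoop.conf.Configuration;

final class FlushIntervalSketch {
  static void configure(Configuration conf) {
    // Before: conf.setInt("dfs.cblock.block.buffer.flush.interval.seconds", 5);
    // After: the unit is stored with the value, so the config holds "5s".
    conf.setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL, 5, SECONDS);
  }

  private FlushIntervalSketch() {
  }
}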

View File

@ -45,6 +45,7 @@ import java.io.IOException;
import java.nio.charset.StandardCharsets; import java.nio.charset.StandardCharsets;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException; import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.cblock.CBlockConfigKeys import static org.apache.hadoop.cblock.CBlockConfigKeys
@ -54,7 +55,7 @@ import static org.apache.hadoop.cblock.CBlockConfigKeys
import static org.apache.hadoop.cblock.CBlockConfigKeys import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO; .DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO;
import static org.apache.hadoop.cblock.CBlockConfigKeys import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS; .DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL;
import static org.apache.hadoop.cblock.CBlockConfigKeys import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE; .DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE;
@ -198,7 +199,8 @@ public class TestCBlockReadWrite {
flushTestConfig.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path); flushTestConfig.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path);
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true); flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true); flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
flushTestConfig.setInt(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS, 3); flushTestConfig.setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL, 3,
TimeUnit.SECONDS);
XceiverClientManager xcm = new XceiverClientManager(flushTestConfig); XceiverClientManager xcm = new XceiverClientManager(flushTestConfig);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4); String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4); String userName = "user" + RandomStringUtils.randomNumeric(4);
@ -287,7 +289,9 @@ public class TestCBlockReadWrite {
flushTestConfig.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path); flushTestConfig.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path);
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true); flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true); flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
flushTestConfig.setInt(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS, 3); flushTestConfig.setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL,
3,
TimeUnit.SECONDS);
int numblocks = 10; int numblocks = 10;
flushTestConfig.setInt(DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE, numblocks); flushTestConfig.setInt(DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE, numblocks);

View File

@ -55,6 +55,7 @@ import java.nio.file.Path;
import java.nio.file.Paths; import java.nio.file.Paths;
import java.util.Random; import java.util.Random;
import java.util.UUID; import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException; import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.ozone.OzoneConfigKeys import static org.apache.hadoop.ozone.OzoneConfigKeys
@ -553,20 +554,25 @@ public final class MiniOzoneClassicCluster extends MiniDFSCluster
private void configureSCMheartbeat() { private void configureSCMheartbeat() {
if (hbSeconds.isPresent()) { if (hbSeconds.isPresent()) {
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
hbSeconds.get()); hbSeconds.get(), TimeUnit.SECONDS);
} else { } else {
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
DEFAULT_HB_SECONDS); DEFAULT_HB_SECONDS,
TimeUnit.SECONDS);
} }
if (hbProcessorInterval.isPresent()) { if (hbProcessorInterval.isPresent()) {
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, conf.setTimeDuration(
hbProcessorInterval.get()); ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
hbProcessorInterval.get(),
TimeUnit.MILLISECONDS);
} else { } else {
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, conf.setTimeDuration(
DEFAULT_PROCESSOR_MS); ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
DEFAULT_PROCESSOR_MS,
TimeUnit.MILLISECONDS);
} }
} }
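Consumers of these settings can then read them back in whatever unit they compute with; getTimeDuration converts from the stored unit. A minimal sketch, assuming only the standard Configuration API (the helper class and fallback defaults below are illustrative, not code from this change):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.scm.ScmConfigKeys;

final class HeartbeatSettingsSketch {
  /** Illustrative: heartbeat interval in seconds, with an assumed 30s fallback. */
  static long heartbeatIntervalSeconds(Configuration conf) {
    return conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
        30, TimeUnit.SECONDS);
  }

  /** Illustrative: heartbeat processor interval in milliseconds. */
  static long processorIntervalMs(Configuration conf) {
    return conf.getTimeDuration(
        ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
        100, TimeUnit.MILLISECONDS);
  }

  private HeartbeatSettingsSketch() {
  }
}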

View File

@ -53,6 +53,7 @@ import java.util.Set;
import java.util.Map; import java.util.Map;
import java.util.Collections; import java.util.Collections;
import java.util.UUID; import java.util.UUID;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
import com.google.common.collect.Maps; import com.google.common.collect.Maps;
@ -185,10 +186,15 @@ public class TestStorageContainerManager {
public void testBlockDeletionTransactions() throws Exception { public void testBlockDeletionTransactions() throws Exception {
int numKeys = 5; int numKeys = 5;
OzoneConfiguration conf = new OzoneConfiguration(); OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 5); conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 3000); 5,
TimeUnit.SECONDS);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
3000,
TimeUnit.MILLISECONDS);
conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5); conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
conf.setInt(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS, 1000); conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
1, TimeUnit.SECONDS);
// Reset container provision size, otherwise only one container // Reset container provision size, otherwise only one container
// is created by default. // is created by default.
conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE,
@ -262,10 +268,13 @@ public class TestStorageContainerManager {
public void testBlockDeletingThrottling() throws Exception { public void testBlockDeletingThrottling() throws Exception {
int numKeys = 15; int numKeys = 15;
OzoneConfiguration conf = new OzoneConfiguration(); OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 5); conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL, 5,
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 3000); TimeUnit.SECONDS);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
3000, TimeUnit.MILLISECONDS);
conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5); conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
conf.setInt(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS, 1000); conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
1000, TimeUnit.MILLISECONDS);
conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE,
numKeys); numKeys);

View File

@ -55,10 +55,11 @@ import java.io.IOException;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException; import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.ozone.OzoneConfigKeys import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS; .OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER; .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
import static org.apache.hadoop.ozone.OzoneConfigKeys import static org.apache.hadoop.ozone.OzoneConfigKeys
@ -220,7 +221,8 @@ public class TestBlockDeletingService {
@Test @Test
public void testShutdownService() throws Exception { public void testShutdownService() throws Exception {
Configuration conf = new OzoneConfiguration(); Configuration conf = new OzoneConfiguration();
conf.setInt(OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS, 500); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500,
TimeUnit.MILLISECONDS);
conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 10); conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 10);
ContainerManager containerManager = createContainerManager(conf); ContainerManager containerManager = createContainerManager(conf);

View File

@ -75,7 +75,8 @@ public class TestDatanodeStateMachine {
@Before @Before
public void setUp() throws Exception { public void setUp() throws Exception {
conf = SCMTestUtils.getConf(); conf = SCMTestUtils.getConf();
conf.setInt(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500,
TimeUnit.MILLISECONDS);
conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
serverAddresses = new LinkedList<>(); serverAddresses = new LinkedList<>();
scmServers = new LinkedList<>(); scmServers = new LinkedList<>();

View File

@ -52,7 +52,7 @@ import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.ozone.protocol.proto.OzoneProtos.NodeState.HEALTHY; import static org.apache.hadoop.ozone.protocol.proto.OzoneProtos.NodeState.HEALTHY;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_SECONDS; .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT;
import static org.apache.ratis.shaded.com.google.common.util.concurrent import static org.apache.ratis.shaded.com.google.common.util.concurrent
.Uninterruptibles.sleepUninterruptibly; .Uninterruptibles.sleepUninterruptibly;
@ -108,7 +108,8 @@ public class TestContainerReplicationManager {
} }
} }
OzoneConfiguration config = SCMTestUtils.getOzoneConf(); OzoneConfiguration config = SCMTestUtils.getOzoneConf();
config.setInt(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_SECONDS, 1); config.setTimeDuration(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT, 1,
TimeUnit.SECONDS);
replicationManager = new ContainerReplicationManager(config, replicationManager = new ContainerReplicationManager(config,
nodeManager, poolManager, commandQueue); nodeManager, poolManager, commandQueue);
datanodeStateManager = new ReplicationDatanodeStateManager(nodeManager, datanodeStateManager = new ReplicationDatanodeStateManager(nodeManager,

View File

@ -46,6 +46,7 @@ import java.util.List;
import java.util.Set; import java.util.Set;
import java.util.TreeSet; import java.util.TreeSet;
import java.util.UUID; import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors; import java.util.stream.Collectors;
/** /**
@ -69,8 +70,10 @@ public class TestContainerMapping {
.getTestDir(TestContainerMapping.class.getSimpleName()); .getTestDir(TestContainerMapping.class.getSimpleName());
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
testDir.getAbsolutePath()); testDir.getAbsolutePath());
conf.setLong(ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT, conf.setTimeDuration(
TIMEOUT); ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT,
TIMEOUT,
TimeUnit.MILLISECONDS);
boolean folderExisted = testDir.exists() || testDir.mkdirs(); boolean folderExisted = testDir.exists() || testDir.mkdirs();
if (!folderExisted) { if (!folderExisted) {
throw new IOException("Unable to create test directory path"); throw new IOException("Unable to create test directory path");

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.scm.node; package org.apache.hadoop.ozone.scm.node;
import com.google.common.base.Supplier; import com.google.common.base.Supplier;
import static java.util.concurrent.TimeUnit.*;
import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys;
@ -49,6 +50,7 @@ import java.io.IOException;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.UUID; import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException; import java.util.concurrent.TimeoutException;
import static java.util.concurrent.TimeUnit.SECONDS; import static java.util.concurrent.TimeUnit.SECONDS;
@ -60,11 +62,12 @@ import static org.apache.hadoop.ozone.protocol.proto.OzoneProtos.NodeState
import static org.apache.hadoop.ozone.protocol.proto import static org.apache.hadoop.ozone.protocol.proto
.StorageContainerDatanodeProtocolProtos.Type; .StorageContainerDatanodeProtocolProtos.Type;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL_MS; import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS; import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS; import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_MAX_HB_COUNT_TO_PROCESS; import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_MAX_HB_COUNT_TO_PROCESS;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL_MS; import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.StringStartsWith.startsWith; import static org.hamcrest.core.StringStartsWith.startsWith;
@ -110,7 +113,8 @@ public class TestNodeManager {
OzoneConfiguration conf = new OzoneConfiguration(); OzoneConfiguration conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
testDir.getAbsolutePath()); testDir.getAbsolutePath());
conf.setLong(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
TimeUnit.MILLISECONDS);
return conf; return conf;
} }
@ -243,7 +247,8 @@ public class TestNodeManager {
public void testScmShutdown() throws IOException, InterruptedException, public void testScmShutdown() throws IOException, InterruptedException,
TimeoutException { TimeoutException {
OzoneConfiguration conf = getConf(); OzoneConfiguration conf = getConf();
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100); conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
100, TimeUnit.MILLISECONDS);
SCMNodeManager nodeManager = createNodeManager(conf); SCMNodeManager nodeManager = createNodeManager(conf);
DatanodeID datanodeID = SCMTestUtils.getDatanodeID(nodeManager); DatanodeID datanodeID = SCMTestUtils.getDatanodeID(nodeManager);
nodeManager.close(); nodeManager.close();
@ -267,7 +272,8 @@ public class TestNodeManager {
@Test @Test
public void testScmHeartbeatAfterRestart() throws Exception { public void testScmHeartbeatAfterRestart() throws Exception {
OzoneConfiguration conf = getConf(); OzoneConfiguration conf = getConf();
conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100); conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
100, TimeUnit.MILLISECONDS);
DatanodeID datanodeID = SCMTestUtils.getDatanodeID(); DatanodeID datanodeID = SCMTestUtils.getDatanodeID();
try (SCMNodeManager nodemanager = createNodeManager(conf)) { try (SCMNodeManager nodemanager = createNodeManager(conf)) {
nodemanager.register(datanodeID); nodemanager.register(datanodeID);
@ -344,12 +350,13 @@ public class TestNodeManager {
InterruptedException, TimeoutException { InterruptedException, TimeoutException {
OzoneConfiguration conf = getConf(); OzoneConfiguration conf = getConf();
final int interval = 100; final int interval = 100;
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, interval); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
conf.setInt(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1); MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
// This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS // This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL
// and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS // and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, interval); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, interval, MILLISECONDS);
thrown.expect(IllegalArgumentException.class); thrown.expect(IllegalArgumentException.class);
@ -372,12 +379,13 @@ public class TestNodeManager {
InterruptedException, TimeoutException { InterruptedException, TimeoutException {
OzoneConfiguration conf = getConf(); OzoneConfiguration conf = getConf();
final int interval = 100; final int interval = 100;
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, interval); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
conf.setInt(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1); TimeUnit.MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, TimeUnit.SECONDS);
// This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS // This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL
// and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS // and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, 3 * 1000); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3 * 1000, MILLISECONDS);
createNodeManager(conf).close(); createNodeManager(conf).close();
} }
@ -396,10 +404,11 @@ public class TestNodeManager {
final int nodeCount = 10; final int nodeCount = 10;
OzoneConfiguration conf = getConf(); OzoneConfiguration conf = getConf();
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, interval); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1, SECONDS); MILLISECONDS);
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, 3 * 1000); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setInt(OZONE_SCM_DEADNODE_INTERVAL_MS, 6 * 1000); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
try (SCMNodeManager nodeManager = createNodeManager(conf)) { try (SCMNodeManager nodeManager = createNodeManager(conf)) {
@ -505,25 +514,25 @@ public class TestNodeManager {
* These values are very important. Here is what they mean so you don't * These values are very important. Here is what they mean so you don't
* have to look them up while reading this code. * have to look them up while reading this code.
* *
* OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS - This is the frequency of the * OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - This is the frequency of the
* HB processing thread that is running in the SCM. This thread must run * HB processing thread that is running in the SCM. This thread must run
* for the SCM to process the Heartbeats. * for the SCM to process the Heartbeats.
* *
* OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS - This is the frequency at which * OZONE_SCM_HEARTBEAT_INTERVAL - This is the frequency at which
* datanodes will send heartbeats to SCM. Please note: This is the only * datanodes will send heartbeats to SCM. Please note: This is the only
* config value for node manager that is specified in seconds. We don't * config value for node manager that is specified in seconds. We don't
* want SCM heartbeat resolution to be finer than seconds. * want SCM heartbeat resolution to be finer than seconds.
* In this test it is not used, but we are forced to set it because we * In this test it is not used, but we are forced to set it because we
* have validation code that checks Stale Node interval and Dead Node * have validation code that checks Stale Node interval and Dead Node
* interval is larger than the value of * interval is larger than the value of
* OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS. * OZONE_SCM_HEARTBEAT_INTERVAL.
* *
* OZONE_SCM_STALENODE_INTERVAL_MS - This is the time that must elapse * OZONE_SCM_STALENODE_INTERVAL - This is the time that must elapse
* from the last heartbeat for us to mark a node as stale. In this test * from the last heartbeat for us to mark a node as stale. In this test
* we set that to 3. That is, if a node has not sent a heartbeat to SCM in the last 3 * we set that to 3. That is, if a node has not sent a heartbeat to SCM in the last 3
* seconds we will mark it as stale. * seconds we will mark it as stale.
* *
* OZONE_SCM_DEADNODE_INTERVAL_MS - This is the time that must elapse * OZONE_SCM_DEADNODE_INTERVAL - This is the time that must elapse
* from the last heartbeat for a node to be marked dead. We have an * from the last heartbeat for a node to be marked dead. We have an
* additional constraint that this must be at least 2 times bigger than * additional constraint that this must be at least 2 times bigger than
* Stale node Interval. * Stale node Interval.
@ -535,10 +544,11 @@ public class TestNodeManager {
*/ */
OzoneConfiguration conf = getConf(); OzoneConfiguration conf = getConf();
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1, SECONDS); MILLISECONDS);
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, 3 * 1000); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setInt(OZONE_SCM_DEADNODE_INTERVAL_MS, 6 * 1000); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
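The comment above pins down the ratios the validation code is expected to enforce. A hedged sketch of that rule, with the class name and thresholds taken from the comment rather than from the real validator:

final class IntervalSanitySketch {
  // Assumed rule, per the comment: the stale interval must exceed 5x the HB
  // processing interval and 3x the HB interval; the dead interval must be at
  // least 2x the stale interval. All arguments are in milliseconds.
  static void check(long processMs, long heartbeatMs, long staleMs, long deadMs) {
    if (staleMs < 5 * processMs || staleMs < 3 * heartbeatMs) {
      throw new IllegalArgumentException("Stale node interval is too small.");
    }
    if (deadMs < 2 * staleMs) {
      throw new IllegalArgumentException(
          "Dead node interval must be at least twice the stale interval.");
    }
  }

  private IntervalSanitySketch() {
  }
}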
/** /**
@ -712,10 +722,11 @@ public class TestNodeManager {
final int deadCount = 10; final int deadCount = 10;
OzoneConfiguration conf = getConf(); OzoneConfiguration conf = getConf();
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1, SECONDS); MILLISECONDS);
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, 3 * 1000); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setInt(OZONE_SCM_DEADNODE_INTERVAL_MS, 6 * 1000); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
conf.setInt(OZONE_SCM_MAX_HB_COUNT_TO_PROCESS, 7000); conf.setInt(OZONE_SCM_MAX_HB_COUNT_TO_PROCESS, 7000);
@ -801,10 +812,14 @@ public class TestNodeManager {
final int healthyCount = 3000; final int healthyCount = 3000;
final int staleCount = 3000; final int staleCount = 3000;
OzoneConfiguration conf = getConf(); OzoneConfiguration conf = getConf();
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1, SECONDS); MILLISECONDS);
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, 3 * 1000); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1,
conf.setInt(OZONE_SCM_DEADNODE_INTERVAL_MS, 6 * 1000); SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3 * 1000,
MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6 * 1000,
MILLISECONDS);
try (SCMNodeManager nodeManager = createNodeManager(conf)) { try (SCMNodeManager nodeManager = createNodeManager(conf)) {
List<DatanodeID> healthyList = createNodeSet(nodeManager, List<DatanodeID> healthyList = createNodeSet(nodeManager,
@ -862,8 +877,9 @@ public class TestNodeManager {
// Make the HB process thread run slower. // Make the HB process thread run slower.
OzoneConfiguration conf = getConf(); OzoneConfiguration conf = getConf();
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 500); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 500,
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1, SECONDS); TimeUnit.MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setInt(OZONE_SCM_MAX_HB_COUNT_TO_PROCESS, 500); conf.setInt(OZONE_SCM_MAX_HB_COUNT_TO_PROCESS, 500);
try (SCMNodeManager nodeManager = createNodeManager(conf)) { try (SCMNodeManager nodeManager = createNodeManager(conf)) {
@ -897,7 +913,8 @@ public class TestNodeManager {
public void testScmEnterAndExitChillMode() throws IOException, public void testScmEnterAndExitChillMode() throws IOException,
InterruptedException { InterruptedException {
OzoneConfiguration conf = getConf(); OzoneConfiguration conf = getConf();
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
MILLISECONDS);
try (SCMNodeManager nodeManager = createNodeManager(conf)) { try (SCMNodeManager nodeManager = createNodeManager(conf)) {
nodeManager.setMinimumChillModeNodes(10); nodeManager.setMinimumChillModeNodes(10);
@ -956,7 +973,8 @@ public class TestNodeManager {
public void testScmStatsFromNodeReport() throws IOException, public void testScmStatsFromNodeReport() throws IOException,
InterruptedException, TimeoutException { InterruptedException, TimeoutException {
OzoneConfiguration conf = getConf(); OzoneConfiguration conf = getConf();
conf.setLong(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 1000); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000,
MILLISECONDS);
final int nodeCount = 10; final int nodeCount = 10;
final long capacity = 2000; final long capacity = 2000;
final long used = 100; final long used = 100;
@ -1001,10 +1019,11 @@ public class TestNodeManager {
final int nodeCount = 1; final int nodeCount = 1;
final int interval = 100; final int interval = 100;
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, interval); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1, SECONDS); MILLISECONDS);
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, 3 * 1000); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setInt(OZONE_SCM_DEADNODE_INTERVAL_MS, 6 * 1000); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
try (SCMNodeManager nodeManager = createNodeManager(conf)) { try (SCMNodeManager nodeManager = createNodeManager(conf)) {
DatanodeID datanodeID = SCMTestUtils.getDatanodeID(nodeManager); DatanodeID datanodeID = SCMTestUtils.getDatanodeID(nodeManager);

View File

@ -28,6 +28,7 @@ import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import java.util.EnumSet; import java.util.EnumSet;
import java.util.concurrent.TimeUnit;
import static java.util.concurrent.TimeUnit.SECONDS; import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.hadoop.ozone.protocol.proto.OzoneProtos.NodeState.DEAD; import static org.apache.hadoop.ozone.protocol.proto.OzoneProtos.NodeState.DEAD;
@ -36,13 +37,13 @@ import static org.apache.hadoop.ozone.protocol.proto.OzoneProtos.NodeState
import static org.apache.hadoop.ozone.protocol.proto.OzoneProtos.NodeState import static org.apache.hadoop.ozone.protocol.proto.OzoneProtos.NodeState
.STALE; .STALE;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_DEADNODE_INTERVAL_MS; .OZONE_SCM_DEADNODE_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS; .OZONE_SCM_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS; .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
import static org.apache.hadoop.scm.ScmConfigKeys import static org.apache.hadoop.scm.ScmConfigKeys
.OZONE_SCM_STALENODE_INTERVAL_MS; .OZONE_SCM_STALENODE_INTERVAL;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
/** /**
@ -59,10 +60,11 @@ public class TestQueryNode {
OzoneConfiguration conf = new OzoneConfiguration(); OzoneConfiguration conf = new OzoneConfiguration();
final int interval = 100; final int interval = 100;
conf.setInt(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, interval); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL_SECONDS, 1, SECONDS); interval, TimeUnit.MILLISECONDS);
conf.setInt(OZONE_SCM_STALENODE_INTERVAL_MS, 3 * 1000); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setInt(OZONE_SCM_DEADNODE_INTERVAL_MS, 6 * 1000); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
cluster = new MiniOzoneClassicCluster.Builder(conf) cluster = new MiniOzoneClassicCluster.Builder(conf)
.numDataNodes(numOfDatanodes) .numDataNodes(numOfDatanodes)

View File

@ -68,6 +68,7 @@ import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import java.util.Random; import java.util.Random;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotNull;
@ -98,7 +99,8 @@ public class TestKeys {
OzoneConfiguration conf = new OzoneConfiguration(); OzoneConfiguration conf = new OzoneConfiguration();
// Set short block deleting service interval to speed up deletions. // Set short block deleting service interval to speed up deletions.
conf.setInt(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS, 1000); conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
1000, TimeUnit.MILLISECONDS);
path = GenericTestUtils.getTempPath(TestKeys.class.getSimpleName()); path = GenericTestUtils.getTempPath(TestKeys.class.getSimpleName());
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG); Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);