From f9c0bc094a10d9cdbb89792b24c7135404aab667 Mon Sep 17 00:00:00 2001
From: litao
Date: Wed, 19 Jan 2022 14:10:39 +0800
Subject: [PATCH] HDFS-16399. Reconfig cache report parameters for datanode
 (#3841)

(cherry picked from commit e3556463301f103ccf26a17e116c507c143e7121)
---
 .../hadoop/hdfs/server/datanode/DNConf.java   | 12 +++++-
 .../hadoop/hdfs/server/datanode/DataNode.java | 24 ++++++++++-
 .../datanode/TestDataNodeReconfiguration.java | 43 +++++++++++++++++++
 .../hadoop/hdfs/tools/TestDFSAdmin.java       |  2 +-
 4 files changed, 78 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
index ae32df5e543..3dde2cc9c15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
@@ -113,7 +113,7 @@ public class DNConf {
   final long outliersReportIntervalMs;
   final long ibrInterval;
   final long initialBlockReportDelayMs;
-  final long cacheReportInterval;
+  volatile long cacheReportInterval;
   final long datanodeSlowIoWarningThresholdMs;
 
   final String minimumNameNodeVersion;
@@ -484,4 +484,14 @@ public class DNConf {
   public long getBlockReportInterval() {
     return blockReportInterval;
   }
+
+  void setCacheReportInterval(long intervalMs) {
+    Preconditions.checkArgument(intervalMs > 0,
+        "dfs.cachereport.intervalMsec should be larger than 0");
+    cacheReportInterval = intervalMs;
+  }
+
+  public long getCacheReportInterval() {
+    return cacheReportInterval;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 2fd03873f11..3c86b1189e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
@@ -310,7 +312,8 @@ public class DataNode extends ReconfigurableBase
           DFS_DATANODE_DATA_DIR_KEY,
           DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
           DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
-          DFS_DATANODE_MAX_RECEIVER_THREADS_KEY));
+          DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
+          DFS_CACHEREPORT_INTERVAL_MSEC_KEY));
 
   public static final Log METRICS_LOG = LogFactory.getLog("DataNodeMetricsLog");
 
@@ -651,6 +654,8 @@
     }
     case DFS_DATANODE_MAX_RECEIVER_THREADS_KEY:
       return reconfDataXceiverParameters(property, newVal);
+    case DFS_CACHEREPORT_INTERVAL_MSEC_KEY:
+      return reconfCacheReportParameters(property, newVal);
     default:
       break;
     }
@@ -675,6 +680,23 @@ public class DataNode extends ReconfigurableBase
     }
   }
 
+  private String reconfCacheReportParameters(String property, String newVal)
+      throws ReconfigurationException {
+    String result;
+    try {
+      LOG.info("Reconfiguring {} to {}", property, newVal);
+      Preconditions.checkNotNull(dnConf, "DNConf has not been initialized.");
+      long reportInterval = (newVal == null ? DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT :
+          Long.parseLong(newVal));
+      result = Long.toString(reportInterval);
+      dnConf.setCacheReportInterval(reportInterval);
+      LOG.info("RECONFIGURE* changed {} to {}", property, newVal);
+      return result;
+    } catch (IllegalArgumentException e) {
+      throw new ReconfigurationException(property, newVal, getConf().get(property), e);
+    }
+  }
+
   /**
    * Get a list of the keys of the re-configurable properties in configuration.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
index 881722b2639..e0be9e57202 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT;
@@ -406,4 +408,45 @@ public class TestDataNodeReconfiguration {
         dn.getConf().get(DFS_DATANODE_MAX_RECEIVER_THREADS_KEY));
     }
   }
+
+  @Test
+  public void testCacheReportReconfiguration()
+      throws ReconfigurationException {
+    int cacheReportInterval = 300 * 1000;
+    for (int i = 0; i < NUM_DATA_NODE; i++) {
+      DataNode dn = cluster.getDataNodes().get(i);
+
+      // Try invalid values.
+      try {
+        dn.reconfigureProperty(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, "text");
+        fail("ReconfigurationException expected");
+      } catch (ReconfigurationException expected) {
+        assertTrue("expecting NumberFormatException",
+            expected.getCause() instanceof NumberFormatException);
+      }
+      try {
+        dn.reconfigureProperty(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, String.valueOf(-1));
+        fail("ReconfigurationException expected");
+      } catch (ReconfigurationException expected) {
+        assertTrue("expecting IllegalArgumentException",
+            expected.getCause() instanceof IllegalArgumentException);
+      }
+
+      // Change properties.
+      dn.reconfigureProperty(DFS_CACHEREPORT_INTERVAL_MSEC_KEY,
+          String.valueOf(cacheReportInterval));
+
+      // Verify change.
+      assertEquals(String.format("%s has wrong value", DFS_CACHEREPORT_INTERVAL_MSEC_KEY),
+          cacheReportInterval, dn.getDnConf().getCacheReportInterval());
+
+      // Revert to default.
+      dn.reconfigureProperty(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, null);
+      assertEquals(String.format("%s has wrong value", DFS_CACHEREPORT_INTERVAL_MSEC_KEY),
+          DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT, dn.getDnConf().getCacheReportInterval());
+
+      assertNull(String.format("expect %s is not configured", DFS_CACHEREPORT_INTERVAL_MSEC_KEY),
+          dn.getConf().get(DFS_CACHEREPORT_INTERVAL_MSEC_KEY));
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index c8611da1c28..d22162b871f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -330,7 +330,7 @@ public class TestDFSAdmin {
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     getReconfigurableProperties("datanode", address, outs, errs);
-    assertEquals(5, outs.size());
+    assertEquals(6, outs.size());
     assertEquals(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, outs.get(1));
   }
 
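
With DFS_CACHEREPORT_INTERVAL_MSEC_KEY registered in RECONFIGURABLE_PROPERTIES
above, dfs.cachereport.intervalMsec can be retuned on a live datanode through
the standard dfsadmin reconfiguration flow. A minimal operator walkthrough,
assuming a datanode whose IPC address is dn1.example.com:9867 (the hostname is
a placeholder and 9867 is the Hadoop 3.x default datanode IPC port; substitute
your own):

  # 1. Edit dfs.cachereport.intervalMsec in the datanode's hdfs-site.xml.
  # 2. Ask the datanode to apply the new value at runtime:
  hdfs dfsadmin -reconfig datanode dn1.example.com:9867 start
  # 3. Poll until the background reconfiguration task completes:
  hdfs dfsadmin -reconfig datanode dn1.example.com:9867 status
  # 4. The key should now appear in the reconfigurable-property listing
  #    (the list whose size the TestDFSAdmin assertion bumps from 5 to 6):
  hdfs dfsadmin -reconfig datanode dn1.example.com:9867 properties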