From aa963ab441bee91400aab8d973ca505bf5b2e291 Mon Sep 17 00:00:00 2001
From: Vinayakumar B
Date: Tue, 15 Sep 2015 17:19:59 +0530
Subject: [PATCH] HDFS-9067. o.a.h.hdfs.server.datanode.fsdataset.impl.TestLazyWriter
 is failing in trunk (Contributed by Surendra Singh Lilhore)

(cherry picked from commit a4405674919d14be89bc4da22db2f417b5ae6ac3)
---
 .../hadoop/metrics2/impl/MetricsConfig.java   |  3 +
 .../metrics2/impl/MetricsSystemImpl.java      | 24 +++---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java   | 20 +++++
 .../fsdataset/impl/LazyPersistTestCase.java   | 15 +---
 .../fsdataset/impl/TestLazyWriter.java        |  1 +
 .../test/resources/hadoop-metrics2.properties | 85 +++++++++++++++++++
 7 files changed, 127 insertions(+), 24 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-metrics2.properties

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
index b863f55aa89..001b731b4c4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
@@ -58,6 +58,9 @@ class MetricsConfig extends SubsetConfiguration {
   static final String PERIOD_KEY = "period";
   static final int PERIOD_DEFAULT = 10; // seconds
 
+  // For testing: when set, this takes priority over PERIOD_KEY.
+  static final String PERIOD_MILLIS_KEY = "periodMillis";
+
   static final String QUEUE_CAPACITY_KEY = "queue.capacity";
   static final int QUEUE_CAPACITY_DEFAULT = 1;
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
index 4843c0383b9..02cec364efb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
@@ -105,7 +105,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
   private Map<String, MetricsConfig> sourceConfigs, sinkConfigs;
   private boolean monitoring = false;
   private Timer timer;
-  private int period; // seconds
+  private long period; // milliseconds
   private long logicalTime; // number of timer invocations * period
   private ObjectName mbeanName;
   private boolean publishSelfMetrics = true;
@@ -262,7 +262,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
     checkNotNull(config, "config");
     MetricsConfig conf = sourceConfigs.get(name);
     MetricsSourceAdapter sa = new MetricsSourceAdapter(prefix, name, desc,
-        source, injectedTags, period * 1000L, conf != null ? conf
+        source, injectedTags, period, conf != null ? conf
         : config.subset(SOURCE_KEY));
     sources.put(name, sa);
     sa.start();
@@ -360,7 +360,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
       return;
     }
     logicalTime = 0;
-    long millis = period * 1000L;
+    long millis = period;
     timer = new Timer("Timer for '"+ prefix +"' metrics system", true);
     timer.scheduleAtFixedRate(new TimerTask() {
       public void run() {
@@ -371,7 +371,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
         }
       }
     }, millis, millis);
-    LOG.info("Scheduled snapshot period at "+ period +" second(s).");
+    LOG.info("Scheduled snapshot period at "+ (period/1000) +" second(s).");
   }
 
   synchronized void onTimerEvent() {
@@ -485,12 +485,15 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
 
   private synchronized void configureSinks() {
     sinkConfigs = config.getInstanceConfigs(SINK_KEY);
-    int confPeriod = 0;
+    long confPeriodMillis = 0;
     for (Entry<String, MetricsConfig> entry : sinkConfigs.entrySet()) {
       MetricsConfig conf = entry.getValue();
       int sinkPeriod = conf.getInt(PERIOD_KEY, PERIOD_DEFAULT);
-      confPeriod = confPeriod == 0 ? sinkPeriod
-          : ArithmeticUtils.gcd(confPeriod, sinkPeriod);
+      // Support configuring periodMillis for testing.
+      long sinkPeriodMillis =
+          conf.getLong(PERIOD_MILLIS_KEY, sinkPeriod * 1000);
+      confPeriodMillis = confPeriodMillis == 0 ? sinkPeriodMillis
+          : ArithmeticUtils.gcd(confPeriodMillis, sinkPeriodMillis);
       String clsName = conf.getClassName("");
       if (clsName == null) continue;  // sink can be registered later on
       String sinkName = entry.getKey();
@@ -504,8 +507,9 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
         LOG.warn("Error creating sink '"+ sinkName +"'", e);
       }
     }
-    period = confPeriod > 0 ? confPeriod
-        : config.getInt(PERIOD_KEY, PERIOD_DEFAULT);
+    long periodSec = config.getInt(PERIOD_KEY, PERIOD_DEFAULT);
+    period = confPeriodMillis > 0 ? confPeriodMillis
+        : config.getLong(PERIOD_MILLIS_KEY, periodSec * 1000);
   }
 
   static MetricsSinkAdapter newSink(String name, String desc, MetricsSink sink,
@@ -552,7 +556,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
   private void registerSystemSource() {
     MetricsConfig sysConf = sourceConfigs.get(MS_NAME);
     sysSource = new MetricsSourceAdapter(prefix, MS_STATS_NAME, MS_STATS_DESC,
-        MetricsAnnotations.makeSource(this), injectedTags, period, 
+        MetricsAnnotations.makeSource(this), injectedTags, period,
         sysConf == null ? config.subset(SOURCE_KEY) : sysConf);
     sysSource.start();
   }
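The net effect of the MetricsSystemImpl change above: the snapshot timer now works in milliseconds, each sink contributes periodMillis when it is set (falling back to period * 1000), and the timer fires at the gcd of all contributions. A test-only sink with periodMillis=100 therefore pulls the snapshot interval down to 100 ms without disturbing production period settings. A minimal, self-contained sketch of that resolution rule (the class and the sink values below are illustrative, not part of the patch):

    import java.util.Arrays;
    import java.util.List;

    public class PeriodResolutionSketch {
      static long gcd(long a, long b) { return b == 0 ? a : gcd(b, a % b); }

      public static void main(String[] args) {
        // Each entry is {periodSeconds, periodMillisOverride (0 = unset)}.
        List<long[]> sinks = Arrays.asList(
            new long[] {10, 0},     // a normal sink: 10 s -> 10000 ms
            new long[] {10, 100});  // a test sink: periodMillis wins -> 100 ms
        long periodMillis = 0;
        for (long[] s : sinks) {
          long contribution = s[1] > 0 ? s[1] : s[0] * 1000L;
          periodMillis = periodMillis == 0 ? contribution
              : gcd(periodMillis, contribution);
        }
        // gcd(10000, 100) = 100: the timer fires every 100 ms.
        System.out.println("snapshot timer period = " + periodMillis + " ms");
      }
    }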
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7bcaccd78ad..5c334f2d66c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -991,6 +991,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-9069. TestNameNodeMetricsLogger failing -port in use. (stevel)
 
+    HDFS-9067. o.a.h.hdfs.server.datanode.fsdataset.impl.TestLazyWriter
+    is failing in trunk (Surendra Singh Lilhore via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index de577014805..5a3a25f9b72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -68,6 +68,7 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.UnhandledException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -133,6 +134,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.hdfs.tools.JMXGet;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.net.NetUtils;
@@ -1863,4 +1865,22 @@ public class DFSTestUtil {
       }
     }
   }
+
+  public static void waitForMetric(final JMXGet jmx, final String metricName, final int expectedValue)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        try {
+          final int currentValue = Integer.parseInt(jmx.getValue(metricName));
+          LOG.info("Waiting for " + metricName +
+              " to reach value " + expectedValue +
+              ", current value = " + currentValue);
+          return currentValue == expectedValue;
+        } catch (Exception e) {
+          throw new UnhandledException("Test failed due to unexpected exception", e);
+        }
+      }
+    }, 1000, Integer.MAX_VALUE);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index 273babbff8c..6c49de5aea0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -510,20 +510,7 @@ public abstract class LazyPersistTestCase {
 
   protected void waitForMetric(final String metricName, final int expectedValue)
       throws TimeoutException, InterruptedException {
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        try {
-          final int currentValue = Integer.parseInt(jmx.getValue(metricName));
-          LOG.info("Waiting for " + metricName +
-              " to reach value " + expectedValue +
-              ", current value = " + currentValue);
-          return currentValue == expectedValue;
-        } catch (Exception e) {
-          throw new UnhandledException("Test failed due to unexpected exception", e);
-        }
-      }
-    }, 1000, Integer.MAX_VALUE);
+    DFSTestUtil.waitForMetric(jmx, metricName, expectedValue);
   }
 
   protected void triggerEviction(DataNode dn) {
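With waitForMetric hoisted from LazyPersistTestCase into DFSTestUtil, any test holding a JMXGet handle can block until a JMX metric converges instead of asserting on it immediately. A hedged usage sketch — the wrapper class is hypothetical, the JMXGet setup follows the pattern LazyPersistTestCase uses, and it assumes a running DataNode whose MBeans are reachable:

    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.tools.JMXGet;

    public class WaitForMetricUsage {
      public static void main(String[] args) throws Exception {
        JMXGet jmx = new JMXGet();
        jmx.setService("DataNode"); // query the DataNode's metrics beans
        jmx.init();                 // connect to the JMX agent
        // Polls every 1000 ms until the metric reaches the expected value;
        // the overall timeout is effectively unbounded (Integer.MAX_VALUE ms).
        DFSTestUtil.waitForMetric(jmx, "RamDiskBlocksEvicted", 1);
      }
    }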
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java
index 6b16066e8c2..16807640fd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java
@@ -72,6 +72,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
     // for the previous one.
     Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
     makeTestFile(path2, BLOCK_SIZE, true);
+    waitForMetric("RamDiskBlocksEvicted", 1);
     verifyRamDiskJMXMetric("RamDiskBlocksEvicted", 1);
     verifyRamDiskJMXMetric("RamDiskBlocksEvictedWithoutRead", 1);
   }
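The one-line TestLazyWriter change is the heart of the fix: eviction from RAM disk happens asynchronously after the second file is written, so asserting the RamDiskBlocksEvicted counter immediately was a race. Waiting for the metric first makes the assertion deterministic. A sketch of the resulting flow — the test method is illustrative, while makeTestFile, waitForMetric, verifyRamDiskJMXMetric, and BLOCK_SIZE are the LazyPersistTestCase members visible in this patch:

    // Inside a hypothetical LazyPersistTestCase subclass:
    public void testEvictionMetrics() throws Exception {
      Path path1 = new Path("/testEvictionMetrics.01.dat");
      Path path2 = new Path("/testEvictionMetrics.02.dat");
      makeTestFile(path1, BLOCK_SIZE, true);     // persisted to RAM disk
      makeTestFile(path2, BLOCK_SIZE, true);     // triggers eviction of path1
      waitForMetric("RamDiskBlocksEvicted", 1);  // wait out the async eviction
      verifyRamDiskJMXMetric("RamDiskBlocksEvicted", 1);  // now stable
      verifyRamDiskJMXMetric("RamDiskBlocksEvictedWithoutRead", 1);
    }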
+#
+# syntax: [prefix].[source|sink].[instance].[options]
+# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
+
+*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
+# default sampling period, in seconds
+*.period=10
+*.periodMillis=100
+
+# The namenode-metrics.out will contain metrics from all contexts
+#namenode.sink.file.filename=namenode-metrics.out
+# Specifying a special sampling period for namenode:
+#namenode.sink.*.period=8
+
+#datanode.sink.file.filename=datanode-metrics.out
+
+#resourcemanager.sink.file.filename=resourcemanager-metrics.out
+
+#nodemanager.sink.file.filename=nodemanager-metrics.out
+
+#mrappmaster.sink.file.filename=mrappmaster-metrics.out
+
+#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
+
+# the following example splits metrics of different
+# contexts to different sinks (in this case files)
+#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_jvm.context=jvm
+#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
+#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_mapred.context=mapred
+#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
+
+#
+# Below are for sending metrics to Ganglia
+#
+# for Ganglia 3.0 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
+#
+# for Ganglia 3.1 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+
+# *.sink.ganglia.period=10
+
+# default for supportsparse is false
+# *.sink.ganglia.supportsparse=true
+
+#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Tag values to use for the ganglia prefix. If not defined, no tags are used.
+# If '*', all tags are used. If specifying multiple tags, separate them with
+# commas. Note that the last segment of the property name is the context name.
+#
+#*.sink.ganglia.tagsForPrefix.jvm=ProcessName
+#*.sink.ganglia.tagsForPrefix.dfs=
+#*.sink.ganglia.tagsForPrefix.rpc=
+#*.sink.ganglia.tagsForPrefix.mapred=
+
+#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
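This new test-resources file is what makes the metric-driven waits practical: with *.periodMillis=100, snapshot and sink activity fires every 100 ms during tests instead of every 10 seconds, so counters such as RamDiskBlocksEvicted converge quickly enough for waitForMetric. A trimmed sketch of the same idea for a downstream test suite, in the same properties format (the sink filename is illustrative; comments sit on their own lines because the properties parser does not strip trailing comments):

    # Test-only metrics cadence: periodMillis takes priority over *.period.
    *.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
    # production-style default, in seconds
    *.period=10
    # test override, in milliseconds
    *.periodMillis=100
    # illustrative sink output file
    datanode.sink.file.filename=datanode-metrics.out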