HDFS-9067. o.a.h.hdfs.server.datanode.fsdataset.impl.TestLazyWriter is failing in trunk (Contributed by Surendra Singh Lilhore)
(cherry picked from commit a440567491)
This commit is contained in:
parent
eacc18677a
commit
aa963ab441
|
@ -58,6 +58,9 @@ class MetricsConfig extends SubsetConfiguration {
|
|||
static final String PERIOD_KEY = "period";
|
||||
static final int PERIOD_DEFAULT = 10; // seconds
|
||||
|
||||
// For testing, this will have the priority.
|
||||
static final String PERIOD_MILLIS_KEY = "periodMillis";
|
||||
|
||||
static final String QUEUE_CAPACITY_KEY = "queue.capacity";
|
||||
static final int QUEUE_CAPACITY_DEFAULT = 1;
|
||||
|
||||
|
|
|
@ -105,7 +105,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
|
|||
private Map<String, MetricsConfig> sourceConfigs, sinkConfigs;
|
||||
private boolean monitoring = false;
|
||||
private Timer timer;
|
||||
private int period; // seconds
|
||||
private long period; // milliseconds
|
||||
private long logicalTime; // number of timer invocations * period
|
||||
private ObjectName mbeanName;
|
||||
private boolean publishSelfMetrics = true;
|
||||
|
@ -262,7 +262,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
|
|||
checkNotNull(config, "config");
|
||||
MetricsConfig conf = sourceConfigs.get(name);
|
||||
MetricsSourceAdapter sa = new MetricsSourceAdapter(prefix, name, desc,
|
||||
source, injectedTags, period * 1000L, conf != null ? conf
|
||||
source, injectedTags, period, conf != null ? conf
|
||||
: config.subset(SOURCE_KEY));
|
||||
sources.put(name, sa);
|
||||
sa.start();
|
||||
|
@ -360,7 +360,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
|
|||
return;
|
||||
}
|
||||
logicalTime = 0;
|
||||
long millis = period * 1000L;
|
||||
long millis = period;
|
||||
timer = new Timer("Timer for '"+ prefix +"' metrics system", true);
|
||||
timer.scheduleAtFixedRate(new TimerTask() {
|
||||
public void run() {
|
||||
|
@ -371,7 +371,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
|
|||
}
|
||||
}
|
||||
}, millis, millis);
|
||||
LOG.info("Scheduled snapshot period at "+ period +" second(s).");
|
||||
LOG.info("Scheduled snapshot period at "+ (period/1000) +" second(s).");
|
||||
}
|
||||
|
||||
synchronized void onTimerEvent() {
|
||||
|
@ -485,12 +485,15 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
|
|||
|
||||
private synchronized void configureSinks() {
|
||||
sinkConfigs = config.getInstanceConfigs(SINK_KEY);
|
||||
int confPeriod = 0;
|
||||
long confPeriodMillis = 0;
|
||||
for (Entry<String, MetricsConfig> entry : sinkConfigs.entrySet()) {
|
||||
MetricsConfig conf = entry.getValue();
|
||||
int sinkPeriod = conf.getInt(PERIOD_KEY, PERIOD_DEFAULT);
|
||||
confPeriod = confPeriod == 0 ? sinkPeriod
|
||||
: ArithmeticUtils.gcd(confPeriod, sinkPeriod);
|
||||
// Support configuring periodMillis for testing.
|
||||
long sinkPeriodMillis =
|
||||
conf.getLong(PERIOD_MILLIS_KEY, sinkPeriod * 1000);
|
||||
confPeriodMillis = confPeriodMillis == 0 ? sinkPeriodMillis
|
||||
: ArithmeticUtils.gcd(confPeriodMillis, sinkPeriodMillis);
|
||||
String clsName = conf.getClassName("");
|
||||
if (clsName == null) continue; // sink can be registered later on
|
||||
String sinkName = entry.getKey();
|
||||
|
@ -504,8 +507,9 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
|
|||
LOG.warn("Error creating sink '"+ sinkName +"'", e);
|
||||
}
|
||||
}
|
||||
period = confPeriod > 0 ? confPeriod
|
||||
: config.getInt(PERIOD_KEY, PERIOD_DEFAULT);
|
||||
long periodSec = config.getInt(PERIOD_KEY, PERIOD_DEFAULT);
|
||||
period = confPeriodMillis > 0 ? confPeriodMillis
|
||||
: config.getLong(PERIOD_MILLIS_KEY, periodSec * 1000);
|
||||
}
|
||||
|
||||
static MetricsSinkAdapter newSink(String name, String desc, MetricsSink sink,
|
||||
|
@ -552,7 +556,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
|
|||
private void registerSystemSource() {
|
||||
MetricsConfig sysConf = sourceConfigs.get(MS_NAME);
|
||||
sysSource = new MetricsSourceAdapter(prefix, MS_STATS_NAME, MS_STATS_DESC,
|
||||
MetricsAnnotations.makeSource(this), injectedTags, period * 1000L,
|
||||
MetricsAnnotations.makeSource(this), injectedTags, period,
|
||||
sysConf == null ? config.subset(SOURCE_KEY) : sysConf);
|
||||
sysSource.start();
|
||||
}
|
||||
|
|
|
@ -991,6 +991,9 @@ Release 2.8.0 - UNRELEASED
|
|||
HDFS-9069. TestNameNodeMetricsLogger failing -port in use.
|
||||
(stevel)
|
||||
|
||||
HDFS-9067. o.a.h.hdfs.server.datanode.fsdataset.impl.TestLazyWriter
|
||||
is failing in trunk (Surendra Singh Lilhore via vinayakumarb)
|
||||
|
||||
Release 2.7.2 - UNRELEASED
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
|
|
@ -68,6 +68,7 @@ import java.util.concurrent.TimeoutException;
|
|||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.lang.UnhandledException;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
|
@ -133,6 +134,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
|
|||
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
|
||||
import org.apache.hadoop.hdfs.tools.DFSAdmin;
|
||||
import org.apache.hadoop.hdfs.tools.JMXGet;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.io.nativeio.NativeIO;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
|
@ -1863,4 +1865,22 @@ public class DFSTestUtil {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static void waitForMetric(final JMXGet jmx, final String metricName, final int expectedValue)
|
||||
throws TimeoutException, InterruptedException {
|
||||
GenericTestUtils.waitFor(new Supplier<Boolean>() {
|
||||
@Override
|
||||
public Boolean get() {
|
||||
try {
|
||||
final int currentValue = Integer.parseInt(jmx.getValue(metricName));
|
||||
LOG.info("Waiting for " + metricName +
|
||||
" to reach value " + expectedValue +
|
||||
", current value = " + currentValue);
|
||||
return currentValue == expectedValue;
|
||||
} catch (Exception e) {
|
||||
throw new UnhandledException("Test failed due to unexpected exception", e);
|
||||
}
|
||||
}
|
||||
}, 1000, Integer.MAX_VALUE);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -510,20 +510,7 @@ public abstract class LazyPersistTestCase {
|
|||
|
||||
protected void waitForMetric(final String metricName, final int expectedValue)
|
||||
throws TimeoutException, InterruptedException {
|
||||
GenericTestUtils.waitFor(new Supplier<Boolean>() {
|
||||
@Override
|
||||
public Boolean get() {
|
||||
try {
|
||||
final int currentValue = Integer.parseInt(jmx.getValue(metricName));
|
||||
LOG.info("Waiting for " + metricName +
|
||||
" to reach value " + expectedValue +
|
||||
", current value = " + currentValue);
|
||||
return currentValue == expectedValue;
|
||||
} catch (Exception e) {
|
||||
throw new UnhandledException("Test failed due to unexpected exception", e);
|
||||
}
|
||||
}
|
||||
}, 1000, Integer.MAX_VALUE);
|
||||
DFSTestUtil.waitForMetric(jmx, metricName, expectedValue);
|
||||
}
|
||||
|
||||
protected void triggerEviction(DataNode dn) {
|
||||
|
|
|
@ -72,6 +72,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
|
|||
// for the previous one.
|
||||
Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
|
||||
makeTestFile(path2, BLOCK_SIZE, true);
|
||||
waitForMetric("RamDiskBlocksEvicted", 1);
|
||||
verifyRamDiskJMXMetric("RamDiskBlocksEvicted", 1);
|
||||
verifyRamDiskJMXMetric("RamDiskBlocksEvictedWithoutRead", 1);
|
||||
}
|
||||
|
|
|
@ -0,0 +1,85 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# syntax: [prefix].[source|sink].[instance].[options]
|
||||
# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
|
||||
|
||||
*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
|
||||
# default sampling period, in seconds
|
||||
*.period=10
|
||||
*.periodMillis=100
|
||||
|
||||
# The namenode-metrics.out will contain metrics from all context
|
||||
#namenode.sink.file.filename=namenode-metrics.out
|
||||
# Specifying a special sampling period for namenode:
|
||||
#namenode.sink.*.period=8
|
||||
|
||||
#datanode.sink.file.filename=datanode-metrics.out
|
||||
|
||||
#resourcemanager.sink.file.filename=resourcemanager-metrics.out
|
||||
|
||||
#nodemanager.sink.file.filename=nodemanager-metrics.out
|
||||
|
||||
#mrappmaster.sink.file.filename=mrappmaster-metrics.out
|
||||
|
||||
#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
|
||||
|
||||
# the following example split metrics of different
|
||||
# context to different sinks (in this case files)
|
||||
#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
|
||||
#nodemanager.sink.file_jvm.context=jvm
|
||||
#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
|
||||
#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
|
||||
#nodemanager.sink.file_mapred.context=mapred
|
||||
#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
|
||||
|
||||
#
|
||||
# Below are for sending metrics to Ganglia
|
||||
#
|
||||
# for Ganglia 3.0 support
|
||||
# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
|
||||
#
|
||||
# for Ganglia 3.1 support
|
||||
# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
|
||||
|
||||
# *.sink.ganglia.period=10
|
||||
|
||||
# default for supportsparse is false
|
||||
# *.sink.ganglia.supportsparse=true
|
||||
|
||||
#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
|
||||
#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
|
||||
|
||||
# Tag values to use for the ganglia prefix. If not defined no tags are used.
|
||||
# If '*' all tags are used. If Specifying multiple tags separate them with
|
||||
# commas. Note that the last segment of the property name is the context name.
|
||||
#
|
||||
#*.sink.ganglia.tagsForPrefix.jvm=ProcessName
|
||||
#*.sink.ganglia.tagsForPrefix.dfs=
|
||||
#*.sink.ganglia.tagsForPrefix.rpc=
|
||||
#*.sink.ganglia.tagsForPrefix.mapred=
|
||||
|
||||
#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
|
||||
|
||||
#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
|
||||
|
||||
#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
|
||||
|
||||
#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
|
||||
|
||||
#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
|
||||
|
||||
#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
|
Loading…
Reference in New Issue