HBASE-10781 Remove hadoop-one-compat module and all references to hadoop1

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1579449 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2014-03-19 23:08:55 +00:00
parent 1f5c0d14fc
commit 37654f77a6
61 changed files with 62 additions and 4406 deletions

View File

@ -1,133 +0,0 @@
#!/bin/bash
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Generates a pom.xml with a hadoop1 or a hadoop2 suffix which can
# then be used to generate hadoop1- or hadoop2-suitable hbase builds
# fit for publishing to a maven repository. Use these poms also when
# making tarballs for release. The original pom.xml is untouched.
#
# This script exists because we cannot figure out how to publish
# into the local (or remote) repositories artifacts that can be
# used by downstream projects (with poms describing the necessary
# includes). See HBASE-8224 and HBASE-8488 for background.
#
# Generation is done by replacing values in the original pom and
# enabling the appropriate profile using the '!' trick in the
# hbase.profile property (this is fragile!) so there is no need to
# specify a profile on the command line. The original pom.xml is
# what we maintain, adding in new dependencies, etc., as needed.
# This script should survive most any change to the
# original.
#
# Here is how you would build an hbase against hadoop2 and publish
# the artifacts to your local repo:
#
# First run this script, passing in the current project version and the
# version you would like the generated artifacts to have. Include
# either -hadoop1 if built against hadoop1 or -hadoop2 if built against
# hadoop2. These substrings are expected as part of the new version.
#
# $ bash -x ./dev-support/generate-hadoopX-poms.sh 0.95.2-SNAPSHOT 0.95.2-hadoop2-SNAPSHOT
#
# This will generate new poms beside the current pom.xml, made from
# the original pom.xml but with a hadoop1 or hadoop2 suffix depending
# on what you passed for the new version. Now build, passing the
# generated pom name as the pom mvn should use. For example, say we
# were building hbase against hadoop2:
#
# $ mvn clean install -DskipTests -f pom.xml.hadoop2
#
# TODO: Generate new poms into target dirs so they don't pollute the src tree.
# It is a little awkward to do since parent pom needs to be able to find
# the child modules and the child modules need to be able to get to the
# parent.
function usage {
echo "Usage: $0 CURRENT_VERSION NEW_VERSION"
echo "For example, $0 0.95.2-SNAPSHOT 0.95.2-hadoop1-SNAPSHOT"
echo "Presumes VERSION has hadoop1 or hadoop2 in it."
exit 1
}
if [[ "$#" -ne 2 ]]; then usage; fi
old_hbase_version="$1"
new_hbase_version="$2"
# Get hadoop version from the new hbase version
hadoop_version=`echo "$new_hbase_version" | sed -n 's/.*-\(hadoop[12]\).*/\1/p'`
if [[ -z $hadoop_version ]]; then usage ; fi
# Get dir to operate in
hbase_home="${HBASE_HOME}"
if [[ -z "$hbase_home" ]]; then
here="`dirname \"$0\"`" # relative
here="`( cd \"$here\" && pwd )`" # absolutized and normalized
if [ -z "$here" ] ; then
# error; for some reason, the path is not accessible
# to the script (e.g. permissions re-evaled after suid)
exit 1 # fail
fi
hbase_home="`dirname \"$here\"`"
fi
# Now figure which profile to activate.
h1=
h2=
default='<name>!hadoop.profile<\/name>'
notdefault='<name>hadoop.profile<\/name>'
case "${hadoop_version}" in
hadoop1)
h1="${default}"
h2="${notdefault}<value>2.0<\/value>"
;;
hadoop2)
h1="${notdefault}<value>1.1<\/value>"
h2="${default}"
;;
*) echo "Unknown ${hadoop_version}"
usage
;;
esac
pom=pom.xml
nupom="$pom.$hadoop_version"
poms=`find $hbase_home -name ${pom}`
for p in $poms; do
nuname="`dirname $p`/${nupom}"
# Now we do search and replace of explicit strings. The best
# way of seeing what the below does is by doing a diff between
# the original pom and the generated pom (pom.xml.hadoop1 or
# pom.xml.hadoop2). We replace the compat.module variable with
# either hbase-hadoop1-compat or hbase-hadoop2-compat, we
# replace the version string in all poms, we change modules
# to include reference to the non-standard pom name, we
# adjust relative paths so child modules can find the parent pom,
# and we enable/disable hadoop 1 and hadoop 2 profiles as
# appropriate, removing a comment string too. We output the
# new pom beside the original.
sed -e "s/\${compat.module}/hbase-${hadoop_version}-compat/" \
-e "s/${old_hbase_version}/${new_hbase_version}/" \
-e "s/\(<module>[^<]*\)/\1\/${nupom}/" \
-e "s/\(relativePath>\.\.\)/\1\/${nupom}/" \
-e "s/<!--h1-->.*name>.*/${h1}/" \
-e "s/<!--h2-->.*<name>.*/${h2}/" \
-e '/--Below formatting for .*poms\.sh--/d' \
-e 's/\(<pomFileName>\)[^<]*/\1${nupom}/' \
$p > "$nuname"
done
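
For reference, Maven treats an activation property whose name carries a leading '!' as "active when the property is NOT set" -- that is the '!' trick the header comment mentions. Below is a sketch of what the activation blocks look like in a generated hadoop2 pom, using the h1/h2 values from the case statement above; the profile ids are illustrative and the real profiles carry dependency lists and more:

<profile>
  <id>hadoop-1.1</id>
  <activation>
    <property>
      <name>hadoop.profile</name><value>1.1</value>
    </property>
  </activation>
</profile>
<profile>
  <id>hadoop-2.0</id>
  <activation>
    <property>
      <name>!hadoop.profile</name>
    </property>
  </activation>
</profile>

With this in place the hadoop-2.0 profile is active whenever hadoop.profile is unset, so no profile needs to be named on the command line, while hadoop1 can still be forced with an explicit -Dhadoop.profile=1.1.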

View File

@ -44,7 +44,7 @@ if [ "$MAVEN_OPTS" != "" ]; then
fi
# Make a dir to save tgzs in.
d=`date -u +"%Y-%m-%dT%H:%M:%SZ"`
d=`date -u +"%Y%m%dT%H%M%SZ"`
archivedir="${HBASE_HOME}/../`basename $0`.$d"
echo "Archive dir ${archivedir}"
mkdir -p "${archivedir}"
@ -54,10 +54,10 @@ function tgz_mover {
}
function deploy {
MAVEN_OPTS="${mvnopts}" ${mvn} -f pom.xml.$1 clean install -DskipTests -Prelease
MAVEN_OPTS="${mvnopts}" ${mvn} -f pom.xml.$1 install -DskipTests site assembly:single -Prelease
MAVEN_OPTS="${mvnopts}" ${mvn} clean install -DskipTests -Prelease
MAVEN_OPTS="${mvnopts}" ${mvn} install -DskipTests site assembly:single -Prelease
tgz_mover
MAVEN_OPTS="${mvnopts}" ${mvn} -f pom.xml.$1 deploy -DskipTests -Papache-release
MAVEN_OPTS="${mvnopts}" ${mvn} deploy -DskipTests -Papache-release
}
# Build src tarball
@ -65,8 +65,7 @@ MAVEN_OPTS="${mvnopts}" ${mvn} clean install -DskipTests assembly:single -Dassem
tgz_mover
# Now do the two builds, one for hadoop1, then hadoop2
deploy "hadoop1"
deploy "hadoop2"
deploy
echo "DONE"
echo "Check the content of ${archivedir}. If good, sign and push to people.apache.org"

View File

@ -19,8 +19,7 @@
*/
-->
-<!--Shared by hadoop-one-compat.xml and hadoop-two-compat.xml.
-Does common copying-->
+<!--Shared. Does common copying-->
<component>
<fileSets>
<!--Copy over the site if built as docs dir -->

View File

@ -1,46 +0,0 @@
<?xml version="1.0"?>
<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1 http://maven.apache.org/xsd/assembly-1.1.1.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-->
<!--This 'all' id is not appended to the produced bundle because we do this: http://maven.apache.org/plugins/maven-assembly-plugin/faq.html#required-classifiers -->
<id>bin</id>
<formats>
<format>tar.gz</format>
</formats>
<componentDescriptors>
<componentDescriptor>src/main/assembly/components.xml</componentDescriptor>
</componentDescriptors>
<moduleSets>
<moduleSet>
<!-- Enable access to all projects in the current multimodule build. Eclipse
says this is an error, but builds from the command line just fine. -->
<useAllReactorProjects>true</useAllReactorProjects>
<!-- Binaries for the dependencies also go in the lib directory -->
<binaries>
<outputDirectory>lib</outputDirectory>
<unpack>false</unpack>
<dependencySets>
<dependencySet/>
</dependencySets>
</binaries>
</moduleSet>
</moduleSets>
</assembly>

View File

@ -1,147 +0,0 @@
<?xml version="1.0"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase</artifactId>
<groupId>org.apache.hbase</groupId>
<version>0.99.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
<artifactId>hbase-hadoop1-compat</artifactId>
<name>HBase - Hadoop One Compatibility</name>
<description> Interfaces to be implemented in order to smooth over hadoop version differences
</description>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-site-plugin</artifactId>
<configuration>
<skip>true</skip>
</configuration>
</plugin>
<plugin>
<!--Make it so assembly:single does nothing in here-->
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven.assembly.version}</version>
<configuration>
<skipAssembly>true</skipAssembly>
</configuration>
</plugin>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<!-- Always skip the second part executions, since we only run simple unit
tests in this module -->
<executions>
<execution>
<id>secondPartTestsExecution</id>
<phase>test</phase>
<goals>
<goal>test</goal>
</goals>
<configuration>
<skip>true</skip>
</configuration>
</execution>
</executions>
</plugin>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
</plugin>
</plugins>
</build>
<dependencies>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-hadoop-compat</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-hadoop-compat</artifactId>
<version>${project.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core</artifactId>
<version>${hadoop-one.version}</version><!--$NO-MVN-MAN-VER$-->
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>hsqldb</groupId>
<artifactId>hsqldb</artifactId>
</exclusion>
<exclusion>
<groupId>net.sf.kosmosfs</groupId>
<artifactId>kfs</artifactId>
</exclusion>
<exclusion>
<groupId>org.eclipse.jdt</groupId>
<artifactId>core</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
</exclusion>
<exclusion>
<groupId>oro</groupId>
<artifactId>oro</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.yammer.metrics</groupId>
<artifactId>metrics-core</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-test</artifactId>
<version>${hadoop-one.version}</version><!--$NO-MVN-MAN-VER$-->
<optional>true</optional>
<scope>test</scope>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</dependency>
</dependencies>
<profiles>
<!-- Skip the tests in this module -->
<profile>
<id>skipHadoopOneCompatTests</id>
<activation>
<property>
<name>skipHadoopOneCompatTests</name>
</property>
</activation>
<properties>
<surefire.skipFirstPart>true</surefire.skipFirstPart>
</properties>
</profile>
</profiles>
</project>
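
The skipHadoopOneCompatTests profile above keys off the mere presence of its property, so this module's tests could be skipped from the top level without naming a profile, along the lines of:

$ mvn test -DskipHadoopOneCompatTests

Setting the property activates the profile, which in turn sets surefire.skipFirstPart=true for the module.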

View File

@ -1,59 +0,0 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.ipc;
import java.util.HashMap;
public class MetricsHBaseServerSourceFactoryImpl extends MetricsHBaseServerSourceFactory {
private static enum SourceStorage {
INSTANCE;
HashMap<String, MetricsHBaseServerSource>
sources =
new HashMap<String, MetricsHBaseServerSource>();
}
@Override
public MetricsHBaseServerSource create(String serverName, MetricsHBaseServerWrapper wrapper) {
return getSource(serverName, wrapper);
}
private static synchronized MetricsHBaseServerSource getSource(String serverName,
MetricsHBaseServerWrapper wrapper) {
String context = createContextName(serverName);
MetricsHBaseServerSource source = SourceStorage.INSTANCE.sources.get(context);
if (source == null) {
//Create the source.
source = new MetricsHBaseServerSourceImpl(
METRICS_NAME,
METRICS_DESCRIPTION,
context.toLowerCase(),
context + METRICS_JMX_CONTEXT_SUFFIX, wrapper);
//Store back in storage
SourceStorage.INSTANCE.sources.put(context, source);
}
return source;
}
}
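
The enum-based SourceStorage above is the standard lazy, thread-safe singleton-holder idiom: repeated create() calls for the same server name hand back one cached source. A hypothetical caller (passing null for the wrapper is tolerated by the source shown in this commit, which null-checks it in getMetrics):

MetricsHBaseServerSourceFactory factory = new MetricsHBaseServerSourceFactoryImpl();
MetricsHBaseServerSource a = factory.create("HRegionServer", null);
MetricsHBaseServerSource b = factory.create("HRegionServer", null);
assert a == b;  // same context name, so the same cached source comes back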

View File

@ -1,124 +0,0 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.ipc;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
public class MetricsHBaseServerSourceImpl extends BaseSourceImpl
implements MetricsHBaseServerSource {
private final MetricsHBaseServerWrapper wrapper;
private final MetricMutableCounterLong authorizationSuccesses;
private final MetricMutableCounterLong authorizationFailures;
private final MetricMutableCounterLong authenticationSuccesses;
private final MetricMutableCounterLong authenticationFailures;
private final MetricMutableCounterLong sentBytes;
private final MetricMutableCounterLong receivedBytes;
private MetricMutableHistogram queueCallTime;
private MetricMutableHistogram processCallTime;
public MetricsHBaseServerSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext,
MetricsHBaseServerWrapper wrapper) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
this.wrapper = wrapper;
this.authorizationSuccesses = this.getMetricsRegistry().newCounter(AUTHORIZATION_SUCCESSES_NAME,
AUTHORIZATION_SUCCESSES_DESC, 0l);
this.authorizationFailures = this.getMetricsRegistry().newCounter(AUTHORIZATION_FAILURES_NAME,
AUTHORIZATION_FAILURES_DESC, 0l);
this.authenticationSuccesses = this.getMetricsRegistry().newCounter(
AUTHENTICATION_SUCCESSES_NAME, AUTHENTICATION_SUCCESSES_DESC, 0l);
this.authenticationFailures = this.getMetricsRegistry().newCounter(AUTHENTICATION_FAILURES_NAME,
AUTHENTICATION_FAILURES_DESC, 0l);
this.sentBytes = this.getMetricsRegistry().newCounter(SENT_BYTES_NAME,
SENT_BYTES_DESC, 0l);
this.receivedBytes = this.getMetricsRegistry().newCounter(RECEIVED_BYTES_NAME,
RECEIVED_BYTES_DESC, 0l);
this.queueCallTime = this.getMetricsRegistry().newHistogram(QUEUE_CALL_TIME_NAME,
QUEUE_CALL_TIME_DESC);
this.processCallTime = this.getMetricsRegistry().newHistogram(PROCESS_CALL_TIME_NAME,
PROCESS_CALL_TIME_DESC);
}
@Override
public void authorizationSuccess() {
authorizationSuccesses.incr();
}
@Override
public void authorizationFailure() {
authorizationFailures.incr();
}
@Override
public void authenticationFailure() {
authenticationFailures.incr();
}
@Override
public void authenticationSuccess() {
authenticationSuccesses.incr();
}
@Override
public void sentBytes(long count) {
this.sentBytes.incr(count);
}
@Override
public void receivedBytes(int count) {
this.receivedBytes.incr(count);
}
@Override
public void dequeuedCall(int qTime) {
queueCallTime.add(qTime);
}
@Override
public void processedCall(int processingTime) {
processCallTime.add(processingTime);
}
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName)
.setContext(metricsContext);
if (wrapper != null) {
mrb.addGauge(QUEUE_SIZE_NAME, QUEUE_SIZE_DESC, wrapper.getTotalQueueSize())
.addGauge(GENERAL_QUEUE_NAME, GENERAL_QUEUE_DESC, wrapper.getGeneralQueueLength())
.addGauge(REPLICATION_QUEUE_NAME,
REPLICATION_QUEUE_DESC, wrapper.getReplicationQueueLength())
.addGauge(PRIORITY_QUEUE_NAME, PRIORITY_QUEUE_DESC, wrapper.getPriorityQueueLength())
.addGauge(NUM_OPEN_CONNECTIONS_NAME,
NUM_OPEN_CONNECTIONS_DESC, wrapper.getNumOpenConnections());
}
metricsRegistry.snapshot(mrb, all);
}
}

View File

@ -1,58 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.mapreduce;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.JobSubmissionFiles;
/**
* Utility methods to interact with a job.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class JobUtil {
private static final Log LOG = LogFactory.getLog(JobUtil.class);
protected JobUtil() {
super();
}
/**
* Initializes the staging directory and returns the path.
*
* @param conf system configuration
* @return staging directory path
* @throws IOException
* @throws InterruptedException
*/
public static Path getStagingDir(Configuration conf)
throws IOException, InterruptedException {
JobClient jobClient = new JobClient(new JobConf(conf));
return JobSubmissionFiles.getStagingDir(jobClient, conf);
}
}
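
JobUtil wraps the MR1 JobClient/JobSubmissionFiles pairing so callers need not touch either directly. A hypothetical caller (HBaseConfiguration.create() is assumed here as the usual way to obtain a conf):

Configuration conf = HBaseConfiguration.create();
Path stagingDir = JobUtil.getStagingDir(conf);  // root of the jobtracker's staging area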

View File

@ -1,74 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
public class MetricsAssignmentManagerSourceImpl extends BaseSourceImpl implements MetricsAssignmentManagerSource {
private MetricMutableGaugeLong ritGauge;
private MetricMutableGaugeLong ritCountOverThresholdGauge;
private MetricMutableGaugeLong ritOldestAgeGauge;
private MetricMutableHistogram assignTimeHisto;
private MetricMutableHistogram bulkAssignTimeHisto;
public MetricsAssignmentManagerSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
public MetricsAssignmentManagerSourceImpl(String metricsName,
String metricsDescription,
String metricsContext, String metricsJmxContext) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
}
@Override
public void init() {
super.init();
ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, "", 0l);
ritCountOverThresholdGauge = metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, "", 0l);
ritOldestAgeGauge = metricsRegistry.newGauge(RIT_OLDEST_AGE_NAME, "", 0l);
assignTimeHisto = metricsRegistry.newHistogram(ASSIGN_TIME_NAME);
bulkAssignTimeHisto = metricsRegistry.newHistogram(BULK_ASSIGN_TIME_NAME);
}
@Override
public void updateAssignmentTime(long time) {
assignTimeHisto.add(time);
}
@Override
public void updateBulkAssignTime(long time) {
bulkAssignTimeHisto.add(time);
}
public void setRIT(int ritCount) {
ritGauge.set(ritCount);
}
public void setRITCountOverThreshold(int ritCount) {
ritCountOverThresholdGauge.set(ritCount);
}
public void setRITOldestAge(long ritCount) {
ritOldestAgeGauge.set(ritCount);
}
}

View File

@ -1,69 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
public class MetricsMasterFilesystemSourceImpl extends BaseSourceImpl implements MetricsMasterFileSystemSource {
private MetricMutableHistogram splitSizeHisto;
private MetricMutableHistogram splitTimeHisto;
private MetricMutableHistogram metaSplitTimeHisto;
private MetricMutableHistogram metaSplitSizeHisto;
public MetricsMasterFilesystemSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
public MetricsMasterFilesystemSourceImpl(String metricsName,
String metricsDescription,
String metricsContext, String metricsJmxContext) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
}
@Override
public void init() {
splitSizeHisto = metricsRegistry.newHistogram(SPLIT_SIZE_NAME, SPLIT_SIZE_DESC);
splitTimeHisto = metricsRegistry.newHistogram(SPLIT_TIME_NAME, SPLIT_TIME_DESC);
metaSplitTimeHisto = metricsRegistry.newHistogram(META_SPLIT_TIME_NAME, META_SPLIT_TIME_DESC);
metaSplitSizeHisto = metricsRegistry.newHistogram(META_SPLIT_SIZE_NAME, META_SPLIT_SIZE_DESC);
}
@Override
public void updateSplitTime(long time) {
splitTimeHisto.add(time);
}
@Override
public void updateSplitSize(long size) {
splitSizeHisto.add(size);
}
@Override
public void updateMetaWALSplitTime(long time) {
metaSplitTimeHisto.add(time);
}
@Override
public void updateMetaWALSplitSize(long size) {
metaSplitSizeHisto.add(size);
}
}

View File

@ -1,37 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
/**
* Factory to create MetricsMasterSource when given a MetricsMasterWrapper
*/
public class MetricsMasterSourceFactoryImpl implements MetricsMasterSourceFactory {
private static enum FactoryStorage {
INSTANCE;
MetricsMasterSource masterSource;
}
@Override
public synchronized MetricsMasterSource create(MetricsMasterWrapper masterWrapper) {
if (FactoryStorage.INSTANCE.masterSource == null) {
FactoryStorage.INSTANCE.masterSource = new MetricsMasterSourceImpl(masterWrapper);
}
return FactoryStorage.INSTANCE.masterSource;
}
}

View File

@ -1,103 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
/**
* Hadoop1 implementation of MetricsMasterSource.
*
 * Implements BaseSource through BaseSourceImpl, following the standard pattern.
*/
@InterfaceAudience.Private
public class MetricsMasterSourceImpl
extends BaseSourceImpl implements MetricsMasterSource {
private final MetricsMasterWrapper masterWrapper;
private MetricMutableCounterLong clusterRequestsCounter;
public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper);
}
public MetricsMasterSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext,
MetricsMasterWrapper masterWrapper) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
this.masterWrapper = masterWrapper;
}
@Override
public void init() {
super.init();
clusterRequestsCounter = metricsRegistry.newCounter(CLUSTER_REQUESTS_NAME, "", 0l);
}
public void incRequests(final int inc) {
this.clusterRequestsCounter.incr(inc);
}
/**
* Method to export all the metrics.
*
* @param metricsBuilder Builder to accept metrics
* @param all push all or only changed?
*/
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
MetricsRecordBuilder metricsRecordBuilder = metricsBuilder.addRecord(metricsName)
.setContext(metricsContext);
// masterWrapper can be null because this function is called inside of init.
if (masterWrapper != null) {
metricsRecordBuilder
.addGauge(MASTER_ACTIVE_TIME_NAME,
MASTER_ACTIVE_TIME_DESC, masterWrapper.getActiveTime())
.addGauge(MASTER_START_TIME_NAME,
MASTER_START_TIME_DESC, masterWrapper.getStartTime())
.addGauge(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC, masterWrapper.getAverageLoad())
.tag(LIVE_REGION_SERVERS_NAME, LIVE_REGION_SERVERS_DESC,
masterWrapper.getRegionServers())
.addGauge(NUM_REGION_SERVERS_NAME,
NUMBER_OF_REGION_SERVERS_DESC, masterWrapper.getNumRegionServers())
.tag(DEAD_REGION_SERVERS_NAME, DEAD_REGION_SERVERS_DESC,
masterWrapper.getDeadRegionServers())
.addGauge(NUM_DEAD_REGION_SERVERS_NAME,
NUMBER_OF_DEAD_REGION_SERVERS_DESC,
masterWrapper.getNumDeadRegionServers())
.tag(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC, masterWrapper.getZookeeperQuorum())
.tag(SERVER_NAME_NAME, SERVER_NAME_DESC, masterWrapper.getServerName())
.tag(CLUSTER_ID_NAME, CLUSTER_ID_DESC, masterWrapper.getClusterId())
.tag(IS_ACTIVE_MASTER_NAME,
IS_ACTIVE_MASTER_DESC,
String.valueOf(masterWrapper.getIsActiveMaster()));
}
metricsRegistry.snapshot(metricsRecordBuilder, all);
}
}

View File

@ -1,64 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
public class MetricsSnapshotSourceImpl extends BaseSourceImpl implements MetricsSnapshotSource {
private MetricMutableHistogram snapshotTimeHisto;
private MetricMutableHistogram snapshotCloneTimeHisto;
private MetricMutableHistogram snapshotRestoreTimeHisto;
public MetricsSnapshotSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
public MetricsSnapshotSourceImpl(String metricsName,
String metricsDescription,
String metricsContext, String metricsJmxContext) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
}
@Override
public void init() {
snapshotTimeHisto = metricsRegistry.newHistogram(
SNAPSHOT_TIME_NAME, SNAPSHOT_TIME_DESC);
snapshotCloneTimeHisto = metricsRegistry.newHistogram(
SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC);
snapshotRestoreTimeHisto = metricsRegistry.newHistogram(
SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC);
}
@Override
public void updateSnapshotTime(long time) {
snapshotTimeHisto.add(time);
}
@Override
public void updateSnapshotCloneTime(long time) {
snapshotCloneTimeHisto.add(time);
}
@Override
public void updateSnapshotRestoreTime(long time) {
snapshotRestoreTimeHisto.add(time);
}
}

View File

@ -1,57 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.balancer;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
public class MetricsBalancerSourceImpl extends BaseSourceImpl implements MetricsBalancerSource{
private MetricMutableHistogram balanceClusterHisto;
private MetricMutableCounterLong miscCount;
public MetricsBalancerSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
public MetricsBalancerSourceImpl(String metricsName,
String metricsDescription,
String metricsContext, String metricsJmxContext) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
}
@Override
public void init() {
balanceClusterHisto = metricsRegistry.newHistogram(BALANCE_CLUSTER);
miscCount = metricsRegistry.newCounter(MISC_INVOATION_COUNT, "", 0L);
}
@Override
public void updateBalanceCluster(long time) {
balanceClusterHisto.add(time);
}
@Override
public void incrMiscInvocations() {
miscCount.incr();
}
}

View File

@ -1,184 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.metrics;
import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
import org.apache.hadoop.metrics2.lib.*;
import org.apache.hadoop.metrics2.source.JvmMetricsSource;
/**
* Hadoop 1 implementation of BaseSource (using metrics2 framework). It handles registration to
* DefaultMetricsSystem and creation of the metrics registry.
*
 * All MetricsSources in hbase-hadoop1-compat should derive from this class.
*/
public class BaseSourceImpl implements BaseSource, MetricsSource {
private static enum DefaultMetricsSystemInitializer {
INSTANCE;
private boolean inited = false;
private JvmMetricsSource jvmMetricsSource;
synchronized void init(String name) {
if (inited) return;
inited = true;
DefaultMetricsSystem.initialize(HBASE_METRICS_SYSTEM_NAME);
jvmMetricsSource = JvmMetricsSource.create(name, "");
}
}
protected final DynamicMetricsRegistry metricsRegistry;
protected final String metricsName;
protected final String metricsDescription;
protected final String metricsContext;
protected final String metricsJmxContext;
public BaseSourceImpl(
String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
this.metricsName = metricsName;
this.metricsDescription = metricsDescription;
this.metricsContext = metricsContext;
this.metricsJmxContext = metricsJmxContext;
metricsRegistry = new DynamicMetricsRegistry(metricsName).setContext(metricsContext);
DefaultMetricsSystemInitializer.INSTANCE.init(metricsName);
//Register this instance.
DefaultMetricsSystem.INSTANCE.registerSource(metricsJmxContext, metricsDescription, this);
init();
}
public void init() {
this.metricsRegistry.clearMetrics();
}
/**
* Set a single gauge to a value.
*
* @param gaugeName gauge name
* @param value the new value of the gauge.
*/
public void setGauge(String gaugeName, long value) {
MetricMutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, value);
gaugeInt.set(value);
}
/**
* Add some amount to a gauge.
*
* @param gaugeName The name of the gauge to increment.
* @param delta The amount to increment the gauge by.
*/
public void incGauge(String gaugeName, long delta) {
MetricMutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, 0l);
gaugeInt.incr(delta);
}
/**
* Decrease the value of a named gauge.
*
* @param gaugeName The name of the gauge.
 * @param delta the amount to subtract from the gauge value.
*/
public void decGauge(String gaugeName, long delta) {
MetricMutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, 0l);
gaugeInt.decr(delta);
}
/**
* Increment a named counter by some value.
*
* @param key the name of the counter
 * @param delta the amount to increment by
*/
public void incCounters(String key, long delta) {
MetricMutableCounterLong counter = metricsRegistry.getLongCounter(key, 0l);
counter.incr(delta);
}
@Override
public void updateHistogram(String name, long value) {
MetricMutableHistogram histo = metricsRegistry.getHistogram(name);
histo.add(value);
}
@Override
public void updateQuantile(String name, long value) {
MetricMutableQuantiles histo = metricsRegistry.getQuantile(name);
histo.add(value);
}
/**
* Remove a named metric.
*
 * @param key the name of the metric to remove
*/
public void removeMetric(String key) {
metricsRegistry.removeMetric(key);
JmxCacheBuster.clearJmxCache();
}
/**
* Method to export all the metrics.
*
* @param metricsBuilder Builder to accept metrics
* @param all push all or only changed?
*/
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName)
.setContext(metricsContext);
metricsRegistry.snapshot(mrb, all);
}
/**
* Used to get at the DynamicMetricsRegistry.
* @return DynamicMetricsRegistry
*/
public DynamicMetricsRegistry getMetricsRegistry() {
return metricsRegistry;
}
public String getMetricsContext() {
return metricsContext;
}
public String getMetricsDescription() {
return metricsDescription;
}
public String getMetricsJmxContext() {
return metricsJmxContext;
}
public String getMetricsName() {
return metricsName;
}
}
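
Every source in this module follows the same shape: call the four-arg super constructor, create concrete metrics in init(), and mutate them through the helpers above. A minimal hypothetical subclass (names are illustrative, not from the original module):

public class ExampleSourceImpl extends BaseSourceImpl {
  public ExampleSourceImpl() {
    // name, description, context, JMX context -- same four-arg pattern as above
    super("Example", "An example metrics source", "example", "Example,sub=Demo");
  }
}

Callers then poke named metrics directly through the inherited helpers:

ExampleSourceImpl source = new ExampleSourceImpl();
source.incCounters("requestCount", 1);
source.setGauge("queueSize", 42);
source.updateHistogram("latencyMs", 7);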

View File

@ -1,41 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.metrics;
import org.apache.hadoop.metrics2.util.MBeans;
import javax.management.ObjectName;
/**
* Hadoop1 metrics2 implementation of an object that registers MBeans.
*/
public class MBeanSourceImpl implements MBeanSource {
/**
* Register an mbean with the underlying metrics system
* @param serviceName Metrics service/system name
* @param metricsName name of the metrics object to expose
* @param theMbean the actual MBean
* @return ObjectName from jmx
*/
@Override
public ObjectName register(String serviceName, String metricsName, Object theMbean) {
return MBeans.register(serviceName, metricsName, theMbean);
}
}

View File

@ -1,98 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import java.util.TreeSet;
import java.util.concurrent.locks.ReentrantReadWriteLock;
public class MetricsRegionAggregateSourceImpl extends BaseSourceImpl
implements MetricsRegionAggregateSource {
private final Log LOG = LogFactory.getLog(this.getClass());
// lock to guard against concurrent access to regionSources
final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
private final TreeSet<MetricsRegionSourceImpl> regionSources =
new TreeSet<MetricsRegionSourceImpl>();
public MetricsRegionAggregateSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
public MetricsRegionAggregateSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
}
@Override
public void register(MetricsRegionSource source) {
lock.writeLock().lock();
try {
regionSources.add((MetricsRegionSourceImpl) source);
} finally {
lock.writeLock().unlock();
}
}
@Override
public void deregister(MetricsRegionSource source) {
lock.writeLock().lock();
try {
regionSources.remove(source);
} finally {
lock.writeLock().unlock();
}
}
/**
* Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
* expectations of java programmers. Instead of returning anything Hadoop metrics expects
* getMetrics to push the metrics into the metricsBuilder.
*
* @param metricsBuilder Builder to accept metrics
* @param all push all or only changed?
*/
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName)
.setContext(metricsContext);
if (regionSources != null) {
lock.readLock().lock();
try {
for (MetricsRegionSourceImpl regionMetricSource : regionSources) {
regionMetricSource.snapshot(mrb, all);
}
} finally {
lock.readLock().unlock();
}
}
metricsRegistry.snapshot(mrb, all);
}
}

View File

@ -1,50 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
/**
* Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper.
*/
public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServerSourceFactory {
public static enum FactoryStorage {
INSTANCE;
private Object aggLock = new Object();
private MetricsRegionAggregateSourceImpl aggImpl;
}
private synchronized MetricsRegionAggregateSourceImpl getAggregate() {
synchronized (FactoryStorage.INSTANCE.aggLock) {
if (FactoryStorage.INSTANCE.aggImpl == null) {
FactoryStorage.INSTANCE.aggImpl = new MetricsRegionAggregateSourceImpl();
}
return FactoryStorage.INSTANCE.aggImpl;
}
}
@Override
public MetricsRegionServerSource createServer(MetricsRegionServerWrapper regionServerWrapper) {
return new MetricsRegionServerSourceImpl(regionServerWrapper);
}
@Override
public MetricsRegionSource createRegion(MetricsRegionWrapper wrapper) {
return new MetricsRegionSourceImpl(wrapper, getAggregate());
}
}

View File

@ -1,214 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
/**
* Hadoop1 implementation of MetricsRegionServerSource.
*
 * Implements BaseSource through BaseSourceImpl, following the standard pattern.
*/
public class MetricsRegionServerSourceImpl
extends BaseSourceImpl implements MetricsRegionServerSource {
final MetricsRegionServerWrapper rsWrap;
private final MetricHistogram putHisto;
private final MetricHistogram deleteHisto;
private final MetricHistogram getHisto;
private final MetricHistogram incrementHisto;
private final MetricHistogram appendHisto;
private final MetricHistogram replayHisto;
private final MetricMutableCounterLong slowPut;
private final MetricMutableCounterLong slowDelete;
private final MetricMutableCounterLong slowGet;
private final MetricMutableCounterLong slowIncrement;
private final MetricMutableCounterLong slowAppend;
public MetricsRegionServerSourceImpl(MetricsRegionServerWrapper rsWrap) {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap);
}
public MetricsRegionServerSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext,
MetricsRegionServerWrapper rsWrap) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
this.rsWrap = rsWrap;
putHisto = getMetricsRegistry().newHistogram(MUTATE_KEY);
slowPut = getMetricsRegistry().newCounter(SLOW_MUTATE_KEY, SLOW_MUTATE_DESC, 0l);
deleteHisto = getMetricsRegistry().newHistogram(DELETE_KEY);
slowDelete = getMetricsRegistry().newCounter(SLOW_DELETE_KEY, SLOW_DELETE_DESC, 0l);
getHisto = getMetricsRegistry().newHistogram(GET_KEY);
slowGet = getMetricsRegistry().newCounter(SLOW_GET_KEY, SLOW_GET_DESC, 0l);
incrementHisto = getMetricsRegistry().newHistogram(INCREMENT_KEY);
slowIncrement = getMetricsRegistry().newCounter(SLOW_INCREMENT_KEY, SLOW_INCREMENT_DESC, 0l);
appendHisto = getMetricsRegistry().newHistogram(APPEND_KEY);
slowAppend = getMetricsRegistry().newCounter(SLOW_APPEND_KEY, SLOW_APPEND_DESC, 0l);
replayHisto = getMetricsRegistry().newHistogram(REPLAY_KEY);
}
@Override
public void updatePut(long t) {
putHisto.add(t);
}
@Override
public void updateDelete(long t) {
deleteHisto.add(t);
}
@Override
public void updateGet(long t) {
getHisto.add(t);
}
@Override
public void updateIncrement(long t) {
incrementHisto.add(t);
}
@Override
public void updateAppend(long t) {
appendHisto.add(t);
}
@Override
public void updateReplay(long t) {
replayHisto.add(t);
}
@Override
public void incrSlowPut() {
slowPut.incr();
}
@Override
public void incrSlowDelete() {
slowDelete.incr();
}
@Override
public void incrSlowGet() {
slowGet.incr();
}
@Override
public void incrSlowIncrement() {
slowIncrement.incr();
}
@Override
public void incrSlowAppend() {
slowAppend.incr();
}
/**
* Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
* expectations of java programmers. Instead of returning anything Hadoop metrics expects
* getMetrics to push the metrics into the metricsBuilder.
*
* @param metricsBuilder Builder to accept metrics
* @param all push all or only changed?
*/
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName)
.setContext(metricsContext);
// rsWrap can be null because this function is called inside of init.
if (rsWrap != null) {
mrb.addGauge(REGION_COUNT, REGION_COUNT_DESC, rsWrap.getNumOnlineRegions())
.addGauge(STORE_COUNT, STORE_COUNT_DESC, rsWrap.getNumStores())
.addGauge(HLOGFILE_COUNT, HLOGFILE_COUNT_DESC, rsWrap.getNumHLogFiles())
.addGauge(HLOGFILE_SIZE, HLOGFILE_SIZE_DESC, rsWrap.getHLogFileSize())
.addGauge(STOREFILE_COUNT, STOREFILE_COUNT_DESC, rsWrap.getNumStoreFiles())
.addGauge(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC, rsWrap.getMemstoreSize())
.addGauge(STOREFILE_SIZE, STOREFILE_SIZE_DESC, rsWrap.getStoreFileSize())
.addGauge(RS_START_TIME_NAME, RS_START_TIME_DESC, rsWrap.getStartCode())
.addCounter(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC, rsWrap.getTotalRequestCount())
.addCounter(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC, rsWrap.getReadRequestsCount())
.addCounter(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC, rsWrap.getWriteRequestsCount())
.addCounter(CHECK_MUTATE_FAILED_COUNT,
CHECK_MUTATE_FAILED_COUNT_DESC,
rsWrap.getCheckAndMutateChecksFailed())
.addCounter(CHECK_MUTATE_PASSED_COUNT,
CHECK_MUTATE_PASSED_COUNT_DESC,
rsWrap.getCheckAndMutateChecksPassed())
.addGauge(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC, rsWrap.getStoreFileIndexSize())
.addGauge(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC, rsWrap.getTotalStaticIndexSize())
.addGauge(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC, rsWrap.getTotalStaticBloomSize())
.addGauge(NUMBER_OF_MUTATIONS_WITHOUT_WAL,
NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC,
rsWrap.getNumMutationsWithoutWAL())
.addGauge(DATA_SIZE_WITHOUT_WAL,
DATA_SIZE_WITHOUT_WAL_DESC,
rsWrap.getDataInMemoryWithoutWAL())
.addGauge(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC, rsWrap.getPercentFileLocal())
.addGauge(COMPACTION_QUEUE_LENGTH,
COMPACTION_QUEUE_LENGTH_DESC,
rsWrap.getCompactionQueueSize())
.addGauge(LARGE_COMPACTION_QUEUE_LENGTH,
COMPACTION_QUEUE_LENGTH_DESC,
rsWrap.getLargeCompactionQueueSize())
.addGauge(SMALL_COMPACTION_QUEUE_LENGTH,
COMPACTION_QUEUE_LENGTH_DESC,
rsWrap.getSmallCompactionQueueSize())
.addGauge(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC, rsWrap.getFlushQueueSize())
.addGauge(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC, rsWrap.getBlockCacheFreeSize())
.addGauge(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC, rsWrap.getBlockCacheCount())
.addGauge(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC, rsWrap.getBlockCacheSize())
.addCounter(BLOCK_CACHE_HIT_COUNT,
BLOCK_CACHE_HIT_COUNT_DESC,
rsWrap.getBlockCacheHitCount())
.addCounter(BLOCK_CACHE_MISS_COUNT,
BLOCK_COUNT_MISS_COUNT_DESC,
rsWrap.getBlockCacheMissCount())
.addCounter(BLOCK_CACHE_EVICTION_COUNT,
BLOCK_CACHE_EVICTION_COUNT_DESC,
rsWrap.getBlockCacheEvictedCount())
.addGauge(BLOCK_CACHE_HIT_PERCENT,
BLOCK_CACHE_HIT_PERCENT_DESC,
rsWrap.getBlockCacheHitPercent())
.addGauge(BLOCK_CACHE_EXPRESS_HIT_PERCENT,
BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC,
rsWrap.getBlockCacheHitCachingPercent())
.addCounter(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC, rsWrap.getUpdatesBlockedTime())
.tag(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC, rsWrap.getZookeeperQuorum())
.tag(SERVER_NAME_NAME, SERVER_NAME_DESC, rsWrap.getServerName())
.tag(CLUSTER_ID_NAME, CLUSTER_ID_DESC, rsWrap.getClusterId());
}
metricsRegistry.snapshot(mrb, all);
}
}

View File

@ -1,199 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
public class MetricsRegionSourceImpl implements MetricsRegionSource {
private final MetricsRegionWrapper regionWrapper;
private boolean closed = false;
private MetricsRegionAggregateSourceImpl agg;
private DynamicMetricsRegistry registry;
private static final Log LOG = LogFactory.getLog(MetricsRegionSourceImpl.class);
private String regionNamePrefix;
private String regionPutKey;
private String regionDeleteKey;
private String regionGetKey;
private String regionIncrementKey;
private String regionAppendKey;
private String regionScanNextKey;
private MetricMutableCounterLong regionPut;
private MetricMutableCounterLong regionDelete;
private MetricMutableCounterLong regionIncrement;
private MetricMutableCounterLong regionAppend;
private MetricMutableHistogram regionGet;
private MetricMutableHistogram regionScanNext;
public MetricsRegionSourceImpl(MetricsRegionWrapper regionWrapper,
MetricsRegionAggregateSourceImpl aggregate) {
this.regionWrapper = regionWrapper;
agg = aggregate;
agg.register(this);
LOG.debug("Creating new MetricsRegionSourceImpl for table " +
regionWrapper.getTableName() + " " + regionWrapper.getRegionName());
registry = agg.getMetricsRegistry();
regionNamePrefix = "namespace_" + regionWrapper.getNamespace() +
"_table_" + regionWrapper.getTableName() +
"_region_" + regionWrapper.getRegionName() +
"_metric_";
String suffix = "Count";
regionPutKey = regionNamePrefix + MetricsRegionServerSource.MUTATE_KEY + suffix;
regionPut = registry.getLongCounter(regionPutKey, 0l);
regionDeleteKey = regionNamePrefix + MetricsRegionServerSource.DELETE_KEY + suffix;
regionDelete = registry.getLongCounter(regionDeleteKey, 0l);
regionIncrementKey = regionNamePrefix + MetricsRegionServerSource.INCREMENT_KEY + suffix;
regionIncrement = registry.getLongCounter(regionIncrementKey, 0l);
regionAppendKey = regionNamePrefix + MetricsRegionServerSource.APPEND_KEY + suffix;
regionAppend = registry.getLongCounter(regionAppendKey, 0l);
regionGetKey = regionNamePrefix + MetricsRegionServerSource.GET_KEY;
regionGet = registry.newHistogram(regionGetKey);
regionScanNextKey = regionNamePrefix + MetricsRegionServerSource.SCAN_NEXT_KEY;
regionScanNext = registry.newHistogram(regionScanNextKey);
}
@Override
public void close() {
closed = true;
agg.deregister(this);
LOG.trace("Removing region Metrics: " + regionWrapper.getRegionName());
registry.removeMetric(regionPutKey);
registry.removeMetric(regionDeleteKey);
registry.removeMetric(regionIncrementKey);
registry.removeMetric(regionAppendKey);
registry.removeMetric(regionGetKey);
registry.removeMetric(regionScanNextKey);
JmxCacheBuster.clearJmxCache();
}
@Override
public void updatePut() {
regionPut.incr();
}
@Override
public void updateDelete() {
regionDelete.incr();
}
@Override
public void updateGet(long getSize) {
regionGet.add(getSize);
}
@Override
public void updateScan(long scanSize) {
regionScanNext.add(scanSize);
}
@Override
public void updateIncrement() {
regionIncrement.incr();
}
@Override
public void updateAppend() {
regionAppend.incr();
}
@Override
public MetricsRegionAggregateSource getAggregateSource() {
return agg;
}
@Override
public int compareTo(MetricsRegionSource source) {
if (!(source instanceof MetricsRegionSourceImpl))
return -1;
MetricsRegionSourceImpl impl = (MetricsRegionSourceImpl) source;
return this.regionWrapper.getRegionName()
.compareTo(impl.regionWrapper.getRegionName());
}
@Override
public boolean equals(Object obj) {
if (obj == this) return true;
if (!(obj instanceof MetricsRegionSourceImpl)) return false;
return compareTo((MetricsRegionSourceImpl)obj) == 0;
}
@Override
public int hashCode() {
return this.regionWrapper.getRegionName().hashCode();
}
void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
if (closed) return;
mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.STORE_COUNT,
MetricsRegionServerSource.STORE_COUNT_DESC,
this.regionWrapper.getNumStores());
mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT,
MetricsRegionServerSource.STOREFILE_COUNT_DESC,
this.regionWrapper.getNumStoreFiles());
mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE,
MetricsRegionServerSource.MEMSTORE_SIZE_DESC,
this.regionWrapper.getMemstoreSize());
mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE,
MetricsRegionServerSource.STOREFILE_SIZE_DESC,
this.regionWrapper.getStoreFileSize());
mrb.addCounter(regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT,
MetricsRegionServerSource.READ_REQUEST_COUNT_DESC,
this.regionWrapper.getReadRequestCount());
mrb.addCounter(regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT,
MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC,
this.regionWrapper.getWriteRequestCount());
mrb.addCounter(regionNamePrefix + MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT,
MetricsRegionSource.COMPACTIONS_COMPLETED_DESC,
this.regionWrapper.getNumCompactionsCompleted());
mrb.addCounter(regionNamePrefix + MetricsRegionSource.NUM_BYTES_COMPACTED_COUNT,
MetricsRegionSource.NUM_BYTES_COMPACTED_DESC,
this.regionWrapper.getNumBytesCompacted());
mrb.addCounter(regionNamePrefix + MetricsRegionSource.NUM_FILES_COMPACTED_COUNT,
MetricsRegionSource.NUM_FILES_COMPACTED_DESC,
this.regionWrapper.getNumFilesCompacted());
}
}
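For orientation, the constructor above composes one flat key prefix per region and hangs every per-region metric off it. A minimal sketch of the resulting key strings, assuming hypothetical wrapper values (namespace "default", table "t1", region "abc123") and assuming MUTATE_KEY and GET_KEY resolve to the strings "mutate" and "get":

public class RegionMetricKeySketch {
  public static void main(String[] args) {
    // Hypothetical values; the real prefix is built from MetricsRegionWrapper.
    String prefix = "namespace_default_table_t1_region_abc123_metric_";
    System.out.println(prefix + "mutateCount"); // per-region put counter key
    System.out.println(prefix + "get");         // per-region get histogram key
  }
}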

View File

@ -1,75 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver.wal;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
/**
* Hadoop1 implementation of MetricsEditsReplaySource.
*
* Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsEditsReplaySourceImpl
extends BaseSourceImpl implements MetricsEditsReplaySource {
private static final Log LOG = LogFactory.getLog(MetricsEditsReplaySourceImpl.class.getName());
private MetricMutableHistogram replayTimeHisto;
private MetricMutableHistogram replayBatchSizeHisto;
private MetricMutableHistogram replayDataSizeHisto;
public MetricsEditsReplaySourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
public MetricsEditsReplaySourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
}
@Override
public void init() {
super.init();
replayTimeHisto = metricsRegistry.newHistogram(REPLAY_TIME_NAME, REPLAY_TIME_DESC);
replayBatchSizeHisto =
metricsRegistry.newHistogram(REPLAY_BATCH_SIZE_NAME, REPLAY_BATCH_SIZE_DESC);
replayDataSizeHisto =
metricsRegistry.newHistogram(REPLAY_DATA_SIZE_NAME, REPLAY_DATA_SIZE_DESC);
}
@Override
public void updateReplayTime(long time) {
replayTimeHisto.add(time);
}
@Override
public void updateReplayBatchSize(long size) {
replayBatchSizeHisto.add(size);
}
@Override
public void updateReplayDataSize(long size) {
replayDataSizeHisto.add(size);
}
}

View File

@ -1,82 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver.wal;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
/**
* Class that transitions metrics from HLog's MetricsWAL into the metrics subsystem.
*
* Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsWALSourceImpl extends BaseSourceImpl implements MetricsWALSource {
private final MetricHistogram appendSizeHisto;
private final MetricHistogram appendTimeHisto;
private final MetricMutableCounterLong appendCount;
private final MetricMutableCounterLong slowAppendCount;
private final MetricHistogram syncTimeHisto;
public MetricsWALSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
public MetricsWALSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
//Create and store the metrics that will be used.
appendTimeHisto = this.getMetricsRegistry().newHistogram(APPEND_TIME, APPEND_TIME_DESC);
appendSizeHisto = this.getMetricsRegistry().newHistogram(APPEND_SIZE, APPEND_SIZE_DESC);
appendCount = this.getMetricsRegistry().newCounter(APPEND_COUNT, APPEND_COUNT_DESC, 0L);
slowAppendCount = this.getMetricsRegistry().newCounter(SLOW_APPEND_COUNT, SLOW_APPEND_COUNT_DESC, 0L);
syncTimeHisto = this.getMetricsRegistry().newHistogram(SYNC_TIME, SYNC_TIME_DESC);
}
@Override
public void incrementAppendSize(long size) {
appendSizeHisto.add(size);
}
@Override
public void incrementAppendTime(long time) {
appendTimeHisto.add(time);
}
@Override
public void incrementAppendCount() {
appendCount.incr();
}
@Override
public void incrementSlowAppendCount() {
slowAppendCount.incr();
}
@Override
public void incrementSyncTime(long time) {
syncTimeHisto.add(time);
}
}

View File

@ -1,42 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.replication.regionserver;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
/**
* Hadoop1 implementation of MetricsReplicationSource. This provides access to metrics gauges and
* counters.
*
* Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsReplicationSourceImpl extends BaseSourceImpl implements
MetricsReplicationSource {
public MetricsReplicationSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
MetricsReplicationSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
}
}

View File

@ -1,113 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rest;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
/**
* Hadoop One implementation of a metrics2 source that will export metrics from the REST server to
* the Hadoop metrics2 subsystem.
*
* Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource {
private MetricMutableCounterLong request;
private MetricMutableCounterLong sucGet;
private MetricMutableCounterLong sucPut;
private MetricMutableCounterLong sucDel;
private MetricMutableCounterLong sucScan;
private MetricMutableCounterLong fGet;
private MetricMutableCounterLong fPut;
private MetricMutableCounterLong fDel;
private MetricMutableCounterLong fScan;
public MetricsRESTSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT);
}
public MetricsRESTSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
}
@Override
public void init() {
super.init();
request = getMetricsRegistry().getLongCounter(REQUEST_KEY, 0L);
sucGet = getMetricsRegistry().getLongCounter(SUCCESSFUL_GET_KEY, 0L);
sucPut = getMetricsRegistry().getLongCounter(SUCCESSFUL_PUT_KEY, 0L);
sucDel = getMetricsRegistry().getLongCounter(SUCCESSFUL_DELETE_KEY, 0L);
sucScan = getMetricsRegistry().getLongCounter(SUCCESSFUL_SCAN_KEY, 0L);
fGet = getMetricsRegistry().getLongCounter(FAILED_GET_KEY, 0L);
fPut = getMetricsRegistry().getLongCounter(FAILED_PUT_KEY, 0L);
fDel = getMetricsRegistry().getLongCounter(FAILED_DELETE_KEY, 0L);
fScan = getMetricsRegistry().getLongCounter(FAILED_SCAN_KEY, 0L);
}
@Override
public void incrementRequests(int inc) {
request.incr(inc);
}
@Override
public void incrementSucessfulGetRequests(int inc) {
sucGet.incr(inc);
}
@Override
public void incrementSucessfulPutRequests(int inc) {
sucPut.incr(inc);
}
@Override
public void incrementSucessfulDeleteRequests(int inc) {
sucDel.incr(inc);
}
@Override
public void incrementFailedGetRequests(int inc) {
fGet.incr(inc);
}
@Override
public void incrementFailedPutRequests(int inc) {
fPut.incr(inc);
}
@Override
public void incrementFailedDeleteRequests(int inc) {
fDel.incr(inc);
}
@Override
public void incrementSucessfulScanRequests(int inc) {
sucScan.incr(inc);
}
@Override
public void incrementFailedScanRequests(int inc) {
fScan.incr(inc);
}
}

View File

@ -1,52 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.thrift;
/**
* Class used to create metrics sources for Thrift and Thrift2 servers in hadoop 1's compat
* library.
*/
public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServerSourceFactory {
/**
* A singleton used to make sure that only one thrift metrics source per server type is ever
* created.
*/
private static enum FactoryStorage {
INSTANCE;
MetricsThriftServerSourceImpl thriftOne = new MetricsThriftServerSourceImpl(METRICS_NAME,
METRICS_DESCRIPTION,
THRIFT_ONE_METRICS_CONTEXT,
THRIFT_ONE_JMX_CONTEXT);
MetricsThriftServerSourceImpl thriftTwo = new MetricsThriftServerSourceImpl(METRICS_NAME,
METRICS_DESCRIPTION,
THRIFT_TWO_METRICS_CONTEXT,
THRIFT_TWO_JMX_CONTEXT);
}
@Override
public MetricsThriftServerSource createThriftOneSource() {
return FactoryStorage.INSTANCE.thriftOne;
}
@Override
public MetricsThriftServerSource createThriftTwoSource() {
return FactoryStorage.INSTANCE.thriftTwo;
}
}
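A minimal usage sketch of the enum-holder singleton above: however many factory instances are created, each server type maps to a single metrics source, because the JVM initializes FactoryStorage.INSTANCE exactly once. The class name below is hypothetical.

import org.apache.hadoop.hbase.thrift.MetricsThriftServerSource;
import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl;

public class ThriftSourceSingletonSketch {
  public static void main(String[] args) {
    // Two distinct factories still hand out the same underlying source.
    MetricsThriftServerSource a = new MetricsThriftServerSourceFactoryImpl().createThriftOneSource();
    MetricsThriftServerSource b = new MetricsThriftServerSourceFactoryImpl().createThriftOneSource();
    System.out.println(a == b); // true: one source per server type per JVM
  }
}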

View File

@ -1,99 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.thrift;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
import org.apache.hadoop.metrics2.lib.MetricMutableStat;
/**
* Hadoop 1 version of {@link MetricsThriftServerSource}.
*
* Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsThriftServerSourceImpl extends BaseSourceImpl implements
MetricsThriftServerSource {
private MetricMutableHistogram batchGetStat;
private MetricMutableHistogram batchMutateStat;
private MetricMutableHistogram queueTimeStat;
private MetricMutableHistogram thriftCallStat;
private MetricMutableHistogram thriftSlowCallStat;
private MetricMutableGaugeLong callQueueLenGauge;
public MetricsThriftServerSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
}
@Override
public void init() {
super.init();
batchGetStat = getMetricsRegistry().newHistogram(BATCH_GET_KEY);
batchMutateStat = getMetricsRegistry().newHistogram(BATCH_MUTATE_KEY);
queueTimeStat = getMetricsRegistry().newHistogram(TIME_IN_QUEUE_KEY);
thriftCallStat = getMetricsRegistry().newHistogram(THRIFT_CALL_KEY);
thriftSlowCallStat = getMetricsRegistry().newHistogram(SLOW_THRIFT_CALL_KEY);
callQueueLenGauge = getMetricsRegistry().getLongGauge(CALL_QUEUE_LEN_KEY, 0);
}
@Override
public void incTimeInQueue(long time) {
queueTimeStat.add(time);
}
@Override
public void setCallQueueLen(int len) {
callQueueLenGauge.set(len);
}
@Override
public void incNumRowKeysInBatchGet(int diff) {
batchGetStat.add(diff);
}
@Override
public void incNumRowKeysInBatchMutate(int diff) {
batchMutateStat.add(diff);
}
@Override
public void incMethodTime(String name, long time) {
MetricMutableHistogram s = getMetricsRegistry().getHistogram(name);
s.add(time);
}
@Override
public void incCall(long time) {
thriftCallStat.add(time);
}
@Override
public void incSlowCall(long time) {
thriftSlowCallStat.add(time);
}
}

View File

@ -1,78 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsExecutor;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsExecutorImpl;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
/**
* JMX caches the beans that have been exported; even after the values are removed from hadoop's
* metrics system the keys and old values will still remain. This class stops and restarts the
* Hadoop metrics system, forcing JMX to clear the cache of exported metrics.
*
* This class needs to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used
* are package private.
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
value="LI_LAZY_INIT_STATIC",
justification="Yeah, its weird but its what we want")
public class JmxCacheBuster {
private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class);
private static Object lock = new Object();
private static ScheduledFuture fut = null;
private static MetricsExecutor executor = new MetricsExecutorImpl();
/**
* For JMX to forget about all previously exported metrics.
*/
public static void clearJmxCache() {
//If there are more than 100 ms before the scheduled run fires, additional calls can be merged into it.
if (fut != null && !fut.isDone() && fut.getDelay(TimeUnit.MILLISECONDS) > 100) return;
synchronized (lock) {
fut = executor.getExecutor().schedule(new JmxCacheBusterRunnable(), 5, TimeUnit.SECONDS);
}
}
static class JmxCacheBusterRunnable implements Runnable {
@Override
public void run() {
LOG.trace("Clearing JMX mbean cache.");
// This is pretty extreme but it's the best way that
// I could find to get metrics to be removed.
try {
DefaultMetricsSystem.INSTANCE.stop();
DefaultMetricsSystem.INSTANCE.start();
} catch (Exception exception ) {
LOG.debug("error clearing the jmx it appears the metrics system hasn't been started", exception);
}
}
}
}
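A minimal sketch of how callers are expected to use the buster (the record name and metric key below are hypothetical): drop the metric first, then request a cache bust; calls landing inside the scheduling window coalesce into a single metrics-system restart.

import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;

public class JmxCacheBusterSketch {
  public static void main(String[] args) {
    DynamicMetricsRegistry registry = new DynamicMetricsRegistry("ExampleRecord");
    registry.getLongCounter("exampleCount", 0L).incr();
    // Remove the metric, then schedule a stop/start of the metrics system
    // so JMX forgets the now-stale bean; repeated calls merge into one restart.
    registry.removeMetric("exampleCount");
    JmxCacheBuster.clearJmxCache();
  }
}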

View File

@ -1,444 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.lib;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsTag;
/**
* An optional metrics registry class for creating and maintaining a
* collection of MetricMutables, making writing metrics sources easier.
* NOTE: this is a copy of org.apache.hadoop.metrics2.lib.MetricsRegistry with one added
* feature: metrics can be removed. When HADOOP-8313 is fixed, usages of this class
* should be substituted with org.apache.hadoop.metrics2.lib.MetricsRegistry.
* This implementation also provides handy methods for creating metrics dynamically.
* Another difference is that the metricsMap and tagsMap are implemented with
* concurrent maps, since we allow dynamic metric additions/removals.
*/
public class DynamicMetricsRegistry {
private final Log LOG = LogFactory.getLog(this.getClass());
/** key for the context tag */
public static final String CONTEXT_KEY = "context";
/** description for the context tag */
public static final String CONTEXT_DESC = "Metrics context";
private final ConcurrentMap<String, MetricMutable> metricsMap =
new ConcurrentHashMap<String, MetricMutable>();
private final ConcurrentMap<String, MetricsTag> tagsMap =
new ConcurrentHashMap<String, MetricsTag>();
private final String name;
private final MetricMutableFactory mf;
/**
* Construct the registry with a record name
* @param name of the record of the metrics
*/
public DynamicMetricsRegistry(String name) {
this.name = name;
this.mf = new MetricMutableFactory();
}
/**
* Construct the registry with a name and a metric factory
* @param name of the record of the metrics
* @param factory for creating new mutable metrics
*/
public DynamicMetricsRegistry(String name, MetricMutableFactory factory) {
this.name = name;
this.mf = factory;
}
/**
* @return the name of the metrics registry
*/
public String name() {
return name;
}
/**
* Get a metric by name
* @param name of the metric
* @return the metric object
*/
public MetricMutable get(String name) {
return metricsMap.get(name);
}
/**
* Create a mutable integer counter
* @param name of the metric
* @param description of the metric
* @param initValue of the metric
* @return a new counter object
*/
public MetricMutableCounterInt
newCounter(String name, String description, int initValue) {
MetricMutableCounterInt ret = mf.newCounter(name, description, initValue);
return addNewMetricIfAbsent(name, ret, MetricMutableCounterInt.class);
}
/**
* Create a mutable long integer counter
* @param name of the metric
* @param description of the metric
* @param initValue of the metric
* @return a new counter object
*/
public MetricMutableCounterLong
newCounter(String name, String description, long initValue) {
MetricMutableCounterLong ret = mf.newCounter(name, description, initValue);
return addNewMetricIfAbsent(name, ret, MetricMutableCounterLong.class);
}
/**
* Create a mutable integer gauge
* @param name of the metric
* @param description of the metric
* @param initValue of the metric
* @return a new gauge object
*/
public MetricMutableGaugeInt
newGauge(String name, String description, int initValue) {
MetricMutableGaugeInt ret = mf.newGauge(name, description, initValue);
return addNewMetricIfAbsent(name, ret, MetricMutableGaugeInt.class);
}
/**
* Create a mutable long integer gauge
* @param name of the metric
* @param description of the metric
* @param initValue of the metric
* @return a new gauge object
*/
public MetricMutableGaugeLong
newGauge(String name, String description, long initValue) {
MetricMutableGaugeLong ret = mf.newGauge(name, description, initValue);
return addNewMetricIfAbsent(name, ret, MetricMutableGaugeLong.class);
}
/**
* Create a mutable metric with stats
* @param name of the metric
* @param description of the metric
* @param sampleName of the metric (e.g., "ops")
* @param valueName of the metric (e.g., "time" or "latency")
* @param extended produce extended stat (stdev, min/max etc.) if true.
* @return a new metric object
*/
public MetricMutableStat newStat(String name, String description,
String sampleName, String valueName,
boolean extended) {
MetricMutableStat ret =
mf.newStat(name, description, sampleName, valueName, extended);
return addNewMetricIfAbsent(name, ret, MetricMutableStat.class);
}
/**
* Create a mutable metric with stats
* @param name of the metric
* @param description of the metric
* @param sampleName of the metric (e.g., "ops")
* @param valueName of the metric (e.g., "time" or "latency")
* @return a new metric object
*/
public MetricMutableStat newStat(String name, String description,
String sampleName, String valueName) {
return newStat(name, description, sampleName, valueName, false);
}
/**
* Create a mutable metric with stats using the name only
* @param name of the metric
* @return a new metric object
*/
public MetricMutableStat newStat(String name) {
return newStat(name, "", "ops", "time", false);
}
/**
* Create a new histogram.
* @param name Name of the histogram.
* @return A new MutableHistogram
*/
public MetricMutableHistogram newHistogram(String name) {
return newHistogram(name, "");
}
/**
* Create a new histogram.
* @param name The name of the histogram
* @param desc The description of the data in the histogram.
* @return A new MutableHistogram
*/
public MetricMutableHistogram newHistogram(String name, String desc) {
MetricMutableHistogram histo = new MetricMutableHistogram(name, desc);
return addNewMetricIfAbsent(name, histo, MetricMutableHistogram.class);
}
/**
* Create a new MutableQuantile(A more accurate histogram).
* @param name The name of the histogram
* @return a new MutableQuantile
*/
public MetricMutableQuantiles newQuantile(String name) {
return newQuantile(name, "");
}
/**
* Create a new MutableQuantile(A more accurate histogram).
* @param name The name of the histogram
* @param desc Description of the data.
* @return a new MutableQuantile
*/
public MetricMutableQuantiles newQuantile(String name, String desc) {
MetricMutableQuantiles histo = new MetricMutableQuantiles(name, desc);
return addNewMetricIfAbsent(name, histo, MetricMutableQuantiles.class);
}
/**
* Set the metrics context tag
* @param name of the context
* @return the registry itself as a convenience
*/
public DynamicMetricsRegistry setContext(String name) {
return tag(CONTEXT_KEY, CONTEXT_DESC, name);
}
/**
* Add a tag to the metrics
* @param name of the tag
* @param description of the tag
* @param value of the tag
* @return the registry (for keep adding tags)
*/
public DynamicMetricsRegistry tag(String name, String description, String value) {
return tag(name, description, value, false);
}
/**
* Add a tag to the metrics
* @param name of the tag
* @param description of the tag
* @param value of the tag
* @param override existing tag if true
* @return the registry (for keep adding tags)
*/
public DynamicMetricsRegistry tag(String name, String description, String value,
boolean override) {
MetricsTag tag = new MetricsTag(name, description, value);
if (!override) {
MetricsTag existing = tagsMap.putIfAbsent(name, tag);
if (existing != null) {
throw new MetricsException("Tag "+ name +" already exists!");
}
return this;
}
tagsMap.put(name, tag);
return this;
}
/**
* Get the tags
* @return the tags set
*/
public Set<Entry<String, MetricsTag>> tags() {
return tagsMap.entrySet();
}
/**
* Get the metrics
* @return the metrics set
*/
public Set<Entry<String, MetricMutable>> metrics() {
return metricsMap.entrySet();
}
/**
* Sample all the mutable metrics and put the snapshot in the builder
* @param builder to contain the metrics snapshot
* @param all get all the metrics even if the values are not changed.
*/
public void snapshot(MetricsRecordBuilder builder, boolean all) {
for (Entry<String, MetricsTag> entry : tags()) {
builder.add(entry.getValue());
}
for (Entry<String, MetricMutable> entry : metrics()) {
entry.getValue().snapshot(builder, all);
}
}
/**
* Removes metric by name
* @param name name of the metric to remove
*/
public void removeMetric(String name) {
metricsMap.remove(name);
}
/**
* Get a MetricMutableGaugeLong from the storage. If it is not there
* atomically put it.
*
* @param gaugeName name of the gauge to create or get.
* @param potentialStartingValue value of the new counter if we have to create it.
* @return a metric object
*/
public MetricMutableGaugeLong getLongGauge(String gaugeName,
long potentialStartingValue) {
//Try and get the gauge.
MetricMutable metric = metricsMap.get(gaugeName);
//If it's not there then try and put a new one in the storage.
if (metric == null) {
//Create the potential new gauge.
MetricMutableGaugeLong newGauge = mf.newGauge(gaugeName, "",
potentialStartingValue);
// Try and put the gauge in. This is atomic.
metric = metricsMap.putIfAbsent(gaugeName, newGauge);
//If the value we get back is null then the put was successful and we will
// return the new gauge. Otherwise metric holds whatever was registered
// before our put completed.
if (metric == null) {
return newGauge;
}
}
if (!(metric instanceof MetricMutableGaugeLong)) {
throw new MetricsException("Metric already exists in registry for metric name: " +
name + " and not of type MetricMutableGaugeLong");
}
return (MetricMutableGaugeLong) metric;
}
/**
* Get a MetricMutableCounterLong from the storage. If it is not there
* atomically put it.
*
* @param counterName Name of the counter to get
* @param potentialStartingValue starting value if we have to create a new counter
* @return a metric object
*/
public MetricMutableCounterLong getLongCounter(String counterName,
long potentialStartingValue) {
//See getLongGauge for description on how this works.
MetricMutable counter = metricsMap.get(counterName);
if (counter == null) {
MetricMutableCounterLong newCounter =
mf.newCounter(counterName, "", potentialStartingValue);
counter = metricsMap.putIfAbsent(counterName, newCounter);
if (counter == null) {
return newCounter;
}
}
if (!(counter instanceof MetricMutableCounterLong)) {
throw new MetricsException("Metric already exists in registry for metric name: " +
name + "and not of type MetricMutableCounterLong");
}
return (MetricMutableCounterLong) counter;
}
public MetricMutableHistogram getHistogram(String histoName) {
//See getLongGauge for description on how this works.
MetricMutable histo = metricsMap.get(histoName);
if (histo == null) {
MetricMutableHistogram newHisto =
new MetricMutableHistogram(histoName, "");
histo = metricsMap.putIfAbsent(histoName, newHisto);
if (histo == null) {
return newHisto;
}
}
if (!(histo instanceof MetricMutableHistogram)) {
throw new MetricsException("Metric already exists in registry for metric name: " +
name + "and not of type MetricMutableHistogram");
}
return (MetricMutableHistogram) histo;
}
public MetricMutableQuantiles getQuantile(String histoName) {
//See getLongGauge for description on how this works.
MetricMutable histo = metricsMap.get(histoName);
if (histo == null) {
MetricMutableQuantiles newHisto =
new MetricMutableQuantiles(histoName, "");
histo = metricsMap.putIfAbsent(histoName, newHisto);
if (histo == null) {
return newHisto;
}
}
if (!(histo instanceof MetricMutableQuantiles)) {
throw new MetricsException("Metric already exists in registry for metric name: " +
name + "and not of type MetricMutableQuantiles");
}
return (MetricMutableQuantiles) histo;
}
private<T extends MetricMutable> T
addNewMetricIfAbsent(String name,
T ret,
Class<T> metricClass) {
//If the value we get back is null then the put was successful and we will
// return that. Otherwise metric should contain the thing that was in
// before the put could be completed.
MetricMutable metric = metricsMap.putIfAbsent(name, ret);
if (metric == null) {
return ret;
}
return returnExistingWithCast(metric, metricClass, name);
}
private<T> T returnExistingWithCast(MetricMutable metric,
Class<T> metricClass, String name) {
if (!metricClass.isAssignableFrom(metric.getClass())) {
throw new MetricsException("Metric already exists in registry for metric name: " +
name + " and not of type " + metricClass);
}
return (T) metric;
}
public void clearMetrics() {
metricsMap.clear();
}
}
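A minimal sketch of the get-or-create pattern the registry implements with putIfAbsent: racing callers all receive the same counter instance, and removal is the feature this copy adds over Hadoop's MetricsRegistry. The record and metric names are hypothetical.

import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;

public class DynamicRegistrySketch {
  public static void main(String[] args) {
    DynamicMetricsRegistry registry = new DynamicMetricsRegistry("ExampleRecord");
    MetricMutableCounterLong c1 = registry.getLongCounter("requests", 0L);
    MetricMutableCounterLong c2 = registry.getLongCounter("requests", 0L);
    c1.incr();
    c2.incr();                         // same instance as c1, so the count is 2
    System.out.println(c1 == c2);      // true: putIfAbsent kept one counter
    registry.removeMetric("requests"); // the dynamic removal this copy adds
  }
}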

View File

@ -1,121 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.lib;
import com.yammer.metrics.stats.ExponentiallyDecayingSample;
import com.yammer.metrics.stats.Sample;
import com.yammer.metrics.stats.Snapshot;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import java.util.concurrent.atomic.AtomicLong;
/**
* A histogram implementation that runs in constant space and exports to Hadoop's metrics2 system.
*/
public class MetricMutableHistogram extends MetricMutable implements MetricHistogram {
private static final int DEFAULT_SAMPLE_SIZE = 2046;
// the bias towards sampling from more recent data.
// Per Cormode et al. an alpha of 0.015 strongly biases to the last 5 minutes
private static final double DEFAULT_ALPHA = 0.015;
private final Sample sample;
private final AtomicLong min;
private final AtomicLong max;
private final AtomicLong sum;
private final AtomicLong count;
public MetricMutableHistogram(String name, String description) {
super(name, description);
sample = new ExponentiallyDecayingSample(DEFAULT_SAMPLE_SIZE, DEFAULT_ALPHA);
count = new AtomicLong();
min = new AtomicLong(Long.MAX_VALUE);
max = new AtomicLong(Long.MIN_VALUE);
sum = new AtomicLong();
}
public void add(final long val) {
setChanged();
count.incrementAndGet();
sample.update(val);
setMax(val);
setMin(val);
sum.getAndAdd(val);
}
private void setMax(final long potentialMax) {
boolean done = false;
while (!done) {
final long currentMax = max.get();
done = currentMax >= potentialMax
|| max.compareAndSet(currentMax, potentialMax);
}
}
private void setMin(long potentialMin) {
boolean done = false;
while (!done) {
final long currentMin = min.get();
done = currentMin <= potentialMin
|| min.compareAndSet(currentMin, potentialMin);
}
}
public long getMax() {
if (count.get() > 0) {
return max.get();
}
return 0L;
}
public long getMin() {
if (count.get() > 0) {
return min.get();
}
return 0L;
}
public double getMean() {
long cCount = count.get();
if (cCount > 0) {
return sum.get() / (double) cCount;
}
return 0.0;
}
@Override
public void snapshot(MetricsRecordBuilder metricsRecordBuilder, boolean all) {
if (all || changed()) {
clearChanged();
final Snapshot s = sample.getSnapshot();
metricsRecordBuilder.addCounter(name + NUM_OPS_METRIC_NAME, "", count.get());
metricsRecordBuilder.addGauge(name + MIN_METRIC_NAME, "", getMin());
metricsRecordBuilder.addGauge(name + MAX_METRIC_NAME, "", getMax());
metricsRecordBuilder.addGauge(name + MEAN_METRIC_NAME, "", getMean());
metricsRecordBuilder.addGauge(name + MEDIAN_METRIC_NAME, "", s.getMedian());
metricsRecordBuilder.addGauge(name + SEVENTY_FIFTH_PERCENTILE_METRIC_NAME, "", s.get75thPercentile());
metricsRecordBuilder.addGauge(name + NINETY_FIFTH_PERCENTILE_METRIC_NAME, "", s.get95thPercentile());
metricsRecordBuilder.addGauge(name + NINETY_NINETH_PERCENTILE_METRIC_NAME, "", s.get99thPercentile());
}
}
}
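A minimal usage sketch: feed values in and read back the constant-space summary the histogram keeps (min, max, and mean are exact; percentiles come from the decaying sample at snapshot time). The metric name is hypothetical.

import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;

public class HistogramSketch {
  public static void main(String[] args) {
    MetricMutableHistogram h = new MetricMutableHistogram("getLatency", "sketch metric");
    for (long latencyMs : new long[] {3, 7, 12, 4, 250}) {
      h.add(latencyMs);
    }
    System.out.println(h.getMin());  // 3
    System.out.println(h.getMax());  // 250
    System.out.println(h.getMean()); // 55.2
  }
}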

View File

@ -1,138 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.lib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsExecutor;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.util.MetricQuantile;
import org.apache.hadoop.metrics2.util.MetricSampleQuantiles;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
* Watches a stream of long values, maintaining online estimates of specific quantiles with provably
* low error bounds. This is particularly useful for accurate high-percentile (e.g. 95th, 99th)
* latency metrics.
*/
@InterfaceAudience.Private
public class MetricMutableQuantiles extends MetricMutable implements MetricHistogram {
static final MetricQuantile[] quantiles = {new MetricQuantile(0.50, 0.050),
new MetricQuantile(0.75, 0.025), new MetricQuantile(0.90, 0.010),
new MetricQuantile(0.95, 0.005), new MetricQuantile(0.99, 0.001)};
static final String[] quantilesSuffix = {"_Median",
"_75th_percentile", "_90th_percentile",
"_95th_percentile", "_99th_percentile"};
private final int interval;
private MetricSampleQuantiles estimator;
private long previousCount = 0;
private MetricsExecutor executor;
protected Map<MetricQuantile, Long> previousSnapshot = null;
/**
* Instantiates a new {@link MetricMutableQuantiles} for a metric that rolls itself over on the
* specified time interval.
*
* @param name of the metric
* @param description long-form textual description of the metric
* @param sampleName type of items in the stream (e.g., "Ops")
* @param valueName type of the values
* @param interval rollover interval (in seconds) of the estimator
*/
public MetricMutableQuantiles(String name, String description, String sampleName,
String valueName, int interval) {
super(name, description);
estimator = new MetricSampleQuantiles(quantiles);
executor = new MetricsExecutorImpl();
this.interval = interval;
executor.getExecutor().scheduleAtFixedRate(new RolloverSample(this),
interval,
interval,
TimeUnit.SECONDS);
}
public MetricMutableQuantiles(String name, String description) {
this(name, description, "Ops", "", 60);
}
@Override
public synchronized void snapshot(MetricsRecordBuilder builder, boolean all) {
if (all || changed()) {
builder.addCounter(name + "NumOps", description, previousCount);
for (int i = 0; i < quantiles.length; i++) {
long newValue = 0;
// If snapshot is null, we failed to update since the window was empty
if (previousSnapshot != null) {
newValue = previousSnapshot.get(quantiles[i]);
}
builder.addGauge(name + quantilesSuffix[i], description, newValue);
}
if (changed()) {
clearChanged();
}
}
}
public synchronized void add(long value) {
estimator.insert(value);
}
public int getInterval() {
return interval;
}
/** Runnable used to periodically roll over the internal {@link org.apache.hadoop.metrics2.util.MetricSampleQuantiles} every interval. */
private static class RolloverSample implements Runnable {
MetricMutableQuantiles parent;
public RolloverSample(MetricMutableQuantiles parent) {
this.parent = parent;
}
@Override
public void run() {
synchronized (parent) {
try {
parent.previousCount = parent.estimator.getCount();
parent.previousSnapshot = parent.estimator.snapshot();
} catch (IOException e) {
// Couldn't get a new snapshot because the window was empty
parent.previousCount = 0;
parent.previousSnapshot = null;
}
parent.estimator.clear();
}
parent.setChanged();
}
}
}
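A minimal sketch of the rollover behaviour described above, using a shortened hypothetical interval: values inserted during the current window only become visible once the RolloverSample runnable has published them into previousSnapshot.

import org.apache.hadoop.metrics2.lib.MetricMutableQuantiles;

public class QuantilesRolloverSketch {
  public static void main(String[] args) throws InterruptedException {
    // 5-second window instead of the default 60, purely for demonstration.
    MetricMutableQuantiles q =
        new MetricMutableQuantiles("scanLatency", "sketch metric", "Ops", "", 5);
    for (long v = 1; v <= 1000; v++) {
      q.add(v);
    }
    // Snapshots taken before the first rollover report zeros; after roughly
    // 5 seconds the window is swapped into previousSnapshot and reported.
    Thread.sleep(6000);
  }
}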

View File

@ -1,66 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.lib;
import org.apache.hadoop.metrics2.MetricsExecutor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Class to handle the {@link ScheduledExecutorService} used by {@link MetricMutableQuantiles}.
*/
public class MetricsExecutorImpl implements MetricsExecutor {
@Override
public ScheduledExecutorService getExecutor() {
return ExecutorSingleton.INSTANCE.scheduler;
}
@Override
public void stop() {
if (!getExecutor().isShutdown()) {
getExecutor().shutdown();
}
}
private enum ExecutorSingleton {
INSTANCE;
private final ScheduledExecutorService scheduler = new ScheduledThreadPoolExecutor(1, new ThreadPoolExecutorThreadFactory("HBase-Metrics2-"));
}
private static class ThreadPoolExecutorThreadFactory implements ThreadFactory {
private final String name;
private final AtomicInteger threadNumber = new AtomicInteger(1);
private ThreadPoolExecutorThreadFactory(String name) {
this.name = name;
}
@Override
public Thread newThread(Runnable runnable) {
Thread t = new Thread(runnable, name + threadNumber.getAndIncrement());
t.setDaemon(true);
return t;
}
}
}

View File

@ -1,60 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.util;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Specifies a quantile (with error bounds) to be watched by a
* {@link MetricSampleQuantiles} object.
*/
@InterfaceAudience.Private
public class MetricQuantile {
public final double quantile;
public final double error;
public MetricQuantile(double quantile, double error) {
this.quantile = quantile;
this.error = error;
}
@Override
public boolean equals(Object aThat) {
if (this == aThat) {
return true;
}
if (!(aThat instanceof MetricQuantile)) {
return false;
}
MetricQuantile that = (MetricQuantile) aThat;
long qbits = Double.doubleToLongBits(quantile);
long ebits = Double.doubleToLongBits(error);
return qbits == Double.doubleToLongBits(that.quantile)
&& ebits == Double.doubleToLongBits(that.error);
}
@Override
public int hashCode() {
return (int) (Double.doubleToLongBits(quantile) ^ Double
.doubleToLongBits(error));
}
}
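The bit-pattern comparison in equals() above is the standard way to compare doubles for identity semantics. A standalone sketch of why == alone would not do:

public class DoubleBitsSketch {
  public static void main(String[] args) {
    // NaN is never == to itself, but has one canonical bit pattern:
    System.out.println(Double.NaN == Double.NaN);                 // false
    System.out.println(Double.doubleToLongBits(Double.NaN)
        == Double.doubleToLongBits(Double.NaN));                  // true
    // +0.0 and -0.0 are == but carry different bit patterns:
    System.out.println(0.0 == -0.0);                              // true
    System.out.println(Double.doubleToLongBits(0.0)
        == Double.doubleToLongBits(-0.0));                        // false
  }
}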

View File

@ -1,307 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.util;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.ListIterator;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm
* for streaming calculation of targeted high-percentile epsilon-approximate
* quantiles.
*
* This is a generalization of the earlier work by Greenwald and Khanna (GK),
* which essentially allows different error bounds on the targeted quantiles,
* which allows for far more efficient calculation of high-percentiles.
*
* See: Cormode, Korn, Muthukrishnan, and Srivastava
* "Effective Computation of Biased Quantiles over Data Streams" in ICDE 2005
*
* Greenwald and Khanna,
* "Space-efficient online computation of quantile summaries" in SIGMOD 2001
*
*/
@InterfaceAudience.Private
public class MetricSampleQuantiles {
/**
* Total number of items in stream
*/
private long count = 0;
/**
* Current list of sampled items, maintained in sorted order with error bounds
*/
private LinkedList<SampleItem> samples;
/**
* Buffers incoming items to be inserted in batch. Items are inserted into
* the buffer linearly. When the buffer fills, it is flushed into the samples
* array in its entirety.
*/
private long[] buffer = new long[500];
private int bufferCount = 0;
/**
* Array of Quantiles that we care about, along with desired error.
*/
private final MetricQuantile quantiles[];
public MetricSampleQuantiles(MetricQuantile[] quantiles) {
this.quantiles = Arrays.copyOf(quantiles, quantiles.length);
this.samples = new LinkedList<SampleItem>();
}
/**
* Specifies the allowable error for this rank, depending on which quantiles
* are being targeted.
*
* This is the f(r_i, n) function from the CKMS paper. It's basically how wide
* the range of this rank can be.
*
* @param rank
* the index in the list of samples
*/
private double allowableError(int rank) {
int size = samples.size();
double minError = size + 1;
for (MetricQuantile q : quantiles) {
double error;
if (rank <= q.quantile * size) {
error = (2.0 * q.error * (size - rank)) / (1.0 - q.quantile);
} else {
error = (2.0 * q.error * rank) / q.quantile;
}
if (error < minError) {
minError = error;
}
}
return minError;
}
/**
* Add a new value from the stream.
*
* @param v the value to insert
*/
synchronized public void insert(long v) {
buffer[bufferCount] = v;
bufferCount++;
count++;
if (bufferCount == buffer.length) {
insertBatch();
compress();
}
}
/**
* Merges items from buffer into the samples array in one pass.
* This is more efficient than doing an insert on every item.
*/
private void insertBatch() {
if (bufferCount == 0) {
return;
}
Arrays.sort(buffer, 0, bufferCount);
// Base case: no samples
int start = 0;
if (samples.size() == 0) {
SampleItem newItem = new SampleItem(buffer[0], 1, 0);
samples.add(newItem);
start++;
}
ListIterator<SampleItem> it = samples.listIterator();
SampleItem item = it.next();
for (int i = start; i < bufferCount; i++) {
long v = buffer[i];
while (it.nextIndex() < samples.size() && item.value < v) {
item = it.next();
}
// If we found that bigger item, back up so we insert ourselves before it
if (item.value > v) {
it.previous();
}
// We use different indexes for the edge comparisons, because of the above
// if statement that adjusts the iterator
int delta;
if (it.previousIndex() == 0 || it.nextIndex() == samples.size()) {
delta = 0;
} else {
delta = ((int) Math.floor(allowableError(it.nextIndex()))) - 1;
}
SampleItem newItem = new SampleItem(v, 1, delta);
it.add(newItem);
item = newItem;
}
bufferCount = 0;
}
/**
* Try to remove extraneous items from the set of sampled items. This checks
* if an item is unnecessary based on the desired error bounds, and merges it
* with the adjacent item if it is.
*/
private void compress() {
if (samples.size() < 2) {
return;
}
ListIterator<SampleItem> it = samples.listIterator();
SampleItem prev = null;
SampleItem next = it.next();
while (it.hasNext()) {
prev = next;
next = it.next();
if (prev.g + next.g + next.delta <= allowableError(it.previousIndex())) {
next.g += prev.g;
// Remove prev. it.remove() kills the last thing returned.
it.previous();
it.previous();
it.remove();
// it.next() is now equal to next, skip it back forward again
it.next();
}
}
}
/**
* Get the estimated value at the specified quantile.
*
* @param quantile Queried quantile, e.g. 0.50 or 0.99.
* @return Estimated value at that quantile.
*/
private long query(double quantile) throws IOException {
if (samples.size() == 0) {
throw new IOException("No samples present");
}
int rankMin = 0;
int desired = (int) (quantile * count);
for (int i = 1; i < samples.size(); i++) {
SampleItem prev = samples.get(i - 1);
SampleItem cur = samples.get(i);
rankMin += prev.g;
if (rankMin + cur.g + cur.delta > desired + (allowableError(i) / 2)) {
return prev.value;
}
}
// edge case of wanting max value
return samples.get(samples.size() - 1).value;
}
/**
* Get a snapshot of the current values of all the tracked quantiles.
*
* @return snapshot of the tracked quantiles
* @throws IOException
* if no items have been added to the estimator
*/
synchronized public Map<MetricQuantile, Long> snapshot() throws IOException {
// flush the buffer first for best results
insertBatch();
Map<MetricQuantile, Long> values = new HashMap<MetricQuantile, Long>(quantiles.length);
for (int i = 0; i < quantiles.length; i++) {
values.put(quantiles[i], query(quantiles[i].quantile));
}
return values;
}
/**
* Returns the number of items that the estimator has processed
*
* @return count total number of items processed
*/
synchronized public long getCount() {
return count;
}
/**
* Returns the number of samples kept by the estimator
*
* @return count current number of samples
*/
synchronized public int getSampleCount() {
return samples.size();
}
/**
* Resets the estimator, clearing out all previously inserted items
*/
synchronized public void clear() {
count = 0;
bufferCount = 0;
samples.clear();
}
/**
* Describes a measured value passed to the estimator, tracking additional
* metadata required by the CKMS algorithm.
*/
private static class SampleItem {
/**
* Value of the sampled item (e.g. a measured latency value)
*/
public final long value;
/**
* Difference between the lowest possible rank of the previous item, and
* the lowest possible rank of this item.
*
* The sum of the g of all previous items yields this item's lower bound.
*/
public int g;
/**
* Difference between the item's greatest possible rank and lowest possible
* rank.
*/
public final int delta;
public SampleItem(long value, int lowerDelta, int delta) {
this.value = value;
this.g = lowerDelta;
this.delta = delta;
}
@Override
public String toString() {
return String.format("%d, %d, %d", value, g, delta);
}
}
}
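A minimal sketch of driving the estimator directly: target a quantile with its error bound, stream values in, then snapshot. With a uniform stream of 1..10000, the 95th-percentile estimate should land near 9500, within the configured error. The class name is hypothetical.

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.metrics2.util.MetricQuantile;
import org.apache.hadoop.metrics2.util.MetricSampleQuantiles;

public class CkmsSketch {
  public static void main(String[] args) throws IOException {
    MetricQuantile p95 = new MetricQuantile(0.95, 0.005);
    MetricSampleQuantiles estimator =
        new MetricSampleQuantiles(new MetricQuantile[] { p95 });
    for (long v = 1; v <= 10000; v++) {
      estimator.insert(v);
    }
    // snapshot() flushes the insert buffer before querying each quantile.
    Map<MetricQuantile, Long> snapshot = estimator.snapshot();
    System.out.println(snapshot.get(p95)); // roughly 9500, within error bounds
  }
}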

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactoryImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.master.MetricsAssignmentManagerSourceImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.master.MetricsMasterFilesystemSourceImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.master.MetricsSnapshotSourceImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.master.balancer.MetricsBalancerSourceImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.metrics.MBeanSourceImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySourceImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.regionserver.wal.MetricsWALSourceImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.metrics2.lib.MetricsExecutorImpl

View File

@ -1,43 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
/**
* Compatibility shim layer implementation for Hadoop-1.
*/
public class HadoopShimsImpl implements HadoopShims {
/**
* Returns a TaskAttemptContext instance created from the given parameters.
* @param job an instance of o.a.h.mapreduce.Job
* @param taskId an identifier for the task attempt id. Should be parsable by
*               TaskAttemptID.forName()
* @return a concrete TaskAttemptContext instance of o.a.h.mapreduce.TaskAttemptContext
*/
@Override
@SuppressWarnings("unchecked")
public <T, J> T createTestTaskAttemptContext(J job, String taskId) {
Job j = (Job)job;
return (T)new TaskAttemptContext(j.getConfiguration(), TaskAttemptID.forName(taskId));
}
}
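A minimal usage sketch for the shim; the attempt id string is hypothetical but follows the attempt_&lt;jt-start&gt;_&lt;job&gt;_m_&lt;task&gt;_&lt;attempt&gt; format that TaskAttemptID.forName() parses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HadoopShims;
import org.apache.hadoop.hbase.HadoopShimsImpl;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class HadoopShimsSketch {
  public static void main(String[] args) throws Exception {
    HadoopShims shims = new HadoopShimsImpl();
    Job job = new Job(new Configuration());
    // T is inferred from the assignment target; J from the job argument.
    TaskAttemptContext ctx =
        shims.createTestTaskAttemptContext(job, "attempt_200707121733_0003_m_000005_0");
    System.out.println(ctx.getTaskAttemptID());
  }
}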

View File

@ -1,44 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.master.MetricsMasterSource;
import org.apache.hadoop.hbase.master.MetricsMasterSourceFactory;
import org.apache.hadoop.hbase.master.MetricsMasterSourceImpl;
import org.junit.Test;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
* Test for MetricsMasterSourceImpl
*/
public class TestMetricsMasterSourceImpl {
@Test
public void testGetInstance() throws Exception {
MetricsMasterSourceFactory metricsMasterSourceFactory = CompatibilitySingletonFactory
.getInstance(MetricsMasterSourceFactory.class);
MetricsMasterSource masterSource = metricsMasterSourceFactory.create(null);
assertTrue(masterSource instanceof MetricsMasterSourceImpl);
assertSame(metricsMasterSourceFactory, CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class));
}
}
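The assertSame above depends on CompatibilitySingletonFactory handing back one instance per requested class. A simplified model of that contract, assuming ServiceLoader-backed resolution (the real factory's details are elided):

import java.util.HashMap;
import java.util.Map;
import java.util.ServiceLoader;

// Simplified model: the first getInstance(klass) resolves an implementation
// via ServiceLoader; later calls return the cached object, which is what the
// assertSame in the test above verifies.
public final class SingletonCacheSketch {
  private static final Map<Class<?>, Object> INSTANCES = new HashMap<Class<?>, Object>();

  public static synchronized <T> T getInstance(Class<T> klass) {
    T instance = klass.cast(INSTANCES.get(klass));
    if (instance == null) {
      for (T impl : ServiceLoader.load(klass)) {
        instance = impl;
        break;
      }
      if (instance == null) {
        throw new RuntimeException("No implementation found for " + klass.getName());
      }
      INSTANCES.put(klass, instance);
    }
    return instance;
  }
}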

View File

@ -1,91 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.metrics;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
/**
* Test of the default BaseSource implementation for hadoop 1
*/
public class TestBaseSourceImpl {
private static BaseSourceImpl bmsi;
@BeforeClass
public static void setUp() throws Exception {
bmsi = new BaseSourceImpl("TestName", "test description", "testcontext", "TestContext");
}
@Test
public void testSetGauge() throws Exception {
String key = "testset";
bmsi.setGauge(key, 100);
MetricMutableGaugeLong g = (MetricMutableGaugeLong) bmsi.metricsRegistry.get(key);
assertEquals(key, g.name);
bmsi.setGauge(key, 110);
assertSame(g, bmsi.metricsRegistry.get(key));
}
@Test
public void testIncGauge() throws Exception {
String key = "testincgauge";
bmsi.incGauge(key, 100);
MetricMutableGaugeLong g = (MetricMutableGaugeLong) bmsi.metricsRegistry.get(key);
assertEquals(key, g.name);
bmsi.incGauge(key, 10);
assertSame(g, bmsi.metricsRegistry.get(key));
}
@Test
public void testDecGauge() throws Exception {
String key = "testdec";
bmsi.decGauge(key, 100);
MetricMutableGaugeLong g = (MetricMutableGaugeLong) bmsi.metricsRegistry.get(key);
assertEquals(key, g.name);
bmsi.decGauge(key, 100);
assertSame(g, bmsi.metricsRegistry.get(key));
}
@Test
public void testIncCounters() throws Exception {
String key = "testinccounter";
bmsi.incCounters(key, 100);
MetricMutableCounterLong c = (MetricMutableCounterLong) bmsi.metricsRegistry.get(key);
assertEquals(key, c.name);
bmsi.incCounters(key, 100);
assertSame(c, bmsi.metricsRegistry.get(key));
}
@Test
public void testRemoveMetric() throws Exception {
bmsi.setGauge("testrm", 100);
bmsi.removeMetric("testrm");
assertNull(bmsi.metricsRegistry.get("testrm"));
}
}
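The gauge and counter tests above rely on get-or-create semantics: the first setGauge/incGauge call for a key registers the metric, and later calls mutate the same object (hence the assertSame checks). A self-contained sketch of that behaviour using plain JDK types (MetricMutableGaugeLong is a hadoop metrics2 class, stubbed here with AtomicLong):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

// Sketch of the lazy registry behaviour the test exercises: one AtomicLong
// per key, created on first use and reused by every later call.
public class LazyGaugeRegistrySketch {
  private final ConcurrentMap<String, AtomicLong> gauges =
      new ConcurrentHashMap<String, AtomicLong>();

  private AtomicLong getOrCreate(String key) {
    AtomicLong gauge = gauges.get(key);
    if (gauge == null) {
      AtomicLong created = new AtomicLong();
      gauge = gauges.putIfAbsent(key, created);
      if (gauge == null) {
        gauge = created; // we won the race; the new gauge is registered
      }
    }
    return gauge;
  }

  public void setGauge(String key, long value) { getOrCreate(key).set(value); }
  public void incGauge(String key, long delta) { getOrCreate(key).addAndGet(delta); }
  public void decGauge(String key, long delta) { getOrCreate(key).addAndGet(-delta); }
  public void removeMetric(String key) { gauges.remove(key); }
  public AtomicLong get(String key) { return gauges.get(key); }
}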

View File

@ -1,51 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.junit.Test;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
* Test for MetricsRegionServerSourceImpl
*/
public class TestMetricsRegionServerSourceImpl {
@Test
public void testGetInstance() throws Exception {
MetricsRegionServerSourceFactory metricsRegionServerSourceFactory =
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
MetricsRegionServerSource serverSource =
metricsRegionServerSourceFactory.createServer(null);
assertTrue(serverSource instanceof MetricsRegionServerSourceImpl);
assertSame(metricsRegionServerSourceFactory,
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class));
}
@Test(expected = RuntimeException.class)
public void testNoGetRegionServerMetricsSourceImpl() throws Exception {
// This should throw an exception because MetricsRegionServerSourceImpl should only
// be created by a factory.
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceImpl.class);
}
}

View File

@ -1,121 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestMetricsRegionSourceImpl {
@Test
public void testCompareTo() throws Exception {
MetricsRegionServerSourceFactory fact = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
MetricsRegionSource one = fact.createRegion(new RegionWrapperStub("TEST"));
MetricsRegionSource oneClone = fact.createRegion(new RegionWrapperStub("TEST"));
MetricsRegionSource two = fact.createRegion(new RegionWrapperStub("TWO"));
assertEquals(0, one.compareTo(oneClone));
assertTrue( one.compareTo(two) < 0);
assertTrue( two.compareTo(one) > 0);
}
@Test(expected = RuntimeException.class)
public void testNoGetRegionServerMetricsSourceImpl() throws Exception {
// This should throw an exception because MetricsRegionSourceImpl should only
// be created by a factory.
CompatibilitySingletonFactory.getInstance(MetricsRegionSource.class);
}
static class RegionWrapperStub implements MetricsRegionWrapper {
private String regionName;
public RegionWrapperStub(String regionName) {
this.regionName = regionName;
}
@Override
public String getTableName() {
return null;
}
@Override
public String getNamespace() {
return null;
}
@Override
public String getRegionName() {
return this.regionName;
}
@Override
public long getNumStores() {
return 0;
}
@Override
public long getNumStoreFiles() {
return 0;
}
@Override
public long getMemstoreSize() {
return 0;
}
@Override
public long getStoreFileSize() {
return 0;
}
@Override
public long getReadRequestCount() {
return 0;
}
@Override
public long getWriteRequestCount() {
return 0;
}
@Override
public long getNumFilesCompacted() {
return 0;
}
@Override
public long getNumBytesCompacted() {
return 0;
}
@Override
public long getNumCompactionsCompleted() {
return 0;
}
}
}
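testCompareTo above asserts a total order in which two sources wrapping the same region name compare equal. One way such an ordering could be implemented is by delegating to the wrapped region name, sketched here (the real MetricsRegionSourceImpl may order differently):

// Illustrative ordering only: compare by the wrapped region's name, so
// sources built from equal names compare as 0 and others order lexically.
public class RegionSourceOrderingSketch implements Comparable<RegionSourceOrderingSketch> {
  private final String regionName;

  public RegionSourceOrderingSketch(String regionName) {
    this.regionName = regionName;
  }

  @Override
  public int compareTo(RegionSourceOrderingSketch other) {
    return this.regionName.compareTo(other.regionName);
  }
}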

View File

@ -1,37 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver.wal;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.junit.Test;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
public class TestMetricsWALSourceImpl {
@Test
public void testGetInstance() throws Exception {
MetricsWALSource walSource =
CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
assertTrue(walSource instanceof MetricsWALSourceImpl);
assertSame(walSource,
CompatibilitySingletonFactory.getInstance(MetricsWALSource.class));
}
}

View File

@ -1,39 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.replication.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource;
import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
/**
* Test to make sure that MetricsReplicationSourceImpl is hooked up to ServiceLoader
*/
public class TestReplicationMetricsSourceImpl {
@Test
public void testGetInstance() throws Exception {
MetricsReplicationSource rms = CompatibilitySingletonFactory
.getInstance(MetricsReplicationSource.class);
assertTrue(rms instanceof MetricsReplicationSourceImpl);
}
}

View File

@ -1,40 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rest;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.rest.MetricsRESTSource;
import org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl;
import org.junit.Test;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
* Test for hadoop1's version of MetricsRESTSource
*/
public class TestRESTMetricsSourceImpl {
@Test
public void ensureCompatRegistered() throws Exception {
assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class));
assertTrue(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class) instanceof MetricsRESTSourceImpl);
}
}

View File

@ -1,224 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.test;
import org.apache.hadoop.hbase.metrics.BaseSource;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.Metric;
import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import java.util.HashMap;
import java.util.Map;
import static org.junit.Assert.*;
/**
* A helper class that will allow tests to get into hadoop1's metrics2 values.
*/
public class MetricsAssertHelperImpl implements MetricsAssertHelper {
private Map<String, String> tags = new HashMap<String, String>();
private Map<String, Number> gauges = new HashMap<String, Number>();
private Map<String, Long> counters = new HashMap<String, Long>();
public class MockMetricsBuilder implements MetricsBuilder {
@Override
public MetricsRecordBuilder addRecord(String s) {
return new MockRecordBuilder();
}
}
public class MockRecordBuilder extends MetricsRecordBuilder {
@Override
public MetricsRecordBuilder tag(String s, String s1, String s2) {
tags.put(canonicalizeMetricName(s), s2);
return this;
}
@Override
public MetricsRecordBuilder add(MetricsTag metricsTag) {
tags.put(canonicalizeMetricName(metricsTag.name()), metricsTag.value());
return this;
}
@Override
public MetricsRecordBuilder setContext(String s) {
return this;
}
@Override
public MetricsRecordBuilder addCounter(String s, String s1, int i) {
counters.put(canonicalizeMetricName(s), Long.valueOf(i));
return this;
}
@Override
public MetricsRecordBuilder addCounter(String s, String s1, long l) {
counters.put(canonicalizeMetricName(s), Long.valueOf(l));
return this;
}
@Override
public MetricsRecordBuilder addGauge(String s, String s1, int i) {
gauges.put(canonicalizeMetricName(s), Long.valueOf(i));
return this;
}
@Override
public MetricsRecordBuilder addGauge(String s, String s1, long l) {
gauges.put(canonicalizeMetricName(s), Long.valueOf(l));
return this;
}
@Override
public MetricsRecordBuilder addGauge(String s, String s1, float v) {
gauges.put(canonicalizeMetricName(s), Double.valueOf(v));
return this;
}
@Override
public MetricsRecordBuilder addGauge(String s, String s1, double v) {
gauges.put(canonicalizeMetricName(s), Double.valueOf(v));
return this;
}
@Override
public MetricsRecordBuilder add(Metric metric) {
gauges.put(canonicalizeMetricName(metric.name()), metric.value());
return this;
}
}
@Override
public void init() {
// In hadoop 1 there's no minicluster mode so there's nothing to do here.
}
@Override
public void assertTag(String name, String expected, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertEquals("Tags should be equal", expected, tags.get(cName));
}
@Override
public void assertGauge(String name, long expected, BaseSource source) {
long found = getGaugeLong(name, source);
assertEquals("Metrics Should be equal", (long) Long.valueOf(expected), found);
}
@Override
public void assertGaugeGt(String name, long expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected);
}
@Override
public void assertGaugeLt(String name, long expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
public void assertGauge(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found, 0.01);
}
@Override
public void assertGaugeGt(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be greater than " + expected, found > expected);
}
@Override
public void assertGaugeLt(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
public void assertCounter(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertEquals("Metrics Counters should be equal", (long) Long.valueOf(expected), found);
}
@Override
public void assertCounterGt(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected);
}
@Override
public void assertCounterLt(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
public long getCounter(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull("Should get counter "+cName + " but did not",counters.get(cName));
return counters.get(cName).longValue();
}
@Override
public double getGaugeDouble(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull("Should get gauge "+cName + " but did not",gauges.get(cName));
return gauges.get(cName).doubleValue();
}
@Override
public long getGaugeLong(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull("Should get gauge " + cName + " but did not", gauges.get(cName));
return gauges.get(cName).longValue();
}
private void reset() {
tags.clear();
gauges.clear();
counters.clear();
}
private void getMetrics(BaseSource source) {
reset();
if (!(source instanceof MetricsSource)) {
fail("The Source passed must be a MetricsSource");
}
MetricsSource impl = (MetricsSource) source;
impl.getMetrics(new MockMetricsBuilder(), true);
}
private String canonicalizeMetricName(String in) {
return in.toLowerCase().replaceAll("[^A-Za-z0-9 ]", "");
}
}
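A hedged example of driving the helper above from a test: poke a BaseSourceImpl, then assert on what the source reports back through the mock builder callbacks (the counter name "requests" is made up):

package org.apache.hadoop.hbase.test;

import org.apache.hadoop.hbase.metrics.BaseSourceImpl;

// Illustrative only: the helper snapshots the source via getMetrics() and
// canonicalizes metric names before the lookup, so the assert below matches
// the counter registered by incCounters.
public class MetricsAssertUsageSketch {
  public static void main(String[] args) {
    BaseSourceImpl source =
        new BaseSourceImpl("TestName", "test description", "testcontext", "TestContext");
    MetricsAssertHelper helper = new MetricsAssertHelperImpl();
    source.incCounters("requests", 3); // hypothetical counter name
    helper.assertCounter("requests", 3, source);
  }
}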

View File

@ -1,55 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.thrift;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory;
import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl;
import org.junit.Test;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
* Test the hadoop 1 version of MetricsThriftServerSourceFactory
*/
public class TestThriftServerMetricsSourceFactoryImpl {
@Test
public void testCompatabilityRegistered() throws Exception {
assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class));
assertTrue(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class) instanceof MetricsThriftServerSourceFactoryImpl);
}
@Test
public void testCreateThriftOneSource() throws Exception {
//Make sure that the factory gives back a singleton.
assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftOneSource(),
new MetricsThriftServerSourceFactoryImpl().createThriftOneSource());
}
@Test
public void testCreateThriftTwoSource() throws Exception {
//Make sure that the factory gives back a singleton.
assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource(),
new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource());
}
}
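The last two tests assert that even freshly constructed factory objects return the same source instance. One way to get that behaviour is a static cache shared by all factory instances, keyed by source name; a sketch under that assumption (names are illustrative):

import java.util.HashMap;
import java.util.Map;

// Sketch: the cache is static, so new NamedSourceFactorySketch() objects all
// hand back the same source for a given name -- matching the assertSame tests.
public class NamedSourceFactorySketch {
  private static final Map<String, Object> SOURCES = new HashMap<String, Object>();

  public Object createSource(String name) {
    synchronized (SOURCES) {
      Object source = SOURCES.get(name);
      if (source == null) {
        source = new Object(); // stand-in for the real thrift metrics source
        SOURCES.put(name, source);
      }
      return source;
    }
  }
}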

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.HadoopShimsImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.test.MetricsAssertHelperImpl

pom.xml
View File

@ -899,7 +899,6 @@
<compileSource>1.6</compileSource>
<!-- Dependencies -->
<hadoop-two.version>2.3.0</hadoop-two.version>
<hadoop-one.version>1.2.1</hadoop-one.version>
<commons-cli.version>1.2</commons-cli.version>
<commons-codec.version>1.7</commons-codec.version>
<!-- pretty outdated -->
@ -1504,124 +1503,6 @@
profiles with activation properties matching the profile here.
Generally, it should be sufficient to copy the first
few lines of the profile you want to match. -->
<!-- profile against Hadoop 1.1.x. -->
<profile>
<id>hadoop-1.1</id>
<activation>
<property>
<!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
<!--h1--><name>hadoop.profile</name><value>1.1</value>
</property>
</activation>
<modules>
<module>hbase-hadoop1-compat</module>
</modules>
<properties>
<hadoop.version>${hadoop-one.version}</hadoop.version>
<compat.module>hbase-hadoop1-compat</compat.module>
<assembly.file>src/main/assembly/hadoop-one-compat.xml</assembly.file>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core</artifactId>
<version>${hadoop.version}</version>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>hsqldb</groupId>
<artifactId>hsqldb</artifactId>
</exclusion>
<exclusion>
<groupId>net.sf.kosmosfs</groupId>
<artifactId>kfs</artifactId>
</exclusion>
<exclusion>
<groupId>org.eclipse.jdt</groupId>
<artifactId>core</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
</exclusion>
<exclusion>
<groupId>oro</groupId>
<artifactId>oro</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-test</artifactId>
<version>${hadoop.version}</version>
<optional>true</optional>
<scope>test</scope>
</dependency>
</dependencies>
</dependencyManagement>
</profile>
<!-- profile for building against Hadoop 1.0.x: -->
<profile>
<id>hadoop-1.0</id>
<activation>
<property>
<name>hadoop.profile</name>
<value>1.0</value>
</property>
</activation>
<modules>
<module>hbase-hadoop1-compat</module>
</modules>
<properties>
<hadoop.version>1.0.4</hadoop.version>
<!-- Need to set this for the Hadoop 1 compat module -->
<hadoop-one.version>${hadoop.version}</hadoop-one.version>
<compat.module>hbase-hadoop1-compat</compat.module>
<assembly.file>src/main/assembly/hadoop-one-compat.xml</assembly.file>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core</artifactId>
<version>${hadoop.version}</version>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>hsqldb</groupId>
<artifactId>hsqldb</artifactId>
</exclusion>
<exclusion>
<groupId>net.sf.kosmosfs</groupId>
<artifactId>kfs</artifactId>
</exclusion>
<exclusion>
<groupId>org.eclipse.jdt</groupId>
<artifactId>core</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
</exclusion>
<exclusion>
<groupId>oro</groupId>
<artifactId>oro</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-test</artifactId>
<version>${hadoop.version}</version>
<optional>true</optional>
<scope>test</scope>
</dependency>
</dependencies>
</dependencyManagement>
</profile>
<!-- profile for building against Hadoop 2.0.x
This is the default.
-->

View File

@ -196,23 +196,34 @@ mvn compile -Dcompile-protobuf -Dprotoc.path=/opt/local/bin/protoc
<section xml:id="releasing">
<title>Releasing Apache HBase</title>
<para>HBase 0.96.x will run on hadoop 1.x or hadoop 2.x but building, you
must choose which to build against; we cannot make a single HBase binary
to run against both hadoop1 and hadoop2. Since we include the Hadoop we were built
<para>HBase 0.96.x will run on hadoop 1.x or hadoop 2.x. HBase 0.98 will run on
both also (but HBase 0.98 deprecates use of hadoop 1). HBase 1.x will NOT
run on hadoop 1. In what follows, we make a distinction between HBase 1.x
builds and the awkward process involved in building HBase 0.96/0.98 for either
hadoop 1 or hadoop 2 targets.
</para>
<section><title>Building against HBase 0.96-0.98</title>
<para>Building 0.98 and 0.96, you must choose which hadoop to build against;
we cannot make a single HBase binary that can run against both hadoop1 and
hadoop2. Since we include the Hadoop we were built
against -- so we can do standalone mode -- the set of modules included
in the tarball changes dependent on whether the hadoop1 or hadoop2 target chosen.
You can tell which HBase you have -- whether it is for hadoop1 or hadoop2
by looking at the version; the HBase for hadoop1 will include 'hadoop1' in its
version. Ditto for hadoop2.
in the tarball changes dependent on whether the hadoop1 or hadoop2 target
is chosen. You can tell which HBase you have -- whether it is for hadoop1
or hadoop2 by looking at the version; the HBase for hadoop1 bundle will
include 'hadoop1' in its version. Ditto for hadoop2.
</para>
<para>Maven, our build system, natively will not let you have a single product
built against different dependencies. Its understandable. But neither could
built against different dependencies. It is understandable. But neither could
we convince maven to change the set of included modules and write out
the correct poms w/ appropriate dependencies even though we have two
build targets; one for hadoop1 and another for hadoop2. So, there is a prestep
required. This prestep takes as input the current pom.xmls and it generates hadoop1 or
hadoop2 versions. You then reference these generated poms when you build. Read on
for examples</para>
hadoop2 versions using a script in <filename>dev-support</filename> called
<filename>generate-hadoopX-poms.sh</filename>. You then reference these generated
poms when you build. For now, just be aware of the difference between HBase 1.x
builds and those of HBase 0.96-0.98. Below we will come back to this difference
when we list out build instructions.</para>
</section>
<para xml:id="mvn.settings.file">Publishing to maven requires you sign the artifacts you want to upload. To have the
build do this for you, you need to make sure you have a properly configured
<filename>settings.xml</filename> in your local repository under <filename>.m2</filename>.
@ -260,10 +271,12 @@ mvn compile -Dcompile-protobuf -Dprotoc.path=/opt/local/bin/protoc
<section xml:id="maven.release">
<title>Making a Release Candidate</title>
<para>I'll explain by running through the process. See later in this section for more detail on particular steps.
These instructions are for building HBase 1.0.x. For building earlier versions, the process is different. See this section
under the respective release documentation folders.
</para>
<para>If you are making a point release (for example to quickly address a critical incompatibility or security
problem) off of a release branch instead of a development branch the tagging instructions are slightly different.
I'll prefix those special steps with "Point Release Only".
I'll prefix those special steps with <emphasis>Point Release Only</emphasis>.
</para>
<para>I would advise, before you go about making a release candidate, doing a practice run by deploying a SNAPSHOT.
@ -272,7 +285,8 @@ mvn compile -Dcompile-protobuf -Dprotoc.path=/opt/local/bin/protoc
our hbase-it integration test suite for a few hours to 'burn in' the near-candidate bits.
</para>
<note>
<para>Point Release Only: At this point you should make an svn copy of the previous release branch (ex: 0.96.1) with
<title>Point Release Only</title>
<para>At this point you should make an svn copy of the previous release branch (ex: 0.96.1) with
the new point release tag (e.g. the 0.96.1.1 tag). Any commits with changes, or commits mentioned below for the point release,
should be applied to the new tag.
</para>
@ -281,14 +295,11 @@ $ svn copy http://svn.apache.org/repos/asf/hbase/tags/0.96.1 http://svn.apache.o
$ svn checkout http://svn.apache.org/repos/asf/hbase/tags/0.96.1.1
</programlisting></para>
</note>
<para>The script <filename>dev-support/make_rc.sh</filename> automates most of this. It does all but the close of the
staging repository up in apache maven, the checking of the produced artifacts to ensure they are 'good' -- e.g.
undoing the produced tarballs, eyeballing them to make sure they look right then starting and checking all is
running properly -- and then the signing and pushing of the tarballs to people.apache.org. Familiarize yourself
with all that is involved by reading the below before resorting to this release candidate-making script.</para>
<para>The <link xlink:href="http://wiki.apache.org/hadoop/HowToRelease">Hadoop How To Release</link> wiki
page informs much of the below and may have more detail on particular sections so it is worth review.</para>
<para>Update CHANGES.txt with the changes since the last release.
Make sure the URL to the JIRA points to the proper location listing fixes for this release.
Adjust the version in all the poms appropriately. If you are making a release candidate, you must
@ -303,77 +314,48 @@ $ svn checkout http://svn.apache.org/repos/asf/hbase/tags/0.96.1.1
Update the documentation under <filename>src/main/docbkx</filename>. This usually involves copying the
latest from trunk and making version-particular adjustments to suit this release candidate version.
</para>
<para>Now, build the src tarball. This tarball is hadoop version independent. It is just the pure src code and documentation without an hadoop1 or hadoop2 taint.
<para>Now, build the src tarball. This tarball is hadoop version independent. It is just the pure src code and documentation without a particular hadoop taint, etc.
Add the <varname>-Prelease</varname> profile when building; it checks files for licenses and will fail the build if unlicensed files are present.
<programlisting>$ MAVEN_OPTS="-Xmx2g" mvn clean install -DskipTests assembly:single -Dassembly.file=hbase-assembly/src/main/assembly/src.xml -Prelease</programlisting>
Undo the tarball and make sure it looks good. A good test for the src tarball being 'complete' is to see if
you can build new tarballs from this source bundle. For example:
<programlisting>$ tar xzf hbase-0.96.0-src.tar.gz
$ cd hbase-0.96.0
$ bash ./dev-support/generate-hadoopX-poms.sh 0.96.0 0.96.0-hadoop1-SNAPSHOT
$ bash ./dev-support/generate-hadoopX-poms.sh 0.96.0 0.96.0-hadoop2-SNAPSHOT
$ export MAVEN=/home/stack/bin/mvn/bin/mvn
$ MAVEN_OPTS="-Xmx3g" $MAVEN -f pom.xml.hadoop1 clean install -DskipTests javadoc:aggregate site assembly:single -Prelease
# Check the produced bin tarball is good -- run it, eyeball it, etc.
$ MAVEN_OPTS="-Xmx3g" $MAVEN -f pom.xml.hadoop2 clean install -DskipTests javadoc:aggregate site assembly:single -Prelease
# Check the produced bin tarball is good -- run it, eyeball it, etc.</programlisting>
you can build new tarballs from this source bundle.
If the source tarball is good, save it off to a <emphasis>version directory</emphasis>, i.e. a directory somewhere where you are collecting
all of the tarballs you will publish as part of the release candidate. For example if we were building a
hbase-0.96.0 release candidate, we might call the directory <filename>hbase-0.96.0RC0</filename>. Later
we will publish this directory as our release candidate up on people.apache.org/~you.
we will publish this directory as our release candidate up on people.apache.org/~YOU.
</para>
<para>Now we are into the making of the hadoop1 and hadoop2 specific binary builds. Let's do hadoop1 first.
First generate the hadoop1 poms.
<note>
<para>We cannot use maven to publish what is in essence two hbase artifacts both of the same version only
one is for hadoop1 and the other for hadoop2. So, we generate hadoop1 and hadoop2 particular poms
from the checked-in pom using a dev-support script and we run two builds; one for hadoop1 artifacts
and one for the hadoop2 artifacts.
</para>
</note>
See the <filename>generate-hadoopX-poms.sh</filename> script usage for what it expects by way of arguments.
You will find it in the <filename>dev-support</filename> subdirectory. In the below, we generate hadoop1 poms with a version
of <varname>0.96.0-hadoop1</varname> (the script will look for a version of <varname>0.96.0</varname> in the current <filename>pom.xml</filename>).
<programlisting>$ ./dev-support/generate-hadoopX-poms.sh 0.96.0 0.96.0-hadoop1</programlisting>
The script will work silently if all goes well. It will drop a <filename>pom.xml.hadoop1</filename> beside all <filename>pom.xml</filename>s in all modules.
</para>
<para>Now build the hadoop1 tarball. Note how we reference the new <filename>pom.xml.hadoop1</filename> explicitly.
We also add the <varname>-Prelease</varname> profile when building; it checks files for licenses and will fail the build if unlicensed files are present.
<para>Now let's build the binary tarball.
Add the <varname>-Prelease</varname> profile when building; it checks files for licenses and will fail the build if unlicensed files are present.
Do it in two steps. First install into the local repository and then generate documentation and assemble the tarball
(Otherwise the build complains that hbase modules are not in the maven repo when we try to do it all in one go, especially on a fresh repo).
It seems that you need the install goal in both steps.
<programlisting>$ MAVEN_OPTS="-Xmx3g" mvn -f pom.xml.hadoop1 clean install -DskipTests -Prelease
$ MAVEN_OPTS="-Xmx3g" mvn -f pom.xml.hadoop1 install -DskipTests site assembly:single -Prelease</programlisting>
Undo the generated tarball and check it out. Look at the doc and see if it runs, etc. Is the set of modules appropriate: e.g. do we have an hbase-hadoop2-compat in the hadoop1 tarball?
<programlisting>$ MAVEN_OPTS="-Xmx3g" mvn clean install -DskipTests -Prelease
$ MAVEN_OPTS="-Xmx3g" mvn install -DskipTests site assembly:single -Prelease</programlisting>
Undo the generated tarball and check it out. Look at the doc and see if it runs, etc.
If good, copy the tarball to the above mentioned <emphasis>version directory</emphasis>.
</para>
<note><para>Point Release Only: The following step that creates a new tag can be skipped since you've already created the point release tag</para></note>
<note><title>Point Release Only</title><para>The following step that creates a new tag can be skipped since you've already created the point release tag</para></note>
<para>I'll tag the release at this point since it's looking good. If we find an issue later, we can delete the tag and start over. Release needs to be tagged when we do the next step.</para>
<para>Now deploy hadoop1 hbase to mvn. Do the mvn deploy and tgz for a particular version all together in one go, else if you flip between hadoop1 and hadoop2 builds,
you might mal-publish poms and hbase-default.xml's (the version interpolations won't match).
<para>Now deploy hbase to the apache maven repository.
This time we use the <varname>apache-release</varname> profile instead of just <varname>release</varname> profile when doing mvn deploy;
it will invoke the apache pom referenced by our poms. It will also sign your artifacts published to mvn as long as your settings.xml in your local <filename>.m2</filename>
repository is configured correctly (your <filename>settings.xml</filename> adds your gpg password property to the apache profile).
<programlisting>$ MAVEN_OPTS="-Xmx3g" mvn -f pom.xml.hadoop1 deploy -DskipTests -Papache-release</programlisting>
The last command above copies all artifacts for hadoop1 up to a temporary staging apache mvn repo in an 'open' state.
We'll need to do more work on these maven artifacts to make them generally available but before we do that,
lets get the hadoop2 build to the same stage as this hadoop1 build.
<programlisting>$ MAVEN_OPTS="-Xmx3g" mvn deploy -DskipTests -Papache-release</programlisting>
The last command above copies all artifacts up to a temporary staging apache mvn repo in an 'open' state.
We'll need to do more work on these maven artifacts to make them generally available.
</para>
<para>Let's do the hadoop2 artifacts (read the hadoop1 section above closely before coming here because we don't repeat the explanation below).
<programlisting># Generate the hadoop2 poms.
$ ./dev-support/generate-hadoopX-poms.sh 0.96.0 0.96.0-hadoop2
# Install the hbase hadoop2 jars into local repo then build the doc and tarball
$ MAVEN_OPTS="-Xmx3g" mvn -f pom.xml.hadoop2 clean install -DskipTests -Prelease
$ MAVEN_OPTS="-Xmx3g" mvn -f pom.xml.hadoop2 install -DskipTests site assembly:single -Prelease
# Undo the tgz and check it out. If good, copy the tarball to your 'version directory'. Now deploy to mvn.
$ MAVEN_OPTS="-Xmx3g" mvn -f pom.xml.hadoop2 deploy -DskipTests -Papache-release
</programlisting>
</para>
<para>The script <filename>dev-support/make_rc.sh</filename> automates a lot of the above-listed release steps.
It does not do the modification of the CHANGES.txt for the release, the close of the
staging repository up in apache maven (human intervention is needed here), the checking of
the produced artifacts to ensure they are 'good' -- e.g. undoing the produced tarballs, eyeballing them to make
sure they look right then starting and checking all is running properly -- and then the signing and pushing of
the tarballs to people.apache.org, but it does the other stuff; it can come in handy.
</para>
<para>Now let's get back to what is up in maven. We should now have two sets of artifacts up in the apache
maven staging area both in the 'open' state (they may both be under the one staging if they were pushed to maven around the same time).
While in this 'open' state you can check out what you've published to make sure all is good. To do this, login at repository.apache.org
<para>Now let's get back to what is up in maven. Our artifacts should be up in the maven repository in the staging area
in the 'open' state. While in this 'open' state you can check out what you've published to make sure all is good.
To do this, login at repository.apache.org
using your apache id. Find your artifacts in the staging repository. Browse the content. Make sure all artifacts made it up
and that the poms look generally good. If it checks out, 'close' the repo. This will make the artifacts publicly available.
You will receive an email with the URL to give out for the temporary staging repository for others to use when trying out this new
@ -385,7 +367,7 @@ or borked, just delete the 'open' staged artifacts.
<para>
See the <link xlink:href="https://github.com/saintstack/hbase-downstreamer">hbase-downstreamer</link> test for a simple
example of a project that is downstream of hbase and depends on it.
Check it out and run its simple test to make sure maven hbase-hadoop1 and hbase-hadoop2 are properly deployed to the maven repository.
Check it out and run its simple test to make sure maven artifacts are properly deployed to the maven repository.
Be sure to edit the pom to point at the proper staging repo. Make sure you are pulling from the repo when tests run and that you are not
getting from your local repo (pass -U or delete your local repo content and check maven is pulling from remote out of the staging repo).
</para>
@ -407,7 +389,7 @@ or borked, just delete the 'open' staged artifacts.
directly and are immediately available. Making a SNAPSHOT release, this is what you want to happen.</para>
<para>
At this stage we have three tarballs in our 'version directory' and two sets of artifacts up in maven in the staging area in the
At this stage we have two tarballs in our 'version directory' and a set of artifacts up in maven in the staging area in the
'closed' state publicly available in a temporary staging repository whose URL you should have gotten in an email.
The above-mentioned script, <filename>make_rc.sh</filename>, does all of the above for you minus the check of the artifacts built,
the closing of the staging repository up in maven, and the tagging of the release. If you run the script, do your checks at this
@ -433,19 +415,9 @@ $ rsync -av 0.96.0RC0 people.apache.org:public_html
<title>Publishing a SNAPSHOT to maven</title>
<para>Make sure your <filename>settings.xml</filename> is set up properly (see above for how).
Make sure the hbase version includes <varname>-SNAPSHOT</varname> as a suffix. Here is how I published SNAPSHOTS of
a checkout that had an hbase version of 0.96.0 in its poms.
First we generated the hadoop1 poms with a version that has a <varname>-SNAPSHOT</varname> suffix.
We then installed the build into the local repository. Then we deploy this build to apache. See the output for the location
up in apache to where the snapshot is copied. Notice how we add the <varname>release</varname> profile
when installing locally -- to find files that are without proper license -- and then the <varname>apache-release</varname>
profile to deploy to the apache maven repository.
<programlisting>$ ./dev-support/generate-hadoopX-poms.sh 0.96.0 0.96.0-hadoop1-SNAPSHOT
$ MAVEN_OPTS="-Xmx3g" mvn -f pom.xml.hadoop1 clean install -DskipTests javadoc:aggregate site assembly:single -Prelease
$ MAVEN_OPTS="-Xmx3g" mvn -f pom.xml.hadoop1 -DskipTests deploy -Papache-release</programlisting>
Next, do the same to publish the hadoop2 artifacts.
<programlisting>$ ./dev-support/generate-hadoopX-poms.sh 0.96.0 0.96.0-hadoop2-SNAPSHOT
$ MAVEN_OPTS="-Xmx3g" mvn -f pom.xml.hadoop2 clean install -DskipTests javadoc:aggregate site assembly:single -Prelease
$ MAVEN_OPTS="-Xmx3g" mvn -f pom.xml.hadoop2 deploy -DskipTests -Papache-release</programlisting>
a release that had an hbase version of 0.96.0 in its poms.
<programlisting>$ MAVEN_OPTS="-Xmx3g" mvn clean install -DskipTests javadoc:aggregate site assembly:single -Prelease
$ MAVEN_OPTS="-Xmx3g" mvn -DskipTests deploy -Papache-release</programlisting>
</para>
<para>The <filename>make_rc.sh</filename> script mentioned above
(see <xref linkend="maven.release"/>) can help you publish <varname>SNAPSHOTS</varname>.