HBASE-9194 Break HMaster metrics into multiple contexts
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1514513 13f79535-47bb-0310-9956-ffa450edef68
commit 01bdee029a
parent 4bfc15f2b9
@@ -0,0 +1,75 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.metrics.BaseSource;

public interface MetricsAssignmentManagerSource extends BaseSource {

  /**
   * The name of the metrics
   */
  String METRICS_NAME = "AssignmentManger";

  /**
   * The context metrics will be under.
   */
  String METRICS_CONTEXT = "master";

  /**
   * The name of the metrics context that metrics will be under in jmx
   */
  String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME;

  /**
   * Description
   */
  String METRICS_DESCRIPTION = "Metrics about HBase master assingment manager.";

  String RIT_COUNT_NAME = "ritCount";
  String RIT_COUNT_OVER_THRESHOLD_NAME = "ritCountOverThreshold";
  String RIT_OLDEST_AGE_NAME = "ritOldestAge";
  String ASSIGN_TIME_NAME = "assign";
  String BULK_ASSIGN_TIME_NAME = "bulkAssign";

  void updateAssignmentTime(long time);

  void updateBulkAssignTime(long time);

  /**
   * Set the number of regions in transition.
   *
   * @param ritCount count of the regions in transition.
   */
  void setRIT(int ritCount);

  /**
   * Set the count of the number of regions that have been in transition over the threshold time.
   *
   * @param ritCountOverThreshold number of regions in transition for longer than threshold.
   */
  void setRITCountOverThreshold(int ritCountOverThreshold);

  /**
   * Set the oldest region in transition.
   *
   * @param age age of the oldest RIT.
   */
  void setRITOldestAge(long age);
}
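For orientation only (not part of the commit): a minimal sketch of how master-side code reports through the new per-subsystem source. The interface and the CompatibilitySingletonFactory lookup appear in the hunks of this commit; the wrapper class name and the timing and count values below are illustrative.

import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.master.MetricsAssignmentManagerSource;

// Illustrative caller, not part of the commit.
public class AssignmentMetricsSketch {
  public static void main(String[] args) {
    MetricsAssignmentManagerSource source =
        CompatibilitySingletonFactory.getInstance(MetricsAssignmentManagerSource.class);
    long start = System.currentTimeMillis();
    // ... perform an assignment ...
    source.updateAssignmentTime(System.currentTimeMillis() - start);
    source.setRIT(3);                   // regions currently in transition (example value)
    source.setRITCountOverThreshold(1); // RITs older than the threshold (example value)
    source.setRITOldestAge(60000L);     // age of the oldest RIT in ms (example value)
  }
}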
@@ -0,0 +1,64 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.metrics.BaseSource;

public interface MetricsMasterFileSystemSource extends BaseSource {

  /**
   * The name of the metrics
   */
  String METRICS_NAME = "FileSystem";

  /**
   * The context metrics will be under.
   */
  String METRICS_CONTEXT = "master";

  /**
   * The name of the metrics context that metrics will be under in jmx
   */
  String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME;

  /**
   * Description
   */
  String METRICS_DESCRIPTION = "Metrics about HBase master file system.";

  String META_SPLIT_TIME_NAME = "metaHlogSplitTime";
  String META_SPLIT_SIZE_NAME = "metaHlogSplitSize";
  String SPLIT_TIME_NAME = "hlogSplitTime";
  String SPLIT_SIZE_NAME = "hlogSplitSize";

  String META_SPLIT_TIME_DESC = "Time it takes to finish splitMetaLog()";
  String META_SPLIT_SIZE_DESC = "Size of META HLog files being split";
  String SPLIT_TIME_DESC = "Time it takes to finish HLog.splitLog()";
  String SPLIT_SIZE_DESC = "Size of HLog files being split";

  void updateMetaWALSplitTime(long time);

  void updateMetaWALSplitSize(long size);

  void updateSplitTime(long time);

  void updateSplitSize(long size);

}
@@ -55,17 +55,9 @@ public interface MetricsMasterSource extends BaseSource {
  String SERVER_NAME_NAME = "serverName";
  String CLUSTER_ID_NAME = "clusterId";
  String IS_ACTIVE_MASTER_NAME = "isActiveMaster";
  String SPLIT_TIME_NAME = "hlogSplitTime";
  String SPLIT_SIZE_NAME = "hlogSplitSize";
  String SNAPSHOT_TIME_NAME = "snapshotTime";
  String SNAPSHOT_RESTORE_TIME_NAME = "snapshotRestoreTime";
  String SNAPSHOT_CLONE_TIME_NAME = "snapshotCloneTime";
  String META_SPLIT_TIME_NAME = "metaHlogSplitTime";
  String META_SPLIT_SIZE_NAME = "metaHlogSplitSize";

  String CLUSTER_REQUESTS_NAME = "clusterRequests";
  String RIT_COUNT_NAME = "ritCount";
  String RIT_COUNT_OVER_THRESHOLD_NAME = "ritCountOverThreshold";
  String RIT_OLDEST_AGE_NAME = "ritOldestAge";
  String MASTER_ACTIVE_TIME_DESC = "Master Active Time";
  String MASTER_START_TIME_DESC = "Master Start Time";
  String AVERAGE_LOAD_DESC = "AverageLoad";
@@ -75,13 +67,8 @@ public interface MetricsMasterSource extends BaseSource {
  String SERVER_NAME_DESC = "Server Name";
  String CLUSTER_ID_DESC = "Cluster Id";
  String IS_ACTIVE_MASTER_DESC = "Is Active Master";
  String SPLIT_TIME_DESC = "Time it takes to finish HLog.splitLog()";
  String SPLIT_SIZE_DESC = "Size of HLog files being split";
  String SNAPSHOT_TIME_DESC = "Time it takes to finish snapshot()";
  String SNAPSHOT_RESTORE_TIME_DESC = "Time it takes to finish restoreSnapshot()";
  String SNAPSHOT_CLONE_TIME_DESC = "Time it takes to finish cloneSnapshot()";
  String META_SPLIT_TIME_DESC = "Time it takes to finish splitMetaLog()";
  String META_SPLIT_SIZE_DESC = "Size of META HLog files being split";

  /**
   * Increment the number of requests the cluster has seen.
@@ -90,39 +77,7 @@ public interface MetricsMasterSource extends BaseSource {
   */
  void incRequests(final int inc);

  /**
   * Set the number of regions in transition.
   *
   * @param ritCount count of the regions in transition.
   */
  void setRIT(int ritCount);

  /**
   * Set the count of the number of regions that have been in transition over the threshold time.
   *
   * @param ritCountOverThreshold number of regions in transition for longer than threshold.
   */
  void setRITCountOverThreshold(int ritCountOverThreshold);

  /**
   * Set the oldest region in transition.
   *
   * @param age age of the oldest RIT.
   */
  void setRITOldestAge(long age);

  void updateSplitTime(long time);

  void updateSplitSize(long size);

  void updateSnapshotTime(long time);

  void updateSnapshotCloneTime(long time);

  void updateSnapshotRestoreTime(long time);

  void updateMetaWALSplitTime(long time);

  void updateMetaWALSplitSize(long size);

}
@@ -0,0 +1,56 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.metrics.BaseSource;

public interface MetricsSnapshotSource extends BaseSource {
  /**
   * The name of the metrics
   */
  String METRICS_NAME = "Snapshots";

  /**
   * The context metrics will be under.
   */
  String METRICS_CONTEXT = "master";

  /**
   * The name of the metrics context that metrics will be under in jmx
   */
  String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME;

  /**
   * Description
   */
  String METRICS_DESCRIPTION = "Metrics about HBase master server";

  String SNAPSHOT_TIME_NAME = "snapshotTime";
  String SNAPSHOT_RESTORE_TIME_NAME = "snapshotRestoreTime";
  String SNAPSHOT_CLONE_TIME_NAME = "snapshotCloneTime";
  String SNAPSHOT_TIME_DESC = "Time it takes to finish snapshot()";
  String SNAPSHOT_RESTORE_TIME_DESC = "Time it takes to finish restoreSnapshot()";
  String SNAPSHOT_CLONE_TIME_DESC = "Time it takes to finish cloneSnapshot()";

  void updateSnapshotTime(long time);

  void updateSnapshotCloneTime(long time);

  void updateSnapshotRestoreTime(long time);
}
@@ -0,0 +1,51 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master.balancer;

import org.apache.hadoop.hbase.metrics.BaseSource;

public interface MetricsBalancerSource extends BaseSource {

  /**
   * The name of the metrics
   */
  String METRICS_NAME = "Balancer";

  /**
   * The context metrics will be under.
   */
  String METRICS_CONTEXT = "master";

  /**
   * The name of the metrics context that metrics will be under in jmx
   */
  String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME;

  String BALANCE_CLUSTER = "balancerCluster";
  String MISC_INVOATION_COUNT = "miscInvocationCount";

  /**
   * Description
   */
  String METRICS_DESCRIPTION = "Metrics about HBase master balancer";

  void updateBalanceCluster(long time);

  void incrMiscInvocations();
}
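The balancer source above pairs a histogram ("balancerCluster") with a counter of miscellaneous invocations. A minimal sketch of the intended reporting pattern follows; the concrete balancer call site for updateBalanceCluster is not shown in these hunks, so its placement here is an assumption, and the lookup mirrors the CompatibilitySingletonFactory usage seen elsewhere in this commit.

import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.master.balancer.MetricsBalancerSource;

// Illustrative caller, not part of the commit.
public class BalancerMetricsSketch {
  public static void main(String[] args) {
    MetricsBalancerSource balancerSource =
        CompatibilitySingletonFactory.getInstance(MetricsBalancerSource.class);
    long start = System.currentTimeMillis();
    // ... compute the region plans for one balance pass ...
    balancerSource.updateBalanceCluster(System.currentTimeMillis() - start); // duration of the pass
    balancerSource.incrMiscInvocations(); // counted on other balancer entry points in this commit
  }
}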
@@ -0,0 +1,74 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;

public class MetricsAssignmentManagerSourceImpl extends BaseSourceImpl implements MetricsAssignmentManagerSource {

  private MetricMutableGaugeLong ritGauge;
  private MetricMutableGaugeLong ritCountOverThresholdGauge;
  private MetricMutableGaugeLong ritOldestAgeGauge;
  private MetricMutableHistogram assignTimeHisto;
  private MetricMutableHistogram bulkAssignTimeHisto;

  public MetricsAssignmentManagerSourceImpl() {
    this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
  }

  public MetricsAssignmentManagerSourceImpl(String metricsName,
      String metricsDescription,
      String metricsContext, String metricsJmxContext) {
    super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
  }

  @Override
  public void init() {
    super.init();
    ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, "", 0l);
    ritCountOverThresholdGauge = metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, "", 0l);
    ritOldestAgeGauge = metricsRegistry.newGauge(RIT_OLDEST_AGE_NAME, "", 0l);
    assignTimeHisto = metricsRegistry.newHistogram(ASSIGN_TIME_NAME);
    bulkAssignTimeHisto = metricsRegistry.newHistogram(BULK_ASSIGN_TIME_NAME);
  }

  @Override
  public void updateAssignmentTime(long time) {
    assignTimeHisto.add(time);
  }

  @Override
  public void updateBulkAssignTime(long time) {
    bulkAssignTimeHisto.add(time);
  }

  public void setRIT(int ritCount) {
    ritGauge.set(ritCount);
  }

  public void setRITCountOverThreshold(int ritCount) {
    ritCountOverThresholdGauge.set(ritCount);
  }

  public void setRITOldestAge(long ritCount) {
    ritOldestAgeGauge.set(ritCount);
  }
}
@@ -0,0 +1,69 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;

public class MetricsMasterFilesystemSourceImpl extends BaseSourceImpl implements MetricsMasterFileSystemSource {

  private MetricMutableHistogram splitSizeHisto;
  private MetricMutableHistogram splitTimeHisto;
  private MetricMutableHistogram metaSplitTimeHisto;
  private MetricMutableHistogram metaSplitSizeHisto;

  public MetricsMasterFilesystemSourceImpl() {
    this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
  }

  public MetricsMasterFilesystemSourceImpl(String metricsName,
      String metricsDescription,
      String metricsContext, String metricsJmxContext) {
    super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
  }

  @Override
  public void init() {
    splitSizeHisto = metricsRegistry.newHistogram(SPLIT_SIZE_NAME, SPLIT_SIZE_DESC);
    splitTimeHisto = metricsRegistry.newHistogram(SPLIT_TIME_NAME, SPLIT_TIME_DESC);
    metaSplitTimeHisto = metricsRegistry.newHistogram(META_SPLIT_TIME_NAME, META_SPLIT_TIME_DESC);
    metaSplitSizeHisto = metricsRegistry.newHistogram(META_SPLIT_SIZE_NAME, META_SPLIT_SIZE_DESC);
  }

  @Override
  public void updateSplitTime(long time) {
    splitTimeHisto.add(time);
  }

  @Override
  public void updateSplitSize(long size) {
    splitSizeHisto.add(size);
  }

  @Override
  public void updateMetaWALSplitTime(long time) {
    metaSplitTimeHisto.add(time);
  }

  @Override
  public void updateMetaWALSplitSize(long size) {
    metaSplitSizeHisto.add(size);
  }
}
@@ -40,16 +40,7 @@ public class MetricsMasterSourceImpl

  private final MetricsMasterWrapper masterWrapper;
  private MetricMutableCounterLong clusterRequestsCounter;
  private MetricMutableGaugeLong ritGauge;
  private MetricMutableGaugeLong ritCountOverThresholdGauge;
  private MetricMutableGaugeLong ritOldestAgeGauge;
  private MetricMutableHistogram splitTimeHisto;
  private MetricMutableHistogram splitSizeHisto;
  private MetricMutableStat snapshotTimeHisto;
  private MetricMutableStat snapshotCloneTimeHisto;
  private MetricMutableStat snapshotRestoreTimeHisto;
  private MetricMutableHistogram metaSplitTimeHisto;
  private MetricMutableHistogram metaSplitSizeHisto;

  public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) {
    this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper);
@@ -68,72 +59,12 @@ public class MetricsMasterSourceImpl
  public void init() {
    super.init();
    clusterRequestsCounter = metricsRegistry.newCounter(CLUSTER_REQUESTS_NAME, "", 0l);
    ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, "", 0l);
    ritCountOverThresholdGauge = metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, "", 0l);
    ritOldestAgeGauge = metricsRegistry.newGauge(RIT_OLDEST_AGE_NAME, "", 0l);
    splitSizeHisto = metricsRegistry.newHistogram(SPLIT_SIZE_NAME, SPLIT_SIZE_DESC);
    splitTimeHisto = metricsRegistry.newHistogram(SPLIT_TIME_NAME, SPLIT_TIME_DESC);
    snapshotTimeHisto = metricsRegistry.newStat(
        SNAPSHOT_TIME_NAME, SNAPSHOT_TIME_DESC, "Ops", "Time", true);
    snapshotCloneTimeHisto = metricsRegistry.newStat(
        SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC, "Ops", "Time", true);
    snapshotRestoreTimeHisto = metricsRegistry.newStat(
        SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC, "Ops", "Time", true);
    metaSplitTimeHisto = metricsRegistry.newHistogram(META_SPLIT_TIME_NAME, META_SPLIT_TIME_DESC);
    metaSplitSizeHisto = metricsRegistry.newHistogram(META_SPLIT_SIZE_NAME, META_SPLIT_SIZE_DESC);
  }

  public void incRequests(final int inc) {
    this.clusterRequestsCounter.incr(inc);
  }

  public void setRIT(int ritCount) {
    ritGauge.set(ritCount);
  }

  public void setRITCountOverThreshold(int ritCount) {
    ritCountOverThresholdGauge.set(ritCount);
  }

  public void setRITOldestAge(long ritCount) {
    ritOldestAgeGauge.set(ritCount);
  }

  @Override
  public void updateSplitTime(long time) {
    splitTimeHisto.add(time);
  }

  @Override
  public void updateSplitSize(long size) {
    splitSizeHisto.add(size);
  }

  @Override
  public void updateSnapshotTime(long time) {
    snapshotTimeHisto.add(time);
  }

  @Override
  public void updateSnapshotCloneTime(long time) {
    snapshotCloneTimeHisto.add(time);
  }

  @Override
  public void updateSnapshotRestoreTime(long time) {
    snapshotRestoreTimeHisto.add(time);
  }

  @Override
  public void updateMetaWALSplitTime(long time) {
    metaSplitTimeHisto.add(time);
  }

  @Override
  public void updateMetaWALSplitSize(long size) {
    metaSplitSizeHisto.add(size);
  }

  /**
   * Method to export all the metrics.
   *
@@ -0,0 +1,64 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;

public class MetricsSnapshotSourceImpl extends BaseSourceImpl implements MetricsSnapshotSource {

  private MetricMutableHistogram snapshotTimeHisto;
  private MetricMutableHistogram snapshotCloneTimeHisto;
  private MetricMutableHistogram snapshotRestoreTimeHisto;

  public MetricsSnapshotSourceImpl() {
    this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
  }

  public MetricsSnapshotSourceImpl(String metricsName,
      String metricsDescription,
      String metricsContext, String metricsJmxContext) {
    super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
  }

  @Override
  public void init() {
    snapshotTimeHisto = metricsRegistry.newHistogram(
        SNAPSHOT_TIME_NAME, SNAPSHOT_TIME_DESC);
    snapshotCloneTimeHisto = metricsRegistry.newHistogram(
        SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC);
    snapshotRestoreTimeHisto = metricsRegistry.newHistogram(
        SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC);
  }

  @Override
  public void updateSnapshotTime(long time) {
    snapshotTimeHisto.add(time);
  }

  @Override
  public void updateSnapshotCloneTime(long time) {
    snapshotCloneTimeHisto.add(time);
  }

  @Override
  public void updateSnapshotRestoreTime(long time) {
    snapshotRestoreTimeHisto.add(time);
  }
}
@@ -0,0 +1,57 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master.balancer;

import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;

public class MetricsBalancerSourceImpl extends BaseSourceImpl implements MetricsBalancerSource{

  private MetricMutableHistogram blanceClusterHisto;
  private MetricMutableCounterLong miscCount;

  public MetricsBalancerSourceImpl() {
    this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
  }

  public MetricsBalancerSourceImpl(String metricsName,
      String metricsDescription,
      String metricsContext, String metricsJmxContext) {
    super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
  }

  @Override
  public void init() {
    blanceClusterHisto = metricsRegistry.newHistogram(BALANCE_CLUSTER);
    miscCount = metricsRegistry.newCounter(MISC_INVOATION_COUNT, "", 0L);

  }

  @Override
  public void updateBalanceCluster(long time) {
    blanceClusterHisto.add(time);
  }

  @Override
  public void incrMiscInvocations() {
    miscCount.incr();
  }
}
@@ -0,0 +1 @@
org.apache.hadoop.hbase.master.MetricsAssignmentManagerSourceImpl
@@ -0,0 +1 @@
org.apache.hadoop.hbase.master.MetricsMasterFilesystemSourceImpl
@@ -0,0 +1 @@
org.apache.hadoop.hbase.master.MetricsSnapshotSourceImpl
@@ -0,0 +1 @@
org.apache.hadoop.hbase.master.balancer.MetricsBalancerSourceImpl
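Each of the four one-line hunks above adds a provider-configuration entry: a single fully-qualified implementation class name that lets the new source interface be bound to its Hadoop-version-specific implementation at runtime (CompatibilitySingletonFactory performs the lookup elsewhere in this commit). The file paths are not shown in the hunks, so the META-INF/services location below is an assumption based on the standard java.util.ServiceLoader mechanism; this is a sketch of the discovery these entries enable, not code from the commit.

import java.util.ServiceLoader;
import org.apache.hadoop.hbase.master.MetricsAssignmentManagerSource;

public class SourceLookupSketch {
  public static void main(String[] args) {
    // Assuming META-INF/services/org.apache.hadoop.hbase.master.MetricsAssignmentManagerSource
    // contains the single class name added above, ServiceLoader can discover the implementation.
    ServiceLoader<MetricsAssignmentManagerSource> loader =
        ServiceLoader.load(MetricsAssignmentManagerSource.class);
    for (MetricsAssignmentManagerSource source : loader) {
      System.out.println("bound implementation: " + source.getClass().getName());
    }
  }
}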
@@ -0,0 +1,73 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MutableHistogram;

public class MetricsAssignmentManagerSourceImpl extends BaseSourceImpl implements MetricsAssignmentManagerSource {

  private MutableGaugeLong ritGauge;
  private MutableGaugeLong ritCountOverThresholdGauge;
  private MutableGaugeLong ritOldestAgeGauge;
  private MutableHistogram assignTimeHisto;
  private MutableHistogram bulkAssignTimeHisto;

  public MetricsAssignmentManagerSourceImpl() {
    this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
  }

  public MetricsAssignmentManagerSourceImpl(String metricsName,
      String metricsDescription,
      String metricsContext, String metricsJmxContext) {
    super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
  }

  public void init() {
    ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, "", 0l);
    ritCountOverThresholdGauge = metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, "", 0l);
    ritOldestAgeGauge = metricsRegistry.newGauge(RIT_OLDEST_AGE_NAME, "", 0l);
    assignTimeHisto = metricsRegistry.newHistogram(ASSIGN_TIME_NAME);
    bulkAssignTimeHisto = metricsRegistry.newHistogram(BULK_ASSIGN_TIME_NAME);
  }

  @Override
  public void updateAssignmentTime(long time) {
    assignTimeHisto.add(time);
  }

  @Override
  public void updateBulkAssignTime(long time) {
    bulkAssignTimeHisto.add(time);
  }

  public void setRIT(int ritCount) {
    ritGauge.set(ritCount);
  }

  public void setRITCountOverThreshold(int ritCount) {
    ritCountOverThresholdGauge.set(ritCount);
  }

  public void setRITOldestAge(long ritCount) {
    ritOldestAgeGauge.set(ritCount);
  }
}
@@ -0,0 +1,69 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MutableHistogram;

public class MetricsMasterFilesystemSourceImpl extends BaseSourceImpl implements MetricsMasterFileSystemSource {

  private MutableHistogram splitSizeHisto;
  private MutableHistogram splitTimeHisto;
  private MutableHistogram metaSplitTimeHisto;
  private MutableHistogram metaSplitSizeHisto;

  public MetricsMasterFilesystemSourceImpl() {
    this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
  }

  public MetricsMasterFilesystemSourceImpl(String metricsName,
      String metricsDescription,
      String metricsContext, String metricsJmxContext) {
    super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
  }

  @Override
  public void init() {
    splitSizeHisto = metricsRegistry.newHistogram(SPLIT_SIZE_NAME, SPLIT_SIZE_DESC);
    splitTimeHisto = metricsRegistry.newHistogram(SPLIT_TIME_NAME, SPLIT_TIME_DESC);
    metaSplitTimeHisto = metricsRegistry.newHistogram(META_SPLIT_TIME_NAME, META_SPLIT_TIME_DESC);
    metaSplitSizeHisto = metricsRegistry.newHistogram(META_SPLIT_SIZE_NAME, META_SPLIT_SIZE_DESC);
  }

  @Override
  public void updateSplitTime(long time) {
    splitTimeHisto.add(time);
  }

  @Override
  public void updateSplitSize(long size) {
    splitSizeHisto.add(size);
  }

  @Override
  public void updateMetaWALSplitTime(long time) {
    metaSplitTimeHisto.add(time);
  }

  @Override
  public void updateMetaWALSplitSize(long size) {
    metaSplitSizeHisto.add(size);
  }
}
@@ -37,16 +37,6 @@ public class MetricsMasterSourceImpl

  private final MetricsMasterWrapper masterWrapper;
  private MutableCounterLong clusterRequestsCounter;
  private MutableGaugeLong ritGauge;
  private MutableGaugeLong ritCountOverThresholdGauge;
  private MutableGaugeLong ritOldestAgeGauge;
  private MutableHistogram splitTimeHisto;
  private MutableHistogram splitSizeHisto;
  private MutableStat snapshotTimeHisto;
  private MutableStat snapshotCloneTimeHisto;
  private MutableStat snapshotRestoreTimeHisto;
  private MutableHistogram metaSplitTimeHisto;
  private MutableHistogram metaSplitSizeHisto;

  public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) {
    this(METRICS_NAME,
@@ -70,72 +60,12 @@ public class MetricsMasterSourceImpl
  public void init() {
    super.init();
    clusterRequestsCounter = metricsRegistry.newCounter(CLUSTER_REQUESTS_NAME, "", 0l);
    ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, "", 0l);
    ritCountOverThresholdGauge = metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, "", 0l);
    ritOldestAgeGauge = metricsRegistry.newGauge(RIT_OLDEST_AGE_NAME, "", 0l);
    splitSizeHisto = metricsRegistry.newHistogram(SPLIT_SIZE_NAME, SPLIT_SIZE_DESC);
    splitTimeHisto = metricsRegistry.newHistogram(SPLIT_TIME_NAME, SPLIT_TIME_DESC);
    snapshotTimeHisto = metricsRegistry.newStat(
        SNAPSHOT_TIME_NAME, SNAPSHOT_TIME_DESC, "Ops", "Time", true);
    snapshotCloneTimeHisto = metricsRegistry.newStat(
        SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC, "Ops", "Time", true);
    snapshotRestoreTimeHisto = metricsRegistry.newStat(
        SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC, "Ops", "Time", true);
    metaSplitTimeHisto = metricsRegistry.newHistogram(META_SPLIT_TIME_NAME, META_SPLIT_TIME_DESC);
    metaSplitSizeHisto = metricsRegistry.newHistogram(META_SPLIT_SIZE_NAME, META_SPLIT_SIZE_DESC);
  }

  public void incRequests(final int inc) {
    this.clusterRequestsCounter.incr(inc);
  }

  public void setRIT(int ritCount) {
    ritGauge.set(ritCount);
  }

  public void setRITCountOverThreshold(int ritCount) {
    ritCountOverThresholdGauge.set(ritCount);
  }

  public void setRITOldestAge(long ritCount) {
    ritOldestAgeGauge.set(ritCount);
  }

  @Override
  public void updateSplitTime(long time) {
    splitTimeHisto.add(time);
  }

  @Override
  public void updateSplitSize(long size) {
    splitSizeHisto.add(size);
  }

  @Override
  public void updateSnapshotTime(long time) {
    snapshotTimeHisto.add(time);
  }

  @Override
  public void updateSnapshotCloneTime(long time) {
    snapshotCloneTimeHisto.add(time);
  }

  @Override
  public void updateSnapshotRestoreTime(long time) {
    snapshotRestoreTimeHisto.add(time);
  }

  @Override
  public void updateMetaWALSplitTime(long time) {
    metaSplitTimeHisto.add(time);
  }

  @Override
  public void updateMetaWALSplitSize(long size) {
    metaSplitSizeHisto.add(size);
  }

  @Override
  public void getMetrics(MetricsCollector metricsCollector, boolean all) {

@@ -0,0 +1,64 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MutableHistogram;

public class MetricsSnapshotSourceImpl extends BaseSourceImpl implements MetricsSnapshotSource {

  private MutableHistogram snapshotTimeHisto;
  private MutableHistogram snapshotCloneTimeHisto;
  private MutableHistogram snapshotRestoreTimeHisto;

  public MetricsSnapshotSourceImpl() {
    this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
  }

  public MetricsSnapshotSourceImpl(String metricsName,
      String metricsDescription,
      String metricsContext, String metricsJmxContext) {
    super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
  }

  @Override
  public void init() {
    snapshotTimeHisto = metricsRegistry.newHistogram(
        SNAPSHOT_TIME_NAME, SNAPSHOT_TIME_DESC);
    snapshotCloneTimeHisto = metricsRegistry.newHistogram(
        SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC);
    snapshotRestoreTimeHisto = metricsRegistry.newHistogram(
        SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC);
  }

  @Override
  public void updateSnapshotTime(long time) {
    snapshotTimeHisto.add(time);
  }

  @Override
  public void updateSnapshotCloneTime(long time) {
    snapshotCloneTimeHisto.add(time);
  }

  @Override
  public void updateSnapshotRestoreTime(long time) {
    snapshotRestoreTimeHisto.add(time);
  }
}
@@ -0,0 +1,57 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master.balancer;

import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableHistogram;

public class MetricsBalancerSourceImpl extends BaseSourceImpl implements MetricsBalancerSource{

  private MutableHistogram blanceClusterHisto;
  private MutableCounterLong miscCount;

  public MetricsBalancerSourceImpl() {
    this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
  }

  public MetricsBalancerSourceImpl(String metricsName,
      String metricsDescription,
      String metricsContext, String metricsJmxContext) {
    super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
  }

  @Override
  public void init() {
    blanceClusterHisto = metricsRegistry.newHistogram(BALANCE_CLUSTER);
    miscCount = metricsRegistry.newCounter(MISC_INVOATION_COUNT, "", 0L);

  }

  @Override
  public void updateBalanceCluster(long time) {
    blanceClusterHisto.add(time);
  }

  @Override
  public void incrMiscInvocations() {
    miscCount.incr();
  }
}
@@ -0,0 +1 @@
org.apache.hadoop.hbase.master.MetricsAssignmentManagerSourceImpl
@@ -0,0 +1 @@
org.apache.hadoop.hbase.master.MetricsMasterFilesystemSourceImpl
@@ -0,0 +1 @@
org.apache.hadoop.hbase.master.MetricsSnapshotSourceImpl
@@ -0,0 +1 @@
org.apache.hadoop.hbase.master.balancer.MetricsBalancerSourceImpl
@@ -123,6 +123,8 @@ public class AssignmentManager extends ZooKeeperListener {

  private LoadBalancer balancer;

  private final MetricsAssignmentManager metricsAssignmentManager;

  private final TableLockManager tableLockManager;

  final private KeyLocker<String> locker = new KeyLocker<String>();
@@ -182,9 +184,6 @@ public class AssignmentManager extends ZooKeeperListener {
  private List<EventType> ignoreStatesRSOffline = Arrays.asList(
      EventType.RS_ZK_REGION_FAILED_OPEN, EventType.RS_ZK_REGION_CLOSED);

  // metrics instance to send metrics for RITs
  MetricsMaster metricsMaster;

  private final RegionStates regionStates;

  // The threshold to use bulk assigning. Using bulk assignment
@@ -273,7 +272,6 @@ public class AssignmentManager extends ZooKeeperListener {
    int maxThreads = conf.getInt("hbase.assignment.threads.max", 30);
    this.threadPoolExecutorService = Threads.getBoundedCachedThreadPool(
        maxThreads, 60L, TimeUnit.SECONDS, Threads.newDaemonThreadFactory("AM."));
    this.metricsMaster = metricsMaster;// can be null only with tests.
    this.regionStates = new RegionStates(server, serverManager);

    this.bulkAssignWaitTillAllAssigned =
@@ -286,6 +284,8 @@ public class AssignmentManager extends ZooKeeperListener {
    zkEventWorkers = Threads.getBoundedCachedThreadPool(workers, 60L,
        TimeUnit.SECONDS, threadFactory);
    this.tableLockManager = tableLockManager;

    this.metricsAssignmentManager = new MetricsAssignmentManager();
  }

  void startTimeOutMonitor() {
@@ -1533,6 +1533,8 @@ public class AssignmentManager extends ZooKeeperListener {
   * @return true if successful
   */
  boolean assign(final ServerName destination, final List<HRegionInfo> regions) {
    long startTime = EnvironmentEdgeManager.currentTimeMillis();
    try {
      int regionCount = regions.size();
      if (regionCount == 0) {
        return true;
@@ -1697,6 +1699,9 @@ public class AssignmentManager extends ZooKeeperListener {
      }
      LOG.debug("Bulk assigning done for " + destination.toString());
      return true;
    } finally {
      metricsAssignmentManager.updateBulkAssignTime(EnvironmentEdgeManager.currentTimeMillis() - startTime);
    }
  }

  /**
@@ -1821,6 +1826,8 @@ public class AssignmentManager extends ZooKeeperListener {
   */
  private void assign(RegionState state,
      final boolean setOfflineInZK, final boolean forceNewPlan) {
    long startTime = EnvironmentEdgeManager.currentTimeMillis();
    try {
      RegionState currentState = state;
      int versionOfOfflineNode = -1;
      RegionPlan plan = null;
@@ -2029,6 +2036,9 @@ public class AssignmentManager extends ZooKeeperListener {
      if (!tomActivated) {
        regionStates.updateRegionState(region, RegionState.State.FAILED_OPEN);
      }
    } finally {
      metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTimeMillis() - startTime);
    }
  }

  private void processAlreadyOpenedRegion(HRegionInfo region, ServerName sn) {
@@ -2804,10 +2814,10 @@ public class AssignmentManager extends ZooKeeperListener {
        oldestRITTime = ritTime;
      }
    }
    if (this.metricsMaster != null) {
      this.metricsMaster.updateRITOldestAge(oldestRITTime);
      this.metricsMaster.updateRITCount(totalRITs);
      this.metricsMaster.updateRITCountOverThreshold(totalRITsOverThreshold);
    if (this.metricsAssignmentManager != null) {
      this.metricsAssignmentManager.updateRITOldestAge(oldestRITTime);
      this.metricsAssignmentManager.updateRITCount(totalRITs);
      this.metricsAssignmentManager.updateRITCountOverThreshold(totalRITsOverThreshold);
    }
  }

@@ -764,7 +764,7 @@ MasterServices, Server {

    this.masterActiveTime = System.currentTimeMillis();
    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
    this.fileSystemManager = new MasterFileSystem(this, this, metricsMaster, masterRecovery);
    this.fileSystemManager = new MasterFileSystem(this, this, masterRecovery);

    this.tableDescriptors =
        new FSTableDescriptors(this.fileSystemManager.getFileSystem(),
@@ -72,7 +72,7 @@ public class MasterFileSystem {
  // master status
  Server master;
  // metrics for master
  MetricsMaster metricsMaster;
  private final MetricsMasterFileSystem metricsMasterFilesystem = new MetricsMasterFileSystem();
  // Persisted unique cluster ID
  private ClusterId clusterId;
  // Keep around for convenience.
@@ -103,13 +103,11 @@ public class MasterFileSystem {
    }
  };

  public MasterFileSystem(Server master, MasterServices services,
      MetricsMaster metricsMaster, boolean masterRecovery)
  public MasterFileSystem(Server master, MasterServices services, boolean masterRecovery)
      throws IOException {
    this.conf = master.getConfiguration();
    this.master = master;
    this.services = services;
    this.metricsMaster = metricsMaster;
    // Set filesystem to be that of this.rootdir else we get complaints about
    // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
    // default localfs. Presumption is that rootdir is fully-qualified before
@@ -410,11 +408,11 @@ public class MasterFileSystem {
      splitLogSize = splitLogManager.splitLogDistributed(serverNames, logDirs, filter);
      splitTime = EnvironmentEdgeManager.currentTimeMillis() - splitTime;

      if (this.metricsMaster != null) {
      if (this.metricsMasterFilesystem != null) {
        if (filter == META_FILTER) {
          this.metricsMaster.addMetaWALSplit(splitTime, splitLogSize);
          this.metricsMasterFilesystem.addMetaWALSplit(splitTime, splitLogSize);
        } else {
          this.metricsMaster.addSplit(splitTime, splitLogSize);
          this.metricsMasterFilesystem.addSplit(splitTime, splitLogSize);
        }
      }
    }
@@ -0,0 +1,63 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.CompatibilitySingletonFactory;

public class MetricsAssignmentManager {

  private final MetricsAssignmentManagerSource assignmentManagerSource;

  public MetricsAssignmentManager() {
    assignmentManagerSource = CompatibilitySingletonFactory.getInstance(
        MetricsAssignmentManagerSource.class);
  }

  public void updateAssignmentTime(long time) {
    assignmentManagerSource.updateAssignmentTime(time);
  }

  public void updateBulkAssignTime(long time) {
    assignmentManagerSource.updateBulkAssignTime(time);
  }

  /**
   * set new value for number of regions in transition.
   * @param ritCount
   */
  public void updateRITCount(int ritCount) {
    assignmentManagerSource.setRIT(ritCount);
  }

  /**
   * update RIT count that are in this state for more than the threshold
   * as defined by the property rit.metrics.threshold.time.
   * @param ritCountOverThreshold
   */
  public void updateRITCountOverThreshold(int ritCountOverThreshold) {
    assignmentManagerSource.setRITCountOverThreshold(ritCountOverThreshold);
  }
  /**
   * update the timestamp for oldest region in transition metrics.
   * @param timestamp
   */
  public void updateRITOldestAge(long timestamp) {
    assignmentManagerSource.setRITOldestAge(timestamp);
  }
}
@@ -15,6 +15,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master;

import org.apache.commons.logging.Log;
@@ -48,79 +49,10 @@ public class MetricsMaster {
    return masterSource;
  }

  /**
   * Record a single instance of a split
   * @param time time that the split took
   * @param size length of original HLogs that were split
   */
  public synchronized void addSplit(long time, long size) {
    masterSource.updateSplitTime(time);
    masterSource.updateSplitSize(size);
  }

  /**
   * Record a single instance of a split
   * @param time time that the split took
   * @param size length of original HLogs that were split
   */
  public synchronized void addMetaWALSplit(long time, long size) {
    masterSource.updateMetaWALSplitTime(time);
    masterSource.updateMetaWALSplitSize(size);
  }

  /**
   * @param inc How much to add to requests.
   */
  public void incrementRequests(final int inc) {
    masterSource.incRequests(inc);

  }

  /**
   * set new value for number of regions in transition.
   * @param ritCount
   */
  public void updateRITCount(int ritCount) {
    masterSource.setRIT(ritCount);
  }

  /**
   * update RIT count that are in this state for more than the threshold
   * as defined by the property rit.metrics.threshold.time.
   * @param ritCountOverThreshold
   */
  public void updateRITCountOverThreshold(int ritCountOverThreshold) {
    masterSource.setRITCountOverThreshold(ritCountOverThreshold);
  }
  /**
   * update the timestamp for oldest region in transition metrics.
   * @param timestamp
   */
  public void updateRITOldestAge(long timestamp) {
    masterSource.setRITOldestAge(timestamp);
  }

  /**
   * Record a single instance of a snapshot
   * @param time time that the snapshot took
   */
  public void addSnapshot(long time) {
    masterSource.updateSnapshotTime(time);
  }

  /**
   * Record a single instance of a snapshot
   * @param time time that the snapshot restore took
   */
  public void addSnapshotRestore(long time) {
    masterSource.updateSnapshotRestoreTime(time);
  }

  /**
   * Record a single instance of a snapshot cloned table
   * @param time time that the snapshot clone took
   */
  public void addSnapshotClone(long time) {
    masterSource.updateSnapshotCloneTime(time);
  }
}
@@ -0,0 +1,50 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.CompatibilitySingletonFactory;

public class MetricsMasterFileSystem {

  private final MetricsMasterFileSystemSource source;

  public MetricsMasterFileSystem() {
    source = CompatibilitySingletonFactory.getInstance(MetricsMasterFileSystemSource.class);
  }

  /**
   * Record a single instance of a split
   * @param time time that the split took
   * @param size length of original HLogs that were split
   */
  public synchronized void addSplit(long time, long size) {
    source.updateSplitTime(time);
    source.updateSplitSize(size);
  }

  /**
   * Record a single instance of a split
   * @param time time that the split took
   * @param size length of original HLogs that were split
   */
  public synchronized void addMetaWALSplit(long time, long size) {
    source.updateMetaWALSplitTime(time);
    source.updateMetaWALSplitSize(size);
  }
}
@@ -0,0 +1,54 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.CompatibilitySingletonFactory;

public class MetricsSnapshot {

  private final MetricsSnapshotSource source;

  public MetricsSnapshot() {
    source = CompatibilitySingletonFactory.getInstance(MetricsSnapshotSource.class);
  }

  /**
   * Record a single instance of a snapshot.
   * @param time time that the snapshot took
   */
  public void addSnapshot(long time) {
    source.updateSnapshotTime(time);
  }

  /**
   * Record a single instance of a snapshot restore.
   * @param time time that the snapshot restore took
   */
  public void addSnapshotRestore(long time) {
    source.updateSnapshotRestoreTime(time);
  }

  /**
   * Record a single instance of a snapshot clone.
   * @param time time that the snapshot clone took
   */
  public void addSnapshotClone(long time) {
    source.updateSnapshotCloneTime(time);
  }
}
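For illustration, a hedged sketch of the timing idiom the snapshot handlers later in this patch use with this class; the plain currentTimeMillis() timing here is a simplification of the MonitoredTask timestamps they actually read:

  // Sketch only: record how long one snapshot operation took.
  MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
  long start = System.currentTimeMillis();
  // ... take, restore, or clone the snapshot ...
  long elapsed = System.currentTimeMillis() - start;
  metricsSnapshot.addSnapshot(elapsed);   // or addSnapshotRestore(elapsed) / addSnapshotClone(elapsed)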
@@ -342,6 +342,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
  private Configuration config;
  private static final Random RANDOM = new Random(System.currentTimeMillis());
  private static final Log LOG = LogFactory.getLog(BaseLoadBalancer.class);

  protected final MetricsBalancer metricsBalancer = new MetricsBalancer();
  protected MasterServices services;

  @Override
@@ -409,6 +411,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   */
  public Map<ServerName, List<HRegionInfo>> roundRobinAssignment(List<HRegionInfo> regions,
      List<ServerName> servers) {
    metricsBalancer.incrMiscInvocations();

    if (regions.isEmpty() || servers.isEmpty()) {
      return null;
    }
@@ -452,6 +456,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   */
  public Map<HRegionInfo, ServerName> immediateAssignment(List<HRegionInfo> regions,
      List<ServerName> servers) {
    metricsBalancer.incrMiscInvocations();

    Map<HRegionInfo, ServerName> assignments = new TreeMap<HRegionInfo, ServerName>();
    for (HRegionInfo region : regions) {
      assignments.put(region, randomAssignment(region, servers));
@@ -463,6 +469,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   * Used to assign a single region to a random server.
   */
  public ServerName randomAssignment(HRegionInfo regionInfo, List<ServerName> servers) {
    metricsBalancer.incrMiscInvocations();

    if (servers == null || servers.isEmpty()) {
      LOG.warn("Wanted to do random assignment but no servers to assign to");
      return null;
@@ -489,6 +497,9 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   */
  public Map<ServerName, List<HRegionInfo>> retainAssignment(Map<HRegionInfo, ServerName> regions,
      List<ServerName> servers) {
    // Update metrics
    metricsBalancer.incrMiscInvocations();

    // Group all of the old assignments by their hostname.
    // We can't group directly by ServerName since the servers all have
    // new start-codes.
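The hunks above all follow one pattern: each assignment entry point bumps a shared counter before doing any work, so even calls that return early are counted. A self-contained sketch of that pattern with a stand-in counter (not the real MetricsBalancerSource):

  // Illustrative only: count every invocation at method entry, even on early return.
  import java.util.concurrent.atomic.AtomicLong;

  class AssignmentMetricsSketch {
    private final AtomicLong miscInvocations = new AtomicLong();

    Object roundRobinAssignment(Object regions, Object servers) {
      miscInvocations.incrementAndGet();   // counted before any validation or work
      if (regions == null || servers == null) {
        return null;                       // early return is still counted
      }
      // ... real assignment logic would go here ...
      return new Object();
    }
  }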
@@ -0,0 +1,41 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master.balancer;

import org.apache.hadoop.hbase.CompatibilitySingletonFactory;

/**
 * Facade for exposing metrics about the balancer.
 */
public class MetricsBalancer {

  private final MetricsBalancerSource source;

  public MetricsBalancer() {
    source = CompatibilitySingletonFactory.getInstance(MetricsBalancerSource.class);
  }

  public void balanceCluster(long time) {
    source.updateBalanceCluster(time);
  }

  public void incrMiscInvocations() {
    source.incrMiscInvocations();
  }
}
@@ -246,6 +246,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {

    long endTime = EnvironmentEdgeManager.currentTimeMillis();

    metricsBalancer.balanceCluster(endTime - startTime);

    if (initCost > currentCost) {
      List<RegionPlan> plans = createRegionPlans(cluster);
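The hunk above times a whole balancer run with the HBase clock abstraction and reports the duration through the new balancer facade. A hedged, abbreviated restatement of that measurement (the surrounding method body is elided):

  // Abbreviated sketch of the balancer timing shown above; not the full balanceCluster() body.
  long startTime = EnvironmentEdgeManager.currentTimeMillis();
  // ... run the stochastic search over candidate region moves ...
  long endTime = EnvironmentEdgeManager.currentTimeMillis();
  metricsBalancer.balanceCluster(endTime - startTime);   // one duration sample per balancer run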
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.MetricsMaster;
import org.apache.hadoop.hbase.master.MetricsSnapshot;
import org.apache.hadoop.hbase.master.SnapshotSentinel;
import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;

import com.google.common.base.Preconditions;
import org.apache.hadoop.hbase.util.FSUtils;

/**
 * Handler to Clone a snapshot.
@@ -65,17 +64,15 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot
  private final SnapshotDescription snapshot;

  private final ForeignExceptionDispatcher monitor;
  private final MetricsMaster metricsMaster;
  private final MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
  private final MonitoredTask status;

  private volatile boolean stopped = false;

  public CloneSnapshotHandler(final MasterServices masterServices,
      final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor,
      final MetricsMaster metricsMaster) {
      final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor) {
    super(masterServices, masterServices.getMasterFileSystem(), hTableDescriptor,
        masterServices.getConfiguration(), null, masterServices);
    this.metricsMaster = metricsMaster;

    // Snapshot information
    this.snapshot = snapshot;
@@ -145,7 +142,7 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot
    } else {
      status.markComplete("Snapshot '"+ snapshot.getName() +"' clone completed and table enabled!");
    }
    metricsMaster.addSnapshotClone(status.getCompletionTimestamp() - status.getStartTime());
    metricsSnapshot.addSnapshotClone(status.getCompletionTimestamp() - status.getStartTime());
    super.completed(exception);
  }

@@ -64,8 +64,8 @@ public class DisabledTableSnapshotHandler extends TakeSnapshotHandler {
   * @param masterServices master services provider
   */
  public DisabledTableSnapshotHandler(SnapshotDescription snapshot,
      final MasterServices masterServices, final MetricsMaster metricsMaster) {
    super(snapshot, masterServices, metricsMaster);
      final MasterServices masterServices) {
    super(snapshot, masterServices);

    // setup the timer
    timeoutInjector = TakeSnapshotUtils.getMasterTimerAndBindToMonitor(snapshot, conf, monitor);
@@ -50,8 +50,8 @@ public class EnabledTableSnapshotHandler extends TakeSnapshotHandler {
  private final ProcedureCoordinator coordinator;

  public EnabledTableSnapshotHandler(SnapshotDescription snapshot, MasterServices master,
      final SnapshotManager manager, final MetricsMaster metricsMaster) {
    super(snapshot, master, metricsMaster);
      final SnapshotManager manager) {
    super(snapshot, master);
    this.coordinator = manager.getCoordinator();
  }

@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.MetricsMaster;
import org.apache.hadoop.hbase.master.MetricsSnapshot;
import org.apache.hadoop.hbase.master.SnapshotSentinel;
import org.apache.hadoop.hbase.master.handler.TableEventHandler;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.FSUtils;

/**
 * Handler to Restore a snapshot.
@@ -64,16 +63,14 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
  private final SnapshotDescription snapshot;

  private final ForeignExceptionDispatcher monitor;
  private final MetricsMaster metricsMaster;
  private final MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
  private final MonitoredTask status;

  private volatile boolean stopped = false;

  public RestoreSnapshotHandler(final MasterServices masterServices,
      final SnapshotDescription snapshot, final HTableDescriptor htd,
      final MetricsMaster metricsMaster) throws IOException {
      final SnapshotDescription snapshot, final HTableDescriptor htd) throws IOException {
    super(EventType.C_M_RESTORE_SNAPSHOT, htd.getTableName(), masterServices, masterServices);
    this.metricsMaster = metricsMaster;

    // Snapshot information
    this.snapshot = snapshot;
@@ -153,7 +150,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
    } else {
      status.markComplete("Restore snapshot '"+ snapshot.getName() +"'!");
    }
    metricsMaster.addSnapshotRestore(status.getCompletionTimestamp() - status.getStartTime());
    metricsSnapshot.addSnapshotRestore(status.getCompletionTimestamp() - status.getStartTime());
    super.completed(exception);
  }

@@ -450,7 +450,7 @@ public class SnapshotManager implements Stoppable {

    // Take the snapshot of the disabled table
    DisabledTableSnapshotHandler handler =
        new DisabledTableSnapshotHandler(snapshot, master, metricsMaster);
        new DisabledTableSnapshotHandler(snapshot, master);
    snapshotTable(snapshot, handler);
  }

@@ -466,7 +466,7 @@ public class SnapshotManager implements Stoppable {

    // Take the snapshot of the enabled table
    EnabledTableSnapshotHandler handler =
        new EnabledTableSnapshotHandler(snapshot, master, this, metricsMaster);
        new EnabledTableSnapshotHandler(snapshot, master, this);
    snapshotTable(snapshot, handler);
  }

@@ -645,7 +645,7 @@ public class SnapshotManager implements Stoppable {

    try {
      CloneSnapshotHandler handler =
          new CloneSnapshotHandler(master, snapshot, hTableDescriptor, metricsMaster).prepare();
          new CloneSnapshotHandler(master, snapshot, hTableDescriptor).prepare();
      this.executorService.submit(handler);
      this.restoreHandlers.put(tableName, handler);
    } catch (Exception e) {
@@ -738,7 +738,7 @@ public class SnapshotManager implements Stoppable {

    try {
      RestoreSnapshotHandler handler =
          new RestoreSnapshotHandler(master, snapshot, hTableDescriptor, metricsMaster).prepare();
          new RestoreSnapshotHandler(master, snapshot, hTableDescriptor).prepare();
      this.executorService.submit(handler);
      restoreHandlers.put(tableName, handler);
    } catch (Exception e) {
@@ -41,13 +41,12 @@ import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.MetricsMaster;
import org.apache.hadoop.hbase.master.MetricsSnapshot;
import org.apache.hadoop.hbase.master.SnapshotSentinel;
import org.apache.hadoop.hbase.master.TableLockManager;
import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -72,7 +71,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh

  // none of these should ever be null
  protected final MasterServices master;
  protected final MetricsMaster metricsMaster;
  protected final MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
  protected final SnapshotDescription snapshot;
  protected final Configuration conf;
  protected final FileSystem fs;
@@ -90,14 +89,12 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
   * @param snapshot descriptor of the snapshot to take
   * @param masterServices master services provider
   */
  public TakeSnapshotHandler(SnapshotDescription snapshot, final MasterServices masterServices,
      final MetricsMaster metricsMaster) {
  public TakeSnapshotHandler(SnapshotDescription snapshot, final MasterServices masterServices) {
    super(masterServices, EventType.C_M_SNAPSHOT_TABLE);
    assert snapshot != null : "SnapshotDescription must not be null";
    assert masterServices != null : "MasterServices must not be null";

    this.master = masterServices;
    this.metricsMaster = metricsMaster;
    this.snapshot = snapshot;
    this.snapshotTable = TableName.valueOf(snapshot.getTable());
    this.conf = this.master.getConfiguration();
@@ -187,7 +184,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
      completeSnapshot(this.snapshotDir, this.workingDir, this.fs);
      status.markComplete("Snapshot " + snapshot.getName() + " of table " + snapshotTable
        + " completed");
      metricsMaster.addSnapshot(status.getCompletionTimestamp() - status.getStartTime());
      metricsSnapshot.addSnapshot(status.getCompletionTimestamp() - status.getStartTime());
    } catch (Exception e) {
      status.abort("Failed to complete snapshot " + snapshot.getName() + " on table " +
        snapshotTable + " because " + e.getMessage());
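To restate the completion path these handlers now share: each marks its MonitoredTask done, derives the elapsed time from the task's completion and start timestamps, and reports it through its own MetricsSnapshot. A condensed, illustrative sketch (the method names come from the hunks above; the wrapping method itself is hypothetical):

  // Condensed sketch of the handlers' completion path; not a literal copy of any one class.
  void reportCompleted(MonitoredTask status, MetricsSnapshot metricsSnapshot, String snapshotName) {
    status.markComplete("Snapshot " + snapshotName + " completed");
    long elapsed = status.getCompletionTimestamp() - status.getStartTime();
    metricsSnapshot.addSnapshot(elapsed);   // restore/clone handlers use addSnapshotRestore/addSnapshotClone
  }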
@@ -178,7 +178,7 @@ public class TestCatalogJanitor {
  private final AssignmentManager asm;

  MockMasterServices(final Server server) throws IOException {
    this.mfs = new MasterFileSystem(server, this, null, false);
    this.mfs = new MasterFileSystem(server, this, false);
    this.asm = Mockito.mock(AssignmentManager.class);
  }
