HDFS-11339. Support File IO sampling for Datanode IO profiling hooks. Contributed by Hanisha Koneru.

Arpit Agarwal 2017-01-16 14:43:29 -08:00
parent 52fdd387ec
commit be8e001f1f
3 changed files with 33 additions and 6 deletions

DFSConfigKeys.java

@@ -642,10 +642,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_IMAGE_TRANSFER_CHUNKSIZE_KEY = "dfs.image.transfer.chunksize";
   public static final int DFS_IMAGE_TRANSFER_CHUNKSIZE_DEFAULT = 64 * 1024;
 
-  //Keys with no defaults
-  public static final String DFS_DATANODE_PLUGINS_KEY = "dfs.datanode.plugins";
-  public static final String DFS_DATANODE_FSDATASET_FACTORY_KEY = "dfs.datanode.fsdataset.factory";
-  public static final String DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY = "dfs.datanode.fsdataset.volume.choosing.policy";
+  // Datanode File IO Stats
   public static final String DFS_DATANODE_ENABLE_FILEIO_PROFILING_KEY =
       "dfs.datanode.enable.fileio.profiling";
   public static final boolean DFS_DATANODE_ENABLE_FILEIO_PROFILING_DEFAULT =
@@ -654,6 +651,17 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.datanode.enable.fileio.fault.injection";
   public static final boolean
       DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_DEFAULT = false;
+  public static final String
+      DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY =
+      "dfs.datanode.fileio.profiling.sampling.fraction";
+  public static final double
+      DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEAFULT = 1.0;
+
+  //Keys with no defaults
+  public static final String DFS_DATANODE_PLUGINS_KEY = "dfs.datanode.plugins";
+  public static final String DFS_DATANODE_FSDATASET_FACTORY_KEY = "dfs.datanode.fsdataset.factory";
+  public static final String DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY = "dfs.datanode.fsdataset.volume.choosing.policy";
+
   public static final String DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY = "dfs.datanode.available-space-volume-choosing-policy.balanced-space-threshold";
   public static final long DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_DEFAULT = 1024L * 1024L * 1024L * 10L; // 10 GB
   public static final String DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY = "dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-fraction";
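A brief aside on usage: the new sampling fraction defaults to 1.0 and is read through the normal Configuration API, so it can be set programmatically or in hdfs-site.xml. A minimal sketch, assuming a hypothetical test or tool (the class name and the 0.1 value are illustrative, not part of this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

// Hypothetical helper, not part of this patch: shows the two profiling keys
// being set on a Configuration before it is handed to the DataNode code.
public class ProfilingConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean(
        DFSConfigKeys.DFS_DATANODE_ENABLE_FILEIO_PROFILING_KEY, true);
    // Ask for only a fraction of file IO operations to be profiled.
    conf.setDouble(
        DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 0.1);
    System.out.println(conf.getDouble(
        DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
        DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEAFULT));
  }
}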

ProfilingFileIoEvents.java

@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -26,6 +28,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.util.Time;
 
 import javax.annotation.Nullable;
+import java.util.concurrent.ThreadLocalRandom;
 
 /**
  * Profiles the performance of the metadata and data related operations on
@@ -33,16 +36,30 @@ import javax.annotation.Nullable;
  */
 @InterfaceAudience.Private
 class ProfilingFileIoEvents {
+  static final Log LOG = LogFactory.getLog(ProfilingFileIoEvents.class);
 
   private final boolean isEnabled;
+  private final int sampleRangeMax;
 
   public ProfilingFileIoEvents(@Nullable Configuration conf) {
     if (conf != null) {
       isEnabled = conf.getBoolean(DFSConfigKeys
           .DFS_DATANODE_ENABLE_FILEIO_PROFILING_KEY, DFSConfigKeys
           .DFS_DATANODE_ENABLE_FILEIO_PROFILING_DEFAULT);
+      double fileIOSamplingFraction = conf.getDouble(DFSConfigKeys
+          .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
+          DFSConfigKeys
+              .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEAFULT);
+      if (fileIOSamplingFraction > 1) {
+        LOG.warn(DFSConfigKeys
+            .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY +
+            " value cannot be more than 1. Setting value to 1");
+        fileIOSamplingFraction = 1;
+      }
+      sampleRangeMax = (int) (fileIOSamplingFraction * Integer.MAX_VALUE);
     } else {
       isEnabled = false;
+      sampleRangeMax = 0;
     }
   }
 
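For orientation, a standalone sketch (not part of the patch) of the mapping the constructor performs: the configured fraction is clamped to at most 1 and then scaled onto the int range to produce the threshold later compared against a random int. The 0.25 value below is purely illustrative:

// Standalone sketch of the fraction-to-threshold mapping used above.
public class SamplingThresholdSketch {
  public static void main(String[] args) {
    double fileIOSamplingFraction = 0.25;   // hypothetical configured value
    if (fileIOSamplingFraction > 1) {
      fileIOSamplingFraction = 1;           // values above 1 are clamped, as in the patch
    }
    int sampleRangeMax = (int) (fileIOSamplingFraction * Integer.MAX_VALUE);
    System.out.println(sampleRangeMax);     // prints 536870911
  }
}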
@@ -69,7 +86,7 @@ class ProfilingFileIoEvents {
 
   public long beforeFileIo(@Nullable FsVolumeSpi volume,
       FileIoProvider.OPERATION op, long len) {
-    if (isEnabled) {
+    if (isEnabled && ThreadLocalRandom.current().nextInt() < sampleRangeMax) {
       DataNodeVolumeMetrics metrics = getVolumeMetrics(volume);
       if (metrics != null) {
         return Time.monotonicNow();
@@ -80,7 +97,7 @@ class ProfilingFileIoEvents {
 
   public void afterFileIo(@Nullable FsVolumeSpi volume,
       FileIoProvider.OPERATION op, long begin, long len) {
-    if (isEnabled) {
+    if (isEnabled && begin != 0) {
       DataNodeVolumeMetrics metrics = getVolumeMetrics(volume);
       if (metrics != null) {
         long latency = Time.monotonicNow() - begin;
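Taken together, beforeFileIo() hands back a real start timestamp only when an operation passes the random sampling check and otherwise returns 0, so the new begin != 0 guard lets afterFileIo() skip latency accounting for unsampled operations. A self-contained sketch of that pairing, using plain JDK timing instead of Hadoop's Time class (all names and the 0.5 fraction are assumptions, not from this patch):

import java.util.concurrent.ThreadLocalRandom;

// Self-contained sketch (not the patch's code) of the sampling gate and the
// begin == 0 sentinel that lets afterIo() skip unsampled operations.
public class SamplingGateSketch {
  // Hypothetical fraction of 0.5 mapped onto the int range, as in the patch.
  static final int SAMPLE_RANGE_MAX = (int) (0.5 * Integer.MAX_VALUE);

  static long beforeIo() {
    if (ThreadLocalRandom.current().nextInt() < SAMPLE_RANGE_MAX) {
      return System.nanoTime();   // sampled: hand back a start timestamp
    }
    return 0;                     // not sampled: sentinel value
  }

  static void afterIo(long begin) {
    if (begin != 0) {             // only account latency for sampled operations
      System.out.println("latency ns = " + (System.nanoTime() - begin));
    }
  }

  public static void main(String[] args) {
    long begin = beforeIo();
    // ... the actual file read or write would happen here ...
    afterIo(begin);
  }
}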

TestHdfsConfigFields.java

@@ -107,6 +107,8 @@ public class TestHdfsConfigFields extends TestConfigurationFieldsBase {
         .add(DFSConfigKeys.DFS_DATANODE_ENABLE_FILEIO_PROFILING_KEY);
     configurationPropsToSkipCompare.add(DFSConfigKeys
         .DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY);
+    configurationPropsToSkipCompare.add(DFSConfigKeys
+        .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY);
 
     // Allocate
     xmlPropsToSkipCompare = new HashSet<String>();