HDFS-16519. Add throttler to EC reconstruction (#4101)
Reviewed-by: litao <tomleescut@gmail.com>
Signed-off-by: Takanobu Asanuma <tasanuma@apache.org>
(cherry picked from commit aebd55f788)
parent 9132eeb4dd
commit b62a460fd9

@@ -123,6 +123,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.datanode.data.write.bandwidthPerSec";
   // A value of zero indicates no limit
   public static final long DFS_DATANODE_DATA_WRITE_BANDWIDTHPERSEC_DEFAULT = 0;
+  public static final String DFS_DATANODE_EC_RECONSTRUCT_READ_BANDWIDTHPERSEC_KEY =
+      "dfs.datanode.ec.reconstruct.read.bandwidthPerSec";
+  public static final long DFS_DATANODE_EC_RECONSTRUCT_READ_BANDWIDTHPERSEC_DEFAULT =
+      0; // A value of zero indicates no limit
+  public static final String DFS_DATANODE_EC_RECONSTRUCT_WRITE_BANDWIDTHPERSEC_KEY =
+      "dfs.datanode.ec.reconstruct.write.bandwidthPerSec";
+  public static final long DFS_DATANODE_EC_RECONSTRUCT_WRITE_BANDWIDTHPERSEC_DEFAULT =
+      0; // A value of zero indicates no limit
   @Deprecated
   public static final String DFS_DATANODE_READAHEAD_BYTES_KEY =
       HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY;

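The two new keys are read with Configuration.getLongBytes(), so besides a raw byte count they accept size suffixes such as 50m or 1g; the default of 0 means no limit. A minimal, self-contained sketch of setting and reading them (the class name and the 50m/20m values are hypothetical, not part of this patch):

// Minimal sketch, not part of the patch: setting and reading the new keys.
// The class name and the 50m/20m values are hypothetical.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class EcThrottleConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // getLongBytes() understands size suffixes, so "50m" means 50 MiB per second.
    conf.set(DFSConfigKeys.DFS_DATANODE_EC_RECONSTRUCT_READ_BANDWIDTHPERSEC_KEY, "50m");
    conf.set(DFSConfigKeys.DFS_DATANODE_EC_RECONSTRUCT_WRITE_BANDWIDTHPERSEC_KEY, "20m");

    long readBps = conf.getLongBytes(
        DFSConfigKeys.DFS_DATANODE_EC_RECONSTRUCT_READ_BANDWIDTHPERSEC_KEY,
        DFSConfigKeys.DFS_DATANODE_EC_RECONSTRUCT_READ_BANDWIDTHPERSEC_DEFAULT);
    long writeBps = conf.getLongBytes(
        DFSConfigKeys.DFS_DATANODE_EC_RECONSTRUCT_WRITE_BANDWIDTHPERSEC_KEY,
        DFSConfigKeys.DFS_DATANODE_EC_RECONSTRUCT_WRITE_BANDWIDTHPERSEC_DEFAULT);
    System.out.println("EC reconstruct read limit:  " + readBps + " B/s");  // 52428800
    System.out.println("EC reconstruct write limit: " + writeBps + " B/s"); // 20971520
  }
}
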
@@ -472,6 +472,9 @@ public class DataNode extends ReconfigurableBase
 
   private long startTime = 0;
 
+  private DataTransferThrottler ecReconstuctReadThrottler;
+  private DataTransferThrottler ecReconstuctWriteThrottler;
+
   /**
    * Creates a dummy DataNode for testing purpose.
    */

@@ -580,6 +583,16 @@ public class DataNode extends ReconfigurableBase
 
     initOOBTimeout();
     this.storageLocationChecker = storageLocationChecker;
+    long ecReconstuctReadBandwidth = conf.getLongBytes(
+        DFSConfigKeys.DFS_DATANODE_EC_RECONSTRUCT_READ_BANDWIDTHPERSEC_KEY,
+        DFSConfigKeys.DFS_DATANODE_EC_RECONSTRUCT_READ_BANDWIDTHPERSEC_DEFAULT);
+    long ecReconstuctWriteBandwidth = conf.getLongBytes(
+        DFSConfigKeys.DFS_DATANODE_EC_RECONSTRUCT_WRITE_BANDWIDTHPERSEC_KEY,
+        DFSConfigKeys.DFS_DATANODE_EC_RECONSTRUCT_WRITE_BANDWIDTHPERSEC_DEFAULT);
+    this.ecReconstuctReadThrottler = ecReconstuctReadBandwidth > 0 ?
+        new DataTransferThrottler(100, ecReconstuctReadBandwidth) : null;
+    this.ecReconstuctWriteThrottler = ecReconstuctWriteBandwidth > 0 ?
+        new DataTransferThrottler(100, ecReconstuctWriteBandwidth) : null;
   }
 
   @Override // ReconfigurableBase

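A configured limit of 0 leaves the corresponding field null, so throttling is skipped entirely; otherwise the throttler is created with a 100 ms refill period. A standalone sketch of how org.apache.hadoop.hdfs.util.DataTransferThrottler paces a caller (the 1 MB/s limit, 64 KB chunks, and copy loop are illustrative assumptions, not code from the patch):

// Standalone sketch, not code from the patch: a DataTransferThrottler built with
// a 100 ms period blocks the caller once the per-period share of the configured
// bytes-per-second budget is spent. The 1 MB/s limit and 64 KB chunks are
// illustrative assumptions.
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.util.Time;

public class ThrottlerPacingSketch {
  public static void main(String[] args) {
    long bandwidth = 1024 * 1024; // 1 MB/s
    DataTransferThrottler throttler = new DataTransferThrottler(100, bandwidth);

    long start = Time.monotonicNow();
    long sent = 0;
    while (sent < 4L * bandwidth) {   // pretend to move 4 MB
      long chunk = 64 * 1024;
      throttler.throttle(chunk);      // sleeps when the current period's quota is exhausted
      sent += chunk;
    }
    // Expect roughly 4000 ms for 4 MB at 1 MB/s.
    System.out.println("elapsed ms: " + (Time.monotonicNow() - start));
  }
}
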
@@ -3717,6 +3730,14 @@ public class DataNode extends ReconfigurableBase
     return shortCircuitRegistry;
   }
 
+  public DataTransferThrottler getEcReconstuctReadThrottler() {
+    return ecReconstuctReadThrottler;
+  }
+
+  public DataTransferThrottler getEcReconstuctWriteThrottler() {
+    return ecReconstuctWriteThrottler;
+  }
+
   /**
    * Check the disk error synchronously.
    */

@@ -95,6 +95,10 @@ class StripedBlockReconstructor extends StripedReconstructor
           (int) Math.min(getStripedReader().getBufferSize(), remaining);
 
       long start = Time.monotonicNow();
+      long bytesToRead = (long) toReconstructLen * getStripedReader().getMinRequiredSources();
+      if (getDatanode().getEcReconstuctReadThrottler() != null) {
+        getDatanode().getEcReconstuctReadThrottler().throttle(bytesToRead);
+      }
       // step1: read from minimum source DNs required for reconstruction.
       // The returned success list is the source DNs we do real read from
       getStripedReader().readMinimumSources(toReconstructLen);

@@ -105,6 +109,10 @@ class StripedBlockReconstructor extends StripedReconstructor
       long decodeEnd = Time.monotonicNow();
 
       // step3: transfer data
+      long bytesToWrite = (long) toReconstructLen * stripedWriter.getTargets();
+      if (getDatanode().getEcReconstuctWriteThrottler() != null) {
+        getDatanode().getEcReconstuctWriteThrottler().throttle(bytesToWrite);
+      }
       if (stripedWriter.transferData2Targets() == 0) {
         String error = "Transfer failed for all targets.";
         throw new IOException(error);

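Throttling charges whole-stripe traffic up front: bytesToRead is the reconstruction buffer length multiplied by the number of sources actually read, and bytesToWrite multiplies it by the number of targets being rebuilt. A small arithmetic sketch under an assumed RS-6-3 layout with one failed block and a 64 KB buffer pass (illustrative figures only):

// Illustrative accounting only; RS-6-3, one failed block and a 64 KB buffer
// pass are assumptions, not values taken from this patch.
public class EcThrottleAccountingSketch {
  public static void main(String[] args) {
    int toReconstructLen = 64 * 1024; // bytes reconstructed in one buffer pass
    int minRequiredSources = 6;       // RS-6-3: six healthy blocks suffice to decode
    int targets = 1;                  // one failed block being rebuilt

    long bytesToRead = (long) toReconstructLen * minRequiredSources;
    long bytesToWrite = (long) toReconstructLen * targets;

    System.out.println("read budget charged per pass:  " + bytesToRead + " bytes");  // 393216
    System.out.println("write budget charged per pass: " + bytesToWrite + " bytes"); // 65536
  }
}
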
@@ -508,4 +508,9 @@ class StripedReader {
   int getXmits() {
     return xmits;
   }
+
+  public int getMinRequiredSources() {
+    return minRequiredSources;
+  }
+
 }

@@ -4635,6 +4635,24 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.datanode.ec.reconstruct.read.bandwidthPerSec</name>
+  <value>0</value>
+  <description>
+    Specifies the maximum amount of bandwidth that the EC reconstruction can utilize for reading.
+    When the bandwidth value is zero, there is no limit.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.ec.reconstruct.write.bandwidthPerSec</name>
+  <value>0</value>
+  <description>
+    Specifies the maximum amount of bandwidth that the EC reconstruction can utilize for writing.
+    When the bandwidth value is zero, there is no limit.
+  </description>
+</property>
+
 <property>
   <name>dfs.datanode.fsdataset.factory</name>
   <value></value>

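For completeness, a hypothetical startup check, not the test shipped with this patch: bring up a single-DataNode MiniDFSCluster with both limits set and confirm the new getters return throttlers with the requested bandwidth (requires the hadoop-hdfs test artifact).

// Hypothetical verification sketch (not the patch's own test): start a one-DataNode
// MiniDFSCluster with the new keys set and confirm the throttlers come up with the
// requested bandwidth.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class EcThrottlerStartupCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_DATANODE_EC_RECONSTRUCT_READ_BANDWIDTHPERSEC_KEY, "50m");
    conf.set(DFSConfigKeys.DFS_DATANODE_EC_RECONSTRUCT_WRITE_BANDWIDTHPERSEC_KEY, "20m");

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DataNode dn = cluster.getDataNodes().get(0);
      // Non-null because both limits are > 0; getBandwidth() reports bytes per second.
      System.out.println("read  B/s: " + dn.getEcReconstuctReadThrottler().getBandwidth());
      System.out.println("write B/s: " + dn.getEcReconstuctWriteThrottler().getBandwidth());
    } finally {
      cluster.shutdown();
    }
  }
}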