HDFS-16811. Support DecommissionBackoffMonitor Parameters reconfigurable (#5068)
Signed-off-by: Tao Li <tomscut@apache.org>
This commit is contained in:
parent b90dfdff3f
commit e9319e696c
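This change makes dfs.namenode.decommission.backoff.monitor.pending.limit and dfs.namenode.decommission.backoff.monitor.pending.blocks.per.lock reconfigurable at runtime: they back the DatanodeAdminBackoffMonitor fields pendingRepLimit and blocksPerLock, and an operator can now retune them with `hdfs dfsadmin -reconfig namenode <host:ipc_port> start` instead of restarting the NameNode.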
DatanodeAdminBackoffMonitor.java:

@@ -24,6 +24,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
+import org.apache.hadoop.classification.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import java.util.HashMap;
@@ -70,10 +71,10 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
       outOfServiceNodeBlocks = new HashMap<>();

   /**
-   * The numbe of blocks to process when moving blocks to pendingReplication
+   * The number of blocks to process when moving blocks to pendingReplication
    * before releasing and reclaiming the namenode lock.
    */
-  private int blocksPerLock;
+  private volatile int blocksPerLock;

   /**
    * The number of blocks that have been checked on this tick.
@@ -82,7 +83,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
   /**
    * The maximum number of blocks to hold in PendingRep at any time.
    */
-  private int pendingRepLimit;
+  private volatile int pendingRepLimit;

   /**
    * The list of blocks which have been placed onto the replication queue
@@ -801,6 +802,26 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
     return false;
   }

+  @VisibleForTesting
+  @Override
+  public int getPendingRepLimit() {
+    return pendingRepLimit;
+  }
+
+  public void setPendingRepLimit(int pendingRepLimit) {
+    this.pendingRepLimit = pendingRepLimit;
+  }
+
+  @VisibleForTesting
+  @Override
+  public int getBlocksPerLock() {
+    return blocksPerLock;
+  }
+
+  public void setBlocksPerLock(int blocksPerLock) {
+    this.blocksPerLock = blocksPerLock;
+  }
+
   static class BlockStats {
     private LightWeightHashSet<Long> openFiles =
         new LightWeightLinkedSet<>();
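Why the two fields became volatile: the monitor thread re-reads the limits on every tick while the reconfiguration RPC handler writes them from another thread, and volatile guarantees the monitor observes the new value. A minimal standalone sketch of that visibility guarantee (editor illustration, not Hadoop code; all names here are made up):

import java.util.concurrent.TimeUnit;

public class VolatileLimitDemo {
  // Without volatile, the reader thread may cache the field
  // and never observe the writer's update.
  private volatile int pendingRepLimit = 1000;

  public static void main(String[] args) throws InterruptedException {
    VolatileLimitDemo demo = new VolatileLimitDemo();
    Thread monitor = new Thread(() -> {
      while (demo.pendingRepLimit == 1000) {
        // spin, as the admin monitor re-reads the limit each tick
      }
      System.out.println("observed new limit: " + demo.pendingRepLimit);
    });
    monitor.start();
    TimeUnit.MILLISECONDS.sleep(100);
    demo.pendingRepLimit = 20000;  // the "reconfiguration thread" write
    monitor.join();
  }
}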
DatanodeAdminDefaultMonitor.java:

@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.util.ChunkedArrayList;
+import org.apache.hadoop.classification.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -137,6 +138,28 @@ public class DatanodeAdminDefaultMonitor extends DatanodeAdminMonitorBase
     return numNodesChecked;
   }

+  @VisibleForTesting
+  @Override
+  public int getPendingRepLimit() {
+    return 0;
+  }
+
+  @Override
+  public void setPendingRepLimit(int pendingRepLimit) {
+    // nothing.
+  }
+
+  @VisibleForTesting
+  @Override
+  public int getBlocksPerLock() {
+    return 0;
+  }
+
+  @Override
+  public void setBlocksPerLock(int blocksPerLock) {
+    // nothing.
+  }
+
   @Override
   public void run() {
     LOG.debug("DatanodeAdminMonitor is running.");
DatanodeAdminManager.java:

@@ -419,4 +419,30 @@ public class DatanodeAdminManager {
     executor.submit(monitor).get();
   }

+  public void refreshPendingRepLimit(int pendingRepLimit, String key) {
+    ensurePositiveInt(pendingRepLimit, key);
+    this.monitor.setPendingRepLimit(pendingRepLimit);
+  }
+
+  @VisibleForTesting
+  public int getPendingRepLimit() {
+    return this.monitor.getPendingRepLimit();
+  }
+
+  public void refreshBlocksPerLock(int blocksPerLock, String key) {
+    ensurePositiveInt(blocksPerLock, key);
+    this.monitor.setBlocksPerLock(blocksPerLock);
+  }
+
+  @VisibleForTesting
+  public int getBlocksPerLock() {
+    return this.monitor.getBlocksPerLock();
+  }
+
+  private void ensurePositiveInt(int val, String key) {
+    Preconditions.checkArgument(
+        (val > 0),
+        key + " = '" + val + "' is invalid. " +
+            "It should be a positive, non-zero integer value.");
+  }
 }
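Validation is delegated to Preconditions.checkArgument, which throws IllegalArgumentException for a non-positive value; the NameNode handler shown further below catches that and rethrows it as a ReconfigurationException, so the old value stays in effect. A self-contained sketch of the same pattern (editor illustration using plain Guava rather than Hadoop's shaded copy):

import com.google.common.base.Preconditions;

public class EnsurePositiveDemo {
  static void ensurePositiveInt(int val, String key) {
    // Throws IllegalArgumentException with this message when val <= 0.
    Preconditions.checkArgument(val > 0,
        key + " = '" + val + "' is invalid. "
            + "It should be a positive, non-zero integer value.");
  }

  public static void main(String[] args) {
    ensurePositiveInt(20000, "demo.pending.limit");  // passes silently
    try {
      ensurePositiveInt(-1, "demo.pending.limit");   // throws
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
  }
}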
DatanodeAdminMonitorInterface.java:

@@ -37,4 +37,12 @@ public interface DatanodeAdminMonitorInterface extends Runnable {
   void setBlockManager(BlockManager bm);
   void setDatanodeAdminManager(DatanodeAdminManager dnm);
   void setNameSystem(Namesystem ns);
+
+  int getPendingRepLimit();
+
+  void setPendingRepLimit(int pendingRepLimit);
+
+  int getBlocksPerLock();
+
+  void setBlocksPerLock(int blocksPerLock);
 }
NameNode.java:

@@ -205,6 +205,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK_DEFAULT;

 import static org.apache.hadoop.util.ExitUtil.terminate;
 import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
@@ -353,7 +357,9 @@ public class NameNode extends ReconfigurableBase implements
       DFS_BLOCK_INVALIDATE_LIMIT_KEY,
       DFS_DATANODE_PEER_STATS_ENABLED_KEY,
       DFS_DATANODE_MAX_NODES_TO_REPORT_KEY,
-      DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY));
+      DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
+      DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT,
+      DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK));

   private static final String USAGE = "Usage: hdfs namenode ["
       + StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2321,6 +2327,10 @@ public class NameNode extends ReconfigurableBase implements
       return reconfigureSlowNodesParameters(datanodeManager, property, newVal);
     } else if (property.equals(DFS_BLOCK_INVALIDATE_LIMIT_KEY)) {
       return reconfigureBlockInvalidateLimit(datanodeManager, property, newVal);
+    } else if (property.equals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT) ||
+        (property.equals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK))) {
+      return reconfigureDecommissionBackoffMonitorParameters(datanodeManager, property,
+          newVal);
     } else {
       throw new ReconfigurationException(property, newVal, getConf().get(
           property));
@@ -2601,6 +2611,34 @@ public class NameNode extends ReconfigurableBase implements
     }
   }

+  private String reconfigureDecommissionBackoffMonitorParameters(
+      final DatanodeManager datanodeManager, final String property, final String newVal)
+      throws ReconfigurationException {
+    String newSetting = null;
+    try {
+      if (property.equals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT)) {
+        int pendingRepLimit = (newVal == null ?
+            DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT_DEFAULT :
+            Integer.parseInt(newVal));
+        datanodeManager.getDatanodeAdminManager().refreshPendingRepLimit(pendingRepLimit,
+            DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT);
+        newSetting = String.valueOf(datanodeManager.getDatanodeAdminManager().getPendingRepLimit());
+      } else if (property.equals(
+          DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK)) {
+        int blocksPerLock = (newVal == null ?
+            DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK_DEFAULT :
+            Integer.parseInt(newVal));
+        datanodeManager.getDatanodeAdminManager().refreshBlocksPerLock(blocksPerLock,
+            DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK);
+        newSetting = String.valueOf(datanodeManager.getDatanodeAdminManager().getBlocksPerLock());
+      }
+      LOG.info("RECONFIGURE* changed reconfigureDecommissionBackoffMonitorParameters {} to {}",
+          property, newSetting);
+      return newSetting;
+    } catch (IllegalArgumentException e) {
+      throw new ReconfigurationException(property, newVal, getConf().get(property), e);
+    }
+  }
+
   @Override // ReconfigurableBase
   protected Configuration getNewConf() {
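Two details worth noting in the handler above: unsetting a property (newVal == null) falls back to the compiled-in default rather than keeping the old value, and Integer.parseInt throws NumberFormatException, a subclass of IllegalArgumentException, so non-numeric and non-positive inputs both land in the same catch block and surface as a ReconfigurationException. A condensed sketch of the parse-or-default pattern (editor illustration; names are made up):

public class ParseOrDefaultDemo {
  static int parseOrDefault(String newVal, int defaultVal) {
    // null means "property removed": fall back to the default.
    return newVal == null ? defaultVal : Integer.parseInt(newVal);
  }

  public static void main(String[] args) {
    System.out.println(parseOrDefault(null, 1000));     // 1000
    System.out.println(parseOrDefault("20000", 1000));  // 20000
    try {
      parseOrDefault("non-numeric", 1000);
    } catch (IllegalArgumentException e) {              // NumberFormatException
      System.out.println("rejected: " + e.getMessage());
    }
  }
}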
TestNameNodeReconfigure.java:

@@ -22,6 +22,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;

+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeAdminBackoffMonitor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeAdminMonitorInterface;
 import org.junit.Test;
 import org.junit.Before;
 import org.junit.After;
@@ -62,6 +64,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT;

 public class TestNameNodeReconfigure {
@@ -567,6 +571,87 @@ public class TestNameNodeReconfigure {
     return containReport;
   }

+  @Test
+  public void testReconfigureDecommissionBackoffMonitorParameters()
+      throws ReconfigurationException, IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setClass(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MONITOR_CLASS,
+        DatanodeAdminBackoffMonitor.class, DatanodeAdminMonitorInterface.class);
+    int defaultPendingRepLimit = 1000;
+    conf.setInt(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT, defaultPendingRepLimit);
+    int defaultBlocksPerLock = 1000;
+    conf.setInt(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK,
+        defaultBlocksPerLock);
+
+    try (MiniDFSCluster newCluster = new MiniDFSCluster.Builder(conf).build()) {
+      newCluster.waitActive();
+      final NameNode nameNode = newCluster.getNameNode();
+      final DatanodeManager datanodeManager = nameNode.namesystem
+          .getBlockManager().getDatanodeManager();
+
+      // verify defaultPendingRepLimit.
+      assertEquals(datanodeManager.getDatanodeAdminManager().getPendingRepLimit(),
+          defaultPendingRepLimit);
+
+      // try invalid pendingRepLimit.
+      try {
+        nameNode.reconfigureProperty(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT,
+            "non-numeric");
+        fail("Should not reach here");
+      } catch (ReconfigurationException e) {
+        assertEquals("Could not change property " +
+            "dfs.namenode.decommission.backoff.monitor.pending.limit from '" +
+            defaultPendingRepLimit + "' to 'non-numeric'", e.getMessage());
+      }
+
+      try {
+        nameNode.reconfigureProperty(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT,
+            "-1");
+        fail("Should not reach here");
+      } catch (ReconfigurationException e) {
+        assertEquals("Could not change property " +
+            "dfs.namenode.decommission.backoff.monitor.pending.limit from '" +
+            defaultPendingRepLimit + "' to '-1'", e.getMessage());
+      }
+
+      // try correct pendingRepLimit.
+      nameNode.reconfigureProperty(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT,
+          "20000");
+      assertEquals(datanodeManager.getDatanodeAdminManager().getPendingRepLimit(), 20000);
+
+      // verify defaultBlocksPerLock.
+      assertEquals(datanodeManager.getDatanodeAdminManager().getBlocksPerLock(),
+          defaultBlocksPerLock);
+
+      // try invalid blocksPerLock.
+      try {
+        nameNode.reconfigureProperty(
+            DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK,
+            "non-numeric");
+        fail("Should not reach here");
+      } catch (ReconfigurationException e) {
+        assertEquals("Could not change property " +
+            "dfs.namenode.decommission.backoff.monitor.pending.blocks.per.lock from '" +
+            defaultBlocksPerLock + "' to 'non-numeric'", e.getMessage());
+      }
+
+      try {
+        nameNode.reconfigureProperty(
+            DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK, "-1");
+        fail("Should not reach here");
+      } catch (ReconfigurationException e) {
+        assertEquals("Could not change property " +
+            "dfs.namenode.decommission.backoff.monitor.pending.blocks.per.lock from '" +
+            defaultBlocksPerLock + "' to '-1'", e.getMessage());
+      }
+
+      // try correct blocksPerLock.
+      nameNode.reconfigureProperty(
+          DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK, "10000");
+      assertEquals(datanodeManager.getDatanodeAdminManager().getBlocksPerLock(), 10000);
+    }
+  }
+
   @After
   public void shutDown() throws IOException {
     if (cluster != null) {
TestDFSAdmin.java:

@@ -43,6 +43,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK;

 import org.apache.commons.io.FileUtils;
 import org.apache.commons.text.TextStringBuilder;
@@ -438,7 +440,7 @@ public class TestDFSAdmin {
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     getReconfigurableProperties("namenode", address, outs, errs);
-    assertEquals(20, outs.size());
+    assertEquals(22, outs.size());
     assertTrue(outs.get(0).contains("Reconfigurable properties:"));
     assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY, outs.get(1));
     assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(2));
@@ -449,8 +451,10 @@ public class TestDFSAdmin {
     assertEquals(DFS_IMAGE_PARALLEL_LOAD_KEY, outs.get(7));
     assertEquals(DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY, outs.get(8));
     assertEquals(DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY, outs.get(9));
-    assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(10));
-    assertEquals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, outs.get(11));
+    assertEquals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK, outs.get(10));
+    assertEquals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT, outs.get(11));
+    assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(12));
+    assertEquals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, outs.get(13));
     assertEquals(errs.size(), 0);
   }
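The expected property count moves from 20 to 22 here because the dfsadmin listing is sorted by property name: dfs.namenode.decommission.backoff.monitor.pending.blocks.per.lock sorts before ...pending.limit, so the two new keys slot in at indices 10 and 11 and push the heartbeat-recheck and slow-peer assertions down to 12 and 13.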