HDFS-16772. refreshHostsReader should use the latest configuration (#4890)
parent f52b900a5f
commit e68006cd70
@@ -1327,8 +1327,8 @@ private void refreshHostsReader(Configuration conf) throws IOException {
     // Update the file names and refresh internal includes and excludes list.
     if (conf == null) {
       conf = new HdfsConfiguration();
-      this.hostConfigManager.setConf(conf);
     }
+    this.hostConfigManager.setConf(conf);
     this.hostConfigManager.refresh();
   }
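For context on the fix above: before this change, `refreshHostsReader(conf)` handed the caller's configuration to the host config manager only when `conf` was null, so a refresh carrying a new configuration silently kept reading the old include/exclude file paths. Below is a minimal, self-contained sketch of that behavior; `Conf`, `HostManager`, and the paths are illustrative stand-ins, not Hadoop classes.

```java
import java.util.HashMap;

/**
 * Hedged sketch of the HDFS-16772 bug (stand-ins, not Hadoop code):
 * when setConf() runs only for a null argument, a refresh that carries
 * a *new* configuration keeps reading the old dfs.hosts path.
 */
public class StaleConfSketch {

  /** Stand-in for Hadoop's Configuration (hypothetical). */
  static class Conf extends HashMap<String, String> {
  }

  /** Stand-in for the host config manager: it reads paths from its conf. */
  static class HostManager {
    private Conf conf;
    void setConf(Conf c) {
      this.conf = c;
    }
    void refresh() {
      System.out.println("reading includes from " + conf.get("dfs.hosts"));
    }
  }

  static final HostManager MANAGER = new HostManager();

  /** Buggy shape: a non-null caller conf never reaches the manager. */
  static void refreshHostsReaderBuggy(Conf conf) {
    if (conf == null) {
      conf = new Conf();
      MANAGER.setConf(conf);
    }
    MANAGER.refresh(); // still uses whatever conf was set earlier
  }

  /** Fixed shape: the latest conf always reaches the manager. */
  static void refreshHostsReaderFixed(Conf conf) {
    if (conf == null) {
      conf = new Conf();
    }
    MANAGER.setConf(conf);
    MANAGER.refresh();
  }

  public static void main(String[] args) {
    Conf oldConf = new Conf();
    oldConf.put("dfs.hosts", "/tmp/old-includes"); // hypothetical path
    MANAGER.setConf(oldConf);

    Conf newConf = new Conf();
    newConf.put("dfs.hosts", "/tmp/new-includes"); // hypothetical path

    refreshHostsReaderBuggy(newConf); // prints the OLD path: new conf ignored
    refreshHostsReaderFixed(newConf); // prints the NEW path
  }
}
```

Running the sketch prints the old path for the buggy shape and the new path for the fixed one, mirroring the one-line move in the hunk above.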
@@ -81,21 +81,21 @@ public void testDatanodeReportWithUpgradeDomain() throws Exception {
       datanode.setUpgradeDomain(ud1);
       hostsFileWriter.initIncludeHosts(
           new DatanodeAdminProperties[]{datanode});
-      client.refreshNodes();
+      cluster.getNamesystem().getBlockManager().getDatanodeManager().refreshNodes(conf);
       DatanodeInfo[] all = client.datanodeReport(DatanodeReportType.ALL);
       assertEquals(all[0].getUpgradeDomain(), ud1);

       datanode.setUpgradeDomain(null);
       hostsFileWriter.initIncludeHosts(
           new DatanodeAdminProperties[]{datanode});
-      client.refreshNodes();
+      cluster.getNamesystem().getBlockManager().getDatanodeManager().refreshNodes(conf);
       all = client.datanodeReport(DatanodeReportType.ALL);
       assertEquals(all[0].getUpgradeDomain(), null);

       datanode.setUpgradeDomain(ud2);
       hostsFileWriter.initIncludeHosts(
           new DatanodeAdminProperties[]{datanode});
-      client.refreshNodes();
+      cluster.getNamesystem().getBlockManager().getDatanodeManager().refreshNodes(conf);
       all = client.datanodeReport(DatanodeReportType.ALL);
       assertEquals(all[0].getUpgradeDomain(), ud2);
     } finally {
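A note on the three call-site changes above: with the fix, `client.refreshNodes()` makes the NameNode hand the host config manager a fresh `HdfsConfiguration`, which would not carry the `dfs.hosts` path that `HostsFileWriter` sets only on the in-memory test conf. That is presumably why the tests now pass their own `conf` to `DatanodeManager#refreshNodes` directly.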
@@ -2310,7 +2310,7 @@ private void testUpgradeDomain(boolean defineUpgradeDomain,
       dnProp.setPort(datanodeID.getXferPort());
       dnProp.setUpgradeDomain(upgradeDomain);
       hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[]{dnProp});
-      cluster.getFileSystem().refreshNodes();
+      cluster.getNamesystem(0).getBlockManager().getDatanodeManager().refreshNodes(conf);
     }

     // create files
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;

+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;

 import java.lang.management.ManagementFactory;
@@ -175,4 +176,38 @@ public void testHostsIncludeForDeadCount() throws Exception {
       hostsFileWriter.cleanup();
     }
   }
+
+  @Test
+  public void testNewHostAndExcludeFile() throws Exception {
+    Configuration conf = getConf();
+
+    HostsFileWriter writer1 = new HostsFileWriter();
+    writer1.initialize(conf, "old_temp/decommission");
+    writer1.initIncludeHosts(new String[]{"localhost:52", "127.0.0.1:7777"});
+
+    // Write all hosts to a new dfs.hosts file.
+    HostsFileWriter writer2 = new HostsFileWriter();
+    Configuration newConf = new Configuration(getConf());
+    writer2.initialize(newConf, "new_temp/decommission");
+    writer2.initIncludeHosts(new String[]{
+        "localhost:52", "127.0.0.1:7777", "localhost:100"});
+
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      final FSNamesystem ns = cluster.getNameNode().getNamesystem();
+      assertEquals(2, ns.getNumDeadDataNodes());
+      assertEquals(0, ns.getNumLiveDataNodes());
+
+      ns.getBlockManager().getDatanodeManager().refreshNodes(newConf);
+      assertEquals(3, ns.getNumDeadDataNodes());
+      assertEquals(0, ns.getNumLiveDataNodes());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      writer1.cleanup();
+      writer2.cleanup();
+    }
+  }
 }
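The new test above drives the fix end to end: the cluster starts against writer1's two-host include file (both hosts dead, since no DataNodes run), and `refreshNodes(newConf)` must pick up writer2's three-host file for the dead count to reach three. For reference, a hedged sketch of the same call pattern outside the test harness; the class name and include path are hypothetical, while the `getBlockManager()`/`getDatanodeManager()` chain is the one used in the test:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

/**
 * Hedged sketch, not part of the patch: retarget the host reader at a
 * different include file, relying on the fix applying the latest conf.
 */
final class RefreshWithNewHostsFile {
  private RefreshWithNewHostsFile() {
  }

  static void refresh(FSNamesystem ns, String newIncludePath)
      throws IOException {
    Configuration latest = new HdfsConfiguration();
    // newIncludePath is illustrative, e.g. "/etc/hadoop/hosts.new".
    latest.set(DFSConfigKeys.DFS_HOSTS, newIncludePath);
    // With HDFS-16772, setConf(latest) now runs unconditionally inside
    // refreshHostsReader(), so refresh() re-reads the file named by
    // `latest` rather than the previously configured one.
    ns.getBlockManager().getDatanodeManager().refreshNodes(latest);
  }
}
```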
@@ -69,11 +69,11 @@ public class TestUpgradeDomainBlockPlacementPolicy {
   static final Set<DatanodeID> expectedDatanodeIDs = new HashSet<>();
   private MiniDFSCluster cluster = null;
   private HostsFileWriter hostsFileWriter = new HostsFileWriter();
+  private Configuration conf = new HdfsConfiguration();

   @Before
   public void setup() throws IOException {
     StaticMapping.resetMap();
-    Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
     conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
@@ -130,7 +130,7 @@ private void refreshDatanodeAdminProperties()
     datanodes[0].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
     datanodes[5].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
     hostsFileWriter.initIncludeHosts(datanodes);
-    cluster.getFileSystem().refreshNodes();
+    cluster.getNamesystem(0).getBlockManager().getDatanodeManager().refreshNodes(conf);

     expectedDatanodeIDs.clear();
     expectedDatanodeIDs.add(cluster.getDataNodes().get(2).getDatanodeId());
@@ -169,7 +169,7 @@ private void refreshDatanodeAdminProperties2()
     datanodes[2].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
     datanodes[3].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
     hostsFileWriter.initIncludeHosts(datanodes);
-    cluster.getFileSystem().refreshNodes();
+    cluster.getNamesystem(0).getBlockManager().getDatanodeManager().refreshNodes(conf);

     expectedDatanodeIDs.clear();
     expectedDatanodeIDs.add(cluster.getDataNodes().get(0).getDatanodeId());
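In the two hunks above, `refreshDatanodeAdminProperties()` and `refreshDatanodeAdminProperties2()` now pass the test's `conf` to `DatanodeManager#refreshNodes` instead of going through `cluster.getFileSystem().refreshNodes()`; the earlier hunk hoists `conf` from a local variable in `setup()` to a field so these helpers can reference it.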