HDFS-12473. Change hosts JSON file format.

commit 230b85d586
parent 7e58b2478c
Author: Ming Ma
Date: 2017-09-20 09:03:59 -07:00
6 changed files with 102 additions and 58 deletions
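
In short: the combined hosts file is now a well-formed JSON array instead of a bare sequence of per-datanode objects. CombinedHostsFileReader.readFile() returns a DatanodeAdminProperties[] rather than a Set, CombinedHostsFileWriter serializes the whole collection with a single ObjectMapper.writeValue() call, and the reader falls back to parsing the legacy format (logging a warning) so existing files keep working.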

CombinedHostsFileReader.java

@@ -19,59 +19,86 @@
 package org.apache.hadoop.hdfs.util;

 import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.databind.JsonMappingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;

 import java.io.FileInputStream;
 import java.io.InputStreamReader;
 import java.io.IOException;
 import java.io.Reader;
+import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.Set;
-import java.util.HashSet;
+import java.util.List;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 /**
- * Reader support for JSON based datanode configuration, an alternative
+ * Reader support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
- * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * [
+ * {"hostName": "host1"},
+ * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
+ * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileReader {
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(DatanodeAdminProperties.class);
-  private static final JsonFactory JSON_FACTORY = new JsonFactory();
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(CombinedHostsFileReader.class);

   private CombinedHostsFileReader() {
   }

   /**
    * Deserialize a set of DatanodeAdminProperties from a json file.
-   * @param hostsFile the input json file to read from.
+   * @param hostsFile the input json file to read from
    * @return the set of DatanodeAdminProperties
    * @throws IOException
    */
-  public static Set<DatanodeAdminProperties>
+  public static DatanodeAdminProperties[]
       readFile(final String hostsFile) throws IOException {
-    HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
+    DatanodeAdminProperties[] allDNs = new DatanodeAdminProperties[0];
+    ObjectMapper objectMapper = new ObjectMapper();
+    boolean tryOldFormat = false;
+    try (Reader input =
+        new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
+      allDNs = objectMapper.readValue(input, DatanodeAdminProperties[].class);
+    } catch (JsonMappingException jme) {
+      // The old format doesn't have json top-level token to enclose the array.
+      // For backward compatibility, try parsing the old format.
+      tryOldFormat = true;
+      LOG.warn("{} has invalid JSON format." +
+          "Try the old format without top-level token defined.", hostsFile);
+    }
+
+    if (tryOldFormat) {
+      ObjectReader objectReader =
+          objectMapper.readerFor(DatanodeAdminProperties.class);
+      JsonFactory jsonFactory = new JsonFactory();
+      List<DatanodeAdminProperties> all = new ArrayList<>();
       try (Reader input =
           new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
         Iterator<DatanodeAdminProperties> iterator =
-            READER.readValues(JSON_FACTORY.createParser(input));
+            objectReader.readValues(jsonFactory.createParser(input));
         while (iterator.hasNext()) {
           DatanodeAdminProperties properties = iterator.next();
-          allDNs.add(properties);
+          all.add(properties);
         }
       }
+      allDNs = all.toArray(new DatanodeAdminProperties[all.size()]);
+    }
     return allDNs;
   }
 }
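
For orientation, a minimal usage sketch of the new reader API. It is not part of this commit; the file path is hypothetical, and the getters are assumed to be the bean-style accessors DatanodeAdminProperties exposes for Jackson binding:

import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.util.CombinedHostsFileReader;

public class ReadHostsExample {
  public static void main(String[] args) throws Exception {
    // readFile() handles both formats: the new JSON array, and the
    // legacy one-object-per-line layout via the fallback path above.
    DatanodeAdminProperties[] all =
        CombinedHostsFileReader.readFile("/tmp/dfs.hosts.json");  // hypothetical path
    for (DatanodeAdminProperties dn : all) {
      System.out.println(dn.getHostName() + ":" + dn.getPort());
    }
  }
}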

CombinedHostsFileWriter.java

@@ -32,20 +32,21 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;

 /**
- * Writer support for JSON based datanode configuration, an alternative
+ * Writer support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
- * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * [
+ * {"hostName": "host1"},
+ * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
+ * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileWriter {
-  private static final ObjectMapper MAPPER = new ObjectMapper();

   private CombinedHostsFileWriter() {
   }

@@ -57,13 +58,11 @@ public final class CombinedHostsFileWriter {
    */
   public static void writeFile(final String hostsFile,
       final Set<DatanodeAdminProperties> allDNs) throws IOException {
-    StringBuilder configs = new StringBuilder();
+    final ObjectMapper objectMapper = new ObjectMapper();
     try (Writer output =
         new OutputStreamWriter(new FileOutputStream(hostsFile), "UTF-8")) {
-      for (DatanodeAdminProperties datanodeAdminProperties: allDNs) {
-        configs.append(MAPPER.writeValueAsString(datanodeAdminProperties));
-      }
-      output.write(configs.toString());
+      objectMapper.writeValue(output, allDNs);
     }
   }
 }
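
And a round-trip sketch with the writer, also not part of this commit. The setters are assumptions based on the bean properties Jackson serializes, and the output path is hypothetical:

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.util.CombinedHostsFileWriter;

public class WriteHostsExample {
  public static void main(String[] args) throws Exception {
    Set<DatanodeAdminProperties> dns = new HashSet<>();
    DatanodeAdminProperties dn = new DatanodeAdminProperties();
    dn.setHostName("host1");     // assumed bean setter
    dn.setUpgradeDomain("ud0");  // assumed bean setter
    dns.add(dn);
    // With this commit the whole set is written as one JSON array in a
    // single writeValue() call, instead of concatenated bare objects.
    CombinedHostsFileWriter.writeFile("/tmp/dfs.hosts.json", dns);
  }
}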

CombinedHostFileManager.java

@@ -39,7 +39,6 @@ import java.net.InetSocketAddress;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.Map;
-import java.util.Set;

 import com.google.common.base.Predicate;

@@ -212,7 +211,7 @@ public class CombinedHostFileManager extends HostConfigManager {
   }

   private void refresh(final String hostsFile) throws IOException {
     HostProperties hostProps = new HostProperties();
-    Set<DatanodeAdminProperties> all =
+    DatanodeAdminProperties[] all =
         CombinedHostsFileReader.readFile(hostsFile);
     for(DatanodeAdminProperties properties : all) {
       InetSocketAddress addr = parseEntry(hostsFile,

TestCombinedHostsFileReader.java

@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.util;
 import java.io.File;
 import java.io.FileWriter;
-import java.util.Set;

 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;

@@ -30,19 +28,21 @@ import org.junit.Test;
 import static org.junit.Assert.assertEquals;

-/*
- * Test for JSON based HostsFileReader
+/**
+ * Test for JSON based HostsFileReader.
  */
 public class TestCombinedHostsFileReader {

   // Using /test/build/data/tmp directory to store temporary files
-  static final String HOSTS_TEST_DIR = GenericTestUtils.getTestDir()
+  static final String HOSTSTESTDIR = GenericTestUtils.getTestDir()
       .getAbsolutePath();
-  File NEW_FILE = new File(HOSTS_TEST_DIR, "dfs.hosts.new.json");
+  private final File newFile = new File(HOSTSTESTDIR, "dfs.hosts.new.json");

-  static final String TEST_CACHE_DATA_DIR =
+  static final String TESTCACHEDATADIR =
       System.getProperty("test.cache.data", "build/test/cache");
-  File EXISTING_FILE = new File(TEST_CACHE_DATA_DIR, "dfs.hosts.json");
+  private final File jsonFile = new File(TESTCACHEDATADIR, "dfs.hosts.json");
+  private final File legacyFile =
+      new File(TESTCACHEDATADIR, "legacy.dfs.hosts.json");

   @Before
   public void setUp() throws Exception {

@@ -51,18 +51,28 @@ public class TestCombinedHostsFileReader {
   @After
   public void tearDown() throws Exception {
     // Delete test file after running tests
-    NEW_FILE.delete();
+    newFile.delete();
   }

+  /*
+   * Load the legacy test json file
+   */
+  @Test
+  public void testLoadLegacyJsonFile() throws Exception {
+    DatanodeAdminProperties[] all =
+        CombinedHostsFileReader.readFile(legacyFile.getAbsolutePath());
+    assertEquals(7, all.length);
+  }
+
   /*
-   * Load the existing test json file
+   * Load the test json file
    */
   @Test
   public void testLoadExistingJsonFile() throws Exception {
-    Set<DatanodeAdminProperties> all =
-        CombinedHostsFileReader.readFile(EXISTING_FILE.getAbsolutePath());
-    assertEquals(7, all.size());
+    DatanodeAdminProperties[] all =
+        CombinedHostsFileReader.readFile(jsonFile.getAbsolutePath());
+    assertEquals(7, all.length);
   }

   /*

@@ -70,11 +80,11 @@ public class TestCombinedHostsFileReader {
    */
   @Test
   public void testEmptyCombinedHostsFileReader() throws Exception {
-    FileWriter hosts = new FileWriter(NEW_FILE);
+    FileWriter hosts = new FileWriter(newFile);
     hosts.write("");
     hosts.close();
-    Set<DatanodeAdminProperties> all =
-        CombinedHostsFileReader.readFile(NEW_FILE.getAbsolutePath());
-    assertEquals(0, all.size());
+    DatanodeAdminProperties[] all =
+        CombinedHostsFileReader.readFile(newFile.getAbsolutePath());
+    assertEquals(0, all.length);
   }
 }

dfs.hosts.json

@@ -1,7 +1,9 @@
-{"hostName": "host1"}
-{"hostName": "host2", "upgradeDomain": "ud0"}
-{"hostName": "host3", "adminState": "DECOMMISSIONED"}
-{"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"}
-{"hostName": "host5", "port": 8090}
-{"hostName": "host6", "adminState": "IN_MAINTENANCE"}
-{"hostName": "host7", "adminState": "IN_MAINTENANCE", "maintenanceExpireTimeInMS": "112233"}
+[
+{"hostName": "host1"},
+{"hostName": "host2", "upgradeDomain": "ud0"},
+{"hostName": "host3", "adminState": "DECOMMISSIONED"},
+{"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"},
+{"hostName": "host5", "port": 8090},
+{"hostName": "host6", "adminState": "IN_MAINTENANCE"},
+{"hostName": "host7", "adminState": "IN_MAINTENANCE", "maintenanceExpireTimeInMS": "112233"}
+]

legacy.dfs.hosts.json (new file)

@@ -0,0 +1,7 @@
+{"hostName": "host1"}
+{"hostName": "host2", "upgradeDomain": "ud0"}
+{"hostName": "host3", "adminState": "DECOMMISSIONED"}
+{"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"}
+{"hostName": "host5", "port": 8090}
+{"hostName": "host6", "adminState": "IN_MAINTENANCE"}
+{"hostName": "host7", "adminState": "IN_MAINTENANCE", "maintenanceExpireTimeInMS": "112233"}