diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
index 8da5655eda5..aa8e4c1f24e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
@@ -19,58 +19,85 @@
 package org.apache.hadoop.hdfs.util;
 
 import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.databind.JsonMappingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
+
 import java.io.FileInputStream;
 import java.io.InputStreamReader;
 import java.io.IOException;
 import java.io.Reader;
+import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.Set;
-import java.util.HashSet;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
- * Reader support for JSON based datanode configuration, an alternative
+ * Reader support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
- * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * [
+ * {"hostName": "host1"},
+ * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
+ * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileReader {
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(DatanodeAdminProperties.class);
-  private static final JsonFactory JSON_FACTORY = new JsonFactory();
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(CombinedHostsFileReader.class);
 
   private CombinedHostsFileReader() {
   }
 
   /**
    * Deserialize a set of DatanodeAdminProperties from a json file.
-   * @param hostsFile the input json file to read from.
+   * @param hostsFile the input json file to read from
    * @return the set of DatanodeAdminProperties
    * @throws IOException
    */
-  public static Set<DatanodeAdminProperties>
+  public static DatanodeAdminProperties[]
       readFile(final String hostsFile) throws IOException {
-    HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
+    DatanodeAdminProperties[] allDNs = new DatanodeAdminProperties[0];
+    ObjectMapper objectMapper = new ObjectMapper();
+    boolean tryOldFormat = false;
     try (Reader input =
-        new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
-      Iterator<DatanodeAdminProperties> iterator =
-          READER.readValues(JSON_FACTORY.createParser(input));
-      while (iterator.hasNext()) {
-        DatanodeAdminProperties properties = iterator.next();
-        allDNs.add(properties);
+        new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
+      allDNs = objectMapper.readValue(input, DatanodeAdminProperties[].class);
+    } catch (JsonMappingException jme) {
+      // The old format doesn't have a JSON top-level token to enclose the array.
+      // For backward compatibility, try parsing the old format.
+      tryOldFormat = true;
+      LOG.warn("{} has invalid JSON format. " +
+          "Try the old format without top-level token defined.", hostsFile);
+    }
+
+    if (tryOldFormat) {
+      ObjectReader objectReader =
+          objectMapper.readerFor(DatanodeAdminProperties.class);
+      JsonFactory jsonFactory = new JsonFactory();
+      List<DatanodeAdminProperties> all = new ArrayList<>();
+      try (Reader input =
+          new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
+        Iterator<DatanodeAdminProperties> iterator =
+            objectReader.readValues(jsonFactory.createParser(input));
+        while (iterator.hasNext()) {
+          DatanodeAdminProperties properties = iterator.next();
+          all.add(properties);
+        }
       }
+      allDNs = all.toArray(new DatanodeAdminProperties[all.size()]);
     }
     return allDNs;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileWriter.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileWriter.java
index d771191ff0a..2d9bbf5044c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileWriter.java
@@ -32,20 +32,21 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 
 /**
- * Writer support for JSON based datanode configuration, an alternative
+ * Writer support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
- * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * [
+ * {"hostName": "host1"},
+ * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
+ * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileWriter {
-  private static final ObjectMapper MAPPER = new ObjectMapper();
 
   private CombinedHostsFileWriter() {
   }
@@ -57,13 +58,11 @@ public final class CombinedHostsFileWriter {
    */
   public static void writeFile(final String hostsFile,
       final Set<DatanodeAdminProperties> allDNs) throws IOException {
-    StringBuilder configs = new StringBuilder();
+    final ObjectMapper objectMapper = new ObjectMapper();
+
     try (Writer output =
         new OutputStreamWriter(new FileOutputStream(hostsFile), "UTF-8")) {
-      for (DatanodeAdminProperties datanodeAdminProperties: allDNs) {
-        configs.append(MAPPER.writeValueAsString(datanodeAdminProperties));
-      }
-      output.write(configs.toString());
+      objectMapper.writeValue(output, allDNs);
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
index 6f9c35e0fcb..d6a09720643 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
@@ -39,7 +39,6 @@ import java.net.InetSocketAddress;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.Map;
-import java.util.Set;
 
 import com.google.common.base.Predicate;
@@ -212,7 +211,7 @@ public class CombinedHostFileManager extends HostConfigManager {
   }
   private void refresh(final String hostsFile) throws IOException {
     HostProperties hostProps = new HostProperties();
-    Set<DatanodeAdminProperties> all =
+    DatanodeAdminProperties[] all =
         CombinedHostsFileReader.readFile(hostsFile);
     for(DatanodeAdminProperties properties : all) {
       InetSocketAddress addr = parseEntry(hostsFile,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
index b48784fbcc7..cf021805518 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.util;
 
 import java.io.File;
 import java.io.FileWriter;
-import java.util.Set;
-
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
@@ -30,19 +28,21 @@ import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
 
-/*
- * Test for JSON based HostsFileReader
+/**
+ * Test for JSON based HostsFileReader.
  */
 public class TestCombinedHostsFileReader {
 
   // Using /test/build/data/tmp directory to store temporary files
-  static final String HOSTS_TEST_DIR = GenericTestUtils.getTestDir()
+  static final String HOSTSTESTDIR = GenericTestUtils.getTestDir()
       .getAbsolutePath();
-  File NEW_FILE = new File(HOSTS_TEST_DIR, "dfs.hosts.new.json");
+  private final File newFile = new File(HOSTSTESTDIR, "dfs.hosts.new.json");
 
-  static final String TEST_CACHE_DATA_DIR =
+  static final String TESTCACHEDATADIR =
       System.getProperty("test.cache.data", "build/test/cache");
-  File EXISTING_FILE = new File(TEST_CACHE_DATA_DIR, "dfs.hosts.json");
+  private final File jsonFile = new File(TESTCACHEDATADIR, "dfs.hosts.json");
+  private final File legacyFile =
+      new File(TESTCACHEDATADIR, "legacy.dfs.hosts.json");
 
   @Before
   public void setUp() throws Exception {
@@ -51,18 +51,28 @@ public class TestCombinedHostsFileReader {
   @After
   public void tearDown() throws Exception {
     // Delete test file after running tests
-    NEW_FILE.delete();
+    newFile.delete();
   }
 
   /*
-   * Load the existing test json file
+   * Load the legacy test json file
+   */
+  @Test
+  public void testLoadLegacyJsonFile() throws Exception {
+    DatanodeAdminProperties[] all =
+        CombinedHostsFileReader.readFile(legacyFile.getAbsolutePath());
+    assertEquals(7, all.length);
+  }
+
+  /*
+   * Load the test json file
    */
   @Test
   public void testLoadExistingJsonFile() throws Exception {
-    Set<DatanodeAdminProperties> all =
-        CombinedHostsFileReader.readFile(EXISTING_FILE.getAbsolutePath());
-    assertEquals(7, all.size());
+    DatanodeAdminProperties[] all =
+        CombinedHostsFileReader.readFile(jsonFile.getAbsolutePath());
+    assertEquals(7, all.length);
   }
 
   /*
@@ -70,11 +80,11 @@ public class TestCombinedHostsFileReader {
    */
   @Test
   public void testEmptyCombinedHostsFileReader() throws Exception {
-    FileWriter hosts = new FileWriter(NEW_FILE);
+    FileWriter hosts = new FileWriter(newFile);
     hosts.write("");
     hosts.close();
-    Set<DatanodeAdminProperties> all =
-        CombinedHostsFileReader.readFile(NEW_FILE.getAbsolutePath());
-    assertEquals(0, all.size());
+    DatanodeAdminProperties[] all =
+        CombinedHostsFileReader.readFile(newFile.getAbsolutePath());
+    assertEquals(0, all.length);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json
index 9c852e01eb0..615b17fc860 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json
@@ -1,7 +1,9 @@
-{"hostName": "host1"}
-{"hostName": "host2", "upgradeDomain": "ud0"}
-{"hostName": "host3", "adminState": "DECOMMISSIONED"}
-{"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"}
-{"hostName": "host5", "port": 8090}
-{"hostName": "host6", "adminState": "IN_MAINTENANCE"}
-{"hostName": "host7", "adminState": "IN_MAINTENANCE", "maintenanceExpireTimeInMS": "112233"}
+[
+ {"hostName": "host1"},
+ {"hostName": "host2", "upgradeDomain": "ud0"},
+ {"hostName": "host3", "adminState": "DECOMMISSIONED"},
+ {"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"},
+ {"hostName": "host5", "port": 8090},
+ {"hostName": "host6", "adminState": "IN_MAINTENANCE"},
+ {"hostName": "host7", "adminState": "IN_MAINTENANCE", "maintenanceExpireTimeInMS": "112233"}
+]
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/legacy.dfs.hosts.json b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/legacy.dfs.hosts.json
new file mode 100644
index 00000000000..9c852e01eb0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/legacy.dfs.hosts.json
@@ -0,0 +1,7 @@
+{"hostName": "host1"}
+{"hostName": "host2", "upgradeDomain": "ud0"}
+{"hostName": "host3", "adminState": "DECOMMISSIONED"}
+{"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"}
+{"hostName": "host5", "port": 8090}
+{"hostName": "host6", "adminState": "IN_MAINTENANCE"}
+{"hostName": "host7", "adminState": "IN_MAINTENANCE", "maintenanceExpireTimeInMS": "112233"}
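Usage note (not part of the patch): after this change, CombinedHostsFileReader.readFile() returns a DatanodeAdminProperties[] and silently falls back to the legacy one-object-per-line format, so callers such as CombinedHostFileManager only need the array-based loop shown in the diff above. A minimal, hypothetical caller might look like the following sketch; the file path and the printed fields are illustrative only.

// Illustrative sketch only: reads a combined hosts file (new array format or
// legacy format) and prints each datanode entry. The path is hypothetical.
import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.util.CombinedHostsFileReader;

public class CombinedHostsFileExample {
  public static void main(String[] args) throws Exception {
    DatanodeAdminProperties[] datanodes =
        CombinedHostsFileReader.readFile("/etc/hadoop/dfs.hosts.json");
    for (DatanodeAdminProperties dn : datanodes) {
      // getHostName/getPort/getAdminState are the DatanodeAdminProperties getters.
      System.out.println(dn.getHostName() + ":" + dn.getPort()
          + " adminState=" + dn.getAdminState());
    }
  }
}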