HDFS-12473. Change hosts JSON file format.
parent 6581f2dea3
commit 7dd662eafd
@@ -18,60 +18,88 @@
 package org.apache.hadoop.hdfs.util;
 
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.EOFException;
 import java.io.FileInputStream;
 import java.io.InputStreamReader;
 import java.io.IOException;
 import java.io.Reader;
+import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.Set;
-import java.util.HashSet;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.ObjectReader;
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Reader support for JSON based datanode configuration, an alternative
+ * Reader support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
+ * [
+ *   {"hostName": "host1"},
+ *   {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
+ *   {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileReader {
-  private static final ObjectReader READER =
-      new ObjectMapper().reader(DatanodeAdminProperties.class);
-  private static final JsonFactory JSON_FACTORY = new JsonFactory();
 
   public static final Logger LOG =
       LoggerFactory.getLogger(CombinedHostsFileReader.class);
 
   private CombinedHostsFileReader() {
   }
 
   /**
    * Deserialize a set of DatanodeAdminProperties from a json file.
-   * @param hostsFile the input json file to read from.
+   * @param hostsFile the input json file to read from
    * @return the set of DatanodeAdminProperties
    * @throws IOException
    */
-  public static Set<DatanodeAdminProperties>
+  public static DatanodeAdminProperties[]
       readFile(final String hostsFile) throws IOException {
-    HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
+    DatanodeAdminProperties[] allDNs = new DatanodeAdminProperties[0];
+    ObjectMapper objectMapper = new ObjectMapper();
+    boolean tryOldFormat = false;
     try (Reader input =
         new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
+      allDNs = objectMapper.readValue(input, DatanodeAdminProperties[].class);
+    } catch (JsonMappingException jme) {
+      // The old format doesn't have a JSON top-level token to enclose the array.
+      // For backward compatibility, try parsing the old format.
+      tryOldFormat = true;
+      LOG.warn("{} has invalid JSON format. " +
+          "Try the old format without top-level token defined.", hostsFile);
+    } catch (EOFException eof) {
+      LOG.warn("{} is empty.", hostsFile);
+    }
+
+    if (tryOldFormat) {
+      JsonFactory jsonFactory = new JsonFactory();
+      List<DatanodeAdminProperties> all = new ArrayList<>();
+      try (Reader input =
+          new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
       Iterator<DatanodeAdminProperties> iterator =
-          READER.readValues(JSON_FACTORY.createJsonParser(input));
+          objectMapper.readValues(jsonFactory.createJsonParser(input),
+              DatanodeAdminProperties.class);
       while (iterator.hasNext()) {
         DatanodeAdminProperties properties = iterator.next();
-        allDNs.add(properties);
+        all.add(properties);
       }
     }
+      allDNs = all.toArray(new DatanodeAdminProperties[all.size()]);
+    }
     return allDNs;
   }
 }
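
For readers who want to try the compatibility logic outside the patch, here is a minimal standalone sketch of the same strategy: attempt the new top-level-array format first, and on a mapping failure fall back to the legacy stream of bare objects. It is written against the modern com.fasterxml Jackson API rather than the org.codehaus.jackson classes the patch uses, and HostEntry is a hypothetical stand-in for DatanodeAdminProperties.

// Sketch only: modern Jackson (com.fasterxml) equivalent of the fallback
// logic above. HostEntry is a hypothetical stand-in for
// DatanodeAdminProperties.
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class HostsFileReadSketch {
  /** Hypothetical stand-in for DatanodeAdminProperties. */
  public static class HostEntry {
    public String hostName;
    public int port;
    public String upgradeDomain;
    public String adminState;
    public long maintenanceExpireTimeInMS;
  }

  public static HostEntry[] readFile(String hostsFile) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    try {
      // New format: the whole file is one JSON array.
      return mapper.readValue(new File(hostsFile), HostEntry[].class);
    } catch (JsonMappingException e) {
      // Legacy format: a bare sequence of JSON objects with no enclosing
      // array. (An empty file also lands here and yields an empty result.)
      List<HostEntry> all = new ArrayList<>();
      try (MappingIterator<HostEntry> it =
          mapper.readerFor(HostEntry.class).readValues(new File(hostsFile))) {
        while (it.hasNext()) {
          all.add(it.next());
        }
      }
      return all.toArray(new HostEntry[0]);
    }
  }
}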
@@ -32,20 +32,21 @@ import org.codehaus.jackson.map.ObjectMapper;
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 
 /**
- * Writer support for JSON based datanode configuration, an alternative
+ * Writer support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
+ * [
+ *   {"hostName": "host1"},
+ *   {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
+ *   {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileWriter {
-  private static final ObjectMapper MAPPER = new ObjectMapper();
   private CombinedHostsFileWriter() {
   }
 
@@ -57,13 +58,11 @@ public final class CombinedHostsFileWriter {
    */
   public static void writeFile(final String hostsFile,
       final Set<DatanodeAdminProperties> allDNs) throws IOException {
-    StringBuilder configs = new StringBuilder();
+    final ObjectMapper objectMapper = new ObjectMapper();
+
     try (Writer output =
         new OutputStreamWriter(new FileOutputStream(hostsFile), "UTF-8")) {
-      for (DatanodeAdminProperties datanodeAdminProperties: allDNs) {
-        configs.append(MAPPER.writeValueAsString(datanodeAdminProperties));
-      }
-      output.write(configs.toString());
+      objectMapper.writeValue(output, allDNs);
     }
   }
 }
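
The writer change is the mirror image: instead of appending one serialized object per datanode, the new code hands the whole collection to a single writeValue() call, which emits a well-formed top-level JSON array. A small demonstration of the difference, reusing the hypothetical HostEntry type from the reader sketch above:

// Sketch only: shows why the old writer's output had no top-level token.
import com.fasterxml.jackson.databind.ObjectMapper;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

public class HostsFileWriteSketch {
  public static void main(String[] args) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    HostsFileReadSketch.HostEntry host1 = new HostsFileReadSketch.HostEntry();
    host1.hostName = "host1";
    HostsFileReadSketch.HostEntry host2 = new HostsFileReadSketch.HostEntry();
    host2.hostName = "host2";
    host2.port = 50;
    List<HostsFileReadSketch.HostEntry> all = Arrays.asList(host1, host2);

    // Old behavior: concatenated root-level objects, i.e. {...}{...},
    // which is not a single parseable JSON document.
    StringBuilder legacy = new StringBuilder();
    for (HostsFileReadSketch.HostEntry e : all) {
      legacy.append(mapper.writeValueAsString(e));
    }
    System.out.println(legacy);

    // New behavior: one top-level array, i.e. [{...},{...}].
    System.out.println(mapper.writeValueAsString(all));
  }
}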
@@ -39,7 +39,6 @@ import java.net.InetSocketAddress;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.Map;
-import java.util.Set;
 
 import com.google.common.base.Predicate;
 
@@ -212,7 +211,7 @@ public class CombinedHostFileManager extends HostConfigManager {
   }
   private void refresh(final String hostsFile) throws IOException {
     HostProperties hostProps = new HostProperties();
-    Set<DatanodeAdminProperties> all =
+    DatanodeAdminProperties[] all =
         CombinedHostsFileReader.readFile(hostsFile);
     for(DatanodeAdminProperties properties : all) {
       InetSocketAddress addr = parseEntry(hostsFile,
@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.util;
 import java.io.File;
 import java.io.FileWriter;
 
-import java.util.Set;
-
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
@@ -30,19 +28,21 @@ import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
 
-/*
- * Test for JSON based HostsFileReader
+/**
+ * Test for JSON based HostsFileReader.
  */
 public class TestCombinedHostsFileReader {
 
   // Using /test/build/data/tmp directory to store temporary files
-  static final String HOSTS_TEST_DIR = GenericTestUtils.getTestDir()
+  static final String HOSTSTESTDIR = GenericTestUtils.getTestDir()
       .getAbsolutePath();
-  File NEW_FILE = new File(HOSTS_TEST_DIR, "dfs.hosts.new.json");
+  private final File newFile = new File(HOSTSTESTDIR, "dfs.hosts.new.json");
 
-  static final String TEST_CACHE_DATA_DIR =
+  static final String TESTCACHEDATADIR =
       System.getProperty("test.cache.data", "build/test/cache");
-  File EXISTING_FILE = new File(TEST_CACHE_DATA_DIR, "dfs.hosts.json");
+  private final File jsonFile = new File(TESTCACHEDATADIR, "dfs.hosts.json");
+  private final File legacyFile =
+      new File(TESTCACHEDATADIR, "legacy.dfs.hosts.json");
 
   @Before
   public void setUp() throws Exception {
@@ -51,18 +51,28 @@ public class TestCombinedHostsFileReader {
   @After
   public void tearDown() throws Exception {
     // Delete test file after running tests
-    NEW_FILE.delete();
+    newFile.delete();
+
   }
 
   /*
-   * Load the existing test json file
+   * Load the legacy test json file
    */
   @Test
+  public void testLoadLegacyJsonFile() throws Exception {
+    DatanodeAdminProperties[] all =
+        CombinedHostsFileReader.readFile(legacyFile.getAbsolutePath());
+    assertEquals(7, all.length);
+  }
+
+  /*
+   * Load the test json file
+   */
+  @Test
   public void testLoadExistingJsonFile() throws Exception {
-    Set<DatanodeAdminProperties> all =
-        CombinedHostsFileReader.readFile(EXISTING_FILE.getAbsolutePath());
-    assertEquals(7, all.size());
+    DatanodeAdminProperties[] all =
+        CombinedHostsFileReader.readFile(jsonFile.getAbsolutePath());
+    assertEquals(7, all.length);
   }
 
   /*
@@ -70,11 +80,11 @@ public class TestCombinedHostsFileReader {
    */
   @Test
   public void testEmptyCombinedHostsFileReader() throws Exception {
-    FileWriter hosts = new FileWriter(NEW_FILE);
+    FileWriter hosts = new FileWriter(newFile);
     hosts.write("");
     hosts.close();
-    Set<DatanodeAdminProperties> all =
-        CombinedHostsFileReader.readFile(NEW_FILE.getAbsolutePath());
-    assertEquals(0, all.size());
+    DatanodeAdminProperties[] all =
+        CombinedHostsFileReader.readFile(newFile.getAbsolutePath());
+    assertEquals(0, all.length);
   }
 }
@@ -1,7 +1,9 @@
-{"hostName": "host1"}
-{"hostName": "host2", "upgradeDomain": "ud0"}
-{"hostName": "host3", "adminState": "DECOMMISSIONED"}
-{"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"}
-{"hostName": "host5", "port": 8090}
-{"hostName": "host6", "adminState": "IN_MAINTENANCE"}
-{"hostName": "host7", "adminState": "IN_MAINTENANCE", "maintenanceExpireTimeInMS": "112233"}
+[
+  {"hostName": "host1"},
+  {"hostName": "host2", "upgradeDomain": "ud0"},
+  {"hostName": "host3", "adminState": "DECOMMISSIONED"},
+  {"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"},
+  {"hostName": "host5", "port": 8090},
+  {"hostName": "host6", "adminState": "IN_MAINTENANCE"},
+  {"hostName": "host7", "adminState": "IN_MAINTENANCE", "maintenanceExpireTimeInMS": "112233"}
+]
@@ -0,0 +1,7 @@
+{"hostName": "host1"}
+{"hostName": "host2", "upgradeDomain": "ud0"}
+{"hostName": "host3", "adminState": "DECOMMISSIONED"}
+{"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"}
+{"hostName": "host5", "port": 8090}
+{"hostName": "host6", "adminState": "IN_MAINTENANCE"}
+{"hostName": "host7", "adminState": "IN_MAINTENANCE", "maintenanceExpireTimeInMS": "112233"}