HDFS-12473. Change hosts JSON file format.

parent a81167e2ec
commit c54310a638

CombinedHostsFileReader.java
@@ -18,59 +18,88 @@
 package org.apache.hadoop.hdfs.util;
 
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.EOFException;
 import java.io.FileInputStream;
 import java.io.InputStreamReader;
 import java.io.IOException;
 import java.io.Reader;
 
+import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.Set;
-import java.util.HashSet;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.map.ObjectMapper;
-
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
- * Reader support for JSON based datanode configuration, an alternative
+ * Reader support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
+ * [
+ * {"hostName": "host1"},
+ * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
  * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileReader {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(CombinedHostsFileReader.class);
+
   private CombinedHostsFileReader() {
   }
 
   /**
    * Deserialize a set of DatanodeAdminProperties from a json file.
-   * @param hostsFile the input json file to read from.
+   * @param hostsFile the input json file to read from
    * @return the set of DatanodeAdminProperties
    * @throws IOException
    */
-  public static Set<DatanodeAdminProperties>
+  public static DatanodeAdminProperties[]
       readFile(final String hostsFile) throws IOException {
-    HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
-    ObjectMapper mapper = new ObjectMapper();
+    DatanodeAdminProperties[] allDNs = new DatanodeAdminProperties[0];
+    ObjectMapper objectMapper = new ObjectMapper();
+    boolean tryOldFormat = false;
+    try (Reader input =
+        new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
+      allDNs = objectMapper.readValue(input, DatanodeAdminProperties[].class);
+    } catch (JsonMappingException jme) {
+      // The old format doesn't have json top-level token to enclose the array.
+      // For backward compatibility, try parsing the old format.
+      tryOldFormat = true;
+      LOG.warn("{} has invalid JSON format." +
+          "Try the old format without top-level token defined.", hostsFile);
+    } catch(EOFException eof) {
+      LOG.warn("{} is empty.", hostsFile);
+    }
+
+    if (tryOldFormat) {
+      JsonFactory jsonFactory = new JsonFactory();
+      List<DatanodeAdminProperties> all = new ArrayList<>();
       try (Reader input =
           new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
         Iterator<DatanodeAdminProperties> iterator =
-            mapper.readValues(new JsonFactory().createJsonParser(input),
+            objectMapper.readValues(jsonFactory.createJsonParser(input),
                 DatanodeAdminProperties.class);
         while (iterator.hasNext()) {
           DatanodeAdminProperties properties = iterator.next();
-          allDNs.add(properties);
+          all.add(properties);
         }
       }
+      allDNs = all.toArray(new DatanodeAdminProperties[all.size()]);
+    }
     return allDNs;
   }
 }

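With this change, CombinedHostsFileReader.readFile() returns a DatanodeAdminProperties[] instead of a Set. Below is a minimal caller-side sketch that is not part of the patch; the file path and the printed fields are illustrative only, and the getters are the plain bean accessors on DatanodeAdminProperties.

import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.util.CombinedHostsFileReader;

public class ReadHostsExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical path; in a real cluster this would be the JSON hosts file.
    DatanodeAdminProperties[] all =
        CombinedHostsFileReader.readFile("/tmp/dfs.hosts.json");
    for (DatanodeAdminProperties dn : all) {
      // Print each configured datanode entry.
      System.out.println(dn.getHostName() + ":" + dn.getPort()
          + " " + dn.getAdminState());
    }
  }
}
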
CombinedHostsFileWriter.java
@@ -25,22 +25,25 @@ import java.io.Writer;
 import java.util.Set;
 
+import org.codehaus.jackson.map.ObjectMapper;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.codehaus.jackson.map.ObjectMapper;
-
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 
 /**
- * Writer support for JSON based datanode configuration, an alternative
+ * Writer support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
+ * [
+ * {"hostName": "host1"},
+ * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
  * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable

@@ -56,14 +59,11 @@ public final class CombinedHostsFileWriter {
    */
   public static void writeFile(final String hostsFile,
       final Set<DatanodeAdminProperties> allDNs) throws IOException {
-    StringBuilder configs = new StringBuilder();
+    final ObjectMapper objectMapper = new ObjectMapper();
 
     try (Writer output =
         new OutputStreamWriter(new FileOutputStream(hostsFile), "UTF-8")) {
-      for (DatanodeAdminProperties datanodeAdminProperties: allDNs) {
-        ObjectMapper mapper = new ObjectMapper();
-        configs.append(mapper.writeValueAsString(datanodeAdminProperties));
-      }
-      output.write(configs.toString());
+      objectMapper.writeValue(output, allDNs);
     }
   }
 }

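The writer now serializes the whole Set with a single objectMapper.writeValue() call, which is what produces the enclosing JSON array. A small sketch of how a caller might produce a hosts file in the new format; the host names, values, and output path are illustrative and the setters are assumed to be the standard bean setters on DatanodeAdminProperties.

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.util.CombinedHostsFileWriter;

public class WriteHostsExample {
  public static void main(String[] args) throws Exception {
    Set<DatanodeAdminProperties> dns = new HashSet<>();
    DatanodeAdminProperties host1 = new DatanodeAdminProperties();
    host1.setHostName("host1");
    host1.setUpgradeDomain("ud0");
    dns.add(host1);
    // Writes a JSON array such as [{"hostName":"host1","upgradeDomain":"ud0",...}]
    CombinedHostsFileWriter.writeFile("/tmp/dfs.hosts.json", dns);
  }
}
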
CombinedHostFileManager.java
@@ -39,7 +39,6 @@ import java.net.InetSocketAddress;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.Map;
-import java.util.Set;
 
 import com.google.common.base.Predicate;
 
@@ -194,7 +193,7 @@ public class CombinedHostFileManager extends HostConfigManager {
   }
   private void refresh(final String hostsFile) throws IOException {
     HostProperties hostProps = new HostProperties();
-    Set<DatanodeAdminProperties> all =
+    DatanodeAdminProperties[] all =
         CombinedHostsFileReader.readFile(hostsFile);
     for(DatanodeAdminProperties properties : all) {
       InetSocketAddress addr = parseEntry(hostsFile,

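CombinedHostFileManager.refresh() now iterates the returned array directly. For context, here is a hedged sketch of the configuration that points the NameNode at this manager and at the JSON hosts file; the key names are assumptions based on commonly documented HDFS settings and are not part of this patch.

import org.apache.hadoop.conf.Configuration;

public class CombinedHostsConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Assumed keys: "dfs.namenode.hosts.provider.classname" selects the host
    // file manager implementation, and "dfs.hosts" points at the JSON file.
    conf.set("dfs.namenode.hosts.provider.classname",
        "org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager");
    conf.set("dfs.hosts", "/etc/hadoop/conf/dfs.hosts.json");
    System.out.println(conf.get("dfs.namenode.hosts.provider.classname"));
  }
}
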
TestCombinedHostsFileReader.java
@@ -20,28 +20,29 @@ package org.apache.hadoop.hdfs.util;
 import java.io.File;
 import java.io.FileWriter;
 
-import java.util.Set;
-
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
 import org.junit.After;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
 
-/*
- * Test for JSON based HostsFileReader
+/**
+ * Test for JSON based HostsFileReader.
  */
 public class TestCombinedHostsFileReader {
 
   // Using /test/build/data/tmp directory to store temporary files
-  static final String HOSTS_TEST_DIR = new File(System.getProperty(
-      "test.build.data", "/tmp")).getAbsolutePath();
-  File NEW_FILE = new File(HOSTS_TEST_DIR, "dfs.hosts.new.json");
+  static final String HOSTSTESTDIR = GenericTestUtils.getTestDir()
+      .getAbsolutePath();
+  private final File newFile = new File(HOSTSTESTDIR, "dfs.hosts.new.json");
 
-  static final String TEST_CACHE_DATA_DIR =
+  static final String TESTCACHEDATADIR =
       System.getProperty("test.cache.data", "build/test/cache");
-  File EXISTING_FILE = new File(TEST_CACHE_DATA_DIR, "dfs.hosts.json");
+  private final File jsonFile = new File(TESTCACHEDATADIR, "dfs.hosts.json");
+  private final File legacyFile =
+      new File(TESTCACHEDATADIR, "legacy.dfs.hosts.json");
 
   @Before
   public void setUp() throws Exception {

@@ -50,18 +51,28 @@ public class TestCombinedHostsFileReader {
   @After
   public void tearDown() throws Exception {
     // Delete test file after running tests
-    NEW_FILE.delete();
+    newFile.delete();
   }
 
   /*
-   * Load the existing test json file
+   * Load the legacy test json file
+   */
+  @Test
+  public void testLoadLegacyJsonFile() throws Exception {
+    DatanodeAdminProperties[] all =
+        CombinedHostsFileReader.readFile(legacyFile.getAbsolutePath());
+    assertEquals(5, all.length);
+  }
+
+  /*
+   * Load the test json file
    */
   @Test
   public void testLoadExistingJsonFile() throws Exception {
-    Set<DatanodeAdminProperties> all =
-        CombinedHostsFileReader.readFile(EXISTING_FILE.getAbsolutePath());
-    assertEquals(5, all.size());
+    DatanodeAdminProperties[] all =
+        CombinedHostsFileReader.readFile(jsonFile.getAbsolutePath());
+    assertEquals(5, all.length);
   }
 
   /*

@@ -69,11 +80,11 @@ public class TestCombinedHostsFileReader {
    */
   @Test
   public void testEmptyCombinedHostsFileReader() throws Exception {
-    FileWriter hosts = new FileWriter(NEW_FILE);
+    FileWriter hosts = new FileWriter(newFile);
     hosts.write("");
     hosts.close();
-    Set<DatanodeAdminProperties> all =
-        CombinedHostsFileReader.readFile(NEW_FILE.getAbsolutePath());
-    assertEquals(0, all.size());
+    DatanodeAdminProperties[] all =
+        CombinedHostsFileReader.readFile(newFile.getAbsolutePath());
+    assertEquals(0, all.length);
   }
 }

dfs.hosts.json
@@ -1,5 +1,7 @@
-{"hostName": "host1"}
-{"hostName": "host2", "upgradeDomain": "ud0"}
-{"hostName": "host3", "adminState": "DECOMMISSIONED"}
-{"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"}
-{"hostName": "host5", "port": 8090}
+[
+{"hostName": "host1"},
+{"hostName": "host2", "upgradeDomain": "ud0"},
+{"hostName": "host3", "adminState": "DECOMMISSIONED"},
+{"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"},
+{"hostName": "host5", "port": 8090}
+]

legacy.dfs.hosts.json (new file)
@@ -0,0 +1,5 @@
+{"hostName": "host1"}
+{"hostName": "host2", "upgradeDomain": "ud0"}
+{"hostName": "host3", "adminState": "DECOMMISSIONED"}
+{"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"}
+{"hostName": "host5", "port": 8090}

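The two test data files above show the new and the legacy layout side by side: the new dfs.hosts.json is a single JSON array, while legacy.dfs.hosts.json is a bare concatenation of objects with no enclosing token. A minimal, self-contained sketch (Jackson 1.x, independent of the patch) of why the reader's first readValue() attempt fails on the legacy layout and falls back to token-by-token parsing; the class name and sample strings are illustrative.

import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;

public class LegacyFormatProbe {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    String legacy = "{\"hostName\": \"host1\"}\n{\"hostName\": \"host2\"}";
    try {
      // Binding a bare object stream to an array type fails immediately...
      mapper.readValue(legacy, Object[].class);
    } catch (JsonMappingException e) {
      // ...which is the exception CombinedHostsFileReader catches before
      // re-parsing the file with readValues().
      System.out.println("legacy format detected: " + e.getMessage());
    }
  }
}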