svn merge -c 1177100 from trunk for HDFS-2355.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1189979 13f79535-47bb-0310-9956-ffa450edef68
commit 58063c520f
parent 436ee96472

@@ -739,6 +739,9 @@ Release 0.23.0 - Unreleased
    HDFS-1869. mkdirs should use the supplied permission for all of the created
    directories. (Daryn Sharp via szetszwo)

    HDFS-2355. Federation: enable using the same configuration file across
    all the nodes in the cluster. (suresh)

  OPTIMIZATIONS

    HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

@@ -38,6 +38,7 @@ import java.util.Random;
import java.util.StringTokenizer;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;

@@ -576,17 +577,6 @@ public class DFSUtil {
    }
  }

  /**
   * Returns the configured nameservice Id
   *
   * @param conf
   *          Configuration object to lookup the nameserviceId
   * @return nameserviceId string from conf
   */
  public static String getNameServiceId(Configuration conf) {
    return conf.get(DFS_FEDERATION_NAMESERVICE_ID);
  }

  /** Return used as percentage of capacity */
  public static float getPercentUsed(long used, long capacity) {
    return capacity <= 0 ? 100 : ((float)used * 100.0f)/(float)capacity;

@@ -696,4 +686,77 @@ public class DFSUtil {
        ClientDatanodeProtocol.versionID, addr, ticket, confWithNoIpcIdle,
        NetUtils.getDefaultSocketFactory(conf), socketTimeout);
  }

  /**
   * Get name service Id for the {@link NameNode} based on namenode RPC address
   * matching the local node address.
   */
  public static String getNamenodeNameServiceId(Configuration conf) {
    return getNameServiceId(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
  }

  /**
   * Get name service Id for the BackupNode based on backup node RPC address
   * matching the local node address.
   */
  public static String getBackupNameServiceId(Configuration conf) {
    return getNameServiceId(conf, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
  }

  /**
   * Get name service Id for the secondary node based on secondary http address
   * matching the local node address.
   */
  public static String getSecondaryNameServiceId(Configuration conf) {
    return getNameServiceId(conf, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
  }

  /**
   * Get the nameservice Id by matching the {@code addressKey} with the
   * the address of the local node.
   *
   * If {@link DFSConfigKeys#DFS_FEDERATION_NAMESERVICE_ID} is not specifically
   * configured, this method determines the nameservice Id by matching the local
   * nodes address with the configured addresses. When a match is found, it
   * returns the nameservice Id from the corresponding configuration key.
   *
   * @param conf Configuration
   * @param addressKey configuration key to get the address.
   * @return name service Id on success, null on failure.
   * @throws HadoopIllegalArgumentException on error
   */
  private static String getNameServiceId(Configuration conf, String addressKey) {
    String nameserviceId = conf.get(DFS_FEDERATION_NAMESERVICE_ID);
    if (nameserviceId != null) {
      return nameserviceId;
    }

    Collection<String> ids = getNameServiceIds(conf);
    if (ids == null || ids.size() == 0) {
      // Not federation configuration, hence no nameservice Id
      return null;
    }

    // Match the rpc address with that of local address
    int found = 0;
    for (String id : ids) {
      String addr = conf.get(getNameServiceIdKey(addressKey, id));
      InetSocketAddress s = NetUtils.createSocketAddr(addr);
      if (NetUtils.isLocalAddress(s.getAddress())) {
        nameserviceId = id;
        found++;
      }
    }
    if (found > 1) { // Only one address must match the local address
      throw new HadoopIllegalArgumentException(
          "Configuration has multiple RPC addresses that matches "
              + "the local node's address. Please configure the system with "
              + "the parameter " + DFS_FEDERATION_NAMESERVICE_ID);
    }
    if (found == 0) {
      throw new HadoopIllegalArgumentException("Configuration address "
          + addressKey + " is missing in configuration with name service Id");
    }
    return nameserviceId;
  }
}

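For illustration only, not part of this commit: a minimal sketch of how a node could resolve its nameservice Id from one shared configuration using the helpers added above. The nameservice names and host addresses are made-up examples, and the suffixed key is built by hand (mirroring the "key.nameserviceId" convention that DFSUtil.getNameServiceIdKey encodes) to keep the sketch self-contained.

// Illustrative sketch; assumes a Hadoop 0.23 classpath.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class SharedConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // One configuration, two nameservices; every node can load the same file.
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
    // Per-nameservice keys carry the nameservice id as a suffix.
    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".ns1",
        "localhost:9000");   // matches the local node in this sketch
    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".ns2",
        "192.0.2.1:9000");   // documentation address, never local

    // The namenode matches its local address against the per-nameservice
    // rpc-address keys, so dfs.federation.nameservice.id need not be set.
    String nsId = DFSUtil.getNamenodeNameServiceId(conf);   // "ns1" here

    // The suffixed keys for that nameservice are then promoted onto the
    // generic keys before the rest of the code reads the configuration.
    NameNode.initializeGenericKeys(conf, nsId);
    System.out.println(nsId + " -> "
        + conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
  }
}
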
@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;

@@ -386,4 +387,9 @@ public class BackupNode extends NameNode {
  String getClusterId() {
    return clusterId;
  }

  @Override
  protected String getNameServiceId(Configuration conf) {
    return DFSUtil.getBackupNameServiceId(conf);
  }
}

@@ -30,6 +30,7 @@ import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

@@ -453,11 +454,14 @@ public class NameNode {
      throws IOException {
    this.role = role;
    try {
      initializeGenericKeys(conf);
      initializeGenericKeys(conf, getNameServiceId(conf));
      initialize(conf);
    } catch (IOException e) {
      this.stop();
      throw e;
    } catch (HadoopIllegalArgumentException e) {
      this.stop();
      throw e;
    }
  }

@@ -762,16 +766,16 @@ public class NameNode {
   * @param conf
   *          Configuration object to lookup specific key and to set the value
   *          to the key passed. Note the conf object is modified
   * @param nameserviceId name service Id
   * @see DFSUtil#setGenericConf(Configuration, String, String...)
   */
  public static void initializeGenericKeys(Configuration conf) {
    final String nameserviceId = DFSUtil.getNameServiceId(conf);
  public static void initializeGenericKeys(Configuration conf, String
      nameserviceId) {
    if ((nameserviceId == null) || nameserviceId.isEmpty()) {
      return;
    }

    DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS);

    if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
      URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
          + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));

@@ -779,6 +783,14 @@ public class NameNode {
    }
  }

  /**
   * Get the name service Id for the node
   * @return name service Id or null if federation is not configured
   */
  protected String getNameServiceId(Configuration conf) {
    return DFSUtil.getNamenodeNameServiceId(conf);
  }

  /**
   */
  public static void main(String argv[]) throws Exception {

@@ -792,5 +804,4 @@ public class NameNode {
      System.exit(-1);
    }
  }

}

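As context for the initializeGenericKeys change above, a small sketch of the suffixed-key promotion it relies on. Assumptions beyond the hunk itself: the "key.<nameserviceId>" suffix convention and the DFSUtil.setGenericConf(Configuration, String, String...) helper named in the @see tag; the nameservice id "ns1" and the host are hypothetical.

// Sketch only, not part of the commit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;

public class GenericKeysSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Value stored under the suffixed key dfs.namenode.rpc-address.ns1
    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".ns1",
        "nn1.example.com:9000");

    // setGenericConf copies key.ns1 onto key, so code that only knows the
    // generic, un-suffixed key keeps working unchanged.
    DFSUtil.setGenericConf(conf, "ns1",
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
    // Prints nn1.example.com:9000
    System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
  }
}
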
@@ -38,10 +38,12 @@ import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;

import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
import org.apache.hadoop.hdfs.HdfsConfiguration;

@@ -171,12 +173,17 @@ public class SecondaryNameNode implements Runnable {
  public SecondaryNameNode(Configuration conf,
      CommandLineOpts commandLineOpts) throws IOException {
    try {
      NameNode.initializeGenericKeys(conf);
      NameNode.initializeGenericKeys(conf,
          DFSUtil.getSecondaryNameServiceId(conf));
      initialize(conf, commandLineOpts);
    } catch(IOException e) {
      shutdown();
      LOG.fatal("Failed to start secondary namenode. ", e);
      throw e;
    } catch(HadoopIllegalArgumentException e) {
      shutdown();
      LOG.fatal("Failed to start secondary namenode. ", e);
      throw e;
    }
  }

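For the secondary namenode path above, a similar hedged sketch: the secondary resolves its nameservice Id by matching the configured secondary HTTP address against its own local addresses. The nameservice id and port are illustrative values only.

// Sketch only; assumes the helper added by this commit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class SecondaryIdSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
    // Suffixed secondary http-address key; "localhost" matches any machine.
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY + ".ns1",
        "localhost:50090");
    // Prints ns1; with no matching address (or more than one) the lookup
    // throws HadoopIllegalArgumentException instead.
    System.out.println(DFSUtil.getSecondaryNameServiceId(conf));
  }
}
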
@@ -29,8 +29,7 @@ import java.util.Collection;
import java.util.Iterator;
import java.util.List;

import junit.framework.Assert;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

@@ -40,8 +39,7 @@ import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;

import static org.apache.hadoop.hdfs.DFSConfigKeys.*;

public class TestDFSUtil {
  /**

@@ -77,49 +75,118 @@ public class TestDFSUtil {
    }

    assertTrue("expected 1 corrupt files but got " + corruptCount,
        corruptCount == 1);
        corruptCount == 1);

    // test an empty location
    bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
    assertEquals(0, bs.length);
  }

  private Configuration setupAddress(String key) {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
    conf.set(DFSUtil.getNameServiceIdKey(key, "nn1"), "localhost:9000");
    return conf;
  }

  /**
   * Test for
   * {@link DFSUtil#getNameServiceIds(Configuration)}
   * {@link DFSUtil#getNameServiceId(Configuration)}
   * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
   * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
   * nameserviceId from the configuration returned
   */
  @Test
  public void testMultipleNamenodes() throws IOException {
  public void getNameServiceId() {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
  }

    // Test - The configured nameserviceIds are returned
  /**
   * Test {@link DFSUtil#getNameNodeNameServiceId(Configuration)} to ensure
   * nameserviceId for namenode is determined based on matching the address with
   * local node's address
   */
  @Test
  public void getNameNodeNameServiceId() {
    Configuration conf = setupAddress(DFS_NAMENODE_RPC_ADDRESS_KEY);
    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
  }

  /**
   * Test {@link DFSUtil#getBackupNameServiceId(Configuration)} to ensure
   * nameserviceId for backup node is determined based on matching the address
   * with local node's address
   */
  @Test
  public void getBackupNameServiceId() {
    Configuration conf = setupAddress(DFS_NAMENODE_BACKUP_ADDRESS_KEY);
    assertEquals("nn1", DFSUtil.getBackupNameServiceId(conf));
  }

  /**
   * Test {@link DFSUtil#getSecondaryNameServiceId(Configuration)} to ensure
   * nameserviceId for backup node is determined based on matching the address
   * with local node's address
   */
  @Test
  public void getSecondaryNameServiceId() {
    Configuration conf = setupAddress(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
    assertEquals("nn1", DFSUtil.getSecondaryNameServiceId(conf));
  }

  /**
   * Test {@link DFSUtil#getNameServiceId(Configuration, String))} to ensure
   * exception is thrown when multiple rpc addresses match the local node's
   * address
   */
  @Test(expected = HadoopIllegalArgumentException.class)
  public void testGetNameServiceIdException() {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
        "localhost:9000");
    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
        "localhost:9001");
    DFSUtil.getNamenodeNameServiceId(conf);
    fail("Expected exception is not thrown");
  }

  /**
   * Test {@link DFSUtil#getNameServiceIds(Configuration)}
   */
  @Test
  public void testGetNameServiceIds() {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
    Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
    Iterator<String> it = nameserviceIds.iterator();
    assertEquals(2, nameserviceIds.size());
    assertEquals("nn1", it.next().toString());
    assertEquals("nn2", it.next().toString());
  }

    // Tests default nameserviceId is returned
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
    assertEquals("nn1", DFSUtil.getNameServiceId(conf));

  /**
   * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
   * {@link DFSUtil#getNameServiceIdFromAddress(Configuration, InetSocketAddress, String...)
   * (Configuration)}
   */
  @Test
  public void testMultipleNamenodes() throws IOException {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");

    // Test - configured list of namenodes are returned
    final String NN1_ADDRESS = "localhost:9000";
    final String NN2_ADDRESS = "localhost:9001";
    final String NN3_ADDRESS = "localhost:9002";
    conf.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), NN1_ADDRESS);
    conf.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS);
    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
        NN1_ADDRESS);
    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
        NN2_ADDRESS);

    Collection<InetSocketAddress> nnAddresses =
        DFSUtil.getNNServiceRpcAddresses(conf);
    Collection<InetSocketAddress> nnAddresses = DFSUtil
        .getNNServiceRpcAddresses(conf);
    assertEquals(2, nnAddresses.size());
    Iterator<InetSocketAddress> iterator = nnAddresses.iterator();
    assertEquals(2, nameserviceIds.size());
    InetSocketAddress addr = iterator.next();
    assertEquals("localhost", addr.getHostName());
    assertEquals(9000, addr.getPort());

@@ -128,24 +195,17 @@ public class TestDFSUtil {
    assertEquals(9001, addr.getPort());

    // Test - can look up nameservice ID from service address
    InetSocketAddress testAddress1 = NetUtils.createSocketAddr(NN1_ADDRESS);
    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(
        conf, testAddress1,
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
    assertEquals("nn1", nameserviceId);
    InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
        conf, testAddress2,
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
    assertEquals("nn2", nameserviceId);
    InetSocketAddress testAddress3 = NetUtils.createSocketAddr(NN3_ADDRESS);
    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
        conf, testAddress3,
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
    assertNull(nameserviceId);
    checkNameServiceId(conf, NN1_ADDRESS, "nn1");
    checkNameServiceId(conf, NN2_ADDRESS, "nn2");
    checkNameServiceId(conf, NN3_ADDRESS, null);
  }

  public void checkNameServiceId(Configuration conf, String addr,
      String expectedNameServiceId) {
    InetSocketAddress s = NetUtils.createSocketAddr(addr);
    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(conf, s,
        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
    assertEquals(expectedNameServiceId, nameserviceId);
  }

  /**

@@ -157,17 +217,15 @@ public class TestDFSUtil {
    HdfsConfiguration conf = new HdfsConfiguration();
    final String DEFAULT_ADDRESS = "localhost:9000";
    final String NN2_ADDRESS = "localhost:9001";
    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);

    InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
    boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
    assertTrue(isDefault);
    InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
    isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
    assertFalse(isDefault);
  }

@@ -176,8 +234,8 @@ public class TestDFSUtil {
  public void testDefaultNamenode() throws IOException {
    HdfsConfiguration conf = new HdfsConfiguration();
    final String hdfs_default = "hdfs://localhost:9999/";
    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, hdfs_default);
    // If DFSConfigKeys.DFS_FEDERATION_NAMESERVICES is not set, verify that
    conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
    // If DFS_FEDERATION_NAMESERVICES is not set, verify that
    // default namenode address is returned.
    List<InetSocketAddress> addrList = DFSUtil.getNNServiceRpcAddresses(conf);
    assertEquals(1, addrList.size());

@@ -191,9 +249,9 @@ public class TestDFSUtil {
  @Test
  public void testConfModification() throws IOException {
    final HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1");
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
    final String nameserviceId = DFSUtil.getNameServiceId(conf);
    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
    final String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);

    // Set the nameservice specific keys with nameserviceId in the config key
    for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {

@@ -202,7 +260,7 @@ public class TestDFSUtil {
    }

    // Initialize generic keys from specific keys
    NameNode.initializeGenericKeys(conf);
    NameNode.initializeGenericKeys(conf, nameserviceId);

    // Retrieve the keys without nameserviceId and Ensure generic keys are set
    // to the correct value

@@ -240,14 +298,14 @@ public class TestDFSUtil {
  }

  @Test
  public void testGetServerInfo(){
  public void testGetServerInfo() {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    String httpsport = DFSUtil.getInfoServer(null, conf, true);
    Assert.assertEquals("0.0.0.0:50470", httpsport);
    assertEquals("0.0.0.0:50470", httpsport);
    String httpport = DFSUtil.getInfoServer(null, conf, false);
    Assert.assertEquals("0.0.0.0:50070", httpport);
    assertEquals("0.0.0.0:50070", httpport);
  }

}

@@ -96,7 +96,8 @@ public class TestMulitipleNNDataBlockScanner {

    String bpidToShutdown = cluster.getNamesystem(2).getBlockPoolId();
    for (int i = 0; i < 2; i++) {
      String nsId = DFSUtil.getNameServiceId(cluster.getConfiguration(i));
      String nsId = DFSUtil.getNamenodeNameServiceId(cluster
          .getConfiguration(i));
      namenodesBuilder.append(nsId);
      namenodesBuilder.append(",");
    }

@@ -116,7 +117,7 @@ public class TestMulitipleNNDataBlockScanner {
      LOG.info(ex.getMessage());
    }

    namenodesBuilder.append(DFSUtil.getNameServiceId(cluster
    namenodesBuilder.append(DFSUtil.getNamenodeNameServiceId(cluster
        .getConfiguration(2)));
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
        .toString());