svn merge -c 1177100 from trunk for HDFS-2355.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1189979 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2011-10-27 19:57:50 +00:00
parent 436ee96472
commit 58063c520f
7 changed files with 239 additions and 90 deletions

CHANGES.txt

@@ -739,6 +739,9 @@ Release 0.23.0 - Unreleased
     HDFS-1869. mkdirs should use the supplied permission for all of the created
     directories. (Daryn Sharp via szetszwo)

+    HDFS-2355. Federation: enable using the same configuration file across
+    all the nodes in the cluster. (suresh)

   OPTIMIZATIONS

     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
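
The change above is easier to read with a concrete picture of the shared configuration it enables. The fragment below is only an illustrative sketch (the nameservice ids "ns1"/"ns2" and the host names are invented, not part of this commit); the key names are the standard federation keys, and getNamenodeNameServiceId is the helper added to DFSUtil below.

    // Hypothetical shared configuration, deployed unchanged to every node in the cluster.
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.federation.nameservices", "ns1,ns2");
    // Per-nameservice keys; DFSUtil.getNameServiceIdKey() produces these suffixed names.
    conf.set("dfs.namenode.rpc-address.ns1", "nn1.example.com:9000");
    conf.set("dfs.namenode.rpc-address.ns2", "nn2.example.com:9000");
    // Each node matches the configured addresses against its own: on nn1.example.com
    // this returns "ns1", on nn2.example.com it returns "ns2".
    String nsId = DFSUtil.getNamenodeNameServiceId(conf);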

DFSUtil.java

@@ -38,6 +38,7 @@ import java.util.Random;
 import java.util.StringTokenizer;
 import java.util.concurrent.TimeUnit;

+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -576,17 +577,6 @@ public class DFSUtil {
     }
   }

-  /**
-   * Returns the configured nameservice Id
-   *
-   * @param conf
-   *          Configuration object to lookup the nameserviceId
-   * @return nameserviceId string from conf
-   */
-  public static String getNameServiceId(Configuration conf) {
-    return conf.get(DFS_FEDERATION_NAMESERVICE_ID);
-  }

   /** Return used as percentage of capacity */
   public static float getPercentUsed(long used, long capacity) {
     return capacity <= 0 ? 100 : ((float)used * 100.0f)/(float)capacity;
@@ -696,4 +686,77 @@ public class DFSUtil {
         ClientDatanodeProtocol.versionID, addr, ticket, confWithNoIpcIdle,
         NetUtils.getDefaultSocketFactory(conf), socketTimeout);
   }

+  /**
+   * Get name service Id for the {@link NameNode} based on namenode RPC address
+   * matching the local node address.
+   */
+  public static String getNamenodeNameServiceId(Configuration conf) {
+    return getNameServiceId(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
+  }
+
+  /**
+   * Get name service Id for the BackupNode based on backup node RPC address
+   * matching the local node address.
+   */
+  public static String getBackupNameServiceId(Configuration conf) {
+    return getNameServiceId(conf, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+  }
+
+  /**
+   * Get name service Id for the secondary node based on secondary http address
+   * matching the local node address.
+   */
+  public static String getSecondaryNameServiceId(Configuration conf) {
+    return getNameServiceId(conf, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+  }
+
+  /**
+   * Get the nameservice Id by matching the {@code addressKey} with the
+   * address of the local node.
+   *
+   * If {@link DFSConfigKeys#DFS_FEDERATION_NAMESERVICE_ID} is not specifically
+   * configured, this method determines the nameservice Id by matching the local
+   * node's address with the configured addresses. When a match is found, it
+   * returns the nameservice Id from the corresponding configuration key.
+   *
+   * @param conf Configuration
+   * @param addressKey configuration key to get the address.
+   * @return name service Id on success, null on failure.
+   * @throws HadoopIllegalArgumentException on error
+   */
+  private static String getNameServiceId(Configuration conf, String addressKey) {
+    String nameserviceId = conf.get(DFS_FEDERATION_NAMESERVICE_ID);
+    if (nameserviceId != null) {
+      return nameserviceId;
+    }
+
+    Collection<String> ids = getNameServiceIds(conf);
+    if (ids == null || ids.size() == 0) {
+      // Not federation configuration, hence no nameservice Id
+      return null;
+    }
+
+    // Match the rpc address with that of local address
+    int found = 0;
+    for (String id : ids) {
+      String addr = conf.get(getNameServiceIdKey(addressKey, id));
+      InetSocketAddress s = NetUtils.createSocketAddr(addr);
+      if (NetUtils.isLocalAddress(s.getAddress())) {
+        nameserviceId = id;
+        found++;
+      }
+    }
+    if (found > 1) { // Only one address must match the local address
+      throw new HadoopIllegalArgumentException(
+          "Configuration has multiple RPC addresses that match "
+          + "the local node's address. Please configure the system with "
+          + "the parameter " + DFS_FEDERATION_NAMESERVICE_ID);
+    }
+    if (found == 0) {
+      throw new HadoopIllegalArgumentException("Configuration address "
+          + addressKey + " is missing in configuration with name service Id");
+    }
+    return nameserviceId;
+  }
 }

BackupNode.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;

@@ -386,4 +387,9 @@ public class BackupNode extends NameNode {
   String getClusterId() {
     return clusterId;
   }
+
+  @Override
+  protected String getNameServiceId(Configuration conf) {
+    return DFSUtil.getBackupNameServiceId(conf);
+  }
 }

NameNode.java

@@ -30,6 +30,7 @@ import java.util.List;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -453,11 +454,14 @@ public class NameNode {
       throws IOException {
     this.role = role;
     try {
-      initializeGenericKeys(conf);
+      initializeGenericKeys(conf, getNameServiceId(conf));
       initialize(conf);
     } catch (IOException e) {
       this.stop();
       throw e;
+    } catch (HadoopIllegalArgumentException e) {
+      this.stop();
+      throw e;
     }
   }
@@ -762,16 +766,16 @@ public class NameNode {
    * @param conf
    *          Configuration object to lookup specific key and to set the value
    *          to the key passed. Note the conf object is modified
+   * @param nameserviceId name service Id
    * @see DFSUtil#setGenericConf(Configuration, String, String...)
    */
-  public static void initializeGenericKeys(Configuration conf) {
-    final String nameserviceId = DFSUtil.getNameServiceId(conf);
+  public static void initializeGenericKeys(Configuration conf, String
+      nameserviceId) {
     if ((nameserviceId == null) || nameserviceId.isEmpty()) {
       return;
     }
     DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS);
     if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
       URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
           + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
@@ -779,6 +783,14 @@ public class NameNode {
     }
   }

+  /**
+   * Get the name service Id for the node
+   * @return name service Id or null if federation is not configured
+   */
+  protected String getNameServiceId(Configuration conf) {
+    return DFSUtil.getNamenodeNameServiceId(conf);
+  }
+
   /**
    */
   public static void main(String argv[]) throws Exception {
@@ -792,5 +804,4 @@ public class NameNode {
       System.exit(-1);
     }
   }
 }
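
The net effect of the initializeGenericKeys() change is that a node first resolves its own nameservice id (via the new DFSUtil helpers) and then promotes the nameservice-suffixed keys onto the generic key names the rest of the code already reads. A rough sketch of that behavior, mirroring the testConfModification test further down; the key names are the standard DFS keys, the values are invented, and it assumes both keys appear in NameNode.NAMESERVICE_SPECIFIC_KEYS.

    // Illustrative values only; in practice they come from the single shared configuration file.
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.federation.nameservices", "ns1,ns2");
    conf.set("dfs.namenode.rpc-address.ns1", "nn1.example.com:9000");
    conf.set("dfs.namenode.http-address.ns1", "nn1.example.com:50070");

    // Suppose this node resolved its nameservice id to "ns1".
    NameNode.initializeGenericKeys(conf, "ns1");

    // The suffixed values are now also visible under the generic keys, and (per the
    // hunk above) a default filesystem URI is derived from the promoted RPC address.
    conf.get("dfs.namenode.rpc-address");   // "nn1.example.com:9000"
    conf.get("dfs.namenode.http-address");  // "nn1.example.com:50070"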

SecondaryNameNode.java

@@ -38,10 +38,12 @@ import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.PosixParser;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -171,12 +173,17 @@ public class SecondaryNameNode implements Runnable {
   public SecondaryNameNode(Configuration conf,
       CommandLineOpts commandLineOpts) throws IOException {
     try {
-      NameNode.initializeGenericKeys(conf);
+      NameNode.initializeGenericKeys(conf,
+          DFSUtil.getSecondaryNameServiceId(conf));
       initialize(conf, commandLineOpts);
     } catch(IOException e) {
       shutdown();
       LOG.fatal("Failed to start secondary namenode. ", e);
       throw e;
+    } catch(HadoopIllegalArgumentException e) {
+      shutdown();
+      LOG.fatal("Failed to start secondary namenode. ", e);
+      throw e;
     }
   }

TestDFSUtil.java

@@ -29,8 +29,7 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;

-import junit.framework.Assert;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -40,8 +39,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;

 public class TestDFSUtil {
   /**
@@ -84,42 +82,111 @@ public class TestDFSUtil {
     assertEquals(0, bs.length);
   }

+  private Configuration setupAddress(String key) {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
+    conf.set(DFSUtil.getNameServiceIdKey(key, "nn1"), "localhost:9000");
+    return conf;
+  }
+
   /**
-   * Test for
-   * {@link DFSUtil#getNameServiceIds(Configuration)}
-   * {@link DFSUtil#getNameServiceId(Configuration)}
-   * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
+   * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
+   * nameserviceId from the configuration returned
    */
   @Test
-  public void testMultipleNamenodes() throws IOException {
+  public void getNameServiceId() {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
+  }

-    // Test - The configured nameserviceIds are returned
+  /**
+   * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
+   * nameserviceId for namenode is determined based on matching the address with
+   * local node's address
+   */
+  @Test
+  public void getNameNodeNameServiceId() {
+    Configuration conf = setupAddress(DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
+  }
+
+  /**
+   * Test {@link DFSUtil#getBackupNameServiceId(Configuration)} to ensure
+   * nameserviceId for backup node is determined based on matching the address
+   * with local node's address
+   */
+  @Test
+  public void getBackupNameServiceId() {
+    Configuration conf = setupAddress(DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+    assertEquals("nn1", DFSUtil.getBackupNameServiceId(conf));
+  }
+
+  /**
+   * Test {@link DFSUtil#getSecondaryNameServiceId(Configuration)} to ensure
+   * nameserviceId for the secondary node is determined based on matching the
+   * address with local node's address
+   */
+  @Test
+  public void getSecondaryNameServiceId() {
+    Configuration conf = setupAddress(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+    assertEquals("nn1", DFSUtil.getSecondaryNameServiceId(conf));
+  }
+
+  /**
+   * Test {@link DFSUtil#getNameServiceId(Configuration, String)} to ensure
+   * exception is thrown when multiple rpc addresses match the local node's
+   * address
+   */
+  @Test(expected = HadoopIllegalArgumentException.class)
+  public void testGetNameServiceIdException() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
+        "localhost:9000");
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
+        "localhost:9001");
+    DFSUtil.getNamenodeNameServiceId(conf);
+    fail("Expected exception is not thrown");
+  }
+
+  /**
+   * Test {@link DFSUtil#getNameServiceIds(Configuration)}
+   */
+  @Test
+  public void testGetNameServiceIds() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
     Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
     Iterator<String> it = nameserviceIds.iterator();
     assertEquals(2, nameserviceIds.size());
     assertEquals("nn1", it.next().toString());
     assertEquals("nn2", it.next().toString());
+  }

-    // Tests default nameserviceId is returned
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
-    assertEquals("nn1", DFSUtil.getNameServiceId(conf));
+  /**
+   * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
+   * {@link DFSUtil#getNameServiceIdFromAddress(Configuration, InetSocketAddress, String...)
+   * (Configuration)}
+   */
+  @Test
+  public void testMultipleNamenodes() throws IOException {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");

     // Test - configured list of namenodes are returned
     final String NN1_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
     final String NN3_ADDRESS = "localhost:9002";
-    conf.set(DFSUtil.getNameServiceIdKey(
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), NN1_ADDRESS);
-    conf.set(DFSUtil.getNameServiceIdKey(
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS);
-    Collection<InetSocketAddress> nnAddresses =
-        DFSUtil.getNNServiceRpcAddresses(conf);
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
+        NN1_ADDRESS);
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
+        NN2_ADDRESS);
+    Collection<InetSocketAddress> nnAddresses = DFSUtil
+        .getNNServiceRpcAddresses(conf);
     assertEquals(2, nnAddresses.size());
     Iterator<InetSocketAddress> iterator = nnAddresses.iterator();
-    assertEquals(2, nameserviceIds.size());
     InetSocketAddress addr = iterator.next();
     assertEquals("localhost", addr.getHostName());
     assertEquals(9000, addr.getPort());
@@ -128,24 +195,17 @@ public class TestDFSUtil {
     assertEquals(9001, addr.getPort());

     // Test - can look up nameservice ID from service address
-    InetSocketAddress testAddress1 = NetUtils.createSocketAddr(NN1_ADDRESS);
-    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(
-        conf, testAddress1,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertEquals("nn1", nameserviceId);
-    InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
-    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
-        conf, testAddress2,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertEquals("nn2", nameserviceId);
-    InetSocketAddress testAddress3 = NetUtils.createSocketAddr(NN3_ADDRESS);
-    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
-        conf, testAddress3,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertNull(nameserviceId);
+    checkNameServiceId(conf, NN1_ADDRESS, "nn1");
+    checkNameServiceId(conf, NN2_ADDRESS, "nn2");
+    checkNameServiceId(conf, NN3_ADDRESS, null);
+  }
+
+  public void checkNameServiceId(Configuration conf, String addr,
+      String expectedNameServiceId) {
+    InetSocketAddress s = NetUtils.createSocketAddr(addr);
+    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(conf, s,
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals(expectedNameServiceId, nameserviceId);
   }

   /**
@@ -157,17 +217,15 @@ public class TestDFSUtil {
     HdfsConfiguration conf = new HdfsConfiguration();
     final String DEFAULT_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
-    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
+    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);

     InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
     boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertTrue(isDefault);
     InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
     isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertFalse(isDefault);
   }
@@ -176,8 +234,8 @@ public class TestDFSUtil {
   public void testDefaultNamenode() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
     final String hdfs_default = "hdfs://localhost:9999/";
-    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, hdfs_default);
-    // If DFSConfigKeys.DFS_FEDERATION_NAMESERVICES is not set, verify that
+    conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
+    // If DFS_FEDERATION_NAMESERVICES is not set, verify that
     // default namenode address is returned.
     List<InetSocketAddress> addrList = DFSUtil.getNNServiceRpcAddresses(conf);
     assertEquals(1, addrList.size());
@@ -191,9 +249,9 @@ public class TestDFSUtil {
   @Test
   public void testConfModification() throws IOException {
     final HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1");
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
-    final String nameserviceId = DFSUtil.getNameServiceId(conf);
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
+    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    final String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);

     // Set the nameservice specific keys with nameserviceId in the config key
     for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
@@ -202,7 +260,7 @@ public class TestDFSUtil {
     }

     // Initialize generic keys from specific keys
-    NameNode.initializeGenericKeys(conf);
+    NameNode.initializeGenericKeys(conf, nameserviceId);

     // Retrieve the keys without nameserviceId and Ensure generic keys are set
     // to the correct value
@@ -240,14 +298,14 @@ public class TestDFSUtil {
   }

   @Test
-  public void testGetServerInfo(){
+  public void testGetServerInfo() {
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
     String httpsport = DFSUtil.getInfoServer(null, conf, true);
-    Assert.assertEquals("0.0.0.0:50470", httpsport);
+    assertEquals("0.0.0.0:50470", httpsport);
     String httpport = DFSUtil.getInfoServer(null, conf, false);
-    Assert.assertEquals("0.0.0.0:50070", httpport);
+    assertEquals("0.0.0.0:50070", httpport);
   }
 }

TestMulitipleNNDataBlockScanner.java

@@ -96,7 +96,8 @@ public class TestMulitipleNNDataBlockScanner {
     String bpidToShutdown = cluster.getNamesystem(2).getBlockPoolId();
     for (int i = 0; i < 2; i++) {
-      String nsId = DFSUtil.getNameServiceId(cluster.getConfiguration(i));
+      String nsId = DFSUtil.getNamenodeNameServiceId(cluster
+          .getConfiguration(i));
       namenodesBuilder.append(nsId);
       namenodesBuilder.append(",");
     }
@@ -116,7 +117,7 @@ public class TestMulitipleNNDataBlockScanner {
       LOG.info(ex.getMessage());
     }

-    namenodesBuilder.append(DFSUtil.getNameServiceId(cluster
+    namenodesBuilder.append(DFSUtil.getNamenodeNameServiceId(cluster
         .getConfiguration(2)));
     conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
         .toString());