HDFS-6329. WebHdfs does not work if HA is enabled on NN but logical URI is not configured. Contributed by Kihwal Lee.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1593470 13f79535-47bb-0310-9956-ffa450edef68
Author: Kihwal Lee
Date:   2014-05-09 01:46:42 +00:00
Parent: 63fadf0abd
Commit: eac832f92d

5 changed files with 95 additions and 15 deletions

CHANGES.txt

@@ -438,6 +438,8 @@ Release 2.5.0 - UNRELEASED
     HDFS-5381. ExtendedBlock#hashCode should use both blockId and block pool ID
     (Benoy Antony via Colin Patrick McCabe)
 
+    HDFS-6329. WebHdfs does not work if HA is enabled on NN but logical URI is
+    not configured. (kihwal)
 
 Release 2.4.1 - UNRELEASED

NameNode.java

@@ -273,10 +273,11 @@ public class NameNode implements NameNodeStatusMXBean {
   private JvmPauseMonitor pauseMonitor;
   private ObjectName nameNodeStatusBeanName;
   /**
-   * The service name of the delegation token issued by the namenode. It is
-   * the name service id in HA mode, or the rpc address in non-HA mode.
+   * The namenode address that clients will use to access this namenode
+   * or the name service. For HA configurations using logical URI, it
+   * will be the logical address.
    */
-  private String tokenServiceName;
+  private String clientNamenodeAddress;
 
   /** Format a new filesystem.  Destroys any filesystem that may already
    * exist at this location.  **/
@@ -319,7 +320,54 @@ public class NameNode implements NameNodeStatusMXBean {
    *
    * @return The name service id in HA-mode, or the rpc address in non-HA mode
    */
-  public String getTokenServiceName() { return tokenServiceName; }
+  public String getTokenServiceName() {
+    return getClientNamenodeAddress();
+  }
+
+  /**
+   * Set the namenode address that will be used by clients to access this
+   * namenode or name service. This needs to be called before the config
+   * is overriden.
+   */
+  public void setClientNamenodeAddress(Configuration conf) {
+    String nnAddr = conf.get(FS_DEFAULT_NAME_KEY);
+    if (nnAddr == null) {
+      // default fs is not set.
+      clientNamenodeAddress = null;
+      return;
+    }
+
+    LOG.info(FS_DEFAULT_NAME_KEY + " is " + nnAddr);
+    URI nnUri = URI.create(nnAddr);
+
+    String nnHost = nnUri.getHost();
+    if (nnHost == null) {
+      clientNamenodeAddress = null;
+      return;
+    }
+
+    if (DFSUtil.getNameServiceIds(conf).contains(nnHost)) {
+      // host name is logical
+      clientNamenodeAddress = nnHost;
+    } else if (nnUri.getPort() > 0) {
+      // physical address with a valid port
+      clientNamenodeAddress = nnUri.getAuthority();
+    } else {
+      // the port is missing or 0. Figure out real bind address later.
+      clientNamenodeAddress = null;
+      return;
+    }
+
+    LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
+        + " this namenode/service.");
+  }
+
+  /**
+   * Get the namenode address to be used by clients.
+   * @return nn address
+   */
+  public String getClientNamenodeAddress() {
+    return clientNamenodeAddress;
+  }
 
   public static InetSocketAddress getAddress(String address) {
     return NetUtils.createSocketAddr(address, DEFAULT_PORT);
@@ -535,9 +583,14 @@ public class NameNode implements NameNodeStatusMXBean {
     loadNamesystem(conf);
     rpcServer = createRpcServer(conf);
-    final String nsId = getNameServiceId(conf);
-    tokenServiceName = HAUtil.isHAEnabled(conf, nsId) ? nsId : NetUtils
-        .getHostPortString(rpcServer.getRpcAddress());
+
+    if (clientNamenodeAddress == null) {
+      // This is expected for MiniDFSCluster. Set it now using
+      // the RPC server's bind address.
+      clientNamenodeAddress =
+          NetUtils.getHostPortString(rpcServer.getRpcAddress());
+      LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
+          + " this namenode/service.");
+    }
+
     if (NamenodeRole.NAMENODE == role) {
       httpServer.setNameNodeAddress(getNameNodeAddress());
       httpServer.setFSImage(getFSImage());
@@ -683,6 +736,7 @@ public class NameNode implements NameNodeStatusMXBean {
       throws IOException {
     this.conf = conf;
     this.role = role;
+    setClientNamenodeAddress(conf);
     String nsId = getNameServiceId(conf);
     String namenodeId = HAUtil.getNameNodeId(conf, nsId);
     this.haEnabled = HAUtil.isHAEnabled(conf, nsId);

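The core of the NameNode change is the address-selection order in setClientNamenodeAddress above: advertise the logical nameservice name when fs.defaultFS points at one, otherwise the physical host:port, and defer to the RPC server's real bind address when neither is usable. The following standalone sketch (hypothetical class and method names, plain JDK types in place of Hadoop's Configuration and DFSUtil) mirrors that decision order for illustration only; it is not the patched code itself.

import java.net.URI;
import java.util.Set;

// Illustrative sketch only, not Hadoop code. Mirrors the decision order of the
// patched NameNode#setClientNamenodeAddress using plain JDK types.
public class ClientAddressSketch {

  // Returns the address clients should use, or null when it has to be derived
  // later from the RPC server's actual bind address (as the patch does during
  // initialization).
  static String resolveClientAddress(String defaultFs, Set<String> nameServiceIds) {
    if (defaultFs == null) {
      return null;                        // fs.defaultFS not set
    }
    URI uri = URI.create(defaultFs);
    String host = uri.getHost();
    if (host == null) {
      return null;                        // no usable authority
    }
    if (nameServiceIds.contains(host)) {
      return host;                        // logical (HA) nameservice name
    }
    if (uri.getPort() > 0) {
      return uri.getAuthority();          // physical host:port
    }
    return null;                          // port missing or 0
  }

  public static void main(String[] args) {
    Set<String> ns = Set.of("mycluster"); // hypothetical dfs.nameservices value
    System.out.println(resolveClientAddress("hdfs://mycluster", ns));            // mycluster
    System.out.println(resolveClientAddress("hdfs://nn1.example.com:8020", ns)); // nn1.example.com:8020
    System.out.println(resolveClientAddress("hdfs://127.0.0.1:0", ns));          // null
  }
}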
DFSTestUtil.java

@@ -139,8 +139,8 @@ public class DFSTestUtil {
     String clusterId = StartupOption.FORMAT.getClusterId();
     if(clusterId == null || clusterId.isEmpty())
       StartupOption.FORMAT.setClusterId("testClusterID");
-    NameNode.format(conf);
+    // Use a copy of conf as it can be altered by namenode during format.
+    NameNode.format(new Configuration(conf));
   }
 
   /**

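The DFSTestUtil change relies on Configuration's copy constructor producing an independent object, so whatever NameNode.format mutates does not leak back into the caller's conf. A minimal sketch of that behavior (the property value chosen here is arbitrary, for the demo only):

import org.apache.hadoop.conf.Configuration;

// Minimal sketch: Configuration(Configuration other) yields an independent copy,
// so changes made to the copy are not visible through the original.
public class ConfCopySketch {
  public static void main(String[] args) {
    Configuration original = new Configuration(false); // skip default resources
    original.set("fs.defaultFS", "hdfs://127.0.0.1:0");

    Configuration copy = new Configuration(original);
    copy.set("fs.defaultFS", "hdfs://127.0.0.1:12345");

    System.out.println(original.get("fs.defaultFS")); // hdfs://127.0.0.1:0
    System.out.println(copy.get("fs.defaultFS"));     // hdfs://127.0.0.1:12345
  }
}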
MiniDFSCluster.java

@@ -760,8 +760,11 @@ public class MiniDFSCluster {
     if (!federation && nnTopology.countNameNodes() == 1) {
       NNConf onlyNN = nnTopology.getOnlyNameNode();
-      // we only had one NN, set DEFAULT_NAME for it
-      conf.set(FS_DEFAULT_NAME_KEY, "127.0.0.1:" + onlyNN.getIpcPort());
+      // we only had one NN, set DEFAULT_NAME for it. If not explicitly
+      // specified initially, the port will be 0 to make NN bind to any
+      // available port. It will be set to the right address after
+      // NN is started.
+      conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:" + onlyNN.getIpcPort());
     }
 
     List<String> allNsIds = Lists.newArrayList();
@@ -777,6 +780,7 @@ public class MiniDFSCluster {
     int nnCounter = 0;
     for (MiniDFSNNTopology.NSConf nameservice : nnTopology.getNameservices()) {
       String nsId = nameservice.getId();
+      String lastDefaultFileSystem = null;
 
       Preconditions.checkArgument(
           !federation || nsId != null,
@@ -860,10 +864,19 @@ public class MiniDFSCluster {
       for (NNConf nn : nameservice.getNNs()) {
         initNameNodeConf(conf, nsId, nn.getNnId(), manageNameDfsDirs,
             enableManagedDfsDirsRedundancy, nnCounter);
-        createNameNode(nnCounter++, conf, numDataNodes, false, operation,
+        createNameNode(nnCounter, conf, numDataNodes, false, operation,
             clusterId, nsId, nn.getNnId());
+
+        // Record the last namenode uri
+        if (nameNodes[nnCounter] != null && nameNodes[nnCounter].conf != null) {
+          lastDefaultFileSystem =
+              nameNodes[nnCounter].conf.get(FS_DEFAULT_NAME_KEY);
+        }
+        nnCounter++;
+      }
+      if (!federation && lastDefaultFileSystem != null) {
+        // Set the default file system to the actual bind address of NN.
+        conf.set(FS_DEFAULT_NAME_KEY, lastDefaultFileSystem);
       }
     }
   }
@@ -977,7 +990,8 @@ public class MiniDFSCluster {
       operation.setClusterId(clusterId);
     }
 
-    // Start the NameNode
+    // Start the NameNode after saving the default file system.
+    String originalDefaultFs = conf.get(FS_DEFAULT_NAME_KEY);
     String[] args = createArgs(operation);
     NameNode nn = NameNode.createNameNode(args, conf);
     if (operation == StartupOption.RECOVER) {
@@ -1001,6 +1015,12 @@
         DFS_NAMENODE_HTTP_ADDRESS_KEY);
     nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId,
         operation, new Configuration(conf));
+    // Restore the default fs name
+    if (originalDefaultFs == null) {
+      conf.set(FS_DEFAULT_NAME_KEY, "");
+    } else {
+      conf.set(FS_DEFAULT_NAME_KEY, originalDefaultFs);
+    }
   }
 
   /**

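Taken together, the MiniDFSCluster changes let a test start the namenode on an ephemeral port (fs.defaultFS initially hdfs://127.0.0.1:0) and then read back a rewritten fs.defaultFS carrying the real bind address. A hedged sketch of a test relying on that, assuming the Configuration passed to the builder is the one that gets rewritten (as the diff above indicates); class and path names here are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Sketch of a test relying on the rewritten fs.defaultFS; not part of the patch.
public class MiniClusterDefaultFsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
      // After startup the cluster has replaced port 0 with the namenode's
      // actual bind address, so a plain client can be created from conf.
      System.out.println("fs.defaultFS = " + conf.get("fs.defaultFS"));
      FileSystem fs = FileSystem.get(conf);
      fs.mkdirs(new Path("/sketch"));
      System.out.println(fs.exists(new Path("/sketch"))); // true
    } finally {
      cluster.shutdown();
    }
  }
}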
TestWebHDFSForHA.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
@@ -37,6 +38,7 @@ import org.junit.Test;
 import java.io.IOException;
 import java.net.URI;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
@@ -119,6 +121,8 @@ public class TestWebHDFSForHA {
   @Test
   public void testFailoverAfterOpen() throws IOException {
     Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
+    conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME +
+        "://" + LOGICAL_NAME);
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
     final Path p = new Path("/test");
@@ -152,4 +156,4 @@
       }
     }
   }
 }
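The test change sets fs.defaultFS to the logical hdfs:// URI so that the namenodes advertise the logical name, and the test then addresses the cluster through WebHDFS by that same name. A rough, hedged sketch of that client-side view follows; the host names and ports are hypothetical, the keys shown are the standard HA keys (dfs.nameservices, dfs.ha.namenodes.*, dfs.namenode.http-address.*), and depending on the Hadoop version additional client failover settings may be required. It only illustrates the shape of the configuration, not a runnable connection.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hedged sketch of a WebHDFS client bound to a logical nameservice.
public class WebHdfsLogicalUriSketch {
  public static void main(String[] args) throws Exception {
    final String nsId = "mycluster";                    // logical name
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://" + nsId);
    conf.set("dfs.nameservices", nsId);
    conf.set("dfs.ha.namenodes." + nsId, "nn1,nn2");
    conf.set("dfs.namenode.http-address." + nsId + ".nn1", "nn1.example.com:50070");
    conf.set("dfs.namenode.http-address." + nsId + ".nn2", "nn2.example.com:50070");

    // The client addresses the cluster by its logical name only; resolving
    // which namenode is active happens underneath.
    FileSystem fs = FileSystem.get(URI.create("webhdfs://" + nsId), conf);
    System.out.println(fs.exists(new Path("/test")));
  }
}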