HDFS-3003. Merge r1293338 from trunk to 0.23

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1298242 13f79535-47bb-0310-9956-ffa450edef68
Author: Suresh Srinivas
Date:   2012-03-08 00:46:17 +00:00
commit 8893d46529
parent f7f68d2106

14 changed files with 34 additions and 31 deletions
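
The change is mechanical across all fourteen files: every call to the
NameNode-local helper NameNode.getHostPortString(InetSocketAddress) is
replaced with the shared NetUtils.getHostPortString(InetSocketAddress)
from hadoop-common, and the now-unused helper is deleted from
NameNode.java. A minimal sketch of the new call-site pattern, assuming
hadoop-common on the classpath; the class name, host name, and port
below are illustrative, not taken from this commit:

import java.net.InetSocketAddress;

import org.apache.hadoop.net.NetUtils;

public class HostPortCallSiteSketch {
  public static void main(String[] args) {
    // Illustrative address; any InetSocketAddress works the same way.
    InetSocketAddress addr = new InetSocketAddress("namenode.example.com", 8020);
    // Before this commit: NameNode.getHostPortString(addr)
    // After: the shared NetUtils helper composes the same "host:port" string.
    String hostPort = NetUtils.getHostPortString(addr);
    System.out.println(hostPort); // namenode.example.com:8020
  }
}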

View File: CHANGES.txt

@@ -116,6 +116,9 @@ Release 0.23.3 - UNRELEASED
     HDFS-2878. Fix TestBlockRecovery and move it back into main test directory.
     (todd)
 
+    HDFS-3003. Remove getHostPortString() from NameNode, replace it with
+    NetUtils.getHostPortString(). (Brandon Li via atm)
+
   OPTIMIZATIONS
 
     HDFS-2477. Optimize computing the diff between a block report and the
     namenode state. (Tomasz Nykiel via hairong)

View File: DFSUtil.java

@@ -434,7 +434,7 @@ public class DFSUtil {
       // Use default address as fall back
       String defaultAddress;
       try {
-        defaultAddress = NameNode.getHostPortString(NameNode.getAddress(conf));
+        defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
       } catch (IllegalArgumentException e) {
         defaultAddress = null;
       }

View File: BackupNode.java

@@ -106,13 +106,13 @@ public class BackupNode extends NameNode {
   @Override // NameNode
   protected void setRpcServerAddress(Configuration conf,
       InetSocketAddress addr) {
-    conf.set(BN_ADDRESS_NAME_KEY, getHostPortString(addr));
+    conf.set(BN_ADDRESS_NAME_KEY, NetUtils.getHostPortString(addr));
   }
 
   @Override // NameNode
   protected void setRpcServiceServerAddress(Configuration conf,
       InetSocketAddress addr) {
-    conf.set(BN_SERVICE_RPC_ADDRESS_KEY, getHostPortString(addr));
+    conf.set(BN_SERVICE_RPC_ADDRESS_KEY, NetUtils.getHostPortString(addr));
   }
 
   @Override // NameNode
@@ -124,7 +124,7 @@ public class BackupNode extends NameNode {
   @Override // NameNode
   protected void setHttpServerAddress(Configuration conf){
-    conf.set(BN_HTTP_ADDRESS_NAME_KEY, getHostPortString(getHttpAddress()));
+    conf.set(BN_HTTP_ADDRESS_NAME_KEY, NetUtils.getHostPortString(getHttpAddress()));
   }
 
   @Override // NameNode
@@ -297,8 +297,8 @@ public class BackupNode extends NameNode {
     InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
     this.namenode = new NamenodeProtocolTranslatorPB(nnAddress, conf,
         UserGroupInformation.getCurrentUser());
-    this.nnRpcAddress = getHostPortString(nnAddress);
-    this.nnHttpAddress = getHostPortString(super.getHttpServerAddress(conf));
+    this.nnRpcAddress = NetUtils.getHostPortString(nnAddress);
+    this.nnHttpAddress = NetUtils.getHostPortString(super.getHttpServerAddress(conf));
     // get version and id info from the name-node
     NamespaceInfo nsInfo = null;
     while(!isStopRequested()) {

View File: FileChecksumServlets.java

@@ -71,7 +71,7 @@ public class FileChecksumServlets {
       String tokenString = ugi.getTokens().iterator().next().encodeToUrlString();
       dtParam = JspHelper.getDelegationTokenUrlParam(tokenString);
     }
-    String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
+    String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
     String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
     return new URL(scheme, hostname, port,

View File: FileDataServlet.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ServletUtil;
@@ -72,7 +73,7 @@ public class FileDataServlet extends DfsServlet {
     // Add namenode address to the url params
     NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
         getServletContext());
-    String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
+    String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
     String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
     return new URL(scheme, hostname, port,

View File: NameNode.java

@@ -236,13 +236,6 @@ public class NameNode {
         + namenode.getHostName()+portString);
   }
 
-  /**
-   * Compose a "host:port" string from the address.
-   */
-  public static String getHostPortString(InetSocketAddress addr) {
-    return addr.getHostName() + ":" + addr.getPort();
-  }
-
   //
   // Common NameNode methods implementation for the active name-node role.
   //
@@ -272,7 +265,7 @@ public class NameNode {
    */
   protected void setRpcServiceServerAddress(Configuration conf,
       InetSocketAddress serviceRPCAddress) {
-    setServiceAddress(conf, getHostPortString(serviceRPCAddress));
+    setServiceAddress(conf, NetUtils.getHostPortString(serviceRPCAddress));
   }
 
   protected void setRpcServerAddress(Configuration conf,
@@ -292,7 +285,7 @@ public class NameNode {
   protected void setHttpServerAddress(Configuration conf) {
     conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY,
-        getHostPortString(getHttpAddress()));
+        NetUtils.getHostPortString(getHttpAddress()));
   }
 
   protected void loadNamesystem(Configuration conf) throws IOException {
@@ -305,8 +298,8 @@ public class NameNode {
   NamenodeRegistration setRegistration() {
     nodeRegistration = new NamenodeRegistration(
-        getHostPortString(rpcServer.getRpcAddress()),
-        getHostPortString(getHttpAddress()),
+        NetUtils.getHostPortString(rpcServer.getRpcAddress()),
+        NetUtils.getHostPortString(getHttpAddress()),
         getFSImage().getStorage(), getRole());
     return nodeRegistration;
   }
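
The helper deleted above was a one-line composition, so the NetUtils
replacement is behavior-preserving. A JDK-only sketch of the deleted
method's contract (the wrapper class name is hypothetical; the method
body is copied from the removed code):

import java.net.InetSocketAddress;

public class HostPortStringSketch {
  // Mirrors the method removed from NameNode above: compose a
  // "host:port" string from the address.
  static String getHostPortString(InetSocketAddress addr) {
    return addr.getHostName() + ":" + addr.getPort();
  }

  public static void main(String[] args) {
    InetSocketAddress addr = new InetSocketAddress("localhost", 8020);
    System.out.println(getHostPortString(addr)); // localhost:8020
  }
}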

View File: NamenodeJspHelper.java

@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -401,7 +402,7 @@ class NamenodeJspHelper {
       nodeToRedirect = nn.getHttpAddress().getHostName();
       redirectPort = nn.getHttpAddress().getPort();
     }
-    String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
+    String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
     String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName();
     redirectLocation = "http://" + fqdn + ":" + redirectPort
         + "/browseDirectory.jsp?namenodeInfoPort="

View File: MiniDFSCluster.java

@@ -620,10 +620,10 @@ public class MiniDFSCluster {
     NameNode nn = createNameNode(nnIndex, conf, numDataNodes, manageNameDfsDirs,
         format, operation, clusterId);
     conf.set(DFSUtil.getNameServiceIdKey(
-        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId), NameNode
+        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId), NetUtils
         .getHostPortString(nn.getNameNodeAddress()));
     conf.set(DFSUtil.getNameServiceIdKey(
-        DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NameNode
+        DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NetUtils
         .getHostPortString(nn.getHttpAddress()));
     DFSUtil.setGenericConf(conf, nameserviceId,
         DFS_NAMENODE_HTTP_ADDRESS_KEY);
@@ -643,7 +643,7 @@ public class MiniDFSCluster {
    */
   public URI getURI(int nnIndex) {
     InetSocketAddress addr = nameNodes[nnIndex].nameNode.getNameNodeAddress();
-    String hostPort = NameNode.getHostPortString(addr);
+    String hostPort = NetUtils.getHostPortString(addr);
     URI uri = null;
     try {
       uri = new URI("hdfs://" + hostPort);
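
getURI() above shows where the "host:port" string ends up: prefixed
with the hdfs:// scheme to form a filesystem URI. A JDK-only sketch of
that composition (class name, host, and port are illustrative):

import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;

public class HdfsUriSketch {
  public static void main(String[] args) throws URISyntaxException {
    InetSocketAddress addr = new InetSocketAddress("localhost", 8020);
    // Same composition as MiniDFSCluster.getURI(): "hdfs://" + "host:port".
    String hostPort = addr.getHostName() + ":" + addr.getPort();
    URI uri = new URI("hdfs://" + hostPort);
    System.out.println(uri); // hdfs://localhost:8020
  }
}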

View File: TestDatanodeJsp.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.ServletUtil;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -134,7 +135,7 @@ public class TestDatanodeJsp {
     Mockito.doReturn("100").when(reqMock).getParameter("chunkSizeToView");
     Mockito.doReturn("1").when(reqMock).getParameter("startOffset");
     Mockito.doReturn("1024").when(reqMock).getParameter("blockSize");
-    Mockito.doReturn(NameNode.getHostPortString(NameNode.getAddress(CONF)))
+    Mockito.doReturn(NetUtils.getHostPortString(NameNode.getAddress(CONF)))
         .when(reqMock).getParameter("nnaddr");
     Mockito.doReturn(testFile.toString()).when(reqMock).getPathInfo();
   }
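
The test hunk above stubs the "nnaddr" request parameter so the servlet
code under test sees a NameNode address without a real HTTP request. A
reduced sketch of the same Mockito pattern, assuming mockito-core and
the servlet API on the classpath (the class name and address literal
are illustrative):

import javax.servlet.http.HttpServletRequest;

import org.mockito.Mockito;

public class NnAddrStubSketch {
  public static void main(String[] args) {
    HttpServletRequest reqMock = Mockito.mock(HttpServletRequest.class);
    // Same pattern as the test: return a fixed "host:port" for "nnaddr".
    Mockito.doReturn("localhost:8020").when(reqMock).getParameter("nnaddr");
    System.out.println(reqMock.getParameter("nnaddr")); // localhost:8020
  }
}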

View File: TestBackupNode.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Before;
@@ -330,7 +331,7 @@ public class TestBackupNode {
     InetSocketAddress add = backup.getNameNodeAddress();
     // Write to BN
     FileSystem bnFS = FileSystem.get(new Path("hdfs://"
-        + NameNode.getHostPortString(add)).toUri(), conf);
+        + NetUtils.getHostPortString(add)).toUri(), conf);
     boolean canWrite = true;
     try {
       TestCheckpoint.writeFile(bnFS, file3, replication);

View File: TestCheckpoint.java

@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.util.StringUtils;
@@ -1432,7 +1433,7 @@ public class TestCheckpoint extends TestCase {
         .format(true).build();
     NamenodeProtocols nn = cluster.getNameNodeRpc();
-    String fsName = NameNode.getHostPortString(
+    String fsName = NetUtils.getHostPortString(
         cluster.getNameNode().getHttpAddress());
 
     // Make a finalized log on the server side.

View File: TestStreamFile.java

@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.net.NetUtils;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mortbay.jetty.InclusiveByteRange;
@@ -263,7 +264,7 @@ public class TestStreamFile {
     Mockito.doReturn(CONF).when(mockServletContext).getAttribute(
         JspHelper.CURRENT_CONF);
-    Mockito.doReturn(NameNode.getHostPortString(NameNode.getAddress(CONF)))
+    Mockito.doReturn(NetUtils.getHostPortString(NameNode.getAddress(CONF)))
         .when(mockHttpServletRequest).getParameter("nnaddr");
     Mockito.doReturn(testFile.toString()).when(mockHttpServletRequest)
         .getPathInfo();

View File: TestTransferFsImage.java

@@ -27,6 +27,7 @@ import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -54,7 +55,7 @@ public class TestTransferFsImage {
         new File("/xxxxx-does-not-exist/blah"));
     try {
-      String fsName = NameNode.getHostPortString(
+      String fsName = NetUtils.getHostPortString(
           cluster.getNameNode().getHttpAddress());
       String id = "getimage=1&txid=0";
@@ -86,7 +87,7 @@ public class TestTransferFsImage {
     );
     try {
-      String fsName = NameNode.getHostPortString(
+      String fsName = NetUtils.getHostPortString(
          cluster.getNameNode().getHttpAddress());
       String id = "getimage=1&txid=0";

View File: TestGetConf.java

@@ -33,10 +33,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.GetConf;
 import org.apache.hadoop.hdfs.tools.GetConf.Command;
 import org.apache.hadoop.hdfs.tools.GetConf.CommandHandler;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
@@ -86,7 +86,7 @@ public class TestGetConf {
   private String[] toStringArray(List<InetSocketAddress> list) {
     String[] ret = new String[list.size()];
     for (int i = 0; i < list.size(); i++) {
-      ret[i] = NameNode.getHostPortString(list.get(i));
+      ret[i] = NetUtils.getHostPortString(list.get(i));
     }
     return ret;
   }