HDFS-5312. Generate HTTP/HTTPS URL in DFSUtil#getInfoServer() based on the configured http policy. Contributed by Haohui Mai.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1548629 13f79535-47bb-0310-9956-ffa450edef68
parent 8897b0cc9b
commit 045dc880e1
CHANGES.txt
@@ -233,6 +233,9 @@ Trunk (Unreleased)
 
     HDFS-5630. Hook up cache directive and pool usage statistics. (wang)
 
+    HDFS-5312. Generate HTTP / HTTPS URL in DFSUtil#getInfoServer() based on the
+    configured http policy. (Haohui Mai via jing9)
+
   OPTIMIZATIONS
 
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
DFSUtil.java
@@ -92,6 +92,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -958,39 +959,71 @@ public class DFSUtil {
    * given namenode rpc address.
    * @param conf
    * @param namenodeAddr - namenode RPC address
-   * @param httpsAddress -If true, and if security is enabled, returns server
-   *                      https address. If false, returns server http address.
+   * @param scheme - the scheme (http / https)
    * @return server http or https address
    * @throws IOException
    */
-  public static String getInfoServer(InetSocketAddress namenodeAddr,
-      Configuration conf, boolean httpsAddress) throws IOException {
-    boolean securityOn = UserGroupInformation.isSecurityEnabled();
-    String httpAddressKey = (securityOn && httpsAddress) ?
-        DFS_NAMENODE_HTTPS_ADDRESS_KEY : DFS_NAMENODE_HTTP_ADDRESS_KEY;
-    String httpAddressDefault = (securityOn && httpsAddress) ?
-        DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT : DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
-
-    String suffixes[];
+  public static URI getInfoServer(InetSocketAddress namenodeAddr,
+      Configuration conf, String scheme) throws IOException {
+    String[] suffixes = null;
     if (namenodeAddr != null) {
       // if non-default namenode, try reverse look up
       // the nameServiceID if it is available
       suffixes = getSuffixIDs(conf, namenodeAddr,
           DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
           DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
     } else {
       suffixes = new String[2];
     }
-    String configuredInfoAddr = getSuffixedConf(conf, httpAddressKey,
-        httpAddressDefault, suffixes);
+
+    String authority;
+    if ("http".equals(scheme)) {
+      authority = getSuffixedConf(conf, DFS_NAMENODE_HTTP_ADDRESS_KEY,
+          DFS_NAMENODE_HTTP_ADDRESS_DEFAULT, suffixes);
+    } else if ("https".equals(scheme)) {
+      authority = getSuffixedConf(conf, DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+          DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT, suffixes);
+    } else {
+      throw new IllegalArgumentException("Invalid scheme:" + scheme);
+    }
+
     if (namenodeAddr != null) {
-      return substituteForWildcardAddress(configuredInfoAddr,
+      authority = substituteForWildcardAddress(authority,
           namenodeAddr.getHostName());
-    } else {
-      return configuredInfoAddr;
     }
+    return URI.create(scheme + "://" + authority);
   }
 
+  /**
+   * Lookup the HTTP / HTTPS address of the namenode, and replace its hostname
+   * with defaultHost when it found out that the address is a wildcard / local
+   * address.
+   *
+   * @param defaultHost
+   *          The default host name of the namenode.
+   * @param conf
+   *          The configuration
+   * @param scheme
+   *          HTTP or HTTPS
+   * @throws IOException
+   */
+  public static URI getInfoServerWithDefaultHost(String defaultHost,
+      Configuration conf, final String scheme) throws IOException {
+    URI configuredAddr = getInfoServer(null, conf, scheme);
+    String authority = substituteForWildcardAddress(
+        configuredAddr.getAuthority(), defaultHost);
+    return URI.create(scheme + "://" + authority);
+  }
+
+  /**
+   * Determine whether HTTP or HTTPS should be used to connect to the remote
+   * server. Currently the client only connects to the server via HTTPS if the
+   * policy is set to HTTPS_ONLY.
+   *
+   * @return the scheme (HTTP / HTTPS)
+   */
+  public static String getHttpClientScheme(Configuration conf) {
+    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
+    return policy == HttpConfig.Policy.HTTPS_ONLY ? "https" : "http";
+  }
+
   /**
    * Substitute a default host in the case that an address has been configured
@@ -1004,8 +1037,9 @@ public class DFSUtil {
    * @return the substituted address
    * @throws IOException if it is a wildcard address and security is enabled
    */
-  public static String substituteForWildcardAddress(String configuredAddress,
-      String defaultHost) throws IOException {
+  @VisibleForTesting
+  static String substituteForWildcardAddress(String configuredAddress,
+      String defaultHost) throws IOException {
     InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
     InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost
         + ":0");
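Taken together, the DFSUtil changes split address lookup into two explicit steps: pick a scheme from the configured policy, then resolve the info server as a full URI. Below is a minimal usage sketch, not part of the commit; the namenode host is hypothetical, and the "dfs.http.policy" key is an assumption about what DFSUtil.getHttpPolicy() consults in this generation of the code base.

    import java.net.InetSocketAddress;
    import java.net.URI;
    import java.net.URL;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtil;

    public class InfoServerUsageSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed policy key; HTTPS_ONLY is the one policy value for which
        // getHttpClientScheme() returns "https" rather than "http".
        conf.set("dfs.http.policy", "HTTPS_ONLY");

        // Step 1: decide the scheme once, from the configured policy.
        String scheme = DFSUtil.getHttpClientScheme(conf);   // "https"

        // Step 2: resolve the info server of a namenode as a full URI, so
        // the scheme can no longer get lost in a bare host:port string.
        URI infoServer = DFSUtil.getInfoServer(
            new InetSocketAddress("nn1.example.com", 8020), conf, scheme);

        // Callers that need java.net.URL convert at the boundary.
        URL url = infoServer.toURL();
        System.out.println(url);   // e.g. https://nn1.example.com:50470
      }
    }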
BackupNode.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
+import java.net.URL;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -79,7 +80,7 @@ public class BackupNode extends NameNode {
   /** Name-node RPC address */
   String nnRpcAddress;
   /** Name-node HTTP address */
-  String nnHttpAddress;
+  URL nnHttpAddress;
   /** Checkpoint manager */
   Checkpointer checkpointManager;
 
@@ -313,7 +314,8 @@ public class BackupNode extends NameNode {
         NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
         true).getProxy();
     this.nnRpcAddress = NetUtils.getHostPortString(nnAddress);
-    this.nnHttpAddress = NetUtils.getHostPortString(super.getHttpServerAddress(conf));
+    this.nnHttpAddress = DFSUtil.getInfoServer(nnAddress, conf,
+        DFSUtil.getHttpClientScheme(conf)).toURL();
     // get version and id info from the name-node
     NamespaceInfo nsInfo = null;
     while(!isStopRequested()) {
Checkpointer.java
@@ -24,11 +24,14 @@ import static org.apache.hadoop.util.Time.now;
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
+import java.net.URL;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
@@ -61,6 +64,7 @@ class Checkpointer extends Daemon {
   private String infoBindAddress;
 
   private CheckpointConf checkpointConf;
+  private final Configuration conf;
 
   private BackupImage getFSImage() {
     return (BackupImage)backupNode.getFSImage();
@@ -74,6 +78,7 @@ class Checkpointer extends Daemon {
    * Create a connection to the primary namenode.
    */
   Checkpointer(Configuration conf, BackupNode bnNode) throws IOException {
+    this.conf = conf;
     this.backupNode = bnNode;
     try {
       initialize(conf);
@@ -274,10 +279,15 @@ class Checkpointer extends Daemon {
         + " New Image Size: " + imageSize);
   }
 
-  private InetSocketAddress getImageListenAddress() {
+  private URL getImageListenAddress() {
     InetSocketAddress httpSocAddr = backupNode.getHttpAddress();
     int httpPort = httpSocAddr.getPort();
-    return new InetSocketAddress(infoBindAddress, httpPort);
+    try {
+      return new URL(DFSUtil.getHttpClientScheme(conf) + "://" + infoBindAddress + ":" + httpPort);
+    } catch (MalformedURLException e) {
+      // Unreachable
+      throw new RuntimeException(e);
+    }
   }
 
   static void rollForwardByApplyingLogs(
ClusterJspHelper.java
@@ -21,6 +21,7 @@ import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.net.InetSocketAddress;
+import java.net.URI;
 import java.net.URL;
 import java.net.URLConnection;
 import java.util.ArrayList;
@@ -41,7 +42,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.JsonNode;
 import org.codehaus.jackson.map.ObjectMapper;
@@ -272,12 +272,13 @@ class ClusterJspHelper {
   static class NamenodeMXBeanHelper {
     private static final ObjectMapper mapper = new ObjectMapper();
     private final String host;
-    private final String httpAddress;
+    private final URI httpAddress;
 
     NamenodeMXBeanHelper(InetSocketAddress addr, Configuration conf)
         throws IOException, MalformedObjectNameException {
       this.host = addr.getHostName();
-      this.httpAddress = DFSUtil.getInfoServer(addr, conf, false);
+      this.httpAddress = DFSUtil.getInfoServer(addr, conf,
+          DFSUtil.getHttpClientScheme(conf));
     }
 
     /** Get the map corresponding to the JSON string */
@@ -356,7 +357,7 @@ class ClusterJspHelper {
       nn.blocksCount = getProperty(props, "TotalBlocks").getLongValue();
       nn.missingBlocksCount = getProperty(props, "NumberOfMissingBlocks")
           .getLongValue();
-      nn.httpAddress = httpAddress;
+      nn.httpAddress = httpAddress.toURL();
       getLiveNodeCount(getProperty(props, "LiveNodes").getValueAsText(), nn);
       getDeadNodeCount(getProperty(props, "DeadNodes").getValueAsText(), nn);
       nn.softwareVersion = getProperty(props, "SoftwareVersion").getTextValue();
@@ -591,12 +592,14 @@ class ClusterJspHelper {
       toXmlItemBlock(doc, "Blocks", Long.toString(nn.blocksCount));
       toXmlItemBlock(doc, "Missing Blocks",
           Long.toString(nn.missingBlocksCount));
-      toXmlItemBlockWithLink(doc, nn.liveDatanodeCount + " (" +
-          nn.liveDecomCount + ")", nn.httpAddress+"/dfsnodelist.jsp?whatNodes=LIVE",
-          "Live Datanode (Decommissioned)");
-      toXmlItemBlockWithLink(doc, nn.deadDatanodeCount + " (" +
-          nn.deadDecomCount + ")", nn.httpAddress+"/dfsnodelist.jsp?whatNodes=DEAD"
-          , "Dead Datanode (Decommissioned)");
+      toXmlItemBlockWithLink(doc, nn.liveDatanodeCount + " ("
+          + nn.liveDecomCount + ")", new URL(nn.httpAddress,
+          "/dfsnodelist.jsp?whatNodes=LIVE"),
+          "Live Datanode (Decommissioned)");
+      toXmlItemBlockWithLink(doc, nn.deadDatanodeCount + " ("
+          + nn.deadDecomCount + ")", new URL(nn.httpAddress,
+          "/dfsnodelist.jsp?whatNodes=DEAD"),
+          "Dead Datanode (Decommissioned)");
       toXmlItemBlock(doc, "Software Version", nn.softwareVersion);
       doc.endTag(); // node
   }
@@ -625,7 +628,7 @@ class ClusterJspHelper {
     int liveDecomCount = 0;
     int deadDatanodeCount = 0;
     int deadDecomCount = 0;
-    String httpAddress = null;
+    URL httpAddress = null;
     String softwareVersion = "";
   }
 
@@ -763,7 +766,8 @@ class ClusterJspHelper {
           .equals(DecommissionStates.UNKNOWN.toString()))) {
         doc.startTag("node");
         // dn
-        toXmlItemBlockWithLink(doc, dnhost, (dnhost+":"+httpPort),"DataNode");
+        toXmlItemBlockWithLink(doc, dnhost, new URL("http", dnhost, httpPort,
+            ""), "DataNode");
 
         // overall status first
         toXmlItemBlock(doc, OVERALL_STATUS, overallStatus);
@@ -823,11 +827,11 @@ class ClusterJspHelper {
    *        link="http://hostname:50070" />
    */
   private static void toXmlItemBlockWithLink(XMLOutputter doc, String value,
-      String url, String label) throws IOException {
+      URL url, String label) throws IOException {
     doc.startTag("item");
     doc.attribute("label", label);
     doc.attribute("value", value);
-    doc.attribute("link", "///" + url);
+    doc.attribute("link", url.toString());
     doc.endTag(); // item
   }
 
@@ -885,7 +889,7 @@ class ClusterJspHelper {
     return out.toString();
   }
 
-  private static String queryMbean(String httpAddress, Configuration conf)
+  private static String queryMbean(URI httpAddress, Configuration conf)
       throws IOException {
     /**
      * Although the other namenode might support HTTPS, it is fundamentally
@@ -896,7 +900,7 @@ class ClusterJspHelper {
      *
      * As a result, we just hard code the connection as an HTTP connection.
      */
-    URL url = new URL("http://" + httpAddress + JMX_QRY);
+    URL url = new URL(httpAddress.toURL(), JMX_QRY);
     return readOutput(url);
   }
   /**
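The link-building changes above lean on java.net.URL's resolution rules: when the spec passed to the two-argument constructor is an absolute path, it replaces the base URL's path but keeps its scheme and authority, so links inherit http vs. https from the resolved info server. A standalone illustration (the host and port are illustrative, not from the commit):

    import java.net.URL;

    public class UrlResolveDemo {
      public static void main(String[] args) throws Exception {
        URL base = new URL("https://nn1.example.com:50470/some/page");

        // An absolute spec keeps scheme + authority and replaces the path,
        // so the generated link can never silently drop the scheme the way
        // the old "host:port" string concatenation could.
        URL live = new URL(base, "/dfsnodelist.jsp?whatNodes=LIVE");
        System.out.println(live);
        // https://nn1.example.com:50470/dfsnodelist.jsp?whatNodes=LIVE
      }
    }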
GetImageServlet.java
@@ -21,6 +21,7 @@ import java.security.PrivilegedExceptionAction;
 import java.util.*;
 import java.io.*;
 import java.net.InetSocketAddress;
+import java.net.URL;
 
 import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
@@ -31,10 +32,8 @@ import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -87,8 +86,8 @@ public class GetImageServlet extends HttpServlet {
       ServletContext context = getServletContext();
       final FSImage nnImage = NameNodeHttpServer.getFsImageFromContext(context);
       final GetImageParams parsedParams = new GetImageParams(request, response);
-      final Configuration conf =
-        (Configuration)getServletContext().getAttribute(JspHelper.CURRENT_CONF);
+      final Configuration conf = (Configuration) context
+          .getAttribute(JspHelper.CURRENT_CONF);
 
       if (UserGroupInformation.isSecurityEnabled() &&
           !isValidRequestor(context, request.getUserPrincipal().getName(), conf)) {
@@ -163,7 +162,7 @@ public class GetImageServlet extends HttpServlet {
               // issue a HTTP get request to download the new fsimage
               MD5Hash downloadImageDigest =
                 TransferFsImage.downloadImageToStorage(
-                        parsedParams.getInfoServer(), txid,
+                        parsedParams.getInfoServer(conf), txid,
                         nnImage.getStorage(), true);
               nnImage.saveDigestAndRenameCheckpointImage(txid, downloadImageDigest);
 
@@ -309,7 +308,9 @@ public class GetImageServlet extends HttpServlet {
   }
 
   static String getParamStringToPutImage(long txid,
-      InetSocketAddress imageListenAddress, Storage storage) {
+      URL url, Storage storage) {
+    InetSocketAddress imageListenAddress = NetUtils.createSocketAddr(url
+        .getAuthority());
     String machine = !imageListenAddress.isUnresolved()
         && imageListenAddress.getAddress().isAnyLocalAddress() ? null
         : imageListenAddress.getHostName();
@@ -419,11 +420,11 @@ public class GetImageServlet extends HttpServlet {
       return isPutImage;
     }
 
-    String getInfoServer() throws IOException{
+    URL getInfoServer(Configuration conf) throws IOException {
       if (machineName == null || remoteport == 0) {
-        throw new IOException ("MachineName and port undefined");
+        throw new IOException("MachineName and port undefined");
       }
-      return machineName + ":" + remoteport;
+      return new URL(DFSUtil.getHttpClientScheme(conf), machineName, remoteport, "");
    }
 
    boolean shouldFetchLatest() {
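The rewritten GetImageParams.getInfoServer(conf) uses the four-argument (protocol, host, port, file) URL constructor, so the returned address carries whichever scheme the policy dictates. A small standalone check of what that constructor yields (hostname and port here are hypothetical):

    import java.net.URL;

    public class SchemeHostPortDemo {
      public static void main(String[] args) throws Exception {
        String machineName = "2nn.example.com";   // hypothetical host
        int remoteport = 50090;

        // Same constructor shape as the new getInfoServer(conf):
        // an empty file component yields just scheme://host:port
        URL http  = new URL("http",  machineName, remoteport, "");
        URL https = new URL("https", machineName, remoteport, "");
        System.out.println(http);   // http://2nn.example.com:50090
        System.out.println(https);  // https://2nn.example.com:50090
      }
    }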
SecondaryNameNode.java
@@ -29,7 +29,9 @@ import java.io.File;
 import java.io.FilenameFilter;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
 import java.net.URI;
+import java.net.URL;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
@@ -111,7 +113,7 @@ public class SecondaryNameNode implements Runnable {
   private final long starttime = Time.now();
   private volatile long lastCheckpointTime = 0;
 
-  private String fsName;
+  private URL fsName;
   private CheckpointStorage checkpointImage;
 
   private NamenodeProtocol namenode;
@@ -404,7 +406,7 @@ public class SecondaryNameNode implements Runnable {
    * @throws IOException
    */
   static boolean downloadCheckpointFiles(
-      final String nnHostPort,
+      final URL nnHostPort,
       final FSImage dstImage,
       final CheckpointSignature sig,
       final RemoteEditLogManifest manifest
@@ -467,25 +469,33 @@ public class SecondaryNameNode implements Runnable {
   /**
    * Returns the Jetty server that the Namenode is listening on.
    */
-  private String getInfoServer() throws IOException {
+  private URL getInfoServer() throws IOException {
     URI fsName = FileSystem.getDefaultUri(conf);
     if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
       throw new IOException("This is not a DFS");
     }
 
-    String configuredAddress = DFSUtil.getInfoServer(null, conf, false);
-    String address = DFSUtil.substituteForWildcardAddress(configuredAddress,
-        fsName.getHost());
-    LOG.debug("Will connect to NameNode at HTTP address: " + address);
-    return address;
+    final String scheme = DFSUtil.getHttpClientScheme(conf);
+    URI address = DFSUtil.getInfoServerWithDefaultHost(fsName.getHost(), conf,
+        scheme);
+    LOG.debug("Will connect to NameNode at " + address);
+    return address.toURL();
   }
 
   /**
    * Return the host:port of where this SecondaryNameNode is listening
   * for image transfers
    */
-  private InetSocketAddress getImageListenAddress() {
-    return new InetSocketAddress(infoBindAddress, infoPort);
+  private URL getImageListenAddress() {
+    StringBuilder sb = new StringBuilder()
+        .append(DFSUtil.getHttpClientScheme(conf)).append("://")
+        .append(infoBindAddress).append(":").append(infoPort);
+    try {
+      return new URL(sb.toString());
+    } catch (MalformedURLException e) {
+      // Unreachable
+      throw new RuntimeException(e);
+    }
  }
 
  /**
TransferFsImage.java
@@ -17,13 +17,18 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.*;
-import java.net.*;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
+import java.net.URL;
 import java.security.DigestInputStream;
 import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.List;
-import java.lang.Math;
 
 import javax.servlet.ServletOutputStream;
 import javax.servlet.ServletResponse;
@@ -41,14 +46,16 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
@@ -76,15 +83,15 @@ public class TransferFsImage {
 
   private static final Log LOG = LogFactory.getLog(TransferFsImage.class);
 
-  public static void downloadMostRecentImageToDirectory(String fsName,
+  public static void downloadMostRecentImageToDirectory(URL infoServer,
       File dir) throws IOException {
     String fileId = GetImageServlet.getParamStringForMostRecentImage();
-    getFileClient(fsName, fileId, Lists.newArrayList(dir),
+    getFileClient(infoServer, fileId, Lists.newArrayList(dir),
         null, false);
   }
 
   public static MD5Hash downloadImageToStorage(
-      String fsName, long imageTxId, Storage dstStorage, boolean needDigest)
+      URL fsName, long imageTxId, Storage dstStorage, boolean needDigest)
       throws IOException {
     String fileid = GetImageServlet.getParamStringForImage(
         imageTxId, dstStorage);
@@ -102,7 +109,7 @@ public class TransferFsImage {
     return hash;
   }
 
-  static void downloadEditsToStorage(String fsName, RemoteEditLog log,
+  static void downloadEditsToStorage(URL fsName, RemoteEditLog log,
       NNStorage dstStorage) throws IOException {
     assert log.getStartTxId() > 0 && log.getEndTxId() > 0 :
       "bad log: " + log;
@@ -156,17 +163,17 @@ public class TransferFsImage {
    * Requests that the NameNode download an image from this node.
    *
    * @param fsName the http address for the remote NN
-   * @param imageListenAddress the host/port where the local node is running an
+   * @param myNNAddress the host/port where the local node is running an
    *                           HTTPServer hosting GetImageServlet
    * @param storage the storage directory to transfer the image from
    * @param txid the transaction ID of the image to be uploaded
    */
-  public static void uploadImageFromStorage(String fsName,
-      InetSocketAddress imageListenAddress,
+  public static void uploadImageFromStorage(URL fsName,
+      URL myNNAddress,
       Storage storage, long txid) throws IOException {
 
     String fileid = GetImageServlet.getParamStringToPutImage(
-        txid, imageListenAddress, storage);
+        txid, myNNAddress, storage);
     // this doesn't directly upload an image, but rather asks the NN
     // to connect back to the 2NN to download the specified image.
     try {
@@ -244,17 +251,11 @@ public class TransferFsImage {
    *                   this storage object will be notified.
    * @Return a digest of the received file if getChecksum is true
    */
-  static MD5Hash getFileClient(String nnHostPort,
+  static MD5Hash getFileClient(URL infoServer,
       String queryString, List<File> localPaths,
      Storage dstStorage, boolean getChecksum) throws IOException {
-
-    String str = HttpConfig.getSchemePrefix() + nnHostPort + "/getimage?" +
-        queryString;
-    LOG.info("Opening connection to " + str);
-    //
-    // open connection to remote server
-    //
-    URL url = new URL(str);
+    URL url = new URL(infoServer, "/getimage?" + queryString);
+    LOG.info("Opening connection to " + url);
    return doGetUrl(url, localPaths, dstStorage, getChecksum);
  }
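With getFileClient taking a URL, the /getimage endpoint is composed by URL resolution instead of by prefixing HttpConfig.getSchemePrefix() onto a host:port string. A sketch of that composition in isolation (the info server address and transaction id are illustrative; the query parameter names come from the test hunks below):

    import java.net.URL;

    public class GetImageUrlDemo {
      public static void main(String[] args) throws Exception {
        // infoServer as produced by DFSUtil.getInfoServer(...).toURL()
        URL infoServer = new URL("https://nn1.example.com:50470");

        String queryString = "getimage=1&txid=42";   // illustrative values
        URL url = new URL(infoServer, "/getimage?" + queryString);
        System.out.println(url);
        // https://nn1.example.com:50470/getimage?getimage=1&txid=42
      }
    }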
BootstrapStandby.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.net.URL;
 import java.security.PrivilegedAction;
 import java.util.Collection;
 import java.util.List;
@@ -69,7 +70,7 @@ public class BootstrapStandby implements Tool, Configurable {
   private String nnId;
   private String otherNNId;
 
-  private String otherHttpAddr;
+  private URL otherHttpAddr;
   private InetSocketAddress otherIpcAddr;
   private Collection<URI> dirsToFormat;
   private List<URI> editUrisToFormat;
@@ -179,6 +180,7 @@ public class BootstrapStandby implements Tool, Configurable {
     // Check with the user before blowing away data.
     if (!Storage.confirmFormat(storage.dirIterable(null),
         force, interactive)) {
+      storage.close();
       return ERR_CODE_ALREADY_FORMATTED;
     }
 
@@ -203,7 +205,7 @@ public class BootstrapStandby implements Tool, Configurable {
 
     // Download that checkpoint into our storage directories.
     MD5Hash hash = TransferFsImage.downloadImageToStorage(
-        otherHttpAddr.toString(), imageTxId,
+        otherHttpAddr, imageTxId,
         storage, true);
     image.saveDigestAndRenameCheckpointImage(imageTxId, hash);
     return 0;
@@ -276,11 +278,10 @@ public class BootstrapStandby implements Tool, Configurable {
         "Could not determine valid IPC address for other NameNode (%s)" +
         ", got: %s", otherNNId, otherIpcAddr);
 
-    otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, false);
-    otherHttpAddr = DFSUtil.substituteForWildcardAddress(otherHttpAddr,
-        otherIpcAddr.getHostName());
-
-
+    final String scheme = DFSUtil.getHttpClientScheme(conf);
+    otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
+        otherIpcAddr.getHostName(), otherNode, scheme).toURL();
+
     dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
     editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
         conf, false);
StandbyCheckpointer.java
@@ -20,7 +20,8 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
 import static org.apache.hadoop.util.Time.now;
 
 import java.io.IOException;
-import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URL;
 import java.security.PrivilegedAction;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
@@ -43,7 +44,6 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SaveNamespaceCancelledException;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.hdfs.util.Canceler;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -66,8 +66,8 @@ public class StandbyCheckpointer {
   private long lastCheckpointTime;
   private final CheckpointerThread thread;
   private final ThreadFactory uploadThreadFactory;
-  private String activeNNAddress;
-  private InetSocketAddress myNNAddress;
+  private URL activeNNAddress;
+  private URL myNNAddress;
 
   private Object cancelLock = new Object();
   private Canceler canceler;
@@ -94,7 +94,7 @@ public class StandbyCheckpointer {
    */
   private void setNameNodeAddresses(Configuration conf) throws IOException {
     // Look up our own address.
-    String myAddrString = getHttpAddress(conf);
+    myNNAddress = getHttpAddress(conf);
 
     // Look up the active node's address
     Configuration confForActive = HAUtil.getConfForOtherNode(conf);
@@ -103,32 +103,22 @@ public class StandbyCheckpointer {
     // Sanity-check.
     Preconditions.checkArgument(checkAddress(activeNNAddress),
         "Bad address for active NN: %s", activeNNAddress);
-    Preconditions.checkArgument(checkAddress(myAddrString),
-        "Bad address for standby NN: %s", myAddrString);
-    myNNAddress = NetUtils.createSocketAddr(myAddrString);
+    Preconditions.checkArgument(checkAddress(myNNAddress),
+        "Bad address for standby NN: %s", myNNAddress);
   }
 
-  private String getHttpAddress(Configuration conf) throws IOException {
-    String configuredAddr = DFSUtil.getInfoServer(null, conf, false);
-
-    // Use the hostname from the RPC address as a default, in case
-    // the HTTP address is configured to 0.0.0.0.
-    String hostnameFromRpc = NameNode.getServiceAddress(
-        conf, true).getHostName();
-    try {
-      return DFSUtil.substituteForWildcardAddress(
-          configuredAddr, hostnameFromRpc);
-    } catch (IOException e) {
-      throw new IllegalArgumentException(e);
-    }
+  private URL getHttpAddress(Configuration conf) throws IOException {
+    final String scheme = DFSUtil.getHttpClientScheme(conf);
+    String defaultHost = NameNode.getServiceAddress(conf, true).getHostName();
+    URI addr = DFSUtil.getInfoServerWithDefaultHost(defaultHost, conf, scheme);
+    return addr.toURL();
   }
 
   /**
    * Ensure that the given address is valid and has a port
    * specified.
    */
-  private boolean checkAddress(String addrStr) {
-    InetSocketAddress addr = NetUtils.createSocketAddr(addrStr);
+  private static boolean checkAddress(URL addr) {
     return addr.getPort() != 0;
   }
 
@@ -344,7 +334,7 @@ public class StandbyCheckpointer {
   }
 
   @VisibleForTesting
-  String getActiveNNAddress() {
+  URL getActiveNNAddress() {
     return activeNNAddress;
   }
 }
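getHttpAddress() now delegates wildcard handling to DFSUtil.getInfoServerWithDefaultHost, which in turn calls substituteForWildcardAddress. The substitution it relies on can be sketched independently; this is a simplified restatement of the idea, not the production code:

    import java.net.InetSocketAddress;

    public class WildcardSubstituteDemo {
      // Simplified: if the configured address binds the wildcard (0.0.0.0),
      // replace the host with a default derived from the RPC address.
      static String substitute(String configuredAddress, String defaultHost) {
        String[] parts = configuredAddress.split(":");
        InetSocketAddress sockAddr =
            new InetSocketAddress(parts[0], Integer.parseInt(parts[1]));
        if (sockAddr.getAddress() != null
            && sockAddr.getAddress().isAnyLocalAddress()) {
          return defaultHost + ":" + sockAddr.getPort();
        }
        return configuredAddress;
      }

      public static void main(String[] args) {
        System.out.println(substitute("0.0.0.0:50070", "nn1.example.com"));
        // nn1.example.com:50070 -- wildcard replaced by the default host
        System.out.println(substitute("nn2.example.com:50070", "nn1.example.com"));
        // nn2.example.com:50070 -- concrete hosts pass through unchanged
      }
    }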
DFSAdmin.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.tools;
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -47,9 +48,9 @@ import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.ipc.RPC;
@@ -547,8 +548,10 @@ public class DFSAdmin extends FsShell {
    * @throws IOException
    */
   public int fetchImage(final String[] argv, final int idx) throws IOException {
-    final String infoServer = DFSUtil.getInfoServer(
-        HAUtil.getAddressOfActive(getDFS()), getConf(), false);
+    Configuration conf = getConf();
+    final URL infoServer = DFSUtil.getInfoServer(
+        HAUtil.getAddressOfActive(getDFS()), conf,
+        DFSUtil.getHttpClientScheme(conf)).toURL();
     SecurityUtil.doAsCurrentUser(new PrivilegedExceptionAction<Void>() {
       @Override
       public Void run() throws Exception {
DFSck.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.PrintStream;
+import java.net.URI;
 import java.net.URL;
 import java.net.URLConnection;
 import java.net.URLEncoder;
@@ -37,7 +38,6 @@ import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.util.StringUtils;
@@ -227,7 +227,7 @@ public class DFSck extends Configured implements Tool {
    * @return Returns http address or null if failure.
    * @throws IOException if we can't determine the active NN address
    */
-  private String getCurrentNamenodeAddress() throws IOException {
+  private URI getCurrentNamenodeAddress() throws IOException {
     //String nnAddress = null;
     Configuration conf = getConf();
 
@@ -245,19 +245,21 @@ public class DFSck extends Configured implements Tool {
       return null;
     }
 
-    return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf, false);
+    return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf,
+        DFSUtil.getHttpClientScheme(conf));
   }
 
   private int doWork(final String[] args) throws IOException {
-    final StringBuilder url = new StringBuilder(HttpConfig.getSchemePrefix());
+    final StringBuilder url = new StringBuilder();
 
-    String namenodeAddress = getCurrentNamenodeAddress();
+    URI namenodeAddress = getCurrentNamenodeAddress();
     if (namenodeAddress == null) {
       //Error message already output in {@link #getCurrentNamenodeAddress()}
       System.err.println("DFSck exiting.");
       return 0;
     }
-    url.append(namenodeAddress);
-
+
+    url.append(namenodeAddress.toString());
     System.err.println("Connecting to namenode via " + url.toString());
 
     url.append("/fsck?ugi=").append(ugi.getShortUserName());
TestDFSUtil.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
@@ -431,20 +430,22 @@ public class TestDFSUtil {
   }
 
   @Test
-  public void testGetInfoServer() throws IOException {
+  public void testGetInfoServer() throws IOException, URISyntaxException {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
-    UserGroupInformation.setConfiguration(conf);
 
-    String httpsport = DFSUtil.getInfoServer(null, conf, true);
-    assertEquals("0.0.0.0:"+DFS_NAMENODE_HTTPS_PORT_DEFAULT, httpsport);
+    URI httpsport = DFSUtil.getInfoServer(null, conf, "https");
+    assertEquals(new URI("https", null, "0.0.0.0",
+        DFS_NAMENODE_HTTPS_PORT_DEFAULT, null, null, null), httpsport);
 
-    String httpport = DFSUtil.getInfoServer(null, conf, false);
-    assertEquals("0.0.0.0:"+DFS_NAMENODE_HTTP_PORT_DEFAULT, httpport);
-
-    String httpAddress = DFSUtil.getInfoServer(new InetSocketAddress(
-        "localhost", 8020), conf, false);
-    assertEquals("localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT, httpAddress);
+    URI httpport = DFSUtil.getInfoServer(null, conf, "http");
+    assertEquals(new URI("http", null, "0.0.0.0",
+        DFS_NAMENODE_HTTP_PORT_DEFAULT, null, null, null), httpport);
+
+    URI httpAddress = DFSUtil.getInfoServer(new InetSocketAddress(
+        "localhost", 8020), conf, "http");
+    assertEquals(
+        URI.create("http://localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT),
+        httpAddress);
   }
 
   @Test
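The updated assertions compare URI objects rather than strings. The multi-argument constructor and URI.create produce equal values when the components match, which is what the mixed-style assertions above depend on; a standalone check (50070 is the stock namenode HTTP port default):

    import java.net.URI;

    public class UriEqualityDemo {
      public static void main(String[] args) throws Exception {
        URI a = new URI("http", null, "0.0.0.0", 50070, null, null, null);
        URI b = URI.create("http://0.0.0.0:50070");
        System.out.println(a.equals(b));  // true: same scheme, host, port
      }
    }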
TestCheckpoint.java
@@ -34,6 +34,7 @@ import java.io.RandomAccessFile;
 import java.lang.management.ManagementFactory;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.net.URL;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -71,7 +72,6 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
@@ -218,6 +218,7 @@ public class TestCheckpoint {
     assertTrue("Removed directory wasn't what was expected",
         listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot().
             toString().indexOf("storageDirToCheck") != -1);
+    nnStorage.close();
   }
 
   /*
@@ -1947,8 +1948,9 @@ public class TestCheckpoint {
         .format(true).build();
 
     NamenodeProtocols nn = cluster.getNameNodeRpc();
-    String fsName = NetUtils.getHostPortString(
-        cluster.getNameNode().getHttpAddress());
+    URL fsName = DFSUtil.getInfoServer(
+        cluster.getNameNode().getServiceRpcAddress(), conf,
+        DFSUtil.getHttpClientScheme(conf)).toURL();
 
     // Make a finalized log on the server side.
     nn.rollEditLog();
@@ -1980,8 +1982,7 @@ public class TestCheckpoint {
     }
 
     try {
-      InetSocketAddress fakeAddr = new InetSocketAddress(1);
-      TransferFsImage.uploadImageFromStorage(fsName, fakeAddr, dstImage, 0);
+      TransferFsImage.uploadImageFromStorage(fsName, new URL("http://localhost:1234"), dstImage, 0);
       fail("Storage info was not verified");
     } catch (IOException ioe) {
       String msg = StringUtils.stringifyException(ioe);
TestTransferFsImage.java
@@ -34,11 +34,11 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.http.HttpServerFunctionalTest;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
@@ -66,8 +66,9 @@ public class TestTransferFsImage {
         new File("/xxxxx-does-not-exist/blah"));
 
     try {
-      String fsName = NetUtils.getHostPortString(
-          cluster.getNameNode().getHttpAddress());
+      URL fsName = DFSUtil.getInfoServer(
+          cluster.getNameNode().getServiceRpcAddress(), conf,
+          DFSUtil.getHttpClientScheme(conf)).toURL();
       String id = "getimage=1&txid=0";
 
       TransferFsImage.getFileClient(fsName, id, localPath, mockStorage, false);
@@ -98,8 +99,10 @@ public class TestTransferFsImage {
     );
 
     try {
-      String fsName = NetUtils.getHostPortString(
-          cluster.getNameNode().getHttpAddress());
+      URL fsName = DFSUtil.getInfoServer(
+          cluster.getNameNode().getServiceRpcAddress(), conf,
+          DFSUtil.getHttpClientScheme(conf)).toURL();
+
       String id = "getimage=1&txid=0";
 
       TransferFsImage.getFileClient(fsName, id, localPaths, mockStorage, false);
@@ -123,7 +126,7 @@ public class TestTransferFsImage {
     URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
     TransferFsImage.timeout = 2000;
     try {
-      TransferFsImage.getFileClient(serverURL.getAuthority(), "txid=1", null,
+      TransferFsImage.getFileClient(serverURL, "txid=1", null,
          null, false);
      fail("TransferImage Should fail with timeout");
    } catch (SocketTimeoutException e) {
TestHAConfiguration.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.URI;
+import java.net.URL;
 import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
@@ -86,7 +87,8 @@ public class TestHAConfiguration {
     // 0.0.0.0, it should substitute the address from the RPC configuration
     // above.
     StandbyCheckpointer checkpointer = new StandbyCheckpointer(conf, fsn);
-    assertEquals("1.2.3.2:" + DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,
+    assertEquals(new URL("http", "1.2.3.2",
+        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, ""),
         checkpointer.getActiveNNAddress());
   }
 