HADOOP-9485. No default value in the code for hadoop.rpc.socket.factory.class.default. Contributed by Colin Patrick McCabe.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1481200 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers 2013-05-10 21:45:32 +00:00
parent b98b776363
commit 945cb2ecaa
6 changed files with 50 additions and 9 deletions
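
The fix in a nutshell: NetUtils.getDefaultSocketFactory() used to read
hadoop.rpc.socket.factory.class.default with the one-argument Configuration.get(),
which returns null whenever the key is absent, for example when a Configuration
is built without loading the XML default resources. The commit adds an in-code
default constant and switches to the two-argument get(). A minimal sketch of the
difference (the class name is hypothetical, not part of the commit):

    import org.apache.hadoop.conf.Configuration;

    public class ConfigDefaultSketch {
      static final String KEY = "hadoop.rpc.socket.factory.class.default";
      static final String DEFAULT = "org.apache.hadoop.net.StandardSocketFactory";

      public static void main(String[] args) {
        // 'false' skips loading core-default.xml/core-site.xml, the situation
        // in which the missing in-code default showed up.
        Configuration conf = new Configuration(false);

        System.out.println(conf.get(KEY));          // old code path: null
        System.out.println(conf.get(KEY, DEFAULT)); // new code path: StandardSocketFactory
      }
    }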

hadoop-common-project/hadoop-common/CHANGES.txt

@@ -707,6 +707,9 @@ Release 2.0.5-beta - UNRELEASED
 
     HADOOP-9549. WebHdfsFileSystem hangs on close(). (daryn via kihwal)
 
+    HADOOP-9485. No default value in the code for
+    hadoop.rpc.socket.factory.class.default. (Colin Patrick McCabe via atm)
+
 Release 2.0.4-alpha - 2013-04-25
 
   INCOMPATIBLE CHANGES

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -215,6 +215,8 @@ public class CommonConfigurationKeysPublic {
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY =
     "hadoop.rpc.socket.factory.class.default";
+  public static final String HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_DEFAULT =
+    "org.apache.hadoop.net.StandardSocketFactory";
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String HADOOP_SOCKS_SERVER_KEY = "hadoop.socks.server";
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
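
The doubled suffix in HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_DEFAULT is
deliberate: the key itself ends in ".default", and the trailing _DEFAULT marks
the in-code fallback value, following the KEY/DEFAULT constant-pair convention
used throughout this class. A hedged usage fragment (assuming conf is an
org.apache.hadoop.conf.Configuration in scope), mirroring the NetUtils change
below:

    // Pass the key and its in-code default together, so the result no longer
    // depends on whether core-default.xml was loaded.
    String factoryClass = conf.get(
        CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
        CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_DEFAULT);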

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java

@@ -50,6 +50,7 @@ import org.apache.commons.net.util.SubnetUtils.SubnetInfo;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.SecurityUtil;
@@ -112,7 +113,9 @@ public class NetUtils {
    */
   public static SocketFactory getDefaultSocketFactory(Configuration conf) {
 
-    String propValue = conf.get("hadoop.rpc.socket.factory.class.default");
+    String propValue = conf.get(
+        CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
+        CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_DEFAULT);
 
     if ((propValue == null) || (propValue.length() == 0))
       return SocketFactory.getDefault();
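
With a non-null in-code default, propValue can no longer be null here; the
empty check still matters because a caller may explicitly set the key to the
empty string (as the SASL test below does), which falls back to the JDK's
SocketFactory.getDefault(). A hedged usage sketch (class name hypothetical):

    import javax.net.SocketFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.NetUtils;

    public class SocketFactorySketch {
      public static void main(String[] args) {
        // Even with no XML defaults loaded, this now resolves to Hadoop's
        // StandardSocketFactory instead of the JDK default factory.
        Configuration conf = new Configuration(false);
        SocketFactory factory = NetUtils.getDefaultSocketFactory(conf);
        System.out.println(factory.getClass().getName());
      }
    }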

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSaslRPC.java

@@ -41,6 +41,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.net.NetUtils;
@@ -378,7 +379,8 @@ public class TestSaslRPC {
     current.addToken(token);
 
     Configuration newConf = new Configuration(conf);
-    newConf.set("hadoop.rpc.socket.factory.class.default", "");
+    newConf.set(CommonConfigurationKeysPublic.
+        HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, "");
     newConf.set(SERVER_PRINCIPAL_KEY, SERVER_PRINCIPAL_1);
 
     TestSaslProtocol proxy1 = null;
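
Note what this test now relies on: it sets the factory key to the empty string,
and an explicit "" bypasses the in-code default, reaching the length-zero branch
in getDefaultSocketFactory() so the JDK's default factory is used. A sketch
(class name hypothetical; the final println assumes the stock JDK, which caches
its default factory as a singleton):

    import javax.net.SocketFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
    import org.apache.hadoop.net.NetUtils;

    public class EmptyFactoryKeySketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(CommonConfigurationKeysPublic
            .HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, "");
        SocketFactory f = NetUtils.getDefaultSocketFactory(conf);
        System.out.println(f == SocketFactory.getDefault()); // true
      }
    }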

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java

@@ -27,6 +27,7 @@ import javax.net.SocketFactory;
 import junit.framework.Assert;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StandardSocketFactory;
 import org.junit.Test;
@@ -39,13 +40,13 @@ public class TestSocketFactory {
     int toBeCached1 = 1;
     int toBeCached2 = 2;
     Configuration conf = new Configuration();
-    conf.set("hadoop.rpc.socket.factory.class.default",
+    conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
         "org.apache.hadoop.ipc.TestSocketFactory$DummySocketFactory");
     final SocketFactory dummySocketFactory = NetUtils
         .getDefaultSocketFactory(conf);
     dummyCache.put(dummySocketFactory, toBeCached1);
 
-    conf.set("hadoop.rpc.socket.factory.class.default",
+    conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
         "org.apache.hadoop.net.StandardSocketFactory");
     final SocketFactory defaultSocketFactory = NetUtils
         .getDefaultSocketFactory(conf);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

@@ -67,12 +67,21 @@ public class TestDistributedFileSystem {
   private boolean dualPortTesting = false;
 
+  private boolean noXmlDefaults = false;
+
   private HdfsConfiguration getTestConfiguration() {
-    HdfsConfiguration conf = new HdfsConfiguration();
+    HdfsConfiguration conf;
+    if (noXmlDefaults) {
+      conf = new HdfsConfiguration(false);
+    } else {
+      conf = new HdfsConfiguration();
+    }
+
     if (dualPortTesting) {
       conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
           "localhost:0");
     }
 
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
+
     return conf;
   }
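
HdfsConfiguration(false) forwards loadDefaults=false to Configuration, so
core-default.xml and hdfs-default.xml are never read and only in-code defaults
plus explicit set() calls are visible, exactly the condition that exposed this
bug. A sketch of the distinction (class name hypothetical; assumes the usual
core-default.xml is on the classpath for the first case):

    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class NoXmlDefaultsSketch {
      public static void main(String[] args) {
        String key = "hadoop.rpc.socket.factory.class.default";
        // XML defaults loaded: the key comes from core-default.xml.
        System.out.println(new HdfsConfiguration().get(key));
        // XML defaults skipped: the key is simply absent.
        System.out.println(new HdfsConfiguration(false).get(key)); // null
      }
    }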
@@ -612,11 +621,32 @@ public class TestDistributedFileSystem {
   public void testAllWithDualPort() throws Exception {
     dualPortTesting = true;
 
-    testFileSystemCloseAll();
-    testDFSClose();
-    testDFSClient();
-    testFileChecksum();
+    try {
+      testFileSystemCloseAll();
+      testDFSClose();
+      testDFSClient();
+      testFileChecksum();
+    } finally {
+      dualPortTesting = false;
+    }
+  }
+
+  @Test
+  public void testAllWithNoXmlDefaults() throws Exception {
+    // Do all the tests with a configuration that ignores the defaults in
+    // the XML files.
+    noXmlDefaults = true;
+
+    try {
+      testFileSystemCloseAll();
+      testDFSClose();
+      testDFSClient();
+      testFileChecksum();
+    } finally {
+      noXmlDefaults = false;
+    }
   }
 
   /**
    * Tests the normal path of batching up BlockLocation[]s to be passed to a