HDFS-2410. Further cleanup of hardcoded configuration keys and values. Contributed by Suresh Srinivas.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1204753 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2011-11-21 23:49:44 +00:00
parent b76b85986e
commit 5293760973
53 changed files with 252 additions and 199 deletions


@@ -63,6 +63,9 @@ Trunk (unreleased changes)
 HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
+HDFS-2410. Further cleanup of hardcoded configuration keys and values.
+(suresh)
 OPTIMIZATIONS
 HDFS-2477. Optimize computing the diff between a block report and the
 namenode state. (Tomasz Nykiel via hairong)


@@ -37,6 +37,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KE
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
@@ -48,6 +49,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT;
@@ -460,11 +462,11 @@ public class DataNode extends Configured
 if(LOG.isDebugEnabled()) {
 LOG.debug("Datanode listening on " + infoHost + ":" + tmpInfoPort);
 }
-if (conf.getBoolean("dfs.https.enable", false)) {
+if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
 boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
 DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
 InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
-"dfs.datanode.https.address", infoHost + ":" + 0));
+DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
 Configuration sslConf = new HdfsConfiguration(false);
 sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
 "ssl-server.xml"));
@@ -505,7 +507,7 @@ public class DataNode extends Configured
 private void initIpcServer(Configuration conf) throws IOException {
 InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
-conf.get("dfs.datanode.ipc.address"));
+conf.get(DFS_DATANODE_IPC_ADDRESS_KEY));
 // Add all the RPC protocols that the Datanode implements
 ClientDatanodeProtocolServerSideTranslatorR23
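Note: the DataNode hunks above all apply one pattern: each hardcoded key string is swapped for the matching DFSConfigKeys constant while the default value is left unchanged. A minimal standalone sketch of the resulting lookup is below; the class and method names are illustrative only, not part of the patch.

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;

// Hypothetical example class, not from the patch.
class HttpsAddressExample {
  static InetSocketAddress secureInfoAddr(Configuration conf, String infoHost) {
    // Was: conf.getBoolean("dfs.https.enable", false)
    if (!conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
      return null; // HTTPS not enabled
    }
    // Was: conf.get("dfs.datanode.https.address", infoHost + ":" + 0)
    return NetUtils.createSocketAddr(
        conf.get(DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
  }
}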


@@ -17,6 +17,10 @@
 */
 package org.apache.hadoop.hdfs.server.namenode;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
@@ -145,7 +149,7 @@ public class NameNodeHttpServer {
 }
 };
-boolean certSSL = conf.getBoolean("dfs.https.enable", false);
+boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
 boolean useKrb = UserGroupInformation.isSecurityEnabled();
 if (certSSL || useKrb) {
 boolean needClientAuth = conf.getBoolean(
@@ -156,14 +160,14 @@ public class NameNodeHttpServer {
 DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
 Configuration sslConf = new HdfsConfiguration(false);
 if (certSSL) {
-sslConf.addResource(conf.get(
-"dfs.https.server.keystore.resource", "ssl-server.xml"));
+sslConf.addResource(conf.get(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
 }
 httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth,
 useKrb);
 // assume same ssl port for all datanodes
 InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf
-.get("dfs.datanode.https.address", infoHost + ":" + 50475));
+.get(DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475));
 httpServer.setAttribute("datanode.https.port", datanodeSslPort
 .getPort());
 }
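Note: on the NameNode side the keystore resource name now also comes from DFSConfigKeys. A small sketch of how that resource is loaded into its own Configuration follows; the helper class and method are hypothetical, and it assumes DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT still resolves to "ssl-server.xml", as the removed literal suggests.

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Hypothetical helper, not from the patch.
class SslServerConfExample {
  static Configuration loadSslServerConf(Configuration conf) {
    // 'false' keeps the default *-site.xml resources out of this instance.
    Configuration sslConf = new HdfsConfiguration(false);
    // Was: conf.get("dfs.https.server.keystore.resource", "ssl-server.xml")
    sslConf.addResource(conf.get(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
        DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
    return sslConf;
  }
}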


@@ -29,6 +29,7 @@ import org.apache.hadoop.fi.DataTransferTestUtil.SleepAction;
 import org.apache.hadoop.fi.DataTransferTestUtil.VerificationAction;
 import org.apache.hadoop.fi.FiTestUtil;
 import org.apache.hadoop.fi.FiTestUtil.Action;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -56,8 +57,9 @@ public class TestFiDataTransferProtocol {
 static private FSDataOutputStream createFile(FileSystem fs, Path p
 ) throws IOException {
-return fs.create(p, true, fs.getConf().getInt("io.file.buffer.size", 4096),
-REPLICATION, BLOCKSIZE);
+return fs.create(p, true,
+fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,
+4096), REPLICATION, BLOCKSIZE);
 }
 {


@@ -30,6 +30,7 @@ import org.apache.hadoop.fi.DataTransferTestUtil.DataTransferTest;
 import org.apache.hadoop.fi.DataTransferTestUtil.SleepAction;
 import org.apache.hadoop.fi.DataTransferTestUtil.VerificationAction;
 import org.apache.hadoop.fi.FiTestUtil;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -65,8 +66,8 @@ public class TestFiDataTransferProtocol2 {
 static private FSDataOutputStream createFile(FileSystem fs, Path p
 ) throws IOException {
-return fs.create(p, true, fs.getConf().getInt("io.file.buffer.size", 4096),
-REPLICATION, BLOCKSIZE);
+return fs.create(p, true, fs.getConf()
+.getInt(IO_FILE_BUFFER_SIZE_KEY, 4096), REPLICATION, BLOCKSIZE);
 }
 {


@@ -163,7 +163,7 @@ public class TestStickyBit extends TestCase {
 try {
 Configuration conf = new HdfsConfiguration();
 conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
 FileSystem hdfs = cluster.getFileSystem();


@@ -143,8 +143,8 @@ public class AppendTestUtil {
 public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
 throws IOException {
 return fileSys.create(name, true,
-fileSys.getConf().getInt("io.file.buffer.size", 4096),
-(short) repl, (long) BLOCK_SIZE);
+fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+(short) repl, BLOCK_SIZE);
 }
 /**


@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.net.NetUtils;
@@ -148,7 +149,7 @@ public class BlockReaderTestUtil {
 sock, targetAddr.toString()+ ":" + block.getBlockId(), block,
 testBlock.getBlockToken(),
 offset, lenToRead,
-conf.getInt("io.file.buffer.size", 4096),
+conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
 true, "");
 }
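Note: most of the remaining test changes repeat one idiom: the client buffer size is read through CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY (or its static import) instead of the literal "io.file.buffer.size". A minimal sketch of that idiom, with a hypothetical helper name not taken from the patch:

import java.io.IOException;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper, not from the patch.
class BufferSizeExample {
  static FSDataInputStream openWithConfiguredBuffer(FileSystem fs, Path p)
      throws IOException {
    // Was: fs.getConf().getInt("io.file.buffer.size", 4096)
    int bufferSize = fs.getConf().getInt(
        CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
    return fs.open(p, bufferSize);
  }
}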


@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.junit.AfterClass;
@@ -66,7 +67,7 @@ public class FileAppendTest4 {
 }
 @AfterClass
-public static void tearDown() throws IOException {
+public static void tearDown() {
 cluster.shutdown();
 }
@@ -91,7 +92,7 @@ public class FileAppendTest4 {
 new Path("foo"+ oldFileLen +"_"+ flushedBytes1 +"_"+ flushedBytes2);
 LOG.info("Creating file " + p);
 FSDataOutputStream out = fs.create(p, false,
-conf.getInt("io.file.buffer.size", 4096),
+conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
 REPLICATION, BLOCK_SIZE);
 out.write(contents, 0, oldFileLen);
 out.close();


@@ -39,9 +39,9 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -324,8 +324,8 @@ public class MiniDFSCluster {
 * Servers will be started on free ports.
 * <p>
 * The caller must manage the creation of NameNode and DataNode directories
-* and have already set {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
-* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} in the given conf.
+* and have already set {@link #DFS_NAMENODE_NAME_DIR_KEY} and
+* {@link #DFS_DATANODE_DATA_DIR_KEY} in the given conf.
 *
 * @param conf the base configuration to use in starting the servers. This
 * will be modified as necessary.
@@ -399,8 +399,8 @@
 * @param format if true, format the NameNode and DataNodes before starting
 * up
 * @param manageDfsDirs if true, the data directories for servers will be
-* created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
-* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
+* created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and
+* {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in
 * the conf
 * @param operation the operation with which to start the servers. If null
 * or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -431,8 +431,8 @@
 * @param numDataNodes Number of DataNodes to start; may be zero
 * @param format if true, format the NameNode and DataNodes before starting up
 * @param manageDfsDirs if true, the data directories for servers will be
-* created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
-* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
+* created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and
+* {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in
 * the conf
 * @param operation the operation with which to start the servers. If null
 * or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -465,11 +465,11 @@
 * @param numDataNodes Number of DataNodes to start; may be zero
 * @param format if true, format the NameNode and DataNodes before starting up
 * @param manageNameDfsDirs if true, the data directories for servers will be
-* created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
-* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
+* created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and
+* {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in
 * the conf
 * @param manageDataDfsDirs if true, the data directories for datanodes will
-* be created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY}
+* be created and {@link #DFS_DATANODE_DATA_DIR_KEY}
 * set to same in the conf
 * @param operation the operation with which to start the servers. If null
 * or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -527,15 +527,15 @@
 }
 // disable service authorization, as it does not work with tunnelled RPC
-conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
+conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION,
 false);
 }
-int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
-conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
-conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
-conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
-conf.setClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
+conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
+conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
+conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
+conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
 StaticMapping.class, DNSToSwitchMapping.class);
 Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
@@ -543,8 +543,8 @@
 federation = true;
 if (!federation) {
-conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "127.0.0.1:" + nameNodePort);
-conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:"
+conf.set(FS_DEFAULT_NAME_KEY, "127.0.0.1:" + nameNodePort);
+conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:"
 + nameNodeHttpPort);
 NameNode nn = createNameNode(0, conf, numDataNodes, manageNameDfsDirs,
 format, operation, clusterId);
@@ -588,7 +588,7 @@
 initFederatedNamenodeAddress(conf, nameserviceId, nnPort);
 nnPort = nnPort == 0 ? 0 : nnPort + 2;
 }
-conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceIdList);
+conf.set(DFS_FEDERATION_NAMESERVICES, nameserviceIdList);
 }
 /* For federated namenode initialize the address:port */
@@ -596,11 +596,11 @@
 String nameserviceId, int nnPort) {
 // Set nameserviceId specific key
 String key = DFSUtil.getNameServiceIdKey(
-DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId);
+DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId);
 conf.set(key, "127.0.0.1:0");
 key = DFSUtil.getNameServiceIdKey(
-DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId);
+DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId);
 conf.set(key, "127.0.0.1:" + nnPort);
 }
@@ -621,10 +621,10 @@
 StartupOption operation, String clusterId)
 throws IOException {
 if (manageNameDfsDirs) {
-conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+conf.set(DFS_NAMENODE_NAME_DIR_KEY,
 fileAsURI(new File(base_dir, "name" + (2*nnIndex + 1)))+","+
 fileAsURI(new File(base_dir, "name" + (2*nnIndex + 2))));
-conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+conf.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,
 fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 1)))+","+
 fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 2))));
 }
@@ -649,17 +649,17 @@
 int numDataNodes, boolean manageNameDfsDirs, boolean format,
 StartupOption operation, String clusterId, String nameserviceId)
 throws IOException {
-conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
+conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
 NameNode nn = createNameNode(nnIndex, conf, numDataNodes, manageNameDfsDirs,
 format, operation, clusterId);
 conf.set(DFSUtil.getNameServiceIdKey(
-DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId), NameNode
+DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId), NameNode
 .getHostPortString(nn.getNameNodeAddress()));
 conf.set(DFSUtil.getNameServiceIdKey(
-DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NameNode
+DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NameNode
 .getHostPortString(nn.getHttpAddress()));
 DFSUtil.setGenericConf(conf, nameserviceId,
-DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+DFS_NAMENODE_HTTP_ADDRESS_KEY);
 nameNodes[nnIndex] = new NameNodeInfo(nn, new Configuration(conf));
 }
@@ -739,7 +739,7 @@
 * will be modified as necessary.
 * @param numDataNodes Number of DataNodes to start; may be zero
 * @param manageDfsDirs if true, the data directories for DataNodes will be
-* created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set
+* created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be set
 * in the conf
 * @param operation the operation with which to start the DataNodes. If null
 * or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -771,7 +771,7 @@
 * will be modified as necessary.
 * @param numDataNodes Number of DataNodes to start; may be zero
 * @param manageDfsDirs if true, the data directories for DataNodes will be
-* created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
+* created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be
 * set in the conf
 * @param operation the operation with which to start the DataNodes. If null
 * or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -805,7 +805,7 @@
 * will be modified as necessary.
 * @param numDataNodes Number of DataNodes to start; may be zero
 * @param manageDfsDirs if true, the data directories for DataNodes will be
-* created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
+* created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be
 * set in the conf
 * @param operation the operation with which to start the DataNodes. If null
 * or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -823,12 +823,12 @@
 long[] simulatedCapacities,
 boolean setupHostsFile,
 boolean checkDataNodeAddrConfig) throws IOException {
-conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
+conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
 int curDatanodesNum = dataNodes.size();
 // for mincluster's the default initialDelay for BRs is 0
-if (conf.get(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
-conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
+if (conf.get(DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
+conf.setLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
 }
 // If minicluster's name node is null assume that the conf has been
 // set with the right address:port of the name node.
@@ -875,8 +875,8 @@
 + i + ": " + dir1 + " or " + dir2);
 }
 String dirs = fileAsURI(dir1) + "," + fileAsURI(dir2);
-dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
-conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
+dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
+conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
 }
 if (simulatedCapacities != null) {
 dnConf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
@@ -905,7 +905,7 @@
 DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf);
 if(dn == null)
 throw new IOException("Cannot start DataNode in "
-+ dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
++ dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
 //since the HDFS does things based on IP:port, we need to add the mapping
 //for IP:port to rackId
 String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
@@ -1321,7 +1321,7 @@
 Configuration newconf = new HdfsConfiguration(conf); // save cloned config
 if (keepPort) {
 InetSocketAddress addr = dnprop.datanode.getSelfAddr();
-conf.set("dfs.datanode.address", addr.getAddress().getHostAddress() + ":"
+conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":"
 + addr.getPort());
 }
 dataNodes.add(new DataNodeProperties(DataNode.createDataNode(args, conf),
@@ -1448,10 +1448,10 @@
 /**
 * @return a http URL
 */
-public String getHttpUri(int nnIndex) throws IOException {
+public String getHttpUri(int nnIndex) {
 return "http://"
 + nameNodes[nnIndex].conf
-.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+.get(DFS_NAMENODE_HTTP_ADDRESS_KEY);
 }
 /**
@@ -1460,7 +1460,7 @@
 public HftpFileSystem getHftpFileSystem(int nnIndex) throws IOException {
 String uri = "hftp://"
 + nameNodes[nnIndex].conf
-.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+.get(DFS_NAMENODE_HTTP_ADDRESS_KEY);
 try {
 return (HftpFileSystem)FileSystem.get(new URI(uri), conf);
 } catch (URISyntaxException e) {
@@ -1910,9 +1910,9 @@
 nameNodes = newlist;
 String nameserviceId = NAMESERVICE_ID_PREFIX + (nnIndex + 1);
-String nameserviceIds = conf.get(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES);
+String nameserviceIds = conf.get(DFS_FEDERATION_NAMESERVICES);
 nameserviceIds += "," + nameserviceId;
-conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceIds);
+conf.set(DFS_FEDERATION_NAMESERVICES, nameserviceIds);
 initFederatedNamenodeAddress(conf, nameserviceId, namenodePort);
 createFederatedNameNode(nnIndex, conf, numDataNodes, true, true, null,
@@ -1945,28 +1945,28 @@
 private void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
 boolean checkDataNodeAddrConfig) throws IOException {
 if (setupHostsFile) {
-String hostsFile = conf.get(DFSConfigKeys.DFS_HOSTS, "").trim();
+String hostsFile = conf.get(DFS_HOSTS, "").trim();
 if (hostsFile.length() == 0) {
 throw new IOException("Parameter dfs.hosts is not setup in conf");
 }
 // Setup datanode in the include file, if it is defined in the conf
 String address = "127.0.0.1:" + getFreeSocketPort();
 if (checkDataNodeAddrConfig) {
-conf.setIfUnset("dfs.datanode.address", address);
+conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, address);
 } else {
-conf.set("dfs.datanode.address", address);
+conf.set(DFS_DATANODE_ADDRESS_KEY, address);
 }
 addToFile(hostsFile, address);
 LOG.info("Adding datanode " + address + " to hosts file " + hostsFile);
 } else {
 if (checkDataNodeAddrConfig) {
-conf.setIfUnset("dfs.datanode.address", "127.0.0.1:0");
-conf.setIfUnset("dfs.datanode.http.address", "127.0.0.1:0");
-conf.setIfUnset("dfs.datanode.ipc.address", "127.0.0.1:0");
+conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
+conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
 } else {
-conf.set("dfs.datanode.address", "127.0.0.1:0");
-conf.set("dfs.datanode.http.address", "127.0.0.1:0");
-conf.set("dfs.datanode.ipc.address", "127.0.0.1:0");
+conf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
+conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
 }
 }
 }
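Note: the setupDatanodeAddress hunk above keeps the existing behaviour; when checkDataNodeAddrConfig is true, caller-supplied datanode addresses are preserved via setIfUnset, otherwise they are overwritten. A minimal standalone sketch of that branch follows; the class and method names are hypothetical, not from the patch.

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;

import org.apache.hadoop.conf.Configuration;

// Hypothetical helper, not from the patch.
class DatanodeAddressExample {
  static void bindToLoopback(Configuration conf, boolean keepExistingAddrs) {
    String any = "127.0.0.1:0"; // port 0 lets the OS pick a free port
    if (keepExistingAddrs) {
      // Respect addresses the caller already configured.
      conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, any);
      conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, any);
      conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, any);
    } else {
      conf.set(DFS_DATANODE_ADDRESS_KEY, any);
      conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, any);
      conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, any);
    }
  }
}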


@@ -24,6 +24,7 @@ import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -80,8 +81,8 @@ public class TestBlockMissingException extends TestCase {
 //
 private void createOldFile(FileSystem fileSys, Path name, int repl, int numBlocks, long blocksize)
 throws IOException {
-FSDataOutputStream stm = fileSys.create(name, true,
-fileSys.getConf().getInt("io.file.buffer.size", 4096),
+FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
 (short) repl, blocksize);
 // fill data into file
 final byte[] b = new byte[(int)blocksize];


@@ -40,7 +40,7 @@ public class TestClientProtocolForPipelineRecovery {
 @Test public void testGetNewStamp() throws IOException {
 int numDataNodes = 1;
 Configuration conf = new HdfsConfiguration();
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
 try {
 cluster.waitActive();


@@ -28,6 +28,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -80,7 +81,7 @@ public class TestClientReportBadBlock {
 .build();
 cluster.waitActive();
 dfs = (DistributedFileSystem) cluster.getFileSystem();
-buffersize = conf.getInt("io.file.buffer.size", 4096);
+buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
 }
 @After


@@ -28,6 +28,9 @@ import java.io.IOException;
 import java.util.ArrayList;
 import junit.framework.TestCase;
 import org.apache.hadoop.conf.Configuration;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
@@ -62,9 +65,9 @@ public class TestDFSAddressConfig extends TestCase {
 assertNotNull("Should have been able to stop simulated datanode", dnp);
 }
-conf.unset("dfs.datanode.address");
-conf.unset("dfs.datanode.http.address");
-conf.unset("dfs.datanode.ipc.address");
+conf.unset(DFS_DATANODE_ADDRESS_KEY);
+conf.unset(DFS_DATANODE_HTTP_ADDRESS_KEY);
+conf.unset(DFS_DATANODE_IPC_ADDRESS_KEY);
 cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
 null, null, null, false, true);
@@ -87,9 +90,9 @@
 assertNotNull("Should have been able to stop simulated datanode", dnp);
 }
-conf.set("dfs.datanode.address","0.0.0.0:0");
-conf.set("dfs.datanode.http.address","0.0.0.0:0");
-conf.set("dfs.datanode.ipc.address","0.0.0.0:0");
+conf.set(DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
+conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
 cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
 null, null, null, false, true);


@@ -43,6 +43,7 @@ import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileSystem;
@@ -141,7 +142,7 @@ public class TestDFSClientRetries extends TestCase {
 conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 1);
 // set a small buffer size
 final int bufferSize = 4096;
-conf.setInt("io.file.buffer.size", bufferSize);
+conf.setInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, bufferSize);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();


@@ -22,14 +22,13 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
-import javax.security.auth.login.LoginException;
 import junit.framework.AssertionFailedError;
 import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -202,7 +201,7 @@ public class TestDFSPermission extends TestCase {
 switch (op) {
 case CREATE:
 FSDataOutputStream out = fs.create(name, permission, true,
-conf.getInt("io.file.buffer.size", 4096),
+conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
 fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
 out.close();
 break;
@@ -520,8 +519,7 @@
 }
 /* Perform an operation and verify if the permission checking is correct */
-void verifyPermission(UserGroupInformation ugi) throws LoginException,
-IOException {
+void verifyPermission(UserGroupInformation ugi) throws IOException {
 if (this.ugi != ugi) {
 setRequiredPermissions(ugi);
 this.ugi = ugi;
@@ -564,8 +562,7 @@
 }
 /* Set the permissions required to pass the permission checking */
-protected void setRequiredPermissions(UserGroupInformation ugi)
-throws IOException {
+protected void setRequiredPermissions(UserGroupInformation ugi) {
 if (SUPERUSER.equals(ugi)) {
 requiredAncestorPermission = SUPER_MASK;
 requiredParentPermission = SUPER_MASK;


@@ -208,7 +208,7 @@ public class TestDataTransferProtocol extends TestCase {
 @Test public void testOpWrite() throws IOException {
 int numDataNodes = 1;
 Configuration conf = new HdfsConfiguration();
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
 try {
 cluster.waitActive();


@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -123,9 +124,9 @@ public class TestDatanodeDeath extends TestCase {
 static private FSDataOutputStream createFile(FileSystem fileSys, Path name, short repl)
 throws IOException {
 // create and write a file that contains three blocks of data
-FSDataOutputStream stm = fileSys.create(name, true,
-fileSys.getConf().getInt("io.file.buffer.size", 4096),
-repl, (long)blockSize);
+FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl,
+blockSize);
 return stm;
 }


@@ -30,6 +30,7 @@ import java.util.Random;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -115,9 +116,9 @@ public class TestDecommission {
 private void writeFile(FileSystem fileSys, Path name, int repl)
 throws IOException {
 // create and write a file that contains three blocks of data
-FSDataOutputStream stm = fileSys.create(name, true,
-fileSys.getConf().getInt("io.file.buffer.size", 4096),
-(short)repl, (long)blockSize);
+FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+(short) repl, blockSize);
 byte[] buffer = new byte[fileSize];
 Random rand = new Random(seed);
 rand.nextBytes(buffer);
@@ -246,7 +247,7 @@
 * Wait till node is fully decommissioned.
 */
 private void waitNodeState(DatanodeInfo node,
-AdminStates state) throws IOException {
+AdminStates state) {
 boolean done = state == node.getAdminState();
 while (!done) {
 LOG.info("Waiting for node " + node + " to change state to "


@@ -31,6 +31,7 @@ import java.util.Random;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
@@ -88,17 +89,17 @@ public class TestDistributedFileSystem {
 @Test
 public void testDFSClose() throws Exception {
 Configuration conf = getTestConfiguration();
-MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+MiniDFSCluster cluster = null;
+try {
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
 FileSystem fileSys = cluster.getFileSystem();
-try {
 // create two files
 fileSys.create(new Path("/test/dfsclose/file-0"));
 fileSys.create(new Path("/test/dfsclose/file-1"));
 fileSys.close();
-}
-finally {
+} finally {
 if (cluster != null) {cluster.shutdown();}
 }
 }
@@ -106,10 +107,10 @@
 @Test
 public void testDFSSeekExceptions() throws IOException {
 Configuration conf = getTestConfiguration();
-MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+MiniDFSCluster cluster = null;
-FileSystem fileSys = cluster.getFileSystem();
 try {
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+FileSystem fileSys = cluster.getFileSystem();
 String file = "/test/fileclosethenseek/file-0";
 Path path = new Path(file);
 // create file
@@ -455,7 +456,7 @@
 final Path dir = new Path("/filechecksum");
 final int block_size = 1024;
-final int buffer_size = conf.getInt("io.file.buffer.size", 4096);
+final int buffer_size = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
 conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
 //try different number of blocks


@@ -27,6 +27,7 @@ import junit.framework.TestCase;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -54,7 +55,7 @@ public class TestFSInputChecker extends TestCase {
 private void writeFile(FileSystem fileSys, Path name) throws IOException {
 // create and write a file that contains three blocks of data
 FSDataOutputStream stm = fileSys.create(name, new FsPermission((short)0777),
-true, fileSys.getConf().getInt("io.file.buffer.size", 4096),
+true, fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
 NUM_OF_DATANODES, BLOCK_SIZE, null);
 stm.write(expected);
 stm.close();
@@ -325,8 +326,10 @@
 throws IOException {
 Path file = new Path("try.dat");
 writeFile(fileSys, file);
-stm = fileSys.open(file,
-fileSys.getConf().getInt("io.file.buffer.size", 4096));
+stm = fileSys.open(
+file,
+fileSys.getConf().getInt(
+CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096));
 checkSeekAndRead();
 stm.close();
 cleanupFile(fileSys, file);


@@ -21,6 +21,8 @@ import junit.framework.TestCase;
 import java.io.*;
 import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -43,7 +45,7 @@ public class TestFSOutputSummer extends TestCase {
 /* create a file, write all data at once */
 private void writeFile1(Path name) throws Exception {
 FSDataOutputStream stm = fileSys.create(name, true,
-fileSys.getConf().getInt("io.file.buffer.size", 4096),
+fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096),
 NUM_OF_DATANODES, BLOCK_SIZE);
 stm.write(expected);
 stm.close();
@@ -54,7 +56,7 @@
 /* create a file, write data chunk by chunk */
 private void writeFile2(Path name) throws Exception {
 FSDataOutputStream stm = fileSys.create(name, true,
-fileSys.getConf().getInt("io.file.buffer.size", 4096),
+fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096),
 NUM_OF_DATANODES, BLOCK_SIZE);
 int i=0;
 for( ;i<FILE_SIZE-BYTES_PER_CHECKSUM; i+=BYTES_PER_CHECKSUM) {
@@ -69,7 +71,7 @@
 /* create a file, write data with vairable amount of data */
 private void writeFile3(Path name) throws Exception {
 FSDataOutputStream stm = fileSys.create(name, true,
-fileSys.getConf().getInt("io.file.buffer.size", 4096),
+fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096),
 NUM_OF_DATANODES, BLOCK_SIZE);
 stm.write(expected, 0, HALF_CHUNK_SIZE);
 stm.write(expected, HALF_CHUNK_SIZE, BYTES_PER_CHECKSUM+2);


@@ -85,7 +85,7 @@ public class TestFileAppend2 extends TestCase {
 conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
 }
 conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
 FileSystem fs = cluster.getFileSystem();
@@ -306,7 +306,7 @@
 fileContents, "Read 2");
 } catch (Throwable e) {
 globalStatus = false;
-if (e != null && e.toString() != null) {
+if (e.toString() != null) {
 System.out.println("Workload exception " + id +
 " testfile " + testfile +
 " " + e);
@@ -338,7 +338,7 @@
 conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 30000);
 conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, 30000);
 conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numDatanodes)


@@ -27,6 +27,7 @@ import junit.framework.TestSuite;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -68,8 +69,8 @@ public class TestFileAppend3 extends junit.framework.TestCase {
 AppendTestUtil.LOG.info("setUp()");
 conf = new HdfsConfiguration();
 conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
-conf.setBoolean("dfs.support.append", true);
-buffersize = conf.getInt("io.file.buffer.size", 4096);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
+buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
 fs = (DistributedFileSystem)cluster.getFileSystem();
 }

View File

@@ -311,8 +311,9 @@ public class TestFileConcurrentReader extends junit.framework.TestCase {
 final int writeSize,
 Configuration conf
 ) throws IOException {
-conf.setBoolean("dfs.support.append", syncType == SyncType.APPEND);
-conf.setBoolean("dfs.datanode.transferTo.allowed", transferToAllowed);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, syncType == SyncType.APPEND);
+conf.setBoolean(DFSConfigKeys.DFS_DATANODE_TRANSFERTO_ALLOWED_KEY,
+    transferToAllowed);
 init(conf);
 final Path file = new Path("/block-being-written-to");
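Beyond consistency, referencing constants makes key names compile-checked: a typo inside a quoted key is silently accepted by Configuration, while a misspelled constant does not compile. A small illustration under that assumption (not part of the commit):

// Illustrative only; DFSConfigKeys.DFS_SUPPORT_APPEND_KEY is taken from the
// hunks above, everything else is a made-up example.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class WhyConstantsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // A misspelled raw key ("suport") just creates an unused property; the
    // intended setting silently keeps its default.
    conf.setBoolean("dfs.suport.append", true);
    // With the constant, the key name is checked by the compiler.
    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
    System.out.println(conf.getBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, false));
  }
}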

View File

@@ -28,6 +28,7 @@ import java.util.EnumSet;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -75,9 +76,9 @@ public class TestFileCreation extends junit.framework.TestCase {
 public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
 throws IOException {
 System.out.println("createFile: Created " + name + " with " + repl + " replica.");
-FSDataOutputStream stm = fileSys.create(name, true,
-    fileSys.getConf().getInt("io.file.buffer.size", 4096),
-    (short)repl, (long)blockSize);
+FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+    (short) repl, blockSize);
 return stm;
 }
@@ -638,7 +639,6 @@ public class TestFileCreation extends junit.framework.TestCase {
 out = createNonRecursive(fs, path, 1, createFlag);
 out.close();
 // Create a file when parent dir exists as file, should fail
-expectedException = null;
 try {
 createNonRecursive(fs, new Path(path, "Create"), 1, createFlag);
 } catch (IOException e) {
@@ -704,7 +704,7 @@ public class TestFileCreation extends junit.framework.TestCase {
 + " replica.");
 FSDataOutputStream stm = ((DistributedFileSystem) fs).createNonRecursive(
 name, FsPermission.getDefault(), flag, fs.getConf().getInt(
-    "io.file.buffer.size", 4096), (short) repl, (long) blockSize, null);
+    CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) repl, blockSize, null);
 return stm;
 }

View File

@@ -43,7 +43,7 @@ public class TestFileCreationDelete extends junit.framework.TestCase {
 conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 // create cluster
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

View File

@@ -308,23 +308,23 @@ public class TestHDFSServerPorts extends TestCase {
 // start data-node on the same port as name-node
 Configuration conf2 = new HdfsConfiguration(config);
 conf2.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, new File(hdfsDir, "data").getPath());
-conf2.set("dfs.datanode.address",
+conf2.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
 FileSystem.getDefaultUri(config).getAuthority());
-conf2.set("dfs.datanode.http.address", THIS_HOST);
+conf2.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, THIS_HOST);
 boolean started = canStartDataNode(conf2);
 assertFalse(started); // should fail
 // bind http server to the same port as name-node
-conf2.set("dfs.datanode.address", THIS_HOST);
-conf2.set("dfs.datanode.http.address",
+conf2.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, THIS_HOST);
+conf2.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
 config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
 started = canStartDataNode(conf2);
 assertFalse(started); // should fail
 // both ports are different from the name-node ones
-conf2.set("dfs.datanode.address", THIS_HOST);
-conf2.set("dfs.datanode.http.address", THIS_HOST);
-conf2.set("dfs.datanode.ipc.address", THIS_HOST);
+conf2.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, THIS_HOST);
+conf2.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, THIS_HOST);
+conf2.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, THIS_HOST);
 started = canStartDataNode(conf2);
 assertTrue(started); // should start now
 } finally {
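The TestHDFSServerPorts hunks swap the three datanode endpoint keys for their DFSConfigKeys constants, which presumably still resolve to dfs.datanode.address, dfs.datanode.http.address and dfs.datanode.ipc.address. A hedged sketch of binding all three to ephemeral ports (the host:port values here stand in for THIS_HOST and are illustrative only):

// Illustrative only; the constants are those used in the hunks above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class DataNodePortSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");       // was "dfs.datanode.address"
    conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");  // was "dfs.datanode.http.address"
    conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");   // was "dfs.datanode.ipc.address"
    System.out.println(conf.get(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY));
  }
}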

View File

@@ -27,6 +27,7 @@ import java.net.*;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -53,9 +54,9 @@ public class TestInjectionForSimulatedStorage extends TestCase {
 private void writeFile(FileSystem fileSys, Path name, int repl)
 throws IOException {
 // create and write a file that contains three blocks of data
-FSDataOutputStream stm = fileSys.create(name, true,
-    fileSys.getConf().getInt("io.file.buffer.size", 4096),
-    (short)repl, (long)blockSize);
+FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+    (short) repl, blockSize);
 byte[] buffer = new byte[filesize];
 for (int i=0; i<buffer.length; i++) {
 buffer[i] = '1';

View File

@@ -23,6 +23,7 @@ import java.util.Arrays;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -55,8 +56,8 @@ public class TestLargeBlock {
 static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl,
 final long blockSize)
 throws IOException {
-FSDataOutputStream stm = fileSys.create(name, true,
-    fileSys.getConf().getInt("io.file.buffer.size", 4096),
+FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
 (short) repl, blockSize);
 LOG.info("createFile: Created " + name + " with " + repl + " replica.");
 return stm;

View File

@@ -69,7 +69,7 @@ public class TestLeaseRecovery extends junit.framework.TestCase {
 final int ORG_FILE_SIZE = 3000;
 Configuration conf = new HdfsConfiguration();
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 MiniDFSCluster cluster = null;
 try {

View File

@@ -30,6 +30,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -72,7 +73,8 @@ public class TestLeaseRecovery2 {
 static private MiniDFSCluster cluster;
 static private DistributedFileSystem dfs;
 final static private Configuration conf = new HdfsConfiguration();
-final static private int BUF_SIZE = conf.getInt("io.file.buffer.size", 4096);
+final static private int BUF_SIZE = conf.getInt(
+    CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
 final static private long SHORT_LEASE_PERIOD = 1000L;
 final static private long LONG_LEASE_PERIOD = 60*60*SHORT_LEASE_PERIOD;

View File

@@ -24,6 +24,7 @@ import java.net.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -46,9 +47,9 @@ public class TestModTime extends TestCase {
 private void writeFile(FileSystem fileSys, Path name, int repl)
 throws IOException {
 // create and write a file that contains three blocks of data
-FSDataOutputStream stm = fileSys.create(name, true,
-    fileSys.getConf().getInt("io.file.buffer.size", 4096),
-    (short)repl, (long)blockSize);
+FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+    (short) repl, blockSize);
 byte[] buffer = new byte[fileSize];
 Random rand = new Random(seed);
 rand.nextBytes(buffer);

View File

@@ -27,6 +27,7 @@ import java.util.concurrent.atomic.AtomicReference;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -65,9 +66,9 @@ public class TestMultiThreadedHflush {
 */
 private FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
 throws IOException {
-FSDataOutputStream stm = fileSys.create(name, true,
-    fileSys.getConf().getInt("io.file.buffer.size", 4096),
-    (short)repl, (long)blockSize);
+FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+    (short) repl, blockSize);
 return stm;
 }

View File

@@ -42,7 +42,7 @@ public class TestPread extends TestCase {
 private void writeFile(FileSystem fileSys, Path name) throws IOException {
 // create and write a file that contains three blocks of data
 DataOutputStream stm = fileSys.create(name, true, 4096, (short)1,
-    (long)blockSize);
+    blockSize);
 // test empty file open and read
 stm.close();
 FSDataInputStream in = fileSys.open(name);
@@ -61,7 +61,7 @@ public class TestPread extends TestCase {
 assertTrue("Cannot delete file", false);
 // now create the real file
-stm = fileSys.create(name, true, 4096, (short)1, (long)blockSize);
+stm = fileSys.create(name, true, 4096, (short)1, blockSize);
 Random rand = new Random(seed);
 rand.nextBytes(buffer);
 stm.write(buffer);
@@ -206,7 +206,7 @@ public class TestPread extends TestCase {
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
 conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
 if (simulatedStorage) {
-conf.setBoolean("dfs.datanode.simulateddatastorage", true);
+conf.setBoolean(DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_KEY, true);
 }
 if (disableTransferTo) {
 conf.setBoolean("dfs.datanode.transferTo.allowed", false);

View File

@@ -84,7 +84,7 @@ public class TestQuota {
 // Space quotas
 final int DEFAULT_BLOCK_SIZE = 512;
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
 final FileSystem fs = cluster.getFileSystem();
 assertTrue("Not a HDFS: "+fs.getUri(),
@@ -533,7 +533,7 @@ public class TestQuota {
 // set a smaller block size so that we can test with smaller
 // diskspace quotas
 conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "512");
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
 final FileSystem fs = cluster.getFileSystem();
 assertTrue("Not a HDFS: "+fs.getUri(),

View File

@@ -24,6 +24,7 @@ import java.security.PrivilegedExceptionAction;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -77,7 +78,7 @@ public class TestReadWhileWriting {
 // Do not close file yet.
 {
 final FSDataOutputStream out = fs.create(p, true,
-    fs.getConf().getInt("io.file.buffer.size", 4096),
+    fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
 (short)3, BLOCK_SIZE);
 write(out, 0, half);

View File

@@ -54,7 +54,7 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 // create cluster
 System.out.println("Test 1*****************************");
@@ -140,7 +140,7 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 System.out.println("Test 2************************************");
 // create cluster
@@ -215,7 +215,7 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 System.out.println("Test 3************************************");
 // create cluster
@@ -280,7 +280,7 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 System.out.println("Test 4************************************");
 // create cluster

View File

@@ -31,6 +31,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -60,9 +61,9 @@ public class TestReplication extends TestCase {
 private void writeFile(FileSystem fileSys, Path name, int repl)
 throws IOException {
 // create and write a file that contains three blocks of data
-FSDataOutputStream stm = fileSys.create(name, true,
-    fileSys.getConf().getInt("io.file.buffer.size", 4096),
-    (short)repl, (long)blockSize);
+FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+    (short) repl, blockSize);
 byte[] buffer = new byte[fileSize];
 Random rand = new Random(seed);
 rand.nextBytes(buffer);

View File

@@ -24,6 +24,7 @@ import java.net.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -49,9 +50,9 @@ public class TestSetTimes extends TestCase {
 private FSDataOutputStream writeFile(FileSystem fileSys, Path name, int repl)
 throws IOException {
-FSDataOutputStream stm = fileSys.create(name, true,
-    fileSys.getConf().getInt("io.file.buffer.size", 4096),
-    (short)repl, (long)blockSize);
+FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+    (short) repl, blockSize);
 byte[] buffer = new byte[fileSize];
 Random rand = new Random(seed);
 rand.nextBytes(buffer);

View File

@@ -21,6 +21,7 @@ import junit.framework.TestCase;
 import java.io.*;
 import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -40,9 +41,9 @@ public class TestSmallBlock extends TestCase {
 private void writeFile(FileSystem fileSys, Path name) throws IOException {
 // create and write a file that contains three blocks of data
-FSDataOutputStream stm = fileSys.create(name, true,
-    fileSys.getConf().getInt("io.file.buffer.size", 4096),
-    (short)1, (long)blockSize);
+FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+    (short) 1, blockSize);
 byte[] buffer = new byte[fileSize];
 Random rand = new Random(seed);
 rand.nextBytes(buffer);
@@ -92,7 +93,7 @@ public class TestSmallBlock extends TestCase {
 public void testSmallBlock() throws IOException {
 Configuration conf = new HdfsConfiguration();
 if (simulatedStorage) {
-conf.setBoolean("dfs.datanode.simulateddatastorage", true);
+conf.setBoolean(DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_KEY, true);
 }
 conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1");
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

View File

@@ -31,6 +31,7 @@ import java.util.Random;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -123,7 +124,7 @@ public class TestBlockTokenWithDFS {
 private static FSDataOutputStream writeFile(FileSystem fileSys, Path name,
 short repl, long blockSize) throws IOException {
 FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-    .getInt("io.file.buffer.size", 4096), repl, blockSize);
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl, blockSize);
 return stm;
 }
@@ -173,7 +174,7 @@ public class TestBlockTokenWithDFS {
 }
 // get a conf for testing
-private static Configuration getConf(int numDataNodes) throws IOException {
+private static Configuration getConf(int numDataNodes) {
 Configuration conf = new Configuration();
 conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
@@ -181,7 +182,8 @@ public class TestBlockTokenWithDFS {
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
 conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
 conf.setInt("ipc.client.connect.max.retries", 0);
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
+    DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
 return conf;
 }
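Unlike the other dfs.support.append hunks, this one replaces the literal true with DFS_SUPPORT_APPEND_DEFAULT, which only preserves the test's behavior if that default is in fact true. A small check encoding that assumption (illustrative only, not part of the commit):

// Assumption: DFS_SUPPORT_APPEND_DEFAULT is true, so setting the key to the
// default is equivalent to the old hardcoded `true`.
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class AppendDefaultCheck {
  public static void main(String[] args) {
    if (!DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT) {
      throw new AssertionError("dfs.support.append default is not true; the hunk above changes behavior");
    }
    System.out.println("default matches the previously hardcoded value");
  }
}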

View File

@@ -391,7 +391,7 @@ public class TestBlocksWithNotEnoughRacks {
 Path excludeFile = new Path(dir, "exclude");
 assertTrue(localFileSys.mkdirs(dir));
 DFSTestUtil.writeFile(localFileSys, excludeFile, "");
-conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
+conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
 // Two blocks and four racks
 String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
@@ -441,7 +441,7 @@ public class TestBlocksWithNotEnoughRacks {
 Path excludeFile = new Path(dir, "exclude");
 assertTrue(localFileSys.mkdirs(dir));
 DFSTestUtil.writeFile(localFileSys, excludeFile, "");
-conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
+conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
 // All hosts are on two racks, only one host on /rack2
 String racks[] = {"/rack1", "/rack2", "/rack1", "/rack1", "/rack1"};

View File

@@ -65,9 +65,9 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 public class SimulatedFSDataset implements FSDatasetInterface, Configurable{
 public static final String CONFIG_PROPERTY_SIMULATED =
-    "dfs.datanode.simulateddatastorage";
+    DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_KEY;
 public static final String CONFIG_PROPERTY_CAPACITY =
-    "dfs.datanode.simulateddatastorage.capacity";
+    DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_KEY;
 public static final long DEFAULT_CAPACITY = 2L<<40; // 1 terabyte
 public static final byte DEFAULT_DATABYTE = 9; // 1 terabyte
@@ -135,7 +135,7 @@ public class SimulatedFSDataset implements FSDatasetInterface, Configurable{
 }
 }
-synchronized SimulatedInputStream getIStream() throws IOException {
+synchronized SimulatedInputStream getIStream() {
 if (!finalized) {
 // throw new IOException("Trying to read an unfinalized block");
 return new SimulatedInputStream(oStream.getLength(), DEFAULT_DATABYTE);
@@ -362,7 +362,7 @@ public class SimulatedFSDataset implements FSDatasetInterface, Configurable{
 private SimulatedStorage storage = null;
 private String storageId;
-public SimulatedFSDataset(Configuration conf) throws IOException {
+public SimulatedFSDataset(Configuration conf) {
 setConf(conf);
 }
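After this hunk, SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED is simply an alias for DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_KEY, so the tests that set either name are toggling the same property. A hedged sketch of enabling the simulated dataset (MiniDFSCluster wiring omitted):

// Illustrative only; the key constant is the one referenced by the hunk above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class SimulatedStorageSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Either SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED or the DFSConfigKeys
    // constant works; after this commit they are the same string.
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_KEY, true);
    System.out.println(conf.getBoolean(DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_KEY, false));
  }
}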

View File

@@ -73,7 +73,7 @@ public class TestDatanodeRestart {
 Configuration conf = new HdfsConfiguration();
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
 conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
 cluster.waitActive();
 try {
@@ -137,7 +137,7 @@ public class TestDatanodeRestart {
 Configuration conf = new HdfsConfiguration();
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
 conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
-conf.setBoolean("dfs.support.append", true);
+conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
 cluster.waitActive();
 try {

View File

@@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileContext;
@@ -99,9 +100,9 @@ public class TestCheckpoint extends TestCase {
 static void writeFile(FileSystem fileSys, Path name, int repl)
 throws IOException {
-FSDataOutputStream stm = fileSys.create(name, true,
-    fileSys.getConf().getInt("io.file.buffer.size", 4096),
-    (short)repl, (long)blockSize);
+FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+    (short) repl, blockSize);
 byte[] buffer = new byte[TestCheckpoint.fileSize];
 Random rand = new Random(TestCheckpoint.seed);
 rand.nextBytes(buffer);

View File

@@ -28,6 +28,7 @@ import java.util.List;
 import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -117,7 +118,8 @@ public class TestDecommissioningStatus {
 throws IOException {
 // create and write a file that contains three blocks of data
 FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-    .getInt("io.file.buffer.size", 4096), repl, (long) blockSize);
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl,
+    blockSize);
 byte[] buffer = new byte[fileSize];
 Random rand = new Random(seed);
 rand.nextBytes(buffer);
@@ -129,7 +131,8 @@ public class TestDecommissioningStatus {
 short repl) throws IOException {
 // create and write a file that contains three blocks of data
 FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-    .getInt("io.file.buffer.size", 4096), repl, (long) blockSize);
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl,
+    blockSize);
 byte[] buffer = new byte[fileSize];
 Random rand = new Random(seed);
 rand.nextBytes(buffer);

View File

@@ -23,6 +23,7 @@ import java.util.Random;
 import junit.framework.TestCase;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -44,9 +45,9 @@ public class TestFileLimit extends TestCase {
 // creates a zero file.
 private void createFile(FileSystem fileSys, Path name)
 throws IOException {
-FSDataOutputStream stm = fileSys.create(name, true,
-    fileSys.getConf().getInt("io.file.buffer.size", 4096),
-    (short)1, (long)blockSize);
+FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+    (short) 1, blockSize);
 byte[] buffer = new byte[1024];
 Random rand = new Random(seed);
 rand.nextBytes(buffer);

View File

@@ -29,6 +29,7 @@ import java.lang.InterruptedException;
 import java.util.Random;
 import static org.junit.Assert.assertTrue;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -50,7 +51,8 @@ public class TestMetaSave {
 private void createFile(FileSystem fileSys, Path name) throws IOException {
 FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-    .getInt("io.file.buffer.size", 4096), (short) 2, (long) blockSize);
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+    (short) 2, blockSize);
 byte[] buffer = new byte[1024];
 Random rand = new Random(seed);
 rand.nextBytes(buffer);

View File

@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -59,9 +60,9 @@ public class TestNameEditsConfigs extends TestCase {
 private void writeFile(FileSystem fileSys, Path name, int repl)
 throws IOException {
-FSDataOutputStream stm = fileSys.create(name, true,
-    fileSys.getConf().getInt("io.file.buffer.size", 4096),
-    (short)repl, (long)BLOCK_SIZE);
+FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+    (short) repl, BLOCK_SIZE);
 byte[] buffer = new byte[FILE_SIZE];
 Random rand = new Random(SEED);
 rand.nextBytes(buffer);
@@ -96,7 +97,7 @@ public class TestNameEditsConfigs extends TestCase {
 int replication = fileSys.getFileStatus(name).getReplication();
 assertEquals("replication for " + name, repl, replication);
 long size = fileSys.getContentSummary(name).getLength();
-assertEquals("file size for " + name, size, (long)FILE_SIZE);
+assertEquals("file size for " + name, size, FILE_SIZE);
 }
 private void cleanupFile(FileSystem fileSys, Path name)

View File

@@ -34,6 +34,7 @@ import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -76,9 +77,9 @@ public class TestStartup extends TestCase {
 private void writeFile(FileSystem fileSys, Path name, int repl)
 throws IOException {
-FSDataOutputStream stm = fileSys.create(name, true,
-    fileSys.getConf().getInt("io.file.buffer.size", 4096),
-    (short)repl, (long)blockSize);
+FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+    .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+    (short) repl, blockSize);
 byte[] buffer = new byte[fileSize];
 Random rand = new Random(seed);
 rand.nextBytes(buffer);
@@ -233,11 +234,13 @@ public class TestStartup extends TestCase {
 sd = it.next();
 if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
-File imf = img.getStorage().getStorageFile(sd, NameNodeFile.IMAGE, 0);
+img.getStorage();
+File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0);
 LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
 assertEquals(expectedImgSize, imf.length());
 } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
-File edf = img.getStorage().getStorageFile(sd, NameNodeFile.EDITS, 0);
+img.getStorage();
+File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0);
 LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length() + "; expected = " + expectedEditsSize);
 assertEquals(expectedEditsSize, edf.length());
 } else {
@@ -342,8 +345,10 @@ public class TestStartup extends TestCase {
 FSImage image = nn.getFSImage();
 StorageDirectory sd = image.getStorage().getStorageDir(0); //only one
 assertEquals(sd.getStorageDirType(), NameNodeDirType.IMAGE_AND_EDITS);
-File imf = image.getStorage().getStorageFile(sd, NameNodeFile.IMAGE, 0);
-File edf = image.getStorage().getStorageFile(sd, NameNodeFile.EDITS, 0);
+image.getStorage();
+File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0);
+image.getStorage();
+File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0);
 LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length());
 LOG.info("--edits file " + edf.getAbsolutePath() + "; len = " + edf.length());

View File

@@ -124,7 +124,7 @@ public class TestPermission extends TestCase {
 FsPermission filePerm = new FsPermission((short)0444);
 FSDataOutputStream out = fs.create(new Path("/b1/b2/b3.txt"), filePerm,
-    true, conf.getInt("io.file.buffer.size", 4096),
+    true, conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
 fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
 out.write(123);
 out.close();
@@ -223,7 +223,7 @@ public class TestPermission extends TestCase {
 userfs.mkdirs(RENAME_PATH);
 assertTrue(canRename(userfs, RENAME_PATH, CHILD_DIR1));
 } finally {
-if(cluster != null) cluster.shutdown();
+cluster.shutdown();
 }
 }

View File

@@ -25,6 +25,7 @@ import java.util.Random;
 import junit.framework.TestCase;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -51,8 +52,8 @@ public class TestJMXGet extends TestCase {
 private void writeFile(FileSystem fileSys, Path name, int repl)
 throws IOException {
 FSDataOutputStream stm = fileSys.create(name, true,
-    fileSys.getConf().getInt("io.file.buffer.size", 4096),
-    (short)repl, (long)blockSize);
+    fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+    (short)repl, blockSize);
 byte[] buffer = new byte[fileSize];
 Random rand = new Random(seed);
 rand.nextBytes(buffer);