diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ed3632ec85d..9aa8e8ba6c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -107,6 +107,9 @@ Release 0.23.3 - UNRELEASED
     HDFS-3060. Bump TestDistributedUpgrade#testDistributedUpgrade timeout (eli)
 
+    HDFS-2410. Further cleanup of hardcoded configuration keys and values.
+    (suresh)
+
   OPTIMIZATIONS
 
     HDFS-2477. Optimize computing the diff between a block report and the
     namenode state. (Tomasz Nykiel via hairong)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index fae19f18828..4c21724746e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -38,6 +38,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
@@ -46,6 +47,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOUR
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT;
 
 import java.io.BufferedOutputStream;
 import java.io.ByteArrayInputStream;
@@ -472,11 +476,11 @@ public class DataNode extends Configured
     if(LOG.isDebugEnabled()) {
       LOG.debug("Datanode listening on " + infoHost + ":" + tmpInfoPort);
     }
-    if (conf.getBoolean("dfs.https.enable", false)) {
+    if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
       boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
                                                DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
       InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
-          "dfs.datanode.https.address", infoHost + ":" + 0));
+          DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
       Configuration sslConf = new HdfsConfiguration(false);
       sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
           "ssl-server.xml"));
@@ -517,7 +521,7 @@ public class DataNode extends Configured
   private void initIpcServer(Configuration conf) throws IOException {
     InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
-        conf.get("dfs.datanode.ipc.address"));
+        conf.get(DFS_DATANODE_IPC_ADDRESS_KEY));
     // Add all the RPC protocols that the Datanode implements
     RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index 5cb267dbfdc..8d35469791d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT;
+
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
@@ -144,7 +148,7 @@ public class NameNodeHttpServer {
         }
       };
 
-      boolean certSSL = conf.getBoolean("dfs.https.enable", false);
+      boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
       boolean useKrb = UserGroupInformation.isSecurityEnabled();
       if (certSSL || useKrb) {
         boolean needClientAuth = conf.getBoolean(
@@ -155,14 +159,14 @@
           DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
       Configuration sslConf = new HdfsConfiguration(false);
       if (certSSL) {
-        sslConf.addResource(conf.get(
-            "dfs.https.server.keystore.resource", "ssl-server.xml"));
+        sslConf.addResource(conf.get(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+            DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
       }
       httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth,
           useKrb);
       // assume same ssl port for all datanodes
       InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf
-          .get("dfs.datanode.https.address", infoHost + ":" + 50475));
+          .get(DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475));
       httpServer.setAttribute("datanode.https.port", datanodeSslPort
           .getPort());
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
index b7e6277501e..fcad32e0b4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fi.DataTransferTestUtil.SleepAction;
 import org.apache.hadoop.fi.DataTransferTestUtil.VerificationAction;
 import org.apache.hadoop.fi.FiTestUtil;
 import org.apache.hadoop.fi.FiTestUtil.Action;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -56,8 +57,9 @@ public class TestFiDataTransferProtocol {
   static private FSDataOutputStream createFile(FileSystem fs, Path p
       ) throws IOException {
-    return fs.create(p, true, fs.getConf().getInt("io.file.buffer.size", 4096),
-        REPLICATION, BLOCKSIZE);
+    return fs.create(p, true,
+        fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,
+            4096), REPLICATION, BLOCKSIZE);
   }
 
   {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java
index dcfdcf9e26b..5832bf04914 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fi.DataTransferTestUtil.DataTransferTest;
 import org.apache.hadoop.fi.DataTransferTestUtil.SleepAction;
 import org.apache.hadoop.fi.DataTransferTestUtil.VerificationAction;
 import org.apache.hadoop.fi.FiTestUtil;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -65,8 +66,8 @@ public class TestFiDataTransferProtocol2 {
   static private FSDataOutputStream createFile(FileSystem fs, Path p
       ) throws IOException {
-    return fs.create(p, true, fs.getConf().getInt("io.file.buffer.size", 4096),
-        REPLICATION, BLOCKSIZE);
+    return fs.create(p, true, fs.getConf()
+        .getInt(IO_FILE_BUFFER_SIZE_KEY, 4096), REPLICATION, BLOCKSIZE);
   }
 
   {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
index fb562d14b27..593134350d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
@@ -163,7 +163,7 @@ public class TestStickyBit extends TestCase {
     try {
       Configuration conf = new HdfsConfiguration();
       conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-      conf.setBoolean("dfs.support.append", true);
+      conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
 
       FileSystem hdfs = cluster.getFileSystem();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
index 1c3ad442ea1..50a34a8a045 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
@@ -143,8 +143,8 @@ public class AppendTestUtil {
   public static FSDataOutputStream createFile(FileSystem fileSys, Path name,
       int repl) throws IOException {
     return fileSys.create(name, true,
-        fileSys.getConf().getInt("io.file.buffer.size", 4096),
-        (short) repl, (long) BLOCK_SIZE);
+        fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+        (short) repl, BLOCK_SIZE);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
index 3c338e56f53..9d4f4a2e197 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.net.NetUtils;
@@ -148,7 +149,7 @@ public class BlockReaderTestUtil {
       sock, targetAddr.toString()+ ":" + block.getBlockId(), block,
       testBlock.getBlockToken(), offset, lenToRead,
-      conf.getInt("io.file.buffer.size", 4096),
+      conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
       true, "");
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
index 7f2c1aecd67..d837c0f71af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.junit.AfterClass;
@@ -66,7 +67,7 @@ public class FileAppendTest4 {
   }
 
   @AfterClass
-  public static void tearDown() throws IOException {
+  public static void tearDown() {
     cluster.shutdown();
   }
@@ -91,7 +92,7 @@ public class FileAppendTest4 {
       new Path("foo"+ oldFileLen +"_"+ flushedBytes1 +"_"+ flushedBytes2);
     LOG.info("Creating file " + p);
     FSDataOutputStream out = fs.create(p, false,
-        conf.getInt("io.file.buffer.size", 4096),
+        conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
         REPLICATION, BLOCK_SIZE);
     out.write(contents, 0, oldFileLen);
     out.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 934770b177c..e710ec0ef81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -39,9 +39,9 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -316,8 +316,8 @@ public class MiniDFSCluster {
    * Servers will be started on free ports.
    *
    * The caller must manage the creation of NameNode and DataNode directories
-   * and have already set {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
-   * {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} in the given conf.
+   * and have already set {@link #DFS_NAMENODE_NAME_DIR_KEY} and
+   * {@link #DFS_DATANODE_DATA_DIR_KEY} in the given conf.
    *
    * @param conf the base configuration to use in starting the servers. This
    * will be modified as necessary.
@@ -391,8 +391,8 @@ public class MiniDFSCluster {
    * @param format if true, format the NameNode and DataNodes before starting
    * up
    * @param manageDfsDirs if true, the data directories for servers will be
-   * created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
-   * {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
+   * created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and
+   * {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in
   * the conf
    * @param operation the operation with which to start the servers. If null
    * or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -423,8 +423,8 @@ public class MiniDFSCluster {
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param manageDfsDirs if true, the data directories for servers will be
-   * created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
-   * {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
+   * created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and
+   * {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in
   * the conf
    * @param operation the operation with which to start the servers. If null
    * or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -457,11 +457,11 @@ public class MiniDFSCluster {
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param manageNameDfsDirs if true, the data directories for servers will be
-   * created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
-   * {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
+   * created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and
+   * {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in
   * the conf
    * @param manageDataDfsDirs if true, the data directories for datanodes will
-   * be created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY}
+   * be created and {@link #DFS_DATANODE_DATA_DIR_KEY}
   * set to same in the conf
    * @param operation the operation with which to start the servers. If null
    * or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -498,11 +498,11 @@
     this.federation = federation;
     this.waitSafeMode = waitSafeMode;
-    int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
-    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
-    conf.setClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+    int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
+    conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
+    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
+    conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
+    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
                   StaticMapping.class, DNSToSwitchMapping.class);
     Collection nameserviceIds = DFSUtil.getNameServiceIds(conf);
@@ -510,8 +510,8 @@
       federation = true;
 
     if (!federation) {
-      conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "127.0.0.1:" + nameNodePort);
-      conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:"
+      conf.set(FS_DEFAULT_NAME_KEY, "127.0.0.1:" + nameNodePort);
+      conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:"
           + nameNodeHttpPort);
       NameNode nn = createNameNode(0, conf, numDataNodes, manageNameDfsDirs,
           format, operation, clusterId);
@@ -555,7 +555,7 @@
       initFederatedNamenodeAddress(conf, nameserviceId, nnPort);
       nnPort = nnPort == 0 ? 0 : nnPort + 2;
     }
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceIdList);
+    conf.set(DFS_FEDERATION_NAMESERVICES, nameserviceIdList);
   }
 
   /* For federated namenode initialize the address:port */
@@ -563,11 +563,11 @@
       String nameserviceId, int nnPort) {
     // Set nameserviceId specific key
     String key = DFSUtil.getNameServiceIdKey(
-        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId);
+        DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId);
     conf.set(key, "127.0.0.1:0");
 
     key = DFSUtil.getNameServiceIdKey(
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId);
+        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId);
     conf.set(key, "127.0.0.1:" + nnPort);
   }
@@ -588,10 +588,10 @@
       StartupOption operation, String clusterId) throws IOException {
     if (manageNameDfsDirs) {
-      conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+      conf.set(DFS_NAMENODE_NAME_DIR_KEY,
           fileAsURI(new File(base_dir, "name" + (2*nnIndex + 1)))+","+
           fileAsURI(new File(base_dir, "name" + (2*nnIndex + 2))));
-      conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+      conf.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,
          fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 1)))+","+
          fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 2))));
     }
@@ -616,17 +616,17 @@
       int numDataNodes, boolean manageNameDfsDirs, boolean format,
       StartupOption operation, String clusterId, String nameserviceId)
       throws IOException {
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
+    conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
     NameNode nn = createNameNode(nnIndex, conf, numDataNodes, manageNameDfsDirs,
         format, operation, clusterId);
     conf.set(DFSUtil.getNameServiceIdKey(
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId), NameNode
+        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId), NameNode
        .getHostPortString(nn.getNameNodeAddress()));
     conf.set(DFSUtil.getNameServiceIdKey(
-        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NameNode
+        DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NameNode
        .getHostPortString(nn.getHttpAddress()));
     DFSUtil.setGenericConf(conf, nameserviceId,
-        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+        DFS_NAMENODE_HTTP_ADDRESS_KEY);
     nameNodes[nnIndex] = new NameNodeInfo(nn, new Configuration(conf));
   }
@@ -702,7 +702,7 @@
    * will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   * created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set
+   * created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be set
   * in the conf
    * @param operation the operation with which to start the DataNodes. If null
    * or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -734,7 +734,7 @@
    * will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   * created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
+   * created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be
   * set in the conf
    * @param operation the operation with which to start the DataNodes. If null
    * or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -768,7 +768,7 @@
    * will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   * created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
+   * created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be
   * set in the conf
    * @param operation the operation with which to start the DataNodes. If null
    * or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -786,12 +786,12 @@
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig) throws IOException {
-    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
+    conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
     int curDatanodesNum = dataNodes.size();
     // for mincluster's the default initialDelay for BRs is 0
-    if (conf.get(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
-      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
+    if (conf.get(DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
+      conf.setLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
     }
     // If minicluster's name node is null assume that the conf has been
     // set with the right address:port of the name node.
@@ -838,8 +838,8 @@
               + i + ": " + dir1 + " or " + dir2);
         }
         String dirs = fileAsURI(dir1) + "," + fileAsURI(dir2);
-        dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
-        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
+        dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
+        conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
       }
       if (simulatedCapacities != null) {
         SimulatedFSDataset.setFactory(dnConf);
@@ -868,7 +868,7 @@
       DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf);
       if(dn == null)
         throw new IOException("Cannot start DataNode in "
-                              + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
+                              + dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
       //NOTE: the following is true if and only if:
       //      hadoop.security.token.service.use_ip=true
       //since the HDFS does things based on IP:port, we need to add the mapping
@@ -1286,7 +1286,7 @@
     Configuration newconf = new HdfsConfiguration(conf); // save cloned config
     if (keepPort) {
       InetSocketAddress addr = dnprop.datanode.getSelfAddr();
-      conf.set("dfs.datanode.address", addr.getAddress().getHostAddress() + ":"
+      conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":"
          + addr.getPort());
     }
     dataNodes.add(new DataNodeProperties(DataNode.createDataNode(args, conf),
@@ -1413,10 +1413,10 @@
   /**
    * @return a http URL
    */
-  public String getHttpUri(int nnIndex) throws IOException {
+  public String getHttpUri(int nnIndex) {
     return "http://" + nameNodes[nnIndex].conf
-        .get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+        .get(DFS_NAMENODE_HTTP_ADDRESS_KEY);
   }
 
   /**
@@ -1425,7 +1425,7 @@
   public HftpFileSystem getHftpFileSystem(int nnIndex) throws IOException {
     String uri = "hftp://" + nameNodes[nnIndex].conf
-        .get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+        .get(DFS_NAMENODE_HTTP_ADDRESS_KEY);
     try {
       return (HftpFileSystem)FileSystem.get(new URI(uri), conf);
     } catch (URISyntaxException e) {
@@ -1877,9 +1877,9 @@
     nameNodes = newlist;
     String nameserviceId = NAMESERVICE_ID_PREFIX + (nnIndex + 1);
-    String nameserviceIds = conf.get(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES);
+    String nameserviceIds = conf.get(DFS_FEDERATION_NAMESERVICES);
     nameserviceIds += "," + nameserviceId;
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceIds);
+    conf.set(DFS_FEDERATION_NAMESERVICES, nameserviceIds);
     initFederatedNamenodeAddress(conf, nameserviceId, namenodePort);
     createFederatedNameNode(nnIndex, conf, numDataNodes, true, true, null,
@@ -1912,28 +1912,28 @@
   private void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
                            boolean checkDataNodeAddrConfig) throws IOException {
     if (setupHostsFile) {
-      String hostsFile = conf.get(DFSConfigKeys.DFS_HOSTS, "").trim();
+      String hostsFile = conf.get(DFS_HOSTS, "").trim();
       if (hostsFile.length() == 0) {
         throw new IOException("Parameter dfs.hosts is not setup in conf");
       }
       // Setup datanode in the include file, if it is defined in the conf
       String address = "127.0.0.1:" + getFreeSocketPort();
       if (checkDataNodeAddrConfig) {
-        conf.setIfUnset("dfs.datanode.address", address);
+        conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, address);
       } else {
-        conf.set("dfs.datanode.address", address);
+        conf.set(DFS_DATANODE_ADDRESS_KEY, address);
       }
       addToFile(hostsFile, address);
       LOG.info("Adding datanode " + address + " to hosts file " + hostsFile);
     } else {
       if (checkDataNodeAddrConfig) {
-        conf.setIfUnset("dfs.datanode.address", "127.0.0.1:0");
-        conf.setIfUnset("dfs.datanode.http.address", "127.0.0.1:0");
-        conf.setIfUnset("dfs.datanode.ipc.address", "127.0.0.1:0");
+        conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
+        conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+        conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
       } else {
-        conf.set("dfs.datanode.address", "127.0.0.1:0");
-        conf.set("dfs.datanode.http.address", "127.0.0.1:0");
-        conf.set("dfs.datanode.ipc.address", "127.0.0.1:0");
+        conf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
+        conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+        conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
      }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
index 64e8588790f..be6e741a0a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
@@ -24,6 +24,7 @@ import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -80,9 +81,9 @@ public class TestBlockMissingException extends TestCase {
   //
   private void createOldFile(FileSystem fileSys, Path name, int repl,
       int numBlocks, long blocksize) throws IOException {
-    FSDataOutputStream stm = fileSys.create(name, true,
-        fileSys.getConf().getInt("io.file.buffer.size", 4096),
-        (short)repl, blocksize);
+    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+        (short) repl, blocksize);
     // fill data into file
     final byte[] b = new byte[(int)blocksize];
     for (int i = 0; i < numBlocks; i++) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
index dadda445c13..259d26a3049 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
@@ -40,7 +40,7 @@ public class TestClientProtocolForPipelineRecovery {
   @Test public void testGetNewStamp() throws IOException {
     int numDataNodes = 1;
     Configuration conf = new HdfsConfiguration();
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
     try {
       cluster.waitActive();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
index 55e744e1a0c..dea2ba0ba3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
@@ -29,6 +29,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -80,7 +81,7 @@ public class TestClientReportBadBlock {
         .build();
     cluster.waitActive();
     dfs = (DistributedFileSystem) cluster.getFileSystem();
-    buffersize = conf.getInt("io.file.buffer.size", 4096);
+    buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
   }
 
   @After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
index bf0326766fa..0f0caa673b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
@@ -28,6 +28,9 @@ import java.io.IOException;
 import java.util.ArrayList;
 import junit.framework.TestCase;
 import org.apache.hadoop.conf.Configuration;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
@@ -62,9 +65,9 @@ public class TestDFSAddressConfig extends TestCase {
       assertNotNull("Should have been able to stop simulated datanode", dnp);
     }
 
-    conf.unset("dfs.datanode.address");
-    conf.unset("dfs.datanode.http.address");
-    conf.unset("dfs.datanode.ipc.address");
+    conf.unset(DFS_DATANODE_ADDRESS_KEY);
+    conf.unset(DFS_DATANODE_HTTP_ADDRESS_KEY);
+    conf.unset(DFS_DATANODE_IPC_ADDRESS_KEY);
 
     cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
                            null, null, null, false, true);
@@ -87,9 +90,9 @@ public class TestDFSAddressConfig extends TestCase {
       assertNotNull("Should have been able to stop simulated datanode", dnp);
     }
 
-    conf.set("dfs.datanode.address","0.0.0.0:0");
-    conf.set("dfs.datanode.http.address","0.0.0.0:0");
-    conf.set("dfs.datanode.ipc.address","0.0.0.0:0");
+    conf.set(DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
+    conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
 
     cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
                            null, null, null, false, true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 257ddd376c7..937b28cd900 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -43,6 +43,7 @@ import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileSystem;
@@ -142,7 +143,7 @@ public class TestDFSClientRetries extends TestCase {
     conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 1);
     // set a small buffer size
     final int bufferSize = 4096;
-    conf.setInt("io.file.buffer.size", bufferSize);
+    conf.setInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, bufferSize);
 
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index 495e8e191a3..38a837247a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -22,14 +22,13 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
 
-import javax.security.auth.login.LoginException;
-
 import junit.framework.AssertionFailedError;
 import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -202,7 +201,7 @@ public class TestDFSPermission extends TestCase {
     switch (op) {
     case CREATE:
       FSDataOutputStream out = fs.create(name, permission, true,
-          conf.getInt("io.file.buffer.size", 4096),
+          conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
           fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
       out.close();
       break;
@@ -520,8 +519,8 @@ public class TestDFSPermission extends TestCase {
     }
 
     /* Perform an operation and verify if the permission checking is correct */
-    void verifyPermission(UserGroupInformation ugi) throws LoginException,
-        IOException {
+    void verifyPermission(UserGroupInformation ugi) throws IOException {
       if (this.ugi != ugi) {
         setRequiredPermissions(ugi);
         this.ugi = ugi;
@@ -564,8 +562,8 @@ public class TestDFSPermission extends TestCase {
     }
 
    /* Set the permissions required to pass the permission checking */
-    protected void setRequiredPermissions(UserGroupInformation ugi)
-        throws IOException {
+    protected void setRequiredPermissions(UserGroupInformation ugi) {
      if (SUPERUSER.equals(ugi)) {
        requiredAncestorPermission = SUPER_MASK;
        requiredParentPermission = SUPER_MASK;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
index eef008fa20d..089ab4d837e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
@@ -208,7 +208,7 @@ public class TestDataTransferProtocol extends TestCase {
   @Test public void testOpWrite() throws IOException {
     int numDataNodes = 1;
     Configuration conf = new HdfsConfiguration();
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
     try {
       cluster.waitActive();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
index b061f267831..e271bb95a86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -123,9 +124,9 @@ public class TestDatanodeDeath extends TestCase {
   static private FSDataOutputStream createFile(FileSystem fileSys, Path name, short repl)
     throws IOException {
     // create and write a file that contains three blocks of data
-    FSDataOutputStream stm = fileSys.create(name, true,
-        fileSys.getConf().getInt("io.file.buffer.size", 4096),
-        repl, (long)blockSize);
+    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl,
+        blockSize);
     return stm;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 3069727a48c..faf7efd5364 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -30,6 +30,7 @@ import java.util.Random;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -115,9 +116,9 @@ public class TestDecommission {
   private void writeFile(FileSystem fileSys, Path name, int repl)
     throws IOException {
     // create and write a file that contains three blocks of data
-    FSDataOutputStream stm = fileSys.create(name, true,
-        fileSys.getConf().getInt("io.file.buffer.size", 4096),
-        (short)repl, (long)blockSize);
+    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+        (short) repl, blockSize);
     byte[] buffer = new byte[fileSize];
     Random rand = new Random(seed);
     rand.nextBytes(buffer);
@@ -246,7 +247,7 @@
    * Wait till node is fully decommissioned.
    */
   private void waitNodeState(DatanodeInfo node,
-                             AdminStates state) throws IOException {
+                             AdminStates state) {
     boolean done = state == node.getAdminState();
     while (!done) {
       LOG.info("Waiting for node " + node + " to change state to "
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index aaa085f1e7f..4055cd8d3d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -31,6 +31,7 @@ import java.util.Random;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
@@ -88,17 +89,17 @@ public class TestDistributedFileSystem {
   @Test
   public void testDFSClose() throws Exception {
     Configuration conf = getTestConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-    FileSystem fileSys = cluster.getFileSystem();
-
+    MiniDFSCluster cluster = null;
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+      FileSystem fileSys = cluster.getFileSystem();
+
       // create two files
       fileSys.create(new Path("/test/dfsclose/file-0"));
       fileSys.create(new Path("/test/dfsclose/file-1"));
 
       fileSys.close();
-    }
-    finally {
+    } finally {
       if (cluster != null) {cluster.shutdown();}
     }
   }
@@ -106,10 +107,10 @@ public class TestDistributedFileSystem {
   @Test
   public void testDFSSeekExceptions() throws IOException {
     Configuration conf = getTestConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-    FileSystem fileSys = cluster.getFileSystem();
-
+    MiniDFSCluster cluster = null;
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+      FileSystem fileSys = cluster.getFileSystem();
       String file = "/test/fileclosethenseek/file-0";
       Path path = new Path(file);
       // create file
@@ -455,7 +456,7 @@ public class TestDistributedFileSystem {
     final Path dir = new Path("/filechecksum");
     final int block_size = 1024;
-    final int buffer_size = conf.getInt("io.file.buffer.size", 4096);
+    final int buffer_size = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
 
     //try different number of blocks
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
index 1ba982ee090..fea024c2c75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
@@ -27,6 +27,7 @@ import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -54,7 +55,7 @@ public class TestFSInputChecker extends TestCase {
   private void writeFile(FileSystem fileSys, Path name) throws IOException {
     // create and write a file that contains three blocks of data
     FSDataOutputStream stm = fileSys.create(name, new FsPermission((short)0777),
-        true, fileSys.getConf().getInt("io.file.buffer.size", 4096),
+        true, fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
         NUM_OF_DATANODES, BLOCK_SIZE, null);
     stm.write(expected);
     stm.close();
@@ -325,8 +326,8 @@ public class TestFSInputChecker extends TestCase {
       throws IOException {
     Path file = new Path("try.dat");
     writeFile(fileSys, file);
-    stm = fileSys.open(file,
-        fileSys.getConf().getInt("io.file.buffer.size", 4096));
+    stm = fileSys.open(
+        file,
+        fileSys.getConf().getInt(
+            CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096));
     checkSeekAndRead();
     stm.close();
     cleanupFile(fileSys, file);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
index 8a18420aadf..da18bbe0cc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
@@ -21,6 +21,8 @@ import junit.framework.TestCase;
 import java.io.*;
 import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY;
+
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -43,7 +45,7 @@ public class TestFSOutputSummer extends TestCase {
   /* create a file, write all data at once */
   private void writeFile1(Path name) throws Exception {
     FSDataOutputStream stm = fileSys.create(name, true,
-        fileSys.getConf().getInt("io.file.buffer.size", 4096),
+        fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096),
         NUM_OF_DATANODES, BLOCK_SIZE);
     stm.write(expected);
     stm.close();
@@ -54,7 +56,7 @@ public class TestFSOutputSummer extends TestCase {
   /* create a file, write data chunk by chunk */
   private void writeFile2(Path name) throws Exception {
     FSDataOutputStream stm = fileSys.create(name, true,
-        fileSys.getConf().getInt("io.file.buffer.size", 4096),
+        fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096),
         NUM_OF_DATANODES, BLOCK_SIZE);
     int i=0;
     for( ;i