// vector size should be -kn / (ln(1 - c^(1/k))) bits for
// single key, where k is the number of hash functions,
// n is the number of keys and c is the desired
// max. error rate.
// Our desired error rate is by default 0.005, i.e. 0.5%
- float errorRate = conf.getFloat("io.mapfile.bloom.error.rate", 0.005f);
+ float errorRate = conf.getFloat(
+ IO_MAPFILE_BLOOM_ERROR_RATE_KEY, IO_MAPFILE_BLOOM_ERROR_RATE_DEFAULT);
vectorSize = (int)Math.ceil((double)(-HASH_COUNT * numKeys) /
Math.log(1.0 - Math.pow(errorRate, 1.0/HASH_COUNT)));
bloomFilter = new DynamicBloomFilter(vectorSize, HASH_COUNT,
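
The arithmetic above can be checked outside Hadoop. A minimal sketch; HASH_COUNT = 5 and numKeys = 1024 * 1024 are assumptions mirroring BloomMapFile's defaults, not values visible in this hunk:

```java
public class BloomVectorSizeSketch {
  public static void main(String[] args) {
    final int hashCount = 5;          // k: number of hash functions (assumed)
    final int numKeys = 1024 * 1024;  // n: expected number of keys (assumed)
    final float errorRate = 0.005f;   // c: desired max. error rate, 0.5%
    // m = -kn / ln(1 - c^(1/k)), rounded up to whole bits
    int vectorSize = (int) Math.ceil((double) (-hashCount * numKeys) /
        Math.log(1.0 - Math.pow(errorRate, 1.0 / hashCount)));
    System.out.println(vectorSize);   // ~12.3 million bits, about 1.5 MB
  }
}
```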
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index 70e407b2bf8..28073077761 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -37,6 +37,9 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ChunkedArrayList;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
/**
 * A utility class for I/O related functionality.
*/
@@ -103,7 +106,8 @@ public class IOUtils {
*/
public static void copyBytes(InputStream in, OutputStream out, Configuration conf)
throws IOException {
- copyBytes(in, out, conf.getInt("io.file.buffer.size", 4096), true);
+ copyBytes(in, out, conf.getInt(
+ IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT), true);
}
/**
@@ -117,7 +121,8 @@ public class IOUtils {
*/
public static void copyBytes(InputStream in, OutputStream out, Configuration conf, boolean close)
throws IOException {
- copyBytes(in, out, conf.getInt("io.file.buffer.size", 4096), close);
+ copyBytes(in, out, conf.getInt(
+ IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT), close);
}
/**
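
Both copyBytes overloads now read the buffer size through the public constant. A minimal caller-side sketch; the file names are placeholders, and close = false because try-with-resources owns the streams:

```java
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;

public class CopyWithConfiguredBuffer {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(IO_FILE_BUFFER_SIZE_KEY, 64 * 1024); // raise the 4096 default
    try (InputStream in = Files.newInputStream(Paths.get("in.dat"));
         OutputStream out = Files.newOutputStream(Paths.get("out.dat"))) {
      IOUtils.copyBytes(in, out, conf, false); // copies with a 64 KB buffer
    }
  }
}
```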
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
index ee76458b5d0..5ba506a178b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
@@ -38,6 +38,9 @@ import org.apache.hadoop.util.Options;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_KEY;
+
/** A file-based map from keys to values.
*
 * A map is a directory containing two files, the data file,
@@ -395,7 +398,8 @@ public class MapFile {
Options.getOption(ComparatorOption.class, opts);
WritableComparator comparator =
comparatorOption == null ? null : comparatorOption.getValue();
- INDEX_SKIP = conf.getInt("io.map.index.skip", 0);
+ INDEX_SKIP = conf.getInt(
+ IO_MAP_INDEX_SKIP_KEY, IO_MAP_INDEX_SKIP_DEFAULT);
open(dir, comparator, conf, opts);
}
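
io.map.index.skip makes the reader keep only every (skip + 1)-th index entry in memory, which helps when opening very large MapFiles. A minimal sketch; the path and key/value types are placeholders:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.Text;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_KEY;

public class MapFileIndexSkipSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(IO_MAP_INDEX_SKIP_KEY, 8); // load every 9th index entry
    MapFile.Reader reader =
        new MapFile.Reader(new Path("/data/example.map"), conf);
    try {
      Text value = new Text();
      reader.get(new IntWritable(42), value); // seeks via the sparser index
    } finally {
      reader.close();
    }
  }
}
```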
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
index 5d556d091e3..3a7e4d66581 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
@@ -51,6 +51,13 @@ import org.apache.hadoop.util.MergeSort;
import org.apache.hadoop.util.PriorityQueue;
import org.apache.hadoop.util.Time;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SEQFILE_COMPRESS_BLOCKSIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SEQFILE_COMPRESS_BLOCKSIZE_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSUM_ERRORS_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSUM_ERRORS_KEY;
+
/**
 * SequenceFiles are flat files consisting of binary key/value
* pairs.
@@ -1513,7 +1520,9 @@ public class SequenceFile {
Option... options) throws IOException {
super(conf, options);
compressionBlockSize =
- conf.getInt("io.seqfile.compress.blocksize", 1000000);
+        conf.getInt(IO_SEQFILE_COMPRESS_BLOCKSIZE_KEY,
+            IO_SEQFILE_COMPRESS_BLOCKSIZE_DEFAULT);
keySerializer.close();
keySerializer.open(keyBuffer);
uncompressedValSerializer.close();
@@ -1637,7 +1646,7 @@ public class SequenceFile {
/** Get the configured buffer size */
private static int getBufferSize(Configuration conf) {
- return conf.getInt("io.file.buffer.size", 4096);
+ return conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
}
/** Reads key/value pairs from a sequence-format file. */
@@ -2655,7 +2664,8 @@ public class SequenceFile {
private void handleChecksumException(ChecksumException e)
throws IOException {
- if (this.conf.getBoolean("io.skip.checksum.errors", false)) {
+ if (this.conf.getBoolean(
+ IO_SKIP_CHECKSUM_ERRORS_KEY, IO_SKIP_CHECKSUM_ERRORS_DEFAULT)) {
LOG.warn("Bad checksum at "+getPosition()+". Skipping entries.");
sync(getPosition()+this.conf.getInt("io.bytes.per.checksum", 512));
} else {
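
The branch above hangs on one boolean: with io.skip.checksum.errors set, the reader logs and syncs past a corrupt range instead of rethrowing. A minimal opt-in sketch; only suitable where losing the entries in the bad range is acceptable:

```java
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSUM_ERRORS_KEY;

public class SkipChecksumErrorsSketch {
  public static Configuration lenientConf() {
    Configuration conf = new Configuration();
    // SequenceFile readers built from this conf skip corrupt blocks with a
    // warning rather than throwing ChecksumException.
    conf.setBoolean(IO_SKIP_CHECKSUM_ERRORS_KEY, true);
    return conf;
  }
}
```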
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
index bf78e0c953d..08b4d4d5df9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
@@ -35,6 +35,9 @@ import org.apache.hadoop.io.compress.bzip2.CBZip2InputStream;
import org.apache.hadoop.io.compress.bzip2.CBZip2OutputStream;
import org.apache.hadoop.io.compress.bzip2.Bzip2Factory;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
/**
* This class provides output and input streams for bzip2 compression
* and decompression. It uses the native bzip2 library on the system
@@ -120,7 +123,8 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
Compressor compressor) throws IOException {
return Bzip2Factory.isNativeBzip2Loaded(conf) ?
new CompressorStream(out, compressor,
- conf.getInt("io.file.buffer.size", 4*1024)) :
+ conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+ IO_FILE_BUFFER_SIZE_DEFAULT)) :
new BZip2CompressionOutputStream(out);
}
@@ -174,7 +178,8 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
Decompressor decompressor) throws IOException {
return Bzip2Factory.isNativeBzip2Loaded(conf) ?
new DecompressorStream(in, decompressor,
- conf.getInt("io.file.buffer.size", 4*1024)) :
+ conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+ IO_FILE_BUFFER_SIZE_DEFAULT)) :
new BZip2CompressionInputStream(in);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
index 0e6f02cc9f4..31196cc7288 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
@@ -31,6 +31,9 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class DefaultCodec implements Configurable, CompressionCodec, DirectDecompressionCodec {
@@ -60,7 +63,8 @@ public class DefaultCodec implements Configurable, CompressionCodec, DirectDecom
Compressor compressor)
throws IOException {
return new CompressorStream(out, compressor,
- conf.getInt("io.file.buffer.size", 4*1024));
+ conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+ IO_FILE_BUFFER_SIZE_DEFAULT));
}
@Override
@@ -85,7 +89,8 @@ public class DefaultCodec implements Configurable, CompressionCodec, DirectDecom
Decompressor decompressor)
throws IOException {
return new DecompressorStream(in, decompressor,
- conf.getInt("io.file.buffer.size", 4*1024));
+ conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+ IO_FILE_BUFFER_SIZE_DEFAULT));
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
index da0c5ad8478..ce24793ff21 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
@@ -27,6 +27,8 @@ import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.io.compress.zlib.*;
import org.apache.hadoop.io.compress.zlib.ZlibDecompressor.ZlibDirectDecompressor;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
/**
@@ -117,8 +119,8 @@ public class GzipCodec extends DefaultCodec {
throws IOException {
return (compressor != null) ?
new CompressorStream(out, compressor,
- conf.getInt("io.file.buffer.size",
- 4*1024)) :
+ conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+ IO_FILE_BUFFER_SIZE_DEFAULT)) :
createOutputStream(out);
}
@@ -151,7 +153,8 @@ public class GzipCodec extends DefaultCodec {
decompressor = createDecompressor(); // always succeeds (or throws)
}
return new DecompressorStream(in, decompressor,
- conf.getInt("io.file.buffer.size", 4*1024));
+ conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+ IO_FILE_BUFFER_SIZE_DEFAULT));
}
@Override
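
The BZip2, Default, and Gzip codec hunks all consult the same key when sizing their compressor/decompressor stream buffers. A minimal caller-side sketch using DefaultCodec; the output path is a placeholder:

```java
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.DefaultCodec;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;

public class CodecStreamSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(IO_FILE_BUFFER_SIZE_KEY, 64 * 1024); // feeds CompressorStream
    DefaultCodec codec = new DefaultCodec();
    codec.setConf(conf); // the codec reads the buffer-size key from here
    try (OutputStream raw = Files.newOutputStream(Paths.get("out.deflate"));
         CompressionOutputStream out = codec.createOutputStream(raw)) {
      out.write("hello".getBytes(StandardCharsets.UTF_8));
      out.finish(); // flush any remaining compressed data
    }
  }
}
```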
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
index da3fe341530..f7ec7ac8353 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
@@ -36,6 +36,10 @@ import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.ReflectionUtils;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
/**
* Compression related stuff.
*/
@@ -124,7 +128,8 @@ final class Compression {
} else {
bis1 = downStream;
}
- conf.setInt("io.compression.codec.lzo.buffersize", 64 * 1024);
+ conf.setInt(IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY,
+ IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT);
CompressionInputStream cis =
codec.createInputStream(bis1, decompressor);
BufferedInputStream bis2 = new BufferedInputStream(cis, DATA_IBUF_SIZE);
@@ -146,7 +151,8 @@ final class Compression {
} else {
bos1 = downStream;
}
- conf.setInt("io.compression.codec.lzo.buffersize", 64 * 1024);
+ conf.setInt(IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY,
+ IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT);
CompressionOutputStream cos =
codec.createOutputStream(bos1, compressor);
BufferedOutputStream bos2 =
@@ -175,7 +181,7 @@ final class Compression {
int downStreamBufferSize) throws IOException {
// Set the internal buffer size to read from down stream.
if (downStreamBufferSize > 0) {
- codec.getConf().setInt("io.file.buffer.size", downStreamBufferSize);
+ codec.getConf().setInt(IO_FILE_BUFFER_SIZE_KEY, downStreamBufferSize);
}
CompressionInputStream cis =
codec.createInputStream(downStream, decompressor);
@@ -193,7 +199,7 @@ final class Compression {
} else {
bos1 = downStream;
}
- codec.getConf().setInt("io.file.buffer.size", 32 * 1024);
+ codec.getConf().setInt(IO_FILE_BUFFER_SIZE_KEY, 32 * 1024);
CompressionOutputStream cos =
codec.createOutputStream(bos1, compressor);
BufferedOutputStream bos2 =
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java
index 6b84f9d2cf3..ba9e815f218 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SOCKS_SERVER_KEY;
+
/**
* Specialized SocketFactory to create sockets with a SOCKS proxy
*/
@@ -133,7 +135,7 @@ public class SocksSocketFactory extends SocketFactory implements
@Override
public void setConf(Configuration conf) {
this.conf = conf;
- String proxyStr = conf.get("hadoop.socks.server");
+ String proxyStr = conf.get(HADOOP_SOCKS_SERVER_KEY);
if ((proxyStr != null) && (proxyStr.length() > 0)) {
setProxy(proxyStr);
}
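
The value parsed above is a host:port pair. A minimal sketch; the proxy address is a placeholder:

```java
import javax.net.SocketFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.SocksSocketFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SOCKS_SERVER_KEY;

public class SocksFactorySketch {
  public static SocketFactory socksFactory() {
    Configuration conf = new Configuration();
    conf.set(HADOOP_SOCKS_SERVER_KEY, "socks.example.com:1080"); // placeholder
    SocksSocketFactory factory = new SocksSocketFactory();
    factory.setConf(conf); // parses host:port and installs the SOCKS proxy
    return factory;
  }
}
```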
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
index e20a7c1084e..a1cf7099ada 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
@@ -27,6 +27,8 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
/**
* A class that provides a line reader from an input stream.
* Depending on the constructor used, lines will either be terminated by:
@@ -89,7 +91,7 @@ public class LineReader implements Closeable {
* @throws IOException
*/
public LineReader(InputStream in, Configuration conf) throws IOException {
- this(in, conf.getInt("io.file.buffer.size", DEFAULT_BUFFER_SIZE));
+ this(in, conf.getInt(IO_FILE_BUFFER_SIZE_KEY, DEFAULT_BUFFER_SIZE));
}
/**
@@ -136,7 +138,7 @@ public class LineReader implements Closeable {
public LineReader(InputStream in, Configuration conf,
byte[] recordDelimiterBytes) throws IOException {
this.in = in;
- this.bufferSize = conf.getInt("io.file.buffer.size", DEFAULT_BUFFER_SIZE);
+ this.bufferSize = conf.getInt(IO_FILE_BUFFER_SIZE_KEY, DEFAULT_BUFFER_SIZE);
this.buffer = new byte[this.bufferSize];
this.recordDelimiterBytes = recordDelimiterBytes;
}
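
Both constructors size the reader's internal byte buffer from the same key. A minimal usage sketch over an in-memory stream:

```java
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;

public class LineReaderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(IO_FILE_BUFFER_SIZE_KEY, 8 * 1024); // 8 KB read buffer
    byte[] data = "first line\nsecond line\n".getBytes(StandardCharsets.UTF_8);
    LineReader reader = new LineReader(new ByteArrayInputStream(data), conf);
    Text line = new Text();
    reader.readLine(line); // line now holds "first line"
    reader.close();
  }
}
```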
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/Hash.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/Hash.java
index 9f0ea163136..50f6091d41a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/Hash.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/Hash.java
@@ -22,6 +22,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_UTIL_HASH_TYPE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_UTIL_HASH_TYPE_KEY;
+
/**
* This class represents a common API for hashing functions.
*/
@@ -59,7 +62,8 @@ public abstract class Hash {
* @return one of the predefined constants
*/
public static int getHashType(Configuration conf) {
- String name = conf.get("hadoop.util.hash.type", "murmur");
+ String name = conf.get(HADOOP_UTIL_HASH_TYPE_KEY,
+ HADOOP_UTIL_HASH_TYPE_DEFAULT);
return parseHashType(name);
}
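
parseHashType maps the configured name to one of the predefined constants; "murmur" is the default and "jenkins" is the other recognized value. A minimal sketch:

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.hash.Hash;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_UTIL_HASH_TYPE_KEY;

public class HashTypeSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set(HADOOP_UTIL_HASH_TYPE_KEY, "jenkins"); // or "murmur" (default)
    int type = Hash.getHashType(conf);              // Hash.JENKINS_HASH here
    Hash hash = Hash.getInstance(type);
    System.out.println(hash.hash("key".getBytes(StandardCharsets.UTF_8)));
  }
}
```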
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index b8ca6b28f94..0a1ca496e08 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -47,6 +47,9 @@ import java.util.Properties;
import java.util.Set;
import java.util.UUID;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
/**
* Utilities used across test cases.
*/
@@ -55,8 +58,6 @@ public class ContractTestUtils extends Assert {
private static final Logger LOG =
LoggerFactory.getLogger(ContractTestUtils.class);
- public static final String IO_FILE_BUFFER_SIZE = "io.file.buffer.size";
-
// For scale testing, we can repeatedly write small chunk data to generate
// a large file.
public static final String IO_CHUNK_BUFFER_SIZE = "io.chunk.buffer.size";
@@ -150,8 +151,8 @@ public class ContractTestUtils extends Assert {
FSDataOutputStream out = fs.create(path,
overwrite,
fs.getConf()
- .getInt(IO_FILE_BUFFER_SIZE,
- 4096),
+ .getInt(IO_FILE_BUFFER_SIZE_KEY,
+ IO_FILE_BUFFER_SIZE_DEFAULT),
(short) 1,
buffersize);
out.write(src, 0, len);