diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 06495bb7c66..0fd8e919fee 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -11,6 +11,8 @@ Release 0.23.1 - Unreleased
 
     HADOOP-7657. Add support for LZ4 compression. (Binglin Chang via todd)
 
+    HADOOP-7910. Add Configuration.getLongBytes to handle human readable byte size values. (Sho Shimauchi via harsh)
+
   IMPROVEMENTS
 
     HADOOP-7801. HADOOP_PREFIX cannot be overriden. (Bruno Mahé via tomwhite)
diff --git a/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/cluster_setup.xml b/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/cluster_setup.xml
index 4fb057ff767..1a2de1dd001 100644
--- a/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/cluster_setup.xml
+++ b/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/cluster_setup.xml
@@ -628,8 +628,11 @@
           <tr>
             <td>conf/hdfs-site.xml</td>
             <td>dfs.blocksize</td>
-            <td>134217728</td>
-            <td>HDFS blocksize of 128MB for large file-systems.</td>
+            <td>128m</td>
+            <td>
+              HDFS blocksize of 128 MB for large file-systems. Sizes can be provided
+              in size-prefixed values (10k, 128m, 1g, etc.) or simply in bytes (134217728 for 128 MB, etc.).
+            </td>
           </tr>
           <tr>
             <td>conf/hdfs-site.xml</td>
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 4fb1d190663..4514b807f14 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -737,6 +737,27 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     return Long.parseLong(valueString);
   }
 
+  /**
+   * Get the value of the <code>name</code> property as a <code>long</code> or
+   * human readable format. If no such property exists, the provided default
+   * value is returned, or if the specified value is not a valid
+   * <code>long</code> or human readable format, then an error is thrown. You
+   * can use the following suffix (case insensitive): k(kilo), m(mega), g(giga),
+   * t(tera), p(peta), e(exa)
+   *
+   * @param name property name.
+   * @param defaultValue default value.
+   * @throws NumberFormatException when the value is invalid
+   * @return property value as a <code>long</code>,
+   *         or <code>defaultValue</code>.
+   */
+  public long getLongBytes(String name, long defaultValue) {
+    String valueString = getTrimmed(name);
+    if (valueString == null)
+      return defaultValue;
+    return StringUtils.TraditionalBinaryPrefix.string2long(valueString);
+  }
+
   private String getHexDigits(String value) {
     boolean negative = false;
     String str = value;
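For context, a minimal sketch of how the new Configuration.getLongBytes call behaves
(illustration only, not part of the patch; the property names below are made up):

    import org.apache.hadoop.conf.Configuration;

    public class GetLongBytesExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("my.buffer.size", "128m");       // suffix is case insensitive
        conf.set("my.plain.size", "134217728");   // plain byte counts still work

        // "128m" resolves to 128 * 1024 * 1024
        System.out.println(conf.getLongBytes("my.buffer.size", 4096)); // 134217728
        // a plain number parses exactly as it would with getLong()
        System.out.println(conf.getLongBytes("my.plain.size", 4096));  // 134217728
        // an unset key falls back to the supplied default
        System.out.println(conf.getLongBytes("missing.key", 4096));    // 4096
      }
    }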
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index 013443d3abb..4c32ffc0ba5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -661,7 +661,14 @@ public class StringUtils {
       if (Character.isDigit(lastchar))
         return Long.parseLong(s);
       else {
-        long prefix = TraditionalBinaryPrefix.valueOf(lastchar).value;
+        long prefix;
+        try {
+          prefix = TraditionalBinaryPrefix.valueOf(lastchar).value;
+        } catch (IllegalArgumentException e) {
+          throw new IllegalArgumentException("Invalid size prefix '" + lastchar
+              + "' in '" + s
+              + "'. Allowed prefixes are k, m, g, t, p, e(case insensitive)");
+        }
         long num = Long.parseLong(s.substring(0, lastpos));
         if (num > (Long.MAX_VALUE/prefix) || num < (Long.MIN_VALUE/prefix)) {
           throw new IllegalArgumentException(s + " does not fit in a Long");
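To make the parsing rules concrete, here is a rough, self-contained sketch of what
TraditionalBinaryPrefix.string2long does after this change (illustration only; the
real implementation is the enum in StringUtils shown above):

    // Simplified stand-in for StringUtils.TraditionalBinaryPrefix.string2long().
    static long parseSize(String s) {
      s = s.trim();
      int lastpos = s.length() - 1;
      char lastchar = s.charAt(lastpos);
      if (Character.isDigit(lastchar)) {
        return Long.parseLong(s);                  // plain byte count, e.g. "134217728"
      }
      long prefix;
      switch (Character.toUpperCase(lastchar)) {   // suffixes are case insensitive
        case 'K': prefix = 1L << 10; break;
        case 'M': prefix = 1L << 20; break;
        case 'G': prefix = 1L << 30; break;
        case 'T': prefix = 1L << 40; break;
        case 'P': prefix = 1L << 50; break;
        case 'E': prefix = 1L << 60; break;
        default:
          // mirrors the friendlier error message added by this patch
          throw new IllegalArgumentException("Invalid size prefix '" + lastchar
              + "' in '" + s + "'");
      }
      long num = Long.parseLong(s.substring(0, lastpos));
      if (num > Long.MAX_VALUE / prefix || num < Long.MIN_VALUE / prefix) {
        throw new IllegalArgumentException(s + " does not fit in a Long");
      }
      return num * prefix;
    }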
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index f9f14fb8480..021510c0bd1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -405,12 +405,16 @@ public class TestConfiguration extends TestCase {
     conf.addResource(fileResource);
     assertEquals(20, conf.getInt("test.int1", 0));
     assertEquals(20, conf.getLong("test.int1", 0));
+    assertEquals(20, conf.getLongBytes("test.int1", 0));
     assertEquals(20, conf.getInt("test.int2", 0));
     assertEquals(20, conf.getLong("test.int2", 0));
+    assertEquals(20, conf.getLongBytes("test.int2", 0));
     assertEquals(-20, conf.getInt("test.int3", 0));
     assertEquals(-20, conf.getLong("test.int3", 0));
+    assertEquals(-20, conf.getLongBytes("test.int3", 0));
     assertEquals(-20, conf.getInt("test.int4", 0));
     assertEquals(-20, conf.getLong("test.int4", 0));
+    assertEquals(-20, conf.getLongBytes("test.int4", 0));
     try {
       conf.getInt("test.int5", 0);
       fail("Property had invalid int value, but was read successfully.");
@@ -419,6 +423,26 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  public void testHumanReadableValues() throws IOException {
+    out = new BufferedWriter(new FileWriter(CONFIG));
+    startConfig();
+    appendProperty("test.humanReadableValue1", "1m");
+    appendProperty("test.humanReadableValue2", "1M");
+    appendProperty("test.humanReadableValue5", "1MBCDE");
+
+    endConfig();
+    Path fileResource = new Path(CONFIG);
+    conf.addResource(fileResource);
+    assertEquals(1048576, conf.getLongBytes("test.humanReadableValue1", 0));
+    assertEquals(1048576, conf.getLongBytes("test.humanReadableValue2", 0));
+    try {
+      conf.getLongBytes("test.humanReadableValue5", 0);
+      fail("Property had invalid human readable value, but was read successfully.");
+    } catch (NumberFormatException e) {
+      // pass
+    }
+  }
+
   public void testBooleanValues() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
index 7135a463de9..e0dddb2ce40 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
@@ -143,8 +143,62 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
     }
 
     assertEquals(0L, StringUtils.TraditionalBinaryPrefix.string2long("0"));
-    assertEquals(-1259520L, StringUtils.TraditionalBinaryPrefix.string2long("-1230k"));
-    assertEquals(956703965184L, StringUtils.TraditionalBinaryPrefix.string2long("891g"));
+    assertEquals(1024L, StringUtils.TraditionalBinaryPrefix.string2long("1k"));
+    assertEquals(-1024L, StringUtils.TraditionalBinaryPrefix.string2long("-1k"));
+    assertEquals(1259520L,
+        StringUtils.TraditionalBinaryPrefix.string2long("1230K"));
+    assertEquals(-1259520L,
+        StringUtils.TraditionalBinaryPrefix.string2long("-1230K"));
+    assertEquals(104857600L,
+        StringUtils.TraditionalBinaryPrefix.string2long("100m"));
+    assertEquals(-104857600L,
+        StringUtils.TraditionalBinaryPrefix.string2long("-100M"));
+    assertEquals(956703965184L,
+        StringUtils.TraditionalBinaryPrefix.string2long("891g"));
+    assertEquals(-956703965184L,
+        StringUtils.TraditionalBinaryPrefix.string2long("-891G"));
+    assertEquals(501377302265856L,
+        StringUtils.TraditionalBinaryPrefix.string2long("456t"));
+    assertEquals(-501377302265856L,
+        StringUtils.TraditionalBinaryPrefix.string2long("-456T"));
+    assertEquals(11258999068426240L,
+        StringUtils.TraditionalBinaryPrefix.string2long("10p"));
+    assertEquals(-11258999068426240L,
+        StringUtils.TraditionalBinaryPrefix.string2long("-10P"));
+    assertEquals(1152921504606846976L,
+        StringUtils.TraditionalBinaryPrefix.string2long("1e"));
+    assertEquals(-1152921504606846976L,
+        StringUtils.TraditionalBinaryPrefix.string2long("-1E"));
+
+    String tooLargeNumStr = "10e";
+    try {
+      StringUtils.TraditionalBinaryPrefix.string2long(tooLargeNumStr);
+      fail("Test passed for a number " + tooLargeNumStr + " too large");
+    } catch (IllegalArgumentException e) {
+      assertEquals(tooLargeNumStr + " does not fit in a Long", e.getMessage());
+    }
+
+    String tooSmallNumStr = "-10e";
+    try {
+      StringUtils.TraditionalBinaryPrefix.string2long(tooSmallNumStr);
+      fail("Test passed for a number " + tooSmallNumStr + " too small");
+    } catch (IllegalArgumentException e) {
+      assertEquals(tooSmallNumStr + " does not fit in a Long", e.getMessage());
+    }
+
+    String invalidFormatNumStr = "10kb";
+    char invalidPrefix = 'b';
+    try {
+      StringUtils.TraditionalBinaryPrefix.string2long(invalidFormatNumStr);
+      fail("Test passed for a number " + invalidFormatNumStr
+          + " has invalid format");
+    } catch (IllegalArgumentException e) {
+      assertEquals("Invalid size prefix '" + invalidPrefix + "' in '"
+          + invalidFormatNumStr
+          + "'. Allowed prefixes are k, m, g, t, p, e(case insensitive)",
+          e.getMessage());
+    }
+
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0095f4511de..0828db35a21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -68,6 +68,19 @@ Release 0.23.1 - UNRELEASED
 
     HDFS-2710. Add HDFS tests related to HADOOP-7933. (Siddarth Seth via suresh)
 
+    HDFS-2349. Corruption detected during block transfers between DNs
+               should log a WARN instead of INFO. (harsh)
+
+    HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
+
+    HDFS-2729. Update BlockManager's comments regarding the invalid block set (harsh)
+
+    HDFS-2726. Fix a logging issue under DFSClient's createBlockOutputStream method (harsh)
+
+    HDFS-554. Use System.arraycopy in BlockInfo.ensureCapacity. (harsh)
+
+    HDFS-1314. Make dfs.blocksize accept size-indicating prefixes (Sho Shimauchi via harsh)
+
   OPTIMIZATIONS
 
     HDFS-2130. Switch default checksum to CRC32C. (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 19d696ac457..53095b2ef05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -180,7 +180,7 @@ public class DFSClient implements java.io.Closeable {
     /** dfs.write.packet.size is an internal config variable */
     writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
         DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
-    defaultBlockSize = conf.getLong(DFS_BLOCK_SIZE_KEY,
+    defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
         DFS_BLOCK_SIZE_DEFAULT);
     defaultReplication = (short) conf.getInt(
         DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 8422ecca007..f8b9ed45b69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -1056,7 +1056,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
 
       } catch (IOException ie) {
 
-        DFSClient.LOG.info("Exception in createBlockOutputStream " + ie);
+        DFSClient.LOG.info("Exception in createBlockOutputStream", ie);
 
         // find the datanode that matches
         if (firstBadLink.length() != 0) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 293d5c59699..b769ae30327 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -141,9 +141,7 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
      * happen only when replication is manually increased by the user. */
     Object[] old = triplets;
     triplets = new Object[(last+num)*3];
-    for(int i=0; i < last*3; i++) {
-      triplets[i] = old[i];
-    }
+    System.arraycopy(old, 0, triplets, 0, last*3);
     return last;
   }
 
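As a side note on the BlockInfo change above, a small self-contained sketch
(illustration only, not part of the patch) showing that the removed loop and the
System.arraycopy call fill the resized array identically:

    import java.util.Arrays;

    public class ArrayCopyEquivalence {
      public static void main(String[] args) {
        Object[] old = {"a", "b", "c", "d", "e", "f"};
        int copyCount = 6;                    // stands in for last*3 in ensureCapacity

        Object[] byLoop = new Object[9];      // stands in for (last+num)*3
        for (int i = 0; i < copyCount; i++) {
          byLoop[i] = old[i];
        }

        Object[] byArraycopy = new Object[9];
        System.arraycopy(old, 0, byArraycopy, 0, copyCount);

        System.out.println(Arrays.equals(byLoop, byArraycopy)); // true
      }
    }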
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 84a866f3d05..b81a69f571f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1524,7 +1524,7 @@ public class BlockManager {
     // Ignore replicas already scheduled to be removed from the DN
     if(invalidateBlocks.contains(dn.getStorageID(), block)) {
       assert storedBlock.findDatanode(dn) < 0 : "Block " + block
-        + " in recentInvalidatesSet should not appear in DN " + dn;
+        + " in invalidated blocks set should not appear in DN " + dn;
       return storedBlock;
     }
 
@@ -1752,7 +1752,7 @@ public class BlockManager {
    * Invalidate corrupt replicas.
    * <p>
    * This will remove the replicas from the block's location list,
-   * add them to {@link #recentInvalidateSets} so that they could be further
+   * add them to {@link #invalidateBlocks} so that they could be further
    * deleted from the respective data-nodes,
    * and remove the block from corruptReplicasMap.
    * <p>
@@ -1981,7 +1981,7 @@ public class BlockManager {
       //
       addToInvalidates(b, cur);
       NameNode.stateChangeLog.info("BLOCK* chooseExcessReplicates: "
-                +"("+cur.getName()+", "+b+") is added to recentInvalidateSets");
+                +"("+cur.getName()+", "+b+") is added to invalidated blocks set.");
     }
   }
 
@@ -2375,7 +2375,7 @@ public class BlockManager {
 
   /**
    * Get blocks to invalidate for <i>nodeId</i>
-   * in {@link #recentInvalidateSets}.
+   * in {@link #invalidateBlocks}.
    *
    * @return number of blocks scheduled for removal during this iteration.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 9c7a6119876..007cabace48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -439,11 +439,8 @@ public class DataNode extends Configured
 
   private static String getHostName(Configuration config)
       throws UnknownHostException {
-    String name = null;
     // use configured nameserver & interface to get local hostname
-    if (config.get(DFS_DATANODE_HOST_NAME_KEY) != null) {
-      name = config.get(DFS_DATANODE_HOST_NAME_KEY);
-    }
+    String name = config.get(DFS_DATANODE_HOST_NAME_KEY);
     if (name == null) {
       name = DNS
           .getDefaultHost(config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
@@ -1298,7 +1295,7 @@ public class DataNode extends Configured
       nn.reportBadBlocks(new LocatedBlock[]{
           new LocatedBlock(block, new DatanodeInfo[] {
               new DatanodeInfo(bpReg)})});
-      LOG.info("Can't replicate block " + block
+      LOG.warn("Can't replicate block " + block
           + " because on-disk length " + onDiskLength
           + " is shorter than NameNode recorded length " + block.getNumBytes());
       return;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
index eed58ecad4c..8c932d71ccc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
@@ -119,7 +119,7 @@ class DataXceiverServer implements Runnable {
       conf.getInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
                   DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT);
 
-    this.estimateBlockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+    this.estimateBlockSize = conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
         DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
 
     //set up parameter for cluster balancing
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7e9d9b9f7cb..49a1751daac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -483,7 +483,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         fsOwner.getShortUserName(), supergroup, new FsPermission(filePermission));
 
     this.serverDefaults = new FsServerDefaults(
-        conf.getLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
+        conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
         conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),
         conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT),
         (short) conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 01006d1aeef..3a802065167 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -529,7 +529,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   @Override
   public long getDefaultBlockSize() {
-    return getConf().getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+    return getConf().getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
         DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
index 4076746e34e..b6d82c2c37a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
@@ -55,6 +55,6 @@ public class BlockSizeParam extends LongParam {
   /** @return the value or, if it is null, return the default from conf. */
   public long getValue(final Configuration conf) {
     return getValue() != null? getValue()
-        : conf.getLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
+        : conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
   }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 394a2319c1e..384674fe930 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -329,7 +329,12 @@ creations/deletions), or "all".
 <property>
   <name>dfs.blocksize</name>
   <value>67108864</value>
-  <description>The default block size for new files.</description>
+  <description>
+      The default block size for new files, in bytes.
+      You can use the following suffix (case insensitive):
+      k(kilo), m(mega), g(giga), t(tera), p(peta), e(exa) to specify the size (such as 128k, 512m, 1g, etc.),
+      Or provide complete size in bytes (such as 134217728 for 128 MB).
+  </description>
 </property>
 
 <property>
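The hdfs-default.xml wording above is also the reason every dfs.blocksize reader
in this patch moves from getLong() to getLongBytes(): once a suffixed value such as
"128m" is configured, getLong() can no longer parse it. A short sketch
(illustration only, not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class BlockSizeReadExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "128m");

        // new-style read understands the suffix
        long blockSize = conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
            DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
        System.out.println(blockSize);                 // 134217728

        try {
          // old-style read would throw, because "128m" is not a plain long
          conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
              DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
        } catch (NumberFormatException expected) {
          System.out.println("getLong() rejects the suffixed form");
        }
      }
    }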
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
index 9834cb74a45..646edd42825 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
@@ -51,7 +51,7 @@ public class TestParam {
     final BlockSizeParam p = new BlockSizeParam(BlockSizeParam.DEFAULT);
     Assert.assertEquals(null, p.getValue());
     Assert.assertEquals(
-        conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+        conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
            DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT),
        p.getValue(conf));