merge HDFS-2349, HDFS-2729, HDFS-2726, HDFS-554, HDFS-1314, HADOOP-7910 to branch-0.23.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1228562 13f79535-47bb-0310-9956-ffa450edef68
Harsh J 2012-01-07 03:32:57 +00:00
parent 776be1de32
commit e508495147
18 changed files with 149 additions and 25 deletions

View File

@@ -11,6 +11,8 @@ Release 0.23.1 - Unreleased
HADOOP-7657. Add support for LZ4 compression. (Binglin Chang via todd)
HADOOP-7910. Add Configuration.getLongBytes to handle human readable byte size values. (Sho Shimauchi via harsh)
IMPROVEMENTS
HADOOP-7801. HADOOP_PREFIX cannot be overridden. (Bruno Mahé via tomwhite)

View File

@@ -628,8 +628,11 @@
<tr>
<td>conf/hdfs-site.xml</td>
<td>dfs.blocksize</td>
- <td>134217728</td>
- <td>HDFS blocksize of 128MB for large file-systems.</td>
+ <td>128m</td>
+ <td>
+ HDFS blocksize of 128 MB for large file-systems. Sizes can be provided
+ in size-prefixed values (10k, 128m, 1g, etc.) or simply in bytes (134217728 for 128 MB, etc.).
+ </td>
</tr>
<tr>
<td>conf/hdfs-site.xml</td>

View File

@@ -737,6 +737,27 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
return Long.parseLong(valueString);
}
/**
* Get the value of the <code>name</code> property as a <code>long</code>,
* optionally given in a human-readable format. If no such property exists,
* the provided default value is returned; if the specified value is not a
* valid <code>long</code> or human-readable format, an error is thrown. You
* can use the following suffixes (case insensitive): k (kilo), m (mega),
* g (giga), t (tera), p (peta), e (exa).
*
* @param name property name.
* @param defaultValue default value.
* @throws NumberFormatException when the value is invalid
* @return property value as a <code>long</code>,
* or <code>defaultValue</code>.
*/
public long getLongBytes(String name, long defaultValue) {
String valueString = getTrimmed(name);
if (valueString == null)
return defaultValue;
return StringUtils.TraditionalBinaryPrefix.string2long(valueString);
}
private String getHexDigits(String value) {
boolean negative = false;
String str = value;

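For reference, a minimal sketch of how the new accessor behaves, assuming the patched Configuration above is on the classpath; the class name GetLongBytesDemo and the property values are illustrative, not part of this commit.

import org.apache.hadoop.conf.Configuration;

public class GetLongBytesDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Human-readable form: 128m == 128 * 1024 * 1024 bytes.
    conf.set("dfs.blocksize", "128m");
    System.out.println(conf.getLongBytes("dfs.blocksize", 0));  // 134217728
    // A plain byte count still parses as before.
    conf.set("dfs.blocksize", "134217728");
    System.out.println(conf.getLongBytes("dfs.blocksize", 0));  // 134217728
    // A missing key falls back to the supplied default.
    System.out.println(conf.getLongBytes("no.such.key", 512));  // 512
  }
}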
View File

@@ -661,7 +661,14 @@ public class StringUtils {
if (Character.isDigit(lastchar))
return Long.parseLong(s);
else {
- long prefix = TraditionalBinaryPrefix.valueOf(lastchar).value;
+ long prefix;
+ try {
+   prefix = TraditionalBinaryPrefix.valueOf(lastchar).value;
+ } catch (IllegalArgumentException e) {
+   throw new IllegalArgumentException("Invalid size prefix '" + lastchar
+       + "' in '" + s
+       + "'. Allowed prefixes are k, m, g, t, p, e (case insensitive)");
+ }
long num = Long.parseLong(s.substring(0, lastpos));
if (num > (Long.MAX_VALUE/prefix) || num < (Long.MIN_VALUE/prefix)) {
throw new IllegalArgumentException(s + " does not fit in a Long");

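A short sketch of the parsing semantics patched above, with expected values taken from this commit's tests; the class name String2LongDemo is illustrative.

import org.apache.hadoop.util.StringUtils;

public class String2LongDemo {
  public static void main(String[] args) {
    // Suffixes are powers of 1024 and case insensitive.
    System.out.println(StringUtils.TraditionalBinaryPrefix.string2long("1k"));     // 1024
    System.out.println(StringUtils.TraditionalBinaryPrefix.string2long("-1230K")); // -1259520
    try {
      // 10 * 2^60 overflows a long, so the range check rejects it.
      StringUtils.TraditionalBinaryPrefix.string2long("10e");
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());  // 10e does not fit in a Long
    }
    try {
      // An unknown trailing letter now yields the friendlier message added above.
      StringUtils.TraditionalBinaryPrefix.string2long("10kb");
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());  // Invalid size prefix 'b' in '10kb'. ...
    }
  }
}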
View File

@@ -405,12 +405,16 @@ public class TestConfiguration extends TestCase {
conf.addResource(fileResource);
assertEquals(20, conf.getInt("test.int1", 0));
assertEquals(20, conf.getLong("test.int1", 0));
assertEquals(20, conf.getLongBytes("test.int1", 0));
assertEquals(20, conf.getInt("test.int2", 0));
assertEquals(20, conf.getLong("test.int2", 0));
assertEquals(20, conf.getLongBytes("test.int2", 0));
assertEquals(-20, conf.getInt("test.int3", 0));
assertEquals(-20, conf.getLong("test.int3", 0));
assertEquals(-20, conf.getLongBytes("test.int3", 0));
assertEquals(-20, conf.getInt("test.int4", 0));
assertEquals(-20, conf.getLong("test.int4", 0));
assertEquals(-20, conf.getLongBytes("test.int4", 0));
try {
conf.getInt("test.int5", 0);
fail("Property had invalid int value, but was read successfully.");
@@ -419,6 +423,26 @@
}
}
public void testHumanReadableValues() throws IOException {
out = new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.humanReadableValue1", "1m");
appendProperty("test.humanReadableValue2", "1M");
appendProperty("test.humanReadableValue5", "1MBCDE");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
assertEquals(1048576, conf.getLongBytes("test.humanReadableValue1", 0));
assertEquals(1048576, conf.getLongBytes("test.humanReadableValue2", 0));
try {
conf.getLongBytes("test.humanReadableValue5", 0);
fail("Property had invalid human readable value, but was read successfully.");
} catch (NumberFormatException e) {
// pass
}
}
public void testBooleanValues() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();

View File

@@ -143,8 +143,62 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
}
assertEquals(0L, StringUtils.TraditionalBinaryPrefix.string2long("0"));
- assertEquals(-1259520L, StringUtils.TraditionalBinaryPrefix.string2long("-1230k"));
- assertEquals(956703965184L, StringUtils.TraditionalBinaryPrefix.string2long("891g"));
+ assertEquals(1024L, StringUtils.TraditionalBinaryPrefix.string2long("1k"));
+ assertEquals(-1024L, StringUtils.TraditionalBinaryPrefix.string2long("-1k"));
+ assertEquals(1259520L,
+     StringUtils.TraditionalBinaryPrefix.string2long("1230K"));
+ assertEquals(-1259520L,
+     StringUtils.TraditionalBinaryPrefix.string2long("-1230K"));
+ assertEquals(104857600L,
+     StringUtils.TraditionalBinaryPrefix.string2long("100m"));
+ assertEquals(-104857600L,
+     StringUtils.TraditionalBinaryPrefix.string2long("-100M"));
+ assertEquals(956703965184L,
+     StringUtils.TraditionalBinaryPrefix.string2long("891g"));
+ assertEquals(-956703965184L,
+     StringUtils.TraditionalBinaryPrefix.string2long("-891G"));
+ assertEquals(501377302265856L,
+     StringUtils.TraditionalBinaryPrefix.string2long("456t"));
+ assertEquals(-501377302265856L,
+     StringUtils.TraditionalBinaryPrefix.string2long("-456T"));
+ assertEquals(11258999068426240L,
+     StringUtils.TraditionalBinaryPrefix.string2long("10p"));
+ assertEquals(-11258999068426240L,
+     StringUtils.TraditionalBinaryPrefix.string2long("-10P"));
+ assertEquals(1152921504606846976L,
+     StringUtils.TraditionalBinaryPrefix.string2long("1e"));
+ assertEquals(-1152921504606846976L,
+     StringUtils.TraditionalBinaryPrefix.string2long("-1E"));
+ String tooLargeNumStr = "10e";
+ try {
+   StringUtils.TraditionalBinaryPrefix.string2long(tooLargeNumStr);
+   fail("string2long should have rejected " + tooLargeNumStr + " as too large");
+ } catch (IllegalArgumentException e) {
+   assertEquals(tooLargeNumStr + " does not fit in a Long", e.getMessage());
+ }
+ String tooSmallNumStr = "-10e";
+ try {
+   StringUtils.TraditionalBinaryPrefix.string2long(tooSmallNumStr);
+   fail("string2long should have rejected " + tooSmallNumStr + " as too small");
+ } catch (IllegalArgumentException e) {
+   assertEquals(tooSmallNumStr + " does not fit in a Long", e.getMessage());
+ }
+ String invalidFormatNumStr = "10kb";
+ char invalidPrefix = 'b';
+ try {
+   StringUtils.TraditionalBinaryPrefix.string2long(invalidFormatNumStr);
+   fail("string2long should have rejected " + invalidFormatNumStr
+       + ", which has an invalid prefix");
+ } catch (IllegalArgumentException e) {
+   assertEquals("Invalid size prefix '" + invalidPrefix + "' in '"
+       + invalidFormatNumStr
+       + "'. Allowed prefixes are k, m, g, t, p, e (case insensitive)",
+       e.getMessage());
+ }
}
@Test

View File

@@ -68,6 +68,19 @@ Release 0.23.1 - UNRELEASED
HDFS-2710. Add HDFS tests related to HADOOP-7933. (Siddarth Seth via
suresh)
HDFS-2349. Corruption detected during block transfers between DNs
should log a WARN instead of INFO. (harsh)
HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
HDFS-2729. Update BlockManager's comments regarding the invalid block set. (harsh)
HDFS-2726. Fix a logging issue under DFSClient's createBlockOutputStream method. (harsh)
HDFS-554. Use System.arraycopy in BlockInfo.ensureCapacity. (harsh)
HDFS-1314. Make dfs.blocksize accept size-indicating prefixes. (Sho Shimauchi via harsh)
OPTIMIZATIONS
HDFS-2130. Switch default checksum to CRC32C. (todd)

View File

@@ -180,7 +180,7 @@ public class DFSClient implements java.io.Closeable {
/** dfs.write.packet.size is an internal config variable */
writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
- defaultBlockSize = conf.getLong(DFS_BLOCK_SIZE_KEY,
+ defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
DFS_BLOCK_SIZE_DEFAULT);
defaultReplication = (short) conf.getInt(
DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);

View File

@@ -1056,7 +1056,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
} catch (IOException ie) {
DFSClient.LOG.info("Exception in createBlockOutputStream " + ie);
DFSClient.LOG.info("Exception in createBlockOutputStream", ie);
// find the datanode that matches
if (firstBadLink.length() != 0) {

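The one-line change above (HDFS-2726) matters because commons-logging records a stack trace only when the exception is passed as a separate argument; concatenating it into the message keeps just its toString(). A minimal sketch of the difference, with an illustrative logger class:

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class LogThrowableDemo {
  private static final Log LOG = LogFactory.getLog(LogThrowableDemo.class);

  public static void main(String[] args) {
    IOException ie = new IOException("connection reset");
    LOG.info("Exception in createBlockOutputStream " + ie);  // message only
    LOG.info("Exception in createBlockOutputStream", ie);    // message plus stack trace
  }
}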
View File

@@ -141,9 +141,7 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
* happen only when replication is manually increased by the user. */
Object[] old = triplets;
triplets = new Object[(last+num)*3];
- for(int i=0; i < last*3; i++) {
-   triplets[i] = old[i];
- }
+ System.arraycopy(old, 0, triplets, 0, last*3);
return last;
}

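The HDFS-554 change above is behavior-preserving: the hand-rolled loop and System.arraycopy fill the same last*3 slots, but arraycopy does it in a single intrinsified call. A small equivalence sketch with illustrative values:

import java.util.Arrays;

public class ArraycopyDemo {
  public static void main(String[] args) {
    Object[] old = {"a", "b", "c", "d", "e", "f"};
    int last = 2;  // triplets in use; copy the first last*3 slots

    Object[] viaLoop = new Object[(last + 1) * 3];
    for (int i = 0; i < last * 3; i++) {  // the replaced loop
      viaLoop[i] = old[i];
    }

    Object[] viaArraycopy = new Object[(last + 1) * 3];
    System.arraycopy(old, 0, viaArraycopy, 0, last * 3);  // the new call

    System.out.println(Arrays.equals(viaLoop, viaArraycopy));  // true
  }
}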
View File

@@ -1524,7 +1524,7 @@ public class BlockManager {
// Ignore replicas already scheduled to be removed from the DN
if(invalidateBlocks.contains(dn.getStorageID(), block)) {
assert storedBlock.findDatanode(dn) < 0 : "Block " + block
+ " in recentInvalidatesSet should not appear in DN " + dn;
+ " in invalidated blocks set should not appear in DN " + dn;
return storedBlock;
}
@@ -1752,7 +1752,7 @@ public class BlockManager {
* Invalidate corrupt replicas.
* <p>
* This will remove the replicas from the block's location list,
- * add them to {@link #recentInvalidateSets} so that they could be further
+ * add them to {@link #invalidateBlocks} so that they could be further
* deleted from the respective data-nodes,
* and remove the block from corruptReplicasMap.
* <p>
@@ -1981,7 +1981,7 @@ public class BlockManager {
//
addToInvalidates(b, cur);
NameNode.stateChangeLog.info("BLOCK* chooseExcessReplicates: "
+"("+cur.getName()+", "+b+") is added to recentInvalidateSets");
+"("+cur.getName()+", "+b+") is added to invalidated blocks set.");
}
}
@@ -2375,7 +2375,7 @@ public class BlockManager {
/**
* Get blocks to invalidate for <i>nodeId</i>
- * in {@link #recentInvalidateSets}.
+ * in {@link #invalidateBlocks}.
*
* @return number of blocks scheduled for removal during this iteration.
*/

View File

@@ -439,11 +439,8 @@ public class DataNode extends Configured
private static String getHostName(Configuration config)
throws UnknownHostException {
- String name = null;
- // use configured nameserver & interface to get local hostname
- if (config.get(DFS_DATANODE_HOST_NAME_KEY) != null) {
-   name = config.get(DFS_DATANODE_HOST_NAME_KEY);
- }
+ String name = config.get(DFS_DATANODE_HOST_NAME_KEY);
if (name == null) {
name = DNS
.getDefaultHost(config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
@@ -1298,7 +1295,7 @@ public class DataNode extends Configured
nn.reportBadBlocks(new LocatedBlock[]{
new LocatedBlock(block, new DatanodeInfo[] {
new DatanodeInfo(bpReg)})});
LOG.info("Can't replicate block " + block
LOG.warn("Can't replicate block " + block
+ " because on-disk length " + onDiskLength
+ " is shorter than NameNode recorded length " + block.getNumBytes());
return;

View File

@@ -119,7 +119,7 @@ class DataXceiverServer implements Runnable {
conf.getInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT);
- this.estimateBlockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+ this.estimateBlockSize = conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
//set up parameter for cluster balancing

View File

@@ -483,7 +483,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
fsOwner.getShortUserName(), supergroup, new FsPermission(filePermission));
this.serverDefaults = new FsServerDefaults(
- conf.getLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
+ conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),
conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT),
(short) conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),

View File

@@ -529,7 +529,7 @@ public class WebHdfsFileSystem extends FileSystem
@Override
public long getDefaultBlockSize() {
- return getConf().getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+ return getConf().getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
}

View File

@@ -55,6 +55,6 @@ public class BlockSizeParam extends LongParam {
/** @return the value or, if it is null, return the default from conf. */
public long getValue(final Configuration conf) {
return getValue() != null? getValue()
- : conf.getLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
+ : conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
}
}

View File

@@ -329,7 +329,12 @@ creations/deletions), or "all".</description>
<property>
<name>dfs.blocksize</name>
<value>67108864</value>
- <description>The default block size for new files.</description>
+ <description>
+   The default block size for new files, in bytes.
+   You can use the following suffixes (case insensitive):
+   k (kilo), m (mega), g (giga), t (tera), p (peta), e (exa) to specify the size
+   (such as 128k, 512m, 1g, etc.), or provide the complete size in bytes
+   (such as 134217728 for 128 MB).
+ </description>
</property>
<property>

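Taken together with the getLongBytes call sites earlier in this commit, a suffixed value set in hdfs-site.xml now flows through to every block-size consumer. A hedged sketch of the lookup those call sites perform, using the DFSConfigKeys constants from the hunks above; the class name BlockSizeLookupDemo is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class BlockSizeLookupDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "512m");  // e.g. from hdfs-site.xml
    long blockSize = conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
    System.out.println(blockSize);  // 536870912
  }
}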
View File

@@ -51,7 +51,7 @@ public class TestParam {
final BlockSizeParam p = new BlockSizeParam(BlockSizeParam.DEFAULT);
Assert.assertEquals(null, p.getValue());
Assert.assertEquals(
- conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+ conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT),
p.getValue(conf));