HBASE-10855 Enable hfilev3 by default

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1583819 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2014-04-01 23:00:03 +00:00
parent 211ae1d150
commit 556f7fa07e
3 changed files with 25 additions and 9 deletions

View File

@@ -666,10 +666,11 @@ possible configurations would overwhelm and obscure the important.
</property>
<property>
<name>hfile.format.version</name>
<value>2</value>
<description>The HFile format version to use for new files. Set this to 1 to test
backwards-compatibility. The default value of this option should be
consistent with FixedFileTrailer.MAX_VERSION.</description>
<value>3</value>
<description>The HFile format version to use for new files.
Version 3 adds support for tags in hfiles (See http://hbase.apache.org/book.html#hbase.tags).
Distributed Log Replay requires that tags are enabled.
</description>
</property>
<property>
<name>hfile.block.bloom.cacheonwrite</name>

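With this change, new store files default to HFile version 3. Below is a minimal sketch of how client or test code could pin the format back to version 2, for example while checking backwards-compatibility; the class name is made up for illustration and the raw key string is used rather than any named constant:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HFileVersionOverrideSketch {
  public static void main(String[] args) {
    // Loads hbase-default.xml / hbase-site.xml, so the default is now 3.
    Configuration conf = HBaseConfiguration.create();
    // Force the older format only if tags (and the features that depend on
    // them, such as Distributed Log Replay) are not required.
    conf.setInt("hfile.format.version", 2);
    System.out.println("hfile.format.version = "
        + conf.getInt("hfile.format.version", 3));
  }
}

The same override can equally be placed in hbase-site.xml as a property element like the one above.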
View File

@@ -30,7 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
/**
* Data structure to describe the distribution of HDFS blocks amount hosts.
* Data structure to describe the distribution of HDFS blocks among hosts.
*
* Adding erroneous data will be ignored silently.
*/

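For context, here is a rough sketch of how this data structure can be exercised; the hostnames and weights are made up, addHostsAndBlockWeight is assumed to be the usual entry point for adding data, and the accessors are the same ones used by the test below:

import org.apache.hadoop.hbase.HDFSBlocksDistribution;

public class BlocksDistributionSketch {
  public static void main(String[] args) {
    HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
    // One 1024-byte block with replicas on host-a and host-b ...
    dist.addHostsAndBlockWeight(new String[] { "host-a", "host-b" }, 1024);
    // ... and a second block with replicas on host-a and host-c.
    dist.addHostsAndBlockWeight(new String[] { "host-a", "host-c" }, 1024);
    // host-a holds a replica of every block, so its weight equals the
    // unique blocks' total weight (2048) -- the condition the test asserts.
    String topHost = dist.getTopHosts().get(0);
    System.out.println(topHost + "=" + dist.getWeight(topHost)
        + ", uniqueBlocksTotalWeight=" + dist.getUniqueBlocksTotalWeight());
    // Per the javadoc, erroneous input such as a null host array is
    // ignored silently rather than throwing.
    dist.addHostsAndBlockWeight(null, 512);
  }
}

With a small block size the weight would instead be spread evenly across hosts, which is why the test below no longer shrinks dfs.block.size.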
View File

@@ -3663,8 +3663,13 @@ public class TestHRegion {
@Test
public void testgetHDFSBlocksDistribution() throws Exception {
HBaseTestingUtility htu = new HBaseTestingUtility();
final int DEFAULT_BLOCK_SIZE = 1024;
htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
// Why do we set the block size in this test? If we set it smaller than the kvs, then we'll
// break up the file into more pieces that can be distributed across the three nodes and we
// won't be able to have the condition this test asserts: that at least one node has
// a copy of all replicas -- if the block size is small, then blocks are spread evenly across
// the three nodes. hfilev3 with tags seems to put us over the block size. St.Ack.
// final int DEFAULT_BLOCK_SIZE = 1024;
// htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
htu.getConfiguration().setInt("dfs.replication", 2);
// set up a cluster with 3 nodes
@@ -3691,15 +3696,25 @@ public class TestHRegion {
firstRegion.flushcache();
HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();
// given the default replication factor is 2 and we have 2 HFiles,
// Given the default replication factor is 2 and we have 2 HFiles,
// we will have a total of 4 block replicas on 3 datanodes; thus there
// must be at least one host that has replicas for both HFiles. That host's
// weight will be equal to the unique blocks' total weight.
long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
StringBuilder sb = new StringBuilder();
for (String host: blocksDistribution1.getTopHosts()) {
if (sb.length() > 0) sb.append(", ");
sb.append(host);
sb.append("=");
sb.append(blocksDistribution1.getWeight(host));
}
String topHost = blocksDistribution1.getTopHosts().get(0);
long topHostWeight = blocksDistribution1.getWeight(topHost);
assertTrue(uniqueBlocksWeight1 == topHostWeight);
String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" +
topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
LOG.info(msg);
assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);
// use the static method to compute the value, it should be the same.
// static method is used by load balancer or other components