HBASE-10855 Enable hfilev3 by default
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1583819 13f79535-47bb-0310-9956-ffa450edef68
commit 556f7fa07e
parent 211ae1d150
hbase-default.xml
@@ -666,10 +666,11 @@ possible configurations would overwhelm and obscure the important.
   </property>
   <property>
     <name>hfile.format.version</name>
-    <value>2</value>
-    <description>The HFile format version to use for new files. Set this to 1 to test
-    backwards-compatibility. The default value of this option should be
-    consistent with FixedFileTrailer.MAX_VERSION.</description>
+    <value>3</value>
+    <description>The HFile format version to use for new files.
+    Version 3 adds support for tags in hfiles (See http://hbase.apache.org/book.html#hbase.tags).
+    Distributed Log Replay requires that tags are enabled.
+    </description>
   </property>
   <property>
     <name>hfile.block.bloom.cacheonwrite</name>
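For context: after this change the shipped default is version 3, but a cluster that must stay readable by older readers can still pin version 2 in hbase-site.xml, which HBase layers over hbase-default.xml. A minimal sketch of reading the effective value, assuming the HBase client jars are on the classpath; the class name HFileVersionCheck is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HFileVersionCheck {
  public static void main(String[] args) {
    // HBaseConfiguration.create() loads hbase-default.xml, then overlays
    // hbase-site.xml, so a site-level <value>2</value> wins over the new default of 3.
    Configuration conf = HBaseConfiguration.create();
    int version = conf.getInt("hfile.format.version", 2);
    System.out.println("hfile.format.version = " + version);
  }
}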
HDFSBlocksDistribution.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 
 /**
- * Data structure to describe the distribution of HDFS blocks amount hosts.
+ * Data structure to describe the distribution of HDFS blocks among hosts.
  *
  * Adding erroneous data will be ignored silently.
  */
 
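This is the class the test below exercises. A minimal usage sketch, assuming the class lives at org.apache.hadoop.hbase.HDFSBlocksDistribution and exposes addHostsAndBlockWeight(String[], long); the three getters appear verbatim in the test diff below, and the hosts and weights here are illustrative:

import org.apache.hadoop.hbase.HDFSBlocksDistribution;

public class BlocksDistributionSketch {
  public static void main(String[] args) {
    HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
    // One block of weight 128 replicated on host1/host2, another on host1/host3.
    dist.addHostsAndBlockWeight(new String[] {"host1", "host2"}, 128);
    dist.addHostsAndBlockWeight(new String[] {"host1", "host3"}, 128);
    // host1 holds a replica of every block, so its weight (256) equals the
    // unique-blocks total weight -- the property the test below asserts.
    System.out.println(dist.getTopHosts().get(0));          // host1
    System.out.println(dist.getWeight("host1"));            // 256
    System.out.println(dist.getUniqueBlocksTotalWeight());  // 256
  }
}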
TestHRegion.java
@@ -3663,8 +3663,13 @@ public class TestHRegion {
   @Test
   public void testgetHDFSBlocksDistribution() throws Exception {
     HBaseTestingUtility htu = new HBaseTestingUtility();
-    final int DEFAULT_BLOCK_SIZE = 1024;
-    htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    // Why do we set the block size in this test? If we set it smaller than the kvs, then we'll
+    // break up the file into more pieces that can be distributed across the three nodes and we
+    // won't be able to have the condition this test asserts: that at least one node has
+    // a copy of all replicas -- if small block size, then blocks are spread evenly across
+    // the three nodes. hfilev3 with tags seems to put us over the block size. St.Ack.
+    // final int DEFAULT_BLOCK_SIZE = 1024;
+    // htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
     htu.getConfiguration().setInt("dfs.replication", 2);
 
     // set up a cluster with 3 nodes
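The arithmetic behind the lines this hunk comments out, as a sketch with assumed sizes (the actual flush size is not given in the diff): forcing a 1 KB dfs.block.size splits even a small store file into several HDFS blocks, and with replication 2 on 3 nodes no single node is then guaranteed a replica of every block, breaking the "top host holds everything" assertion.

public class BlockCountSketch {
  public static void main(String[] args) {
    long fileSize = 4096;   // assumed size of the flushed store file, in bytes
    long blockSize = 1024;  // the DEFAULT_BLOCK_SIZE this commit comments out
    // Ceiling division: number of HDFS blocks the file occupies.
    long blocks = (fileSize + blockSize - 1) / blockSize;
    System.out.println("blocks per store file: " + blocks);  // 4
  }
}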
@@ -3691,15 +3696,25 @@ public class TestHRegion {
     firstRegion.flushcache();
     HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();
 
-    // given the default replication factor is 2 and we have 2 HFiles,
+    // Given the default replication factor is 2 and we have 2 HFiles,
     // we will have a total of 4 replicas of blocks on 3 datanodes; thus there
     // must be at least one host that has replicas for 2 HFiles. That host's
     // weight will be equal to the unique block weight.
     long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
+    StringBuilder sb = new StringBuilder();
+    for (String host: blocksDistribution1.getTopHosts()) {
+      if (sb.length() > 0) sb.append(", ");
+      sb.append(host);
+      sb.append("=");
+      sb.append(blocksDistribution1.getWeight(host));
+    }
 
     String topHost = blocksDistribution1.getTopHosts().get(0);
     long topHostWeight = blocksDistribution1.getWeight(topHost);
-    assertTrue(uniqueBlocksWeight1 == topHostWeight);
+    String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" +
+      topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
+    LOG.info(msg);
+    assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);
 
     // use the static method to compute the value, it should be the same.
     // static method is used by load balancer or other components
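This hunk also swaps a bare assertTrue for one carrying a diagnostic message built before the assertion. The same JUnit pattern in isolation, as a sketch (the class and method names here are illustrative):

import static org.junit.Assert.assertTrue;

public class AssertWithMessageSketch {
  // Build the failure message before asserting, so a mismatch reports the
  // actual weights and the per-host summary instead of a bare AssertionError.
  static void checkWeights(long uniqueBlocksWeight, long topHostWeight, String hostSummary) {
    String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight
        + ", topHostWeight=" + topHostWeight + "; " + hostSummary;
    assertTrue(msg, uniqueBlocksWeight == topHostWeight);
  }
}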