HBASE-8732 HFileBlockDefaultEncodingContext isn't thread-safe but is used by all readers, breaks column encoding
HBASE-8961 Make TestReplicationQueueFailover less integration-y by splitting it up


git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1504300 13f79535-47bb-0310-9956-ffa450edef68
Jean-Daniel Cryans 2013-07-17 22:05:56 +00:00
parent 126526ee30
commit d6605b710c
7 changed files with 105 additions and 28 deletions
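
For context on the first fix: HBASE-8732 is an instance of the classic shared-mutable-helper hazard. A single encoding context object, which reuses internal state across calls, was cached in a field of an encoder that every reader thread shares. A minimal sketch of the hazard and of the per-call fix this commit applies (the class and method names below are illustrative stand-ins, not the actual HBase APIs):

    // Illustrative sketch only; the real classes are HFileBlockDefaultEncodingContext
    // and HFileDataBlockEncoderImpl, whose internals differ.
    class ScratchEncodingContext {
      private final byte[] scratch = new byte[64 * 1024]; // reused buffer: cheap, but stateful

      byte[] encode(byte[] block) {
        // Concurrent callers overwrite each other's data in the shared scratch buffer.
        System.arraycopy(block, 0, scratch, 0, Math.min(block.length, scratch.length));
        return scratch;
      }
    }

    class Encoder {
      // BROKEN: one context instance shared by all reader threads.
      private final ScratchEncodingContext sharedCtx = new ScratchEncodingContext();

      byte[] encodeUnsafe(byte[] block) {
        return sharedCtx.encode(block); // races under concurrent reads
      }

      // FIXED, in the spirit of this commit: build a fresh context per call,
      // trading a small allocation for thread safety.
      byte[] encodeSafe(byte[] block) {
        return new ScratchEncodingContext().encode(block);
      }
    }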

HFileDataBlockEncoderImpl.java

@@ -41,7 +41,7 @@ import com.google.common.base.Preconditions;
 public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
   private final DataBlockEncoding onDisk;
   private final DataBlockEncoding inCache;
-  private final HFileBlockEncodingContext inCacheEncodeCtx;
+  private final byte[] dummyHeader;

   public HFileDataBlockEncoderImpl(DataBlockEncoding encoding) {
     this(encoding, encoding);
@@ -75,16 +75,7 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
       onDisk : DataBlockEncoding.NONE;
     this.inCache = inCache != null ?
       inCache : DataBlockEncoding.NONE;
-    if (inCache != DataBlockEncoding.NONE) {
-      inCacheEncodeCtx =
-          this.inCache.getEncoder().newDataBlockEncodingContext(
-              Algorithm.NONE, this.inCache, dummyHeader);
-    } else {
-      // create a default encoding context
-      inCacheEncodeCtx =
-          new HFileBlockDefaultEncodingContext(Algorithm.NONE,
-              this.inCache, dummyHeader);
-    }
+    this.dummyHeader = dummyHeader;

     Preconditions.checkArgument(onDisk == DataBlockEncoding.NONE ||
         onDisk == inCache, "on-disk encoding (" + onDisk + ") must be " +
@@ -166,7 +157,7 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
       }
       // Encode the unencoded block with the in-cache encoding.
       return encodeDataBlock(block, inCache, block.doesIncludeMemstoreTS(),
-          inCacheEncodeCtx);
+          createInCacheEncodingContext());
     }

     if (block.getBlockType() == BlockType.ENCODED_DATA) {
@@ -256,6 +247,22 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
     return encodedBlock;
   }

+  /**
+   * Returns a new encoding context given the inCache encoding scheme provided in the constructor.
+   * This used to be kept around but HFileBlockDefaultEncodingContext isn't thread-safe.
+   * See HBASE-8732
+   * @return a new in cache encoding context
+   */
+  private HFileBlockEncodingContext createInCacheEncodingContext() {
+    return (inCache != DataBlockEncoding.NONE) ?
+        this.inCache.getEncoder().newDataBlockEncodingContext(
+            Algorithm.NONE, this.inCache, dummyHeader)
+        :
+        // create a default encoding context
+        new HFileBlockDefaultEncodingContext(Algorithm.NONE,
+            this.inCache, dummyHeader);
+  }
+
   @Override
   public String toString() {
     return getClass().getSimpleName() + "(onDisk=" + onDisk + ", inCache=" +
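
Note the trade-off in the last hunk: createInCacheEncodingContext() now runs on every cache-time encoding, so each call pays a small object allocation where the old code reused the inCacheEncodeCtx field. The javadoc records the reasoning: the context "used to be kept around", but HFileBlockDefaultEncodingContext isn't thread-safe and this encoder is shared by all readers, so per-call construction is the safe choice.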

TestReplicationBase.java

@@ -88,6 +88,7 @@ public class TestReplicationBase {
     conf1.setBoolean("dfs.support.append", true);
     conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
     conf1.setInt("replication.stats.thread.period.seconds", 5);
+    conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);

     utility1 = new HBaseTestingUtility(conf1);
     utility1.startMiniZKCluster();
@@ -105,6 +106,7 @@ public class TestReplicationBase {
     conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
     conf2.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
     conf2.setBoolean("dfs.support.append", true);
+    conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);

     utility2 = new HBaseTestingUtility(conf2);
     utility2.setZkCluster(miniZK);
@@ -127,7 +129,7 @@ public class TestReplicationBase {
     HBaseAdmin admin1 = new HBaseAdmin(conf1);
     HBaseAdmin admin2 = new HBaseAdmin(conf2);
     admin1.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
-    admin2.createTable(table);
+    admin2.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
     htable1 = new HTable(conf1, tableName);
     htable1.setWriteBufferSize(1024);
     htable2 = new HTable(conf2, tableName);

TestReplicationKillMasterRS.java (new file)

@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.hadoop.hbase.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Runs the TestReplicationKillRS test and selects the RS to kill in the master cluster
+ * Do not add other tests in this class.
+ */
+@Category(LargeTests.class)
+public class TestReplicationKillMasterRS extends TestReplicationKillRS {
+
+  @Test(timeout=300000)
+  public void killOneMasterRS() throws Exception {
+    loadTableAndKillRS(utility1);
+  }
+}

TestReplicationQueueFailoverCompressed.java renamed to TestReplicationKillMasterRSCompressed.java

@@ -24,10 +24,11 @@ import org.junit.BeforeClass;
 import org.junit.experimental.categories.Category;

 /**
- * Run the same test as TestReplication but with HLog compression enabled
+ * Run the same test as TestReplicationKillMasterRS but with HLog compression enabled
+ * Do not add other tests in this class.
  */
 @Category(LargeTests.class)
-public class TestReplicationQueueFailoverCompressed extends TestReplicationQueueFailover {
+public class TestReplicationKillMasterRSCompressed extends TestReplicationKillMasterRS {

   /**
    * @throws java.lang.Exception

TestReplicationQueueFailover.java renamed to TestReplicationKillRS.java

@@ -33,36 +33,31 @@ import org.junit.experimental.categories.Category;
 import static org.junit.Assert.fail;

 @Category(LargeTests.class)
-public class TestReplicationQueueFailover extends TestReplicationBase {
+public class TestReplicationKillRS extends TestReplicationBase {

-  private static final Log LOG = LogFactory.getLog(TestReplicationQueueFailover.class);
+  private static final Log LOG = LogFactory.getLog(TestReplicationKillRS.class);

   /**
-   * Load up multiple tables over 2 region servers and kill a source during
+   * Load up 1 tables over 2 region servers and kill a source during
    * the upload. The failover happens internally.
    *
    * WARNING this test sometimes fails because of HBASE-3515
    *
    * @throws Exception
    */
-  @Test(timeout=300000)
-  public void queueFailover() throws Exception {
+  public void loadTableAndKillRS(HBaseTestingUtility util) throws Exception {
     // killing the RS with .META. can result into failed puts until we solve
     // IO fencing
     int rsToKill1 =
-        utility1.getHBaseCluster().getServerWithMeta() == 0 ? 1 : 0;
-    int rsToKill2 =
-        utility2.getHBaseCluster().getServerWithMeta() == 0 ? 1 : 0;
+        util.getHBaseCluster().getServerWithMeta() == 0 ? 1 : 0;

     // Takes about 20 secs to run the full loading, kill around the middle
-    Thread killer1 = killARegionServer(utility1, 7500, rsToKill1);
-    Thread killer2 = killARegionServer(utility2, 10000, rsToKill2);
+    Thread killer = killARegionServer(util, 5000, rsToKill1);

     LOG.info("Start loading table");
     int initialCount = utility1.loadTable(htable1, famName);
     LOG.info("Done loading table");
-    killer1.join(5000);
-    killer2.join(5000);
+    killer.join(5000);
     LOG.info("Done waiting for threads");

     Result[] res;

TestReplicationKillSlaveRS.java (new file)

@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.hadoop.hbase.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Runs the TestReplicationKillRS test and selects the RS to kill in the slave cluster
+ * Do not add other tests in this class.
+ */
+@Category(LargeTests.class)
+public class TestReplicationKillSlaveRS extends TestReplicationKillRS {
+
+  @Test(timeout=300000)
+  public void killOneSlaveRS() throws Exception {
+    loadTableAndKillRS(utility2);
+  }
+}
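
Taken together, the four replication test files above are the HBASE-8961 split: the old queueFailover test, which killed a region server in both clusters in a single run, becomes the reusable base class TestReplicationKillRS with loadTableAndKillRS(HBaseTestingUtility) parameterized on the cluster, and each scenario now lives in its own single-test subclass, utility1 for the master-side kill and utility2 for the slave-side kill. The "Do not add other tests in this class" comments keep each class to one scenario, so a failure or timeout points at exactly one kill path.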

TestReplicationSmallTests.java

@@ -240,6 +240,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
     assertEquals(NB_ROWS_IN_BATCH, res1.length);

     for (int i = 0; i < NB_RETRIES; i++) {
+      scan = new Scan();
       if (i==NB_RETRIES-1) {
         fail("Waited too much time for normal batch replication");
       }
@@ -378,10 +379,10 @@
     assertEquals(NB_ROWS_IN_BIG_BATCH, res.length);

-    scan = new Scan();
     long start = System.currentTimeMillis();
     for (int i = 0; i < NB_RETRIES; i++) {
+      scan = new Scan();
       scanner = htable2.getScanner(scan);
       res = scanner.next(NB_ROWS_IN_BIG_BATCH);