diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java
index e13090202fe..228c4bb063e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java
@@ -41,7 +41,7 @@ import com.google.common.base.Preconditions;
 public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
   private final DataBlockEncoding onDisk;
   private final DataBlockEncoding inCache;
-  private final HFileBlockEncodingContext inCacheEncodeCtx;
+  private final byte[] dummyHeader;
 
   public HFileDataBlockEncoderImpl(DataBlockEncoding encoding) {
     this(encoding, encoding);
@@ -75,16 +75,7 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
         onDisk : DataBlockEncoding.NONE;
     this.inCache = inCache != null ?
         inCache : DataBlockEncoding.NONE;
-    if (inCache != DataBlockEncoding.NONE) {
-      inCacheEncodeCtx =
-          this.inCache.getEncoder().newDataBlockEncodingContext(
-              Algorithm.NONE, this.inCache, dummyHeader);
-    } else {
-      // create a default encoding context
-      inCacheEncodeCtx =
-          new HFileBlockDefaultEncodingContext(Algorithm.NONE,
-              this.inCache, dummyHeader);
-    }
+    this.dummyHeader = dummyHeader;
 
     Preconditions.checkArgument(onDisk == DataBlockEncoding.NONE ||
         onDisk == inCache, "on-disk encoding (" + onDisk + ") must be " +
@@ -166,7 +157,7 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
       }
       // Encode the unencoded block with the in-cache encoding.
       return encodeDataBlock(block, inCache, block.doesIncludeMemstoreTS(),
-          inCacheEncodeCtx);
+          createInCacheEncodingContext());
     }
 
     if (block.getBlockType() == BlockType.ENCODED_DATA) {
@@ -256,6 +247,22 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
     return encodedBlock;
   }
 
+  /**
+   * Returns a new encoding context for the in-cache encoding scheme provided in the constructor.
+   * A single context used to be kept around, but HFileBlockDefaultEncodingContext is not
+   * thread-safe; see HBASE-8732.
+   * @return a new in-cache encoding context
+   */
+  private HFileBlockEncodingContext createInCacheEncodingContext() {
+    return (inCache != DataBlockEncoding.NONE) ?
+        this.inCache.getEncoder().newDataBlockEncodingContext(
+            Algorithm.NONE, this.inCache, dummyHeader)
+        :
+        // create a default encoding context
+        new HFileBlockDefaultEncodingContext(Algorithm.NONE,
+            this.inCache, dummyHeader);
+  }
+
   @Override
   public String toString() {
     return getClass().getSimpleName() + "(onDisk=" + onDisk + ", inCache=" +
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
index b4b90cf0f43..1791bd99fc0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
@@ -88,6 +88,7 @@ public class TestReplicationBase {
     conf1.setBoolean("dfs.support.append", true);
     conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
     conf1.setInt("replication.stats.thread.period.seconds", 5);
+    conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
 
     utility1 = new HBaseTestingUtility(conf1);
     utility1.startMiniZKCluster();
@@ -105,6 +106,7 @@ public class TestReplicationBase {
     conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
     conf2.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
     conf2.setBoolean("dfs.support.append", true);
+    conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
 
     utility2 = new HBaseTestingUtility(conf2);
     utility2.setZkCluster(miniZK);
@@ -127,7 +129,7 @@ public class TestReplicationBase {
     HBaseAdmin admin1 = new HBaseAdmin(conf1);
     HBaseAdmin admin2 = new HBaseAdmin(conf2);
     admin1.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
-    admin2.createTable(table);
+    admin2.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
     htable1 = new HTable(conf1, tableName);
     htable1.setWriteBufferSize(1024);
     htable2 = new HTable(conf2, tableName);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRS.java
new file mode 100644
index 00000000000..ca0fd3172fc
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRS.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.hadoop.hbase.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Runs the TestReplicationKillRS test and selects the RS to kill in the master cluster.
+ * Do not add other tests in this class.
+ */
+@Category(LargeTests.class)
+public class TestReplicationKillMasterRS extends TestReplicationKillRS {
+
+  @Test(timeout=300000)
+  public void killOneMasterRS() throws Exception {
+    loadTableAndKillRS(utility1);
+  }
+
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationQueueFailoverCompressed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressed.java
similarity index 85%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationQueueFailoverCompressed.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressed.java
index 35c0715cb1e..ec5a3bafe2e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationQueueFailoverCompressed.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressed.java
@@ -24,10 +24,11 @@ import org.junit.BeforeClass;
 import org.junit.experimental.categories.Category;
 
 /**
- * Run the same test as TestReplication but with HLog compression enabled
+ * Run the same test as TestReplicationKillMasterRS but with HLog compression enabled.
+ * Do not add other tests in this class.
  */
 @Category(LargeTests.class)
-public class TestReplicationQueueFailoverCompressed extends TestReplicationQueueFailover {
+public class TestReplicationKillMasterRSCompressed extends TestReplicationKillMasterRS {
 
   /**
    * @throws java.lang.Exception
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationQueueFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
similarity index 87%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationQueueFailover.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
index 3d8402bab4a..9941b8ae878 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationQueueFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
@@ -33,36 +33,31 @@ import org.junit.experimental.categories.Category;
 import static org.junit.Assert.fail;
 
 @Category(LargeTests.class)
-public class TestReplicationQueueFailover extends TestReplicationBase {
+public class TestReplicationKillRS extends TestReplicationBase {
 
-  private static final Log LOG = LogFactory.getLog(TestReplicationQueueFailover.class);
+  private static final Log LOG = LogFactory.getLog(TestReplicationKillRS.class);
 
   /**
-   * Load up multiple tables over 2 region servers and kill a source during
+   * Load up 1 table over 2 region servers and kill a source during
    * the upload. The failover happens internally.
    *
   * WARNING this test sometimes fails because of HBASE-3515
    *
    * @throws Exception
    */
-  @Test(timeout=300000)
-  public void queueFailover() throws Exception {
+  public void loadTableAndKillRS(HBaseTestingUtility util) throws Exception {
     // killing the RS with .META. can result into failed puts until we solve
     // IO fencing
     int rsToKill1 =
-        utility1.getHBaseCluster().getServerWithMeta() == 0 ? 1 : 0;
-    int rsToKill2 =
-        utility2.getHBaseCluster().getServerWithMeta() == 0 ? 1 : 0;
+        util.getHBaseCluster().getServerWithMeta() == 0 ? 1 : 0;
 
     // Takes about 20 secs to run the full loading, kill around the middle
-    Thread killer1 = killARegionServer(utility1, 7500, rsToKill1);
-    Thread killer2 = killARegionServer(utility2, 10000, rsToKill2);
+    Thread killer = killARegionServer(util, 5000, rsToKill1);
 
     LOG.info("Start loading table");
     int initialCount = utility1.loadTable(htable1, famName);
     LOG.info("Done loading table");
-    killer1.join(5000);
-    killer2.join(5000);
+    killer.join(5000);
     LOG.info("Done waiting for threads");
 
     Result[] res;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRS.java
new file mode 100644
index 00000000000..bba86b0f6e4
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRS.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.hadoop.hbase.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Runs the TestReplicationKillRS test and selects the RS to kill in the slave cluster.
+ * Do not add other tests in this class.
+ */
+@Category(LargeTests.class)
+public class TestReplicationKillSlaveRS extends TestReplicationKillRS {
+
+  @Test(timeout=300000)
+  public void killOneSlaveRS() throws Exception {
+    loadTableAndKillRS(utility2);
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
index 538c2df689e..1a7ee841d11 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
@@ -240,6 +240,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
     assertEquals(NB_ROWS_IN_BATCH, res1.length);
 
     for (int i = 0; i < NB_RETRIES; i++) {
+      scan = new Scan();
       if (i==NB_RETRIES-1) {
         fail("Waited too much time for normal batch replication");
       }
@@ -378,10 +379,10 @@ public class TestReplicationSmallTests extends TestReplicationBase {
 
     assertEquals(NB_ROWS_IN_BIG_BATCH, res.length);
 
-    scan = new Scan();
     long start = System.currentTimeMillis();
     for (int i = 0; i < NB_RETRIES; i++) {
+      scan = new Scan();
       scanner = htable2.getScanner(scan);
       res = scanner.next(NB_ROWS_IN_BIG_BATCH);
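
Note on the HFileDataBlockEncoderImpl change: the patch stops caching a single HFileBlockEncodingContext built in the constructor and instead creates one per call, because HFileBlockDefaultEncodingContext keeps reusable internal buffers and is not safe to share across threads (HBASE-8732). The standalone sketch below illustrates that pattern only; EncodingContext is a hypothetical stand-in, not an HBase class.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class PerCallContextSketch {

  // Hypothetical stand-in for an encoding context that reuses an internal
  // buffer between calls, which is what makes sharing one instance unsafe.
  static final class EncodingContext {
    private final ByteArrayOutputStream buf = new ByteArrayOutputStream();

    byte[] encode(byte[] data) throws IOException {
      buf.reset();      // two threads racing here would interleave their bytes
      buf.write(data);
      return buf.toByteArray();
    }
  }

  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    for (int i = 0; i < 4; i++) {
      final byte[] payload = new byte[] { (byte) i, (byte) i, (byte) i };
      pool.submit(() -> {
        // The fix pattern: build a context per operation instead of sharing
        // a field initialized once in the constructor.
        EncodingContext ctx = new EncodingContext();
        byte[] out = ctx.encode(payload);
        System.out.println(Thread.currentThread().getName()
            + " encoded " + out.length + " bytes");
        return null;
      });
    }
    pool.shutdown();
    pool.awaitTermination(10, TimeUnit.SECONDS);
  }
}

The trade-off is one short-lived allocation per encoded block, which is small next to the encoding work itself; createInCacheEncodingContext() in the patch makes the same trade.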
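
Note on the TestReplicationSmallTests hunks: moving scan = new Scan() inside the retry loops makes each attempt open its scanner from a fresh Scan rather than one carried over from a previous attempt. A minimal sketch of that retry shape, reusing the test's own names (htable2, NB_RETRIES, NB_ROWS_IN_BIG_BATCH); SLEEP_TIME is assumed here to be the base class's wait constant:

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;

// Inside a test method that declares "throws Exception":
for (int i = 0; i < NB_RETRIES; i++) {
  Scan scan = new Scan();                       // fresh Scan per attempt
  ResultScanner scanner = htable2.getScanner(scan);
  try {
    Result[] res = scanner.next(NB_ROWS_IN_BIG_BATCH);
    if (res.length == NB_ROWS_IN_BIG_BATCH) {
      break;                                    // replication has caught up
    }
  } finally {
    scanner.close();
  }
  Thread.sleep(SLEEP_TIME);                     // wait before re-checking
}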