HBASE-18771 Incorrect StoreFileRefresh leading to split and compaction failures

Signed-off-by: Andrew Purtell <apurtell@apache.org>
Abhishek Singh Chouhan 2017-09-12 16:00:47 +05:30 committed by Andrew Purtell
parent 0501855bc7
commit a797fe0daa
3 changed files with 218 additions and 4 deletions
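In outline, the failure addressed here, condensed from the new test added below (admin, region, and table are illustrative stand-ins rather than names from the patch):

    // Sketch of the regression sequence, not verbatim from the patch.
    admin.flush(table);               // repeated flushes -> several store files
    admin.compact(table);             // compaction finishes; old files await archival
    region.refreshStoreFiles(true);   // bug: the refresh re-opened the compacted files
    // Once those files are archived, the store still holds readers on deleted
    // paths, and a later split or compaction fails with FileNotFoundException.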


@@ -618,16 +618,24 @@ public class HStore implements Store {
   private void refreshStoreFilesInternal(Collection<StoreFileInfo> newFiles) throws IOException {
     StoreFileManager sfm = storeEngine.getStoreFileManager();
     Collection<StoreFile> currentFiles = sfm.getStorefiles();
-    if (currentFiles == null) currentFiles = new ArrayList<>(0);
-    if (newFiles == null) newFiles = new ArrayList<>(0);
+    Collection<StoreFile> compactedFiles = sfm.getCompactedfiles();
+    if (currentFiles == null) currentFiles = Collections.emptySet();
+    if (newFiles == null) newFiles = Collections.emptySet();
+    if (compactedFiles == null) compactedFiles = Collections.emptySet();

     HashMap<StoreFileInfo, StoreFile> currentFilesSet = new HashMap<>(currentFiles.size());
     for (StoreFile sf : currentFiles) {
       currentFilesSet.put(sf.getFileInfo(), sf);
     }
-    HashSet<StoreFileInfo> newFilesSet = new HashSet<>(newFiles);
+    HashMap<StoreFileInfo, StoreFile> compactedFilesSet =
+        new HashMap<StoreFileInfo, StoreFile>(compactedFiles.size());
+    for (StoreFile sf : compactedFiles) {
+      compactedFilesSet.put(sf.getFileInfo(), sf);
+    }

+    Set<StoreFileInfo> newFilesSet = new HashSet<StoreFileInfo>(newFiles);
+    // Exclude the files that have already been compacted
+    newFilesSet = Sets.difference(newFilesSet, compactedFilesSet.keySet());
     Set<StoreFileInfo> toBeAddedFiles = Sets.difference(newFilesSet, currentFilesSet.keySet());
     Set<StoreFileInfo> toBeRemovedFiles = Sets.difference(currentFilesSet.keySet(), newFilesSet);
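The exclusion above relies on Guava's Sets.difference, which returns a read-only view of the elements of the first set that are absent from the second. A minimal standalone sketch of the same set arithmetic, with plain strings standing in for StoreFileInfo (all names below are illustrative, not from the patch):

    import java.util.Set;
    import com.google.common.collect.Sets;

    public class RefreshSetArithmetic {
      public static void main(String[] args) {
        // Files the store currently has open: just the compaction result.
        Set<String> current = Sets.newHashSet("f3");
        // Files a refresh finds on disk: f1 and f2 were compacted into f3 but
        // have not been archived yet, so all three are still present.
        Set<String> discovered = Sets.newHashSet("f1", "f2", "f3");
        // Files the store has recorded as compacted away.
        Set<String> compacted = Sets.newHashSet("f1", "f2");

        // Without the exclusion, the refresh would re-open the compacted
        // files, which the discharger later deletes out from under the store.
        System.out.println(Sets.difference(discovered, current));   // f1, f2

        // With the fix, compacted files are filtered out first and the
        // refresh correctly concludes there is nothing to add or remove.
        Set<String> fresh = Sets.difference(discovered, compacted); // f3 only
        System.out.println(Sets.difference(fresh, current));        // empty
        System.out.println(Sets.difference(current, fresh));        // empty
      }
    }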


@@ -0,0 +1,202 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertEquals;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* This class tests the scenario where a store refresh happens due to a file not being found
* during a scan, after a compaction but before the compacted files are archived. In this state
* we verify that a split and a subsequent compaction still succeed.
*/
@Category(MediumTests.class)
public class TestCompactionFileNotFound {
private static final Log LOG = LogFactory.getLog(TestCompactionFileNotFound.class);
private static final HBaseTestingUtility util = new HBaseTestingUtility();
private static final TableName TEST_TABLE = TableName.valueOf("test");
private static final byte[] TEST_FAMILY = Bytes.toBytes("f1");
private static final byte[] ROW_A = Bytes.toBytes("aaa");
private static final byte[] ROW_B = Bytes.toBytes("bbb");
private static final byte[] ROW_C = Bytes.toBytes("ccc");
private static final byte[] qualifierCol1 = Bytes.toBytes("col1");
private static final byte[] bytes1 = Bytes.toBytes(1);
private static final byte[] bytes2 = Bytes.toBytes(2);
private static final byte[] bytes3 = Bytes.toBytes(3);
private Table table;
@BeforeClass
public static void setupBeforeClass() throws Exception {
Configuration conf = util.getConfiguration();
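// Effectively disable the compacted-files discharger chore so that compacted
// store files are not archived behind the test's back; the tests below decide
// when archiving happens.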
conf.setInt("hbase.hfile.compaction.discharger.interval",
Integer.MAX_VALUE);
util.startMiniCluster(3);
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
util.shutdownMiniCluster();
}
@After
public void after() throws Exception {
try {
if (table != null) {
table.close();
}
} finally {
util.deleteTable(TEST_TABLE);
}
}
@Test
public void testSplitAfterRefresh() throws Exception {
Admin admin = util.getAdmin();
table = util.createTable(TEST_TABLE, TEST_FAMILY);
try {
// Create Multiple store files
Put puta = new Put(ROW_A);
puta.addColumn(TEST_FAMILY, qualifierCol1, bytes1);
table.put(puta);
admin.flush(TEST_TABLE);
Put putb = new Put(ROW_B);
putb.addColumn(TEST_FAMILY, qualifierCol1, bytes2);
table.put(putb);
admin.flush(TEST_TABLE);
Put putc = new Put(ROW_C);
putc.addColumn(TEST_FAMILY, qualifierCol1, bytes3);
table.put(putc);
admin.flush(TEST_TABLE);
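// Compact the three store files into one; with the discharger disabled, the
// pre-compaction files stay on disk for now.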
admin.compact(TEST_TABLE);
while (admin.getCompactionState(TEST_TABLE) != CompactionState.NONE) {
Thread.sleep(1000);
}
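// Add one more edit after the compaction; at this point it exists only in
// the memstore.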
table.put(putb);
HRegion hr1 = (HRegion) util.getRSForFirstRegionInTable(TEST_TABLE)
.getRegionByEncodedName(admin.getTableRegions(TEST_TABLE).get(0).getEncodedName());
// Refresh store files post compaction, this should not open already compacted files
hr1.refreshStoreFiles(true);
int numRegionsBeforeSplit = admin.getTableRegions(TEST_TABLE).size();
// Check if we can successfully split after compaction
admin.splitRegion(admin.getTableRegions(TEST_TABLE).get(0).getEncodedNameAsBytes(), ROW_C);
util.waitFor(20000, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
int numRegionsAfterSplit = 0;
List<RegionServerThread> rst = util.getMiniHBaseCluster().getLiveRegionServerThreads();
for (RegionServerThread t : rst) {
numRegionsAfterSplit += t.getRegionServer().getOnlineRegions(TEST_TABLE).size();
}
// Make sure that the split went through and all the regions are assigned
return (numRegionsAfterSplit == numRegionsBeforeSplit + 1
&& admin.isTableAvailable(TEST_TABLE));
}
});
// The split should not have caused any of the region servers to abort
assertEquals(3, util.getMiniHBaseCluster().getLiveRegionServerThreads().size());
} finally {
if (admin != null) {
admin.close();
}
}
}
@Test
public void testCompactionAfterRefresh() throws Exception {
Admin admin = util.getAdmin();
table = util.createTable(TEST_TABLE, TEST_FAMILY);
try {
// Create Multiple store files
Put puta = new Put(ROW_A);
puta.addColumn(TEST_FAMILY, qualifierCol1, bytes1);
table.put(puta);
admin.flush(TEST_TABLE);
Put putb = new Put(ROW_B);
putb.addColumn(TEST_FAMILY, qualifierCol1, bytes2);
table.put(putb);
admin.flush(TEST_TABLE);
Put putc = new Put(ROW_C);
putc.addColumn(TEST_FAMILY, qualifierCol1, bytes3);
table.put(putc);
admin.flush(TEST_TABLE);
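// As above, compact the three store files; the originals remain on disk
// until explicitly archived below.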
admin.compact(TEST_TABLE);
while (admin.getCompactionState(TEST_TABLE) != CompactionState.NONE) {
Thread.sleep(1000);
}
table.put(putb);
HRegion hr1 = (HRegion) util.getRSForFirstRegionInTable(TEST_TABLE)
.getRegionByEncodedName(admin.getTableRegions(TEST_TABLE).get(0).getEncodedName());
// Refresh store files post compaction, this should not open already compacted files
hr1.refreshStoreFiles(true);
// Archive the store files and try another compaction to see if all is good
for (Store store : hr1.getStores()) {
store.closeAndArchiveCompactedFiles();
}
try {
hr1.compact(false);
} catch (IOException e) {
LOG.error("Got an exception during compaction", e);
if (e instanceof FileNotFoundException) {
Assert.fail("Got a FNFE during compaction");
} else {
Assert.fail("Compaction failed with an unexpected exception");
}
}
} finally {
if (admin != null) {
admin.close();
}
}
}
}
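Side note on the two tests above: the hand-rolled sleep loop that waits for the compaction to finish could also be written with the Waiter helper the class already imports. A sketch, assuming admin is effectively final in the enclosing test method:

    util.waitFor(60000, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        // Done once no compaction is pending or running on the table.
        return admin.getCompactionState(TEST_TABLE) == CompactionState.NONE;
      }
    });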


@@ -907,6 +907,10 @@ public class TestStore {
     assertEquals(0, this.store.getStorefilesCount());

+    // Test refreshing store files when no store files are there
+    store.refreshStoreFiles();
+    assertEquals(0, this.store.getStorefilesCount());
+
     // add some data, flush
     this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null), null);
     flush(1);