HBASE-18024 HRegion#initializeRegionInternals should not re-create .hregioninfo file when the region directory no longer exists
commit 15bad3c036
parent cab492d34f
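The hunks below apply the same two guards in several places: only the default (primary) replica, replica id 0 in HBase, writes the .regioninfo file, and the region directory is probed before anything is rewritten so a deleted region is not silently re-created. What follows is a minimal standalone sketch of those guards using only the Hadoop FileSystem API; the class and method names (RegionInfoGuardSketch, shouldWriteRegionInfo, regionDirExists) are illustrative and not part of the commit.

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative stand-in for the guards the patch introduces; the real logic
// lives in HRegion#initializeRegionInternals and HRegionFileSystem.
public class RegionInfoGuardSketch {

  // HRegionInfo.DEFAULT_REPLICA_ID is 0: only the primary replica owns on-disk state.
  static final int DEFAULT_REPLICA_ID = 0;

  /** Only the primary replica should create or repair the .regioninfo file. */
  static boolean shouldWriteRegionInfo(int replicaId) {
    return replicaId == DEFAULT_REPLICA_ID;
  }

  /**
   * Probe the region directory before touching .regioninfo, instead of letting a
   * later create() silently resurrect a directory that was already deleted.
   */
  static boolean regionDirExists(FileSystem fs, Path regionDir) throws IOException {
    try {
      fs.getFileStatus(regionDir); // throws FileNotFoundException if the directory is gone
      return true;
    } catch (FileNotFoundException e) {
      return false;
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path regionDir = new Path(args.length > 0 ? args[0] : "/tmp/example-region");
    System.out.println("primary writes .regioninfo: " + shouldWriteRegionInfo(0));
    System.out.println("replica skips .regioninfo:  " + shouldWriteRegionInfo(2));
    System.out.println("region dir present:         " + regionDirExists(fs, regionDir));
  }
}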
HRegion.java
@@ -902,8 +902,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     }

     // Write HRI to a file in case we need to recover hbase:meta
-    status.setStatus("Writing region info on filesystem");
-    fs.checkRegionInfoOnFilesystem();
+    // Only the primary replica should write .regioninfo
+    if (this.getRegionInfo().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+      status.setStatus("Writing region info on filesystem");
+      fs.checkRegionInfoOnFilesystem();
+    } else {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Skipping creation of .regioninfo file for " + this.getRegionInfo());
+      }
+    }

     // Initialize all the HStores
     status.setStatus("Initializing all the Stores");
HRegionFileSystem.java
@@ -858,9 +858,19 @@ public class HRegionFileSystem {
     // only should be sufficient. I don't want to read the file every time to check if it pb
     // serialized.
     byte[] content = getRegionInfoFileContent(regionInfoForFs);

+    // Verify if the region directory exists before opening a region. We need to do this since if
+    // the region directory doesn't exist we will re-create the region directory and a new HRI
+    // when HRegion.openHRegion() is called.
+    try {
+      FileStatus status = fs.getFileStatus(getRegionDir());
+    } catch (FileNotFoundException e) {
+      LOG.warn(getRegionDir() + " doesn't exist for region: " + regionInfoForFs.getEncodedName() +
+          " on table " + regionInfo.getTable());
+    }
+
     try {
       Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);

       FileStatus status = fs.getFileStatus(regionInfoFile);
       if (status != null && status.getLen() == content.length) {
         // Then assume the content good and move on.
@@ -953,7 +963,13 @@ public class HRegionFileSystem {
     }

     // Write HRI to a file in case we need to recover hbase:meta
-    regionFs.writeRegionInfoOnFilesystem(false);
+    // Only primary replicas should write region info
+    if (regionInfo.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+      regionFs.writeRegionInfoOnFilesystem(false);
+    } else {
+      if (LOG.isDebugEnabled())
+        LOG.debug("Skipping creation of .regioninfo file for " + regionInfo);
+    }
     return regionFs;
   }

@@ -983,8 +999,15 @@ public class HRegionFileSystem {
       regionFs.cleanupSplitsDir();
       regionFs.cleanupMergesDir();

-      // if it doesn't exists, Write HRI to a file, in case we need to recover hbase:meta
-      regionFs.checkRegionInfoOnFilesystem();
+      // If it doesn't exists, Write HRI to a file, in case we need to recover hbase:meta
+      // Only create HRI if we are the default replica
+      if (regionInfo.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+        regionFs.checkRegionInfoOnFilesystem();
+      } else {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Skipping creation of .regioninfo file for " + regionInfo);
+        }
+      }
     }

     return regionFs;
TestHRegion.java
@@ -6283,6 +6283,10 @@ public class TestHRegion {

   @Test
   public void testCloseRegionWrittenToWAL() throws Exception {
+
+    Path rootDir = new Path(dir + name.getMethodName());
+    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir);
+
     final ServerName serverName = ServerName.valueOf("testCloseRegionWrittenToWAL", 100, 42);
     final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));

@@ -6301,7 +6305,8 @@ public class TestHRegion {
     when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);


-    // open a region first so that it can be closed later
+    // create and then open a region first so that it can be closed later
+    region = HRegion.createHRegion(hri, rootDir, TEST_UTIL.getConfiguration(), htd, rss.getWAL(hri));
     region = HRegion.openHRegion(hri, htd, rss.getWAL(hri),
       TEST_UTIL.getConfiguration(), rss, null);

TestRegionOpen.java
@@ -19,26 +19,39 @@
 package org.apache.hadoop.hbase.regionserver;

 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;

+import java.io.IOException;
+import java.util.List;
 import java.util.concurrent.ThreadPoolExecutor;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.executor.ExecutorType;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import static org.junit.Assert.fail;

 @Category({MediumTests.class, RegionServerTests.class})
 public class TestRegionOpen {
@@ -47,7 +60,9 @@ public class TestRegionOpen {
   private static final int NB_SERVERS = 1;

   private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
-  final TableName tableName = TableName.valueOf(TestRegionOpen.class.getSimpleName());
+
+  @Rule
+  public TestName name = new TestName();

   @BeforeClass
   public static void before() throws Exception {
@@ -68,6 +83,7 @@ public class TestRegionOpen {

   @Test(timeout = 60000)
   public void testPriorityRegionIsOpenedWithSeparateThreadPool() throws Exception {
+    final TableName tableName = TableName.valueOf(TestRegionOpen.class.getSimpleName());
     ThreadPoolExecutor exec = getRS().getExecutorService()
         .getExecutorThreadPool(ExecutorType.RS_OPEN_PRIORITY_REGION);
     assertEquals(1, exec.getCompletedTaskCount()); // namespace region
@@ -82,4 +98,42 @@ public class TestRegionOpen {

     assertEquals(2, exec.getCompletedTaskCount());
   }
+
+  @Test(timeout = 60000)
+  public void testNonExistentRegionReplica() throws Exception {
+    final TableName tableName = TableName.valueOf(name.getMethodName());
+    final byte[] FAMILYNAME = Bytes.toBytes("fam");
+    FileSystem fs = HTU.getTestFileSystem();
+    Connection connection = HTU.getConnection();
+    Admin admin = connection.getAdmin();
+    Configuration conf = HTU.getConfiguration();
+    Path rootDir = HTU.getDataTestDirOnTestFS();
+
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    htd.addFamily(new HColumnDescriptor(FAMILYNAME));
+    admin.createTable(htd);
+    HTU.waitUntilNoRegionsInTransition(60000);
+
+    // Create new HRI with non-default region replica id
+    HRegionInfo hri = new HRegionInfo(htd.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("B"), false,
+        System.currentTimeMillis(), 2);
+    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
+        FSUtils.getTableDir(rootDir, hri.getTable()), hri);
+    Path regionDir = regionFs.getRegionDir();
+    try {
+      HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
+    } catch (IOException e) {
+      LOG.info("Caught expected IOE due missing .regioninfo file, due: " + e.getMessage() + " skipping region open.");
+      // We should only have 1 region online
+      List<HRegionInfo> regions = admin.getTableRegions(tableName);
+      LOG.info("Regions: " + regions);
+      if (regions.size() != 1) {
+        fail("Table " + tableName + " should have only one region, but got more: " + regions);
+      }
+      return;
+    } finally {
+      admin.close();
+    }
+    fail("Should have thrown IOE when attempting to open a non-existing region.");
+  }
 }
TestStoreFileRefresherChore.java
@@ -169,6 +169,8 @@ public class TestStoreFileRefresherChore {
     when(regionServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());

     HTableDescriptor htd = getTableDesc(TableName.valueOf("testIsStale"), families);
+    htd.setRegionReplication(2);
+
     Region primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0);
     Region replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1);
     regions.add(primary);