HBASE-15321 - Ability to open an HRegion from hdfs snapshot.

This commit is contained in:
Rahul Gidwani 2018-01-22 12:22:57 -08:00
parent 5cb3ab85ae
commit 73ed127039
3 changed files with 142 additions and 1 deletions

View File

@ -6998,6 +6998,31 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
return this; return this;
} }
/**
 * Open a Region on a read-only file-system (like hdfs snapshots)
 * @param conf The Configuration object to use.
 * @param fs Filesystem to use
 * @param tableDir the table directory under which the region directory lives
 * @param info Info for region to be opened.
 * @param htd the table descriptor
 * @return new HRegion
 * @throws IOException e
 */
public static HRegion openReadOnlyFileSystemHRegion(final Configuration conf, final FileSystem fs,
final Path tableDir, HRegionInfo info, final HTableDescriptor htd) throws IOException {
if (info == null) {
throw new NullPointerException("Passed region info is null");
}
if (LOG.isDebugEnabled()) {
LOG.debug("Opening region (readOnly filesystem): " + info);
}
if (info.getReplicaId() <= 0) {
// NOTE(review): region is re-labelled as replica 1 — presumably so it is treated as a
// read replica that needs no WAL (the wal argument below is null) and never tries to
// write recovery/flush state to the read-only filesystem; confirm against HBASE-15321.
info = new HRegionInfo((HRegionInfo) info, 1);
}
// wal and RegionServerServices are intentionally null: this region is standalone and read-only.
HRegion r = HRegion.newHRegion(tableDir, null, fs, conf, info, htd, null);
// Mark read-only BEFORE opening so the open path never attempts mutations on the snapshot fs.
r.writestate.setReadOnly(true);
return r.openHRegion(null);
}
public static void warmupHRegion(final HRegionInfo info, public static void warmupHRegion(final HRegionInfo info,
final HTableDescriptor htd, final WAL wal, final Configuration conf, final HTableDescriptor htd, final WAL wal, final Configuration conf,
final RegionServerServices rsServices, final RegionServerServices rsServices,

View File

@ -28,6 +28,7 @@ import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.UUID; import java.util.UUID;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
@ -75,7 +76,7 @@ public class HRegionFileSystem {
public static final String REGION_SPLITS_DIR = ".splits"; public static final String REGION_SPLITS_DIR = ".splits";
/** Temporary subdirectory of the region directory used for compaction output. */ /** Temporary subdirectory of the region directory used for compaction output. */
private static final String REGION_TEMP_DIR = ".tmp"; @VisibleForTesting static final String REGION_TEMP_DIR = ".tmp";
private final HRegionInfo regionInfo; private final HRegionInfo regionInfo;
//regionInfo for interacting with FS (getting encodedName, etc) //regionInfo for interacting with FS (getting encodedName, etc)

View File

@ -0,0 +1,115 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@Category({RegionServerTests.class, MediumTests.class})
public class TestHdfsSnapshotHRegion {

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final String SNAPSHOT_NAME = "foo_snapshot";
  private Table table;
  public static final TableName TABLE_NAME = TableName.valueOf("foo");
  public static final byte[] FAMILY = Bytes.toBytes("f1");
  private DFSClient client;
  // HBase root dir expressed as an fs-relative path, usable as a DFSClient snapshot root.
  private String baseDir;

  @Before
  public void setUp() throws Exception {
    Configuration c = TEST_UTIL.getConfiguration();
    c.setBoolean("dfs.support.append", true);
    TEST_UTIL.startMiniCluster(1);
    table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, FAMILY);
    TEST_UTIL.loadTable(table, FAMILY);
    // Set up hdfs snapshots: strip the fs URI prefix off the root dir so DFSClient
    // gets the path-only form it expects, then mark that dir snapshottable.
    client = new DFSClient(TEST_UTIL.getDFSCluster().getURI(), TEST_UTIL.getConfiguration());
    String fullUrIPath = TEST_UTIL.getDefaultRootDirPath().toString();
    String uriString = TEST_UTIL.getTestFileSystem().getUri().toString();
    baseDir = StringUtils.removeStart(fullUrIPath, uriString);
    client.allowSnapshot(baseDir);
  }

  @After
  public void tearDown() throws Exception {
    client.deleteSnapshot(baseDir, SNAPSHOT_NAME);
    // Close the table to release its connection before tearing the cluster down.
    table.close();
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testOpeningReadOnlyRegionBasic() throws Exception {
    String snapshotDir = client.createSnapshot(baseDir, SNAPSHOT_NAME);
    HRegionInfo firstRegion = TEST_UTIL.getHBaseAdmin().getTableRegions(table.getName()).get(0);
    Path tableDir = FSUtils.getTableDir(new Path(snapshotDir), TABLE_NAME);
    HRegion snapshottedRegion = openSnapshotRegion(firstRegion, tableDir);
    Assert.assertNotNull(snapshottedRegion);
    snapshottedRegion.close();
  }

  @Test
  public void testSnapshottingWithTmpSplitsAndMergeDirectoriesPresent() throws Exception {
    // lets get a region and create those directories and make sure we ignore them
    HRegionInfo firstRegion = TEST_UTIL.getHBaseAdmin().getTableRegions(table.getName()).get(0);
    String encodedName = firstRegion.getEncodedName();
    Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLE_NAME);
    Path regionDirectoryPath = new Path(tableDir, encodedName);
    // Close each stream immediately: create() returns an FSDataOutputStream that
    // would otherwise be leaked (we only need the paths to exist).
    TEST_UTIL.getTestFileSystem().create(
        new Path(regionDirectoryPath, HRegionFileSystem.REGION_TEMP_DIR)).close();
    TEST_UTIL.getTestFileSystem().create(
        new Path(regionDirectoryPath, HRegionFileSystem.REGION_SPLITS_DIR)).close();
    TEST_UTIL.getTestFileSystem().create(
        new Path(regionDirectoryPath, HRegionFileSystem.REGION_MERGES_DIR)).close();
    // now snapshot (use the shared constant so tearDown deletes the right snapshot)
    String snapshotDir = client.createSnapshot(baseDir, SNAPSHOT_NAME);
    // everything should still open just fine
    HRegion snapshottedRegion = openSnapshotRegion(firstRegion,
        FSUtils.getTableDir(new Path(snapshotDir), TABLE_NAME));
    Assert.assertNotNull(snapshottedRegion); // no errors and the region should open
    snapshottedRegion.close();
  }

  /** Opens a read-only HRegion over the given (snapshot) table dir for {@code firstRegion}. */
  private HRegion openSnapshotRegion(HRegionInfo firstRegion, Path tableDir) throws IOException {
    return HRegion.openReadOnlyFileSystemHRegion(
        TEST_UTIL.getConfiguration(),
        TEST_UTIL.getTestFileSystem(),
        tableDir,
        firstRegion,
        table.getTableDescriptor()
    );
  }
}