HBASE-7389 HBASE-7365 Snapshot clone/restore followup (Matteo Bertozzi)
git-svn-id: https://svn.apache.org/repos/asf/hbase/branches/hbase-7290@1445846 13f79535-47bb-0310-9956-ffa450edef68
parent 3aa26c1134
commit e3ce13909d
HFileLink.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.regionserver.HRegion;

@@ -83,6 +84,7 @@ public class HFileLink extends FileLink {
   private final Path archivePath;
   private final Path originPath;
+  private final Path tempPath;

   /**
    * @param conf {@link Configuration} from which to extract specific archive locations

@@ -100,19 +102,10 @@ public class HFileLink extends FileLink {
    */
   public HFileLink(final Path rootDir, final Path archiveDir, final Path path) {
     Path hfilePath = getRelativeTablePath(path);
+    this.tempPath = new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY), hfilePath);
     this.originPath = new Path(rootDir, hfilePath);
     this.archivePath = new Path(archiveDir, hfilePath);
-    setLocations(originPath, archivePath);
-  }
-
-  /**
-   * @param originPath Path to the hfile in the table directory
-   * @param archivePath Path to the hfile in the archive directory
-   */
-  public HFileLink(final Path originPath, final Path archivePath) {
-    this.originPath = originPath;
-    this.archivePath = archivePath;
-    setLocations(originPath, archivePath);
+    setLocations(originPath, archivePath, this.tempPath);
   }

   /**

@@ -185,7 +178,12 @@ public class HFileLink extends FileLink {
       return originPath;
     }

-    return new Path(archiveDir, hfilePath);
+    Path archivePath = new Path(archiveDir, hfilePath);
+    if (fs.exists(archivePath)) {
+      return archivePath;
+    }
+
+    return new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY), hfilePath);
   }

   /**
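The net effect of adding tempPath is that a link now probes three candidate locations instead of two: the live table directory, the archive, and the cluster temp directory where a table still being created (for example, a clone in progress) lives. A minimal standalone sketch of that lookup policy, using a hypothetical LinkResolver class rather than the real FileLink internals:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch: resolve an HFileLink-style reference by probing candidate
    // locations in order, mirroring getReferencedPath() above.
    final class LinkResolver {
      private LinkResolver() {}

      static Path resolve(FileSystem fs, Path origin, Path archive, Path temp)
          throws IOException {
        if (fs.exists(origin)) return origin;    // hfile still in the table dir
        if (fs.exists(archive)) return archive;  // hfile moved to the archive
        return temp;                             // last resort: table under .tmp
      }
    }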
CreateTableHandler.java

@@ -61,9 +61,9 @@ import org.apache.zookeeper.KeeperException;
 @InterfaceAudience.Private
 public class CreateTableHandler extends EventHandler {
   private static final Log LOG = LogFactory.getLog(CreateTableHandler.class);
-  private MasterFileSystem fileSystemManager;
-  private final HTableDescriptor hTableDescriptor;
-  private Configuration conf;
+  protected final MasterFileSystem fileSystemManager;
+  protected final HTableDescriptor hTableDescriptor;
+  protected final Configuration conf;
   private final AssignmentManager assignmentManager;
   private final CatalogTracker catalogTracker;
   private final HRegionInfo [] newRegions;

@@ -205,11 +205,11 @@ public class CreateTableHandler extends EventHandler {

   /**
    * Create the on-disk structure for the table, and returns the regions info.
-   * @param rootdir directory where the table is being created
+   * @param tableRootDir directory where the table is being created
    * @param tableName name of the table under construction
    * @return the list of regions created
    */
-  protected List<HRegionInfo> handleCreateHdfsRegions(final Path rootdir, final String tableName)
+  protected List<HRegionInfo> handleCreateHdfsRegions(final Path tableRootDir, final String tableName)
     throws IOException {
     int regionNumber = newRegions.length;
     ThreadPoolExecutor regionOpenAndInitThreadPool = getRegionOpenAndInitThreadPool(

@@ -224,7 +224,7 @@ public class CreateTableHandler extends EventHandler {

     // 1. Create HRegion
     HRegion region = HRegion.createHRegion(newRegion,
-        rootdir, conf, hTableDescriptor, null,
+        tableRootDir, conf, hTableDescriptor, null,
         false, true);
     // 2. Close the new region to flush to disk. Close log file too.
     region.close();
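Relaxing the fields to protected final and threading tableRootDir through handleCreateHdfsRegions turns region creation into the overridable step of a template method: the base handler keeps ownership of META updates and assignment. A rough sketch of that shape, with hypothetical names and none of the real handler's dependencies:

    import java.io.IOException;
    import java.util.List;

    // Sketch of the template-method split: the base class owns the
    // workflow, subclasses only decide how on-disk regions come to exist.
    abstract class TableCreationSketch<R> {

      final void process(String tableRootDir, String tableName) throws IOException {
        List<R> regions = handleCreateHdfsRegions(tableRootDir, tableName);
        addRegionsToMeta(regions);  // base-class responsibility
        assignRegions(regions);     // base-class responsibility
      }

      // Extension point: a plain create writes fresh regions, while a
      // snapshot clone links existing snapshot files instead.
      abstract List<R> handleCreateHdfsRegions(String tableRootDir, String tableName)
          throws IOException;

      void addRegionsToMeta(List<R> regions) { /* update the catalog */ }
      void assignRegions(List<R> regions) { /* hand off to region servers */ }
    }

CloneSnapshotHandler below is the subclass that uses the hook.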
CloneSnapshotHandler.java

@@ -44,6 +44,8 @@ import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.util.Bytes;

+import com.google.common.base.Preconditions;
+
 /**
  * Handler to Clone a snapshot.
  *

@@ -76,23 +78,36 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot
     this.monitor = new ForeignExceptionDispatcher();
   }

+  /**
+   * Create the on-disk regions, using the tableRootDir provided by the CreateTableHandler.
+   * The cloned table will be created in a temp directory, and then the CreateTableHandler
+   * will be responsible to add the regions returned by this method to META and do the assignment.
+   */
   @Override
-  protected List<HRegionInfo> handleCreateRegions(String tableName) throws IOException {
+  protected List<HRegionInfo> handleCreateHdfsRegions(final Path tableRootDir, final String tableName)
+      throws IOException {
     FileSystem fs = fileSystemManager.getFileSystem();
     Path rootDir = fileSystemManager.getRootDir();
-    Path tableDir = HTableDescriptor.getTableDir(rootDir, Bytes.toBytes(tableName));
+    Path tableDir = new Path(tableRootDir, tableName);

     try {
-      // Execute the Clone
+      // 1. Execute the on-disk Clone
       Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
       RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(conf, fs,
-        catalogTracker, snapshot, snapshotDir, hTableDescriptor, tableDir, monitor);
-      restoreHelper.restore();
+        snapshot, snapshotDir, hTableDescriptor, tableDir, monitor);
+      RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
+
+      // Clone operation should not have stuff to restore or remove
+      Preconditions.checkArgument(metaChanges.getRegionsToRestore() == null,
+        "A clone should not have regions to restore");
+      Preconditions.checkArgument(metaChanges.getRegionsToRemove() == null,
+        "A clone should not have regions to remove");

       // At this point the clone is complete. Next step is enabling the table.
       LOG.info("Clone snapshot=" + snapshot.getName() + " on table=" + tableName + " completed!");

-      return MetaReader.getTableRegions(catalogTracker, Bytes.toBytes(tableName));
+      // 2. let the CreateTableHandler add the regions to meta
+      return metaChanges.getRegionsToAdd();
     } catch (Exception e) {
       String msg = "clone snapshot=" + snapshot + " failed";
       LOG.error(msg, e);
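The two Preconditions calls encode the clone invariant: restoring a snapshot into a brand-new, empty table directory can only produce regions to add, so any restore or remove work reported by the helper indicates a bug. A small restatement of that contract (CloneInvariantSketch and its method are hypothetical):

    import java.util.List;

    import com.google.common.base.Preconditions;

    // Sketch: with a fresh target table there is nothing on disk to
    // restore or remove, only regions to add.
    final class CloneInvariantSketch {
      private CloneInvariantSketch() {}

      static <R> List<R> onlyAdditions(List<R> toAdd, List<R> toRestore, List<R> toRemove) {
        Preconditions.checkArgument(toRestore == null,
          "A clone should not have regions to restore");
        Preconditions.checkArgument(toRemove == null,
          "A clone should not have regions to remove");
        return toAdd;  // handed back to CreateTableHandler for META + assignment
      }
    }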
RestoreSnapshotHandler.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;

@@ -78,6 +79,13 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
     this.hTableDescriptor = htd;
   }

+  /**
+   * The restore table is executed in place.
+   *  - The on-disk data will be restored
+   *    [if something fails here: you need to delete the table and re-run the restore]
+   *  - META will be updated
+   *    [if something fails here: you need to run hbck to fix META entries]
+   */
   @Override
   protected void handleTableOperation(List<HRegionInfo> hris) throws IOException {
     MasterFileSystem fileSystemManager = masterServices.getMasterFileSystem();

@@ -88,25 +96,29 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
     Path tableDir = HTableDescriptor.getTableDir(rootDir, tableName);

     try {
-      // Update descriptor
+      // 1. Update descriptor
       this.masterServices.getTableDescriptors().add(hTableDescriptor);

-      // Execute the Restore
+      // 2. Execute the on-disk Restore
       LOG.debug("Starting restore snapshot=" + snapshot);
       Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
       RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
-        masterServices.getConfiguration(), fs, catalogTracker,
+        masterServices.getConfiguration(), fs,
         snapshot, snapshotDir, hTableDescriptor, tableDir, monitor);
-      restoreHelper.restore();
+      RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
+
+      // 3. Applies changes to .META.
+      hris.clear();
+      hris.addAll(metaChanges.getRegionsToAdd());
+      hris.addAll(metaChanges.getRegionsToRestore());
+      List<HRegionInfo> hrisToRemove = metaChanges.getRegionsToRemove();
+      MetaEditor.mutateRegions(catalogTracker, hrisToRemove, hris);

       // At this point the restore is complete. Next step is enabling the table.
       LOG.info("Restore snapshot=" + snapshot.getName() + " on table=" +
         Bytes.toString(tableName) + " completed!");
-
-      hris.clear();
-      hris.addAll(MetaReader.getTableRegions(catalogTracker, tableName));
     } catch (IOException e) {
-      String msg = "restore snapshot=" + snapshot + " failed";
+      String msg = "restore snapshot=" + snapshot + " failed. re-run the restore command.";
       LOG.error(msg, e);
       monitor.receive(new ForeignException(masterServices.getServerName().toString(), e));
       throw new RestoreSnapshotException(msg, e);
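The handler now applies exactly the changes the helper reports instead of re-reading every region from META afterwards: added and restored regions are registered, removed regions are deleted, all through one MetaEditor.mutateRegions call. A hedged sketch of that bookkeeping (MetaMutationSketch is hypothetical):

    import java.util.ArrayList;
    import java.util.List;

    // Sketch: merge a RestoreMetaChanges-style result into the two lists
    // a single META mutation needs. Accessors may return null, which
    // means "nothing in this category".
    final class MetaMutationSketch<R> {
      final List<R> toRegister = new ArrayList<R>();  // add + restore
      final List<R> toDelete = new ArrayList<R>();    // remove

      MetaMutationSketch(List<R> added, List<R> restored, List<R> removed) {
        if (added != null) toRegister.addAll(added);
        if (restored != null) toRegister.addAll(restored);
        if (removed != null) toDelete.addAll(removed);
      }
    }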
RestoreSnapshotHelper.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.snapshot;
 import java.io.InputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;

@@ -40,7 +41,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.io.HFileLink;

@@ -110,19 +110,16 @@ public class RestoreSnapshotHelper {
   private final HTableDescriptor tableDesc;
   private final Path tableDir;

-  private final CatalogTracker catalogTracker;
   private final Configuration conf;
   private final FileSystem fs;

   public RestoreSnapshotHelper(final Configuration conf, final FileSystem fs,
-      final CatalogTracker catalogTracker,
       final SnapshotDescription snapshotDescription, final Path snapshotDir,
       final HTableDescriptor tableDescriptor, final Path tableDir,
       final ForeignExceptionDispatcher monitor)
   {
     this.fs = fs;
     this.conf = conf;
-    this.catalogTracker = catalogTracker;
     this.snapshotDesc = snapshotDescription;
     this.snapshotDir = snapshotDir;
     this.tableDesc = tableDescriptor;
@@ -131,45 +128,45 @@ public class RestoreSnapshotHelper {
   }

   /**
-   * Restore table to a specified snapshot state.
+   * Restore the on-disk table to a specified snapshot state.
+   * @return the set of regions touched by the restore operation
    */
-  public void restore() throws IOException {
+  public RestoreMetaChanges restoreHdfsRegions() throws IOException {
     long startTime = EnvironmentEdgeManager.currentTimeMillis();

     LOG.debug("starting restore");
     Set<String> snapshotRegionNames = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
     if (snapshotRegionNames == null) {
       LOG.warn("Nothing to restore. Snapshot " + snapshotDesc + " looks empty");
-      return;
+      return null;
     }

+    RestoreMetaChanges metaChanges = new RestoreMetaChanges();
+
     // Identify which region are still available and which not.
     // NOTE: we rely upon the region name as: "table name, start key, end key"
     List<HRegionInfo> tableRegions = getTableRegions();
     if (tableRegions != null) {
       monitor.rethrowException();
-      List<HRegionInfo> regionsToRestore = new LinkedList<HRegionInfo>();
-      List<HRegionInfo> regionsToRemove = new LinkedList<HRegionInfo>();
-
       for (HRegionInfo regionInfo: tableRegions) {
         String regionName = regionInfo.getEncodedName();
         if (snapshotRegionNames.contains(regionName)) {
           LOG.info("region to restore: " + regionName);
           snapshotRegionNames.remove(regionInfo);
-          regionsToRestore.add(regionInfo);
+          metaChanges.addRegionToRestore(regionInfo);
         } else {
           LOG.info("region to remove: " + regionName);
-          regionsToRemove.add(regionInfo);
+          metaChanges.addRegionToRemove(regionInfo);
         }
       }

       // Restore regions using the snapshot data
       monitor.rethrowException();
-      restoreRegions(regionsToRestore);
+      restoreHdfsRegions(metaChanges.getRegionsToRestore());

       // Remove regions from the current table
       monitor.rethrowException();
-      ModifyRegionUtils.deleteRegions(conf, fs, catalogTracker, regionsToRemove);
+      removeHdfsRegions(metaChanges.getRegionsToRemove());
     }

     // Regions to Add: present in the snapshot but not in the current table
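The classification loop above is a set difference over encoded region names: regions present in both the table and the snapshot are restored in place, regions only in the table are removed, and whatever names remain in snapshotRegionNames become the regions to clone. A standalone restatement of the same bookkeeping (RegionDiffSketch is hypothetical and keys purely on names):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Set;

    // Sketch: partition the live table's regions against the snapshot's
    // region-name set; names surviving in snapshotNames afterwards form
    // the "to add" set handled by the cloning step.
    final class RegionDiffSketch {
      final List<String> toRestore = new ArrayList<String>();
      final List<String> toRemove = new ArrayList<String>();

      RegionDiffSketch(List<String> tableRegionNames, Set<String> snapshotNames) {
        for (String name : tableRegionNames) {
          if (snapshotNames.remove(name)) {
            toRestore.add(name);  // in both: restore content in place
          } else {
            toRemove.add(name);   // only in the table: archive it
          }
        }
        // snapshotNames now holds only the regions to clone
      }
    }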
@@ -185,18 +182,92 @@ public class RestoreSnapshotHelper {

       // Create new regions cloning from the snapshot
       monitor.rethrowException();
-      cloneRegions(regionsToAdd);
+      HRegionInfo[] clonedRegions = cloneHdfsRegions(regionsToAdd);
+      metaChanges.setNewRegions(clonedRegions);
     }

     // Restore WALs
     monitor.rethrowException();
     restoreWALs();
+
+    return metaChanges;
   }

+  /**
+   * Describe the set of operations needed to update META after restore.
+   */
+  public class RestoreMetaChanges {
+    private List<HRegionInfo> regionsToRestore = null;
+    private List<HRegionInfo> regionsToRemove = null;
+    private List<HRegionInfo> regionsToAdd = null;
+
+    /**
+     * Returns the list of new regions added during the on-disk restore.
+     * The caller is responsible to add the regions to META.
+     * e.g MetaEditor.addRegionsToMeta(...)
+     * @return the list of regions to add to META
+     */
+    public List<HRegionInfo> getRegionsToAdd() {
+      return this.regionsToAdd;
+    }
+
+    /**
+     * Returns the list of 'restored regions' during the on-disk restore.
+     * The caller is responsible to add the regions to META if not present.
+     * @return the list of regions restored
+     */
+    public List<HRegionInfo> getRegionsToRestore() {
+      return this.regionsToRestore;
+    }
+
+    /**
+     * Returns the list of regions removed during the on-disk restore.
+     * The caller is responsible to remove the regions from META.
+     * e.g. MetaEditor.deleteRegions(...)
+     * @return the list of regions to remove from META
+     */
+    public List<HRegionInfo> getRegionsToRemove() {
+      return this.regionsToRemove;
+    }
+
+    void setNewRegions(final HRegionInfo[] hris) {
+      if (hris != null) {
+        regionsToAdd = Arrays.asList(hris);
+      } else {
+        regionsToAdd = null;
+      }
+    }
+
+    void addRegionToRemove(final HRegionInfo hri) {
+      if (regionsToRemove == null) {
+        regionsToRemove = new LinkedList<HRegionInfo>();
+      }
+      regionsToRemove.add(hri);
+    }
+
+    void addRegionToRestore(final HRegionInfo hri) {
+      if (regionsToRestore == null) {
+        regionsToRestore = new LinkedList<HRegionInfo>();
+      }
+      regionsToRestore.add(hri);
+    }
+  }
+
+  /**
+   * Remove specified regions from the file-system, using the archiver.
+   */
+  private void removeHdfsRegions(final List<HRegionInfo> regions) throws IOException {
+    if (regions != null && regions.size() > 0) {
+      for (HRegionInfo hri: regions) {
+        HFileArchiver.archiveRegion(conf, fs, hri);
+      }
+    }
+  }
+
   /**
    * Restore specified regions by restoring content to the snapshot state.
    */
-  private void restoreRegions(final List<HRegionInfo> regions) throws IOException {
+  private void restoreHdfsRegions(final List<HRegionInfo> regions) throws IOException {
     if (regions == null || regions.size() == 0) return;
     for (HRegionInfo hri: regions) restoreRegion(hri);
   }
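RestoreMetaChanges keeps each list null until a first region is recorded, so null doubles as "nothing happened in this category"; that convention is what lets CloneSnapshotHandler assert getRegionsToRestore() == null above. A minimal mock of the pattern (MetaChangesSketch is hypothetical):

    import java.util.LinkedList;
    import java.util.List;

    // Sketch of the lazy-null convention: the list is created on first
    // use, so a null accessor result means the category stayed empty.
    final class MetaChangesSketch {
      private List<String> regionsToRestore = null;

      void addRegionToRestore(String region) {
        if (regionsToRestore == null) {
          regionsToRestore = new LinkedList<String>();
        }
        regionsToRestore.add(region);
      }

      // null is meaningful here: "no regions were restored"
      List<String> getRegionsToRestore() {
        return regionsToRestore;
      }
    }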
@@ -289,8 +360,8 @@ public class RestoreSnapshotHelper {
    * Clone specified regions. For each region create a new region
    * and create a HFileLink for each hfile.
    */
-  private void cloneRegions(final List<HRegionInfo> regions) throws IOException {
-    if (regions == null || regions.size() == 0) return;
+  private HRegionInfo[] cloneHdfsRegions(final List<HRegionInfo> regions) throws IOException {
+    if (regions == null || regions.size() == 0) return null;

     final Map<String, HRegionInfo> snapshotRegions =
         new HashMap<String, HRegionInfo>(regions.size());

@@ -313,16 +384,14 @@ public class RestoreSnapshotHelper {
     }

     // create the regions on disk
-    List<HRegionInfo> clonedRegions = ModifyRegionUtils.createRegions(conf, FSUtils.getRootDir(conf),
-      tableDesc, clonedRegionsInfo, catalogTracker, new ModifyRegionUtils.RegionFillTask() {
+    List<HRegionInfo> clonedRegions = ModifyRegionUtils.createRegions(conf, tableDir.getParent(),
+      tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
         public void fillRegion(final HRegion region) throws IOException {
           cloneRegion(region, snapshotRegions.get(region.getRegionInfo().getEncodedName()));
         }
       });
-    if (regions != null && regions.size() > 0) {
-      // add regions to .META.
-      MetaEditor.addRegionsToMeta(catalogTracker, clonedRegions);
-    }
+
+    return clonedRegionsInfo;
   }

   /**

@@ -386,7 +455,7 @@ public class RestoreSnapshotHelper {
    * wxyz/table=1234-abc
    * stuv/table=1234-abc.wxyz
    *
-   * NOTE that the region name in the clone change (md5 of regioninfo)
+   * NOTE that the region name in the clone changes (md5 of regioninfo)
    * and the reference should reflect that change.
    * </pre></blockquote>
    * @param familyDir destination directory for the store file
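The javadoc example in the last hunk describes how clone references are named: a cloned store file keeps a back-pointer to the source table, region, and hfile, and a reference created by a later split appends the parent region. A hedged sketch of the naming, built only from the example strings above (LinkNameSketch and its helpers are hypothetical):

    // Sketch: build the link and reference names shown in the javadoc,
    // e.g. "table=1234-abc" and "table=1234-abc.wxyz".
    final class LinkNameSketch {
      private LinkNameSketch() {}

      static String linkName(String table, String region, String hfile) {
        return table + "=" + region + "-" + hfile;
      }

      static String referenceName(String linkName, String parentRegion) {
        return linkName + "." + parentRegion;
      }
    }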
ModifyRegionUtils.java

@@ -70,13 +70,11 @@ public abstract class ModifyRegionUtils {
    * @param rootDir Root directory for HBase instance
    * @param hTableDescriptor description of the table
    * @param newRegions {@link HRegionInfo} that describes the regions to create
-   * @param catalogTracker the catalog tracker
    * @throws IOException
    */
   public static List<HRegionInfo> createRegions(final Configuration conf, final Path rootDir,
-      final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
-      final CatalogTracker catalogTracker) throws IOException {
-    return createRegions(conf, rootDir, hTableDescriptor, newRegions, catalogTracker, null);
+      final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions) throws IOException {
+    return createRegions(conf, rootDir, hTableDescriptor, newRegions, null);
   }

   /**

@@ -87,13 +85,12 @@ public abstract class ModifyRegionUtils {
    * @param rootDir Root directory for HBase instance
    * @param hTableDescriptor description of the table
    * @param newRegions {@link HRegionInfo} that describes the regions to create
-   * @param catalogTracker the catalog tracker
    * @param task {@link RegionFillTask} custom code to populate region after creation
    * @throws IOException
    */
   public static List<HRegionInfo> createRegions(final Configuration conf, final Path rootDir,
       final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
-      final CatalogTracker catalogTracker, final RegionFillTask task) throws IOException {
+      final RegionFillTask task) throws IOException {
     if (newRegions == null) return null;
     int regionNumber = newRegions.length;
     ThreadPoolExecutor regionOpenAndInitThreadPool = getRegionOpenAndInitThreadPool(conf,
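Dropping the CatalogTracker parameter leaves createRegions with a single concern: create each region on disk, hand it to the optional RegionFillTask so the caller can populate it (the clone path adds HFileLinks here), and return the list; META registration is now the caller's job. A minimal sketch of the callback pattern (RegionFactorySketch is hypothetical):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    // Sketch of createRegions(..., RegionFillTask): create, then let the
    // caller fill each region; no catalog access in the utility itself.
    final class RegionFactorySketch {

      interface RegionFillTask<R> {
        void fillRegion(R region) throws IOException;
      }

      static <R> List<R> createRegions(List<R> newRegions, RegionFillTask<R> task)
          throws IOException {
        List<R> created = new ArrayList<R>();
        for (R region : newRegions) {
          // on-disk creation elided; then the caller-supplied fill step
          if (task != null) task.fillRegion(region);
          created.add(region);
        }
        return created;
      }
    }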
TestRestoreSnapshotHelper.java

@@ -131,7 +131,7 @@ public class TestRestoreSnapshotHelper {

     FSTableDescriptors.createTableDescriptor(htdClone, conf);
     RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sourceTableName, htdClone);
-    helper.restore();
+    helper.restoreHdfsRegions();

     LOG.debug("post-restore table=" + htdClone.getNameAsString() + " snapshot=" + snapshotDir);
     FSUtils.logFileSystemState(fs, rootDir, LOG);

@@ -146,13 +146,10 @@ public class TestRestoreSnapshotHelper {
     HTableDescriptor tableDescriptor = Mockito.mock(HTableDescriptor.class);
     ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);

-    HConnection hconnection = HConnectionTestingUtility.getMockedConnection(conf);
-    Mockito.when(catalogTracker.getConnection()).thenReturn(hconnection);
-
     SnapshotDescription sd = SnapshotDescription.newBuilder()
       .setName("snapshot").setTable(sourceTableName).build();

-    return new RestoreSnapshotHelper(conf, fs, catalogTracker, sd, snapshotDir,
+    return new RestoreSnapshotHelper(conf, fs, sd, snapshotDir,
       htdClone, HTableDescriptor.getTableDir(rootDir, htdClone.getName()), monitor);
   }