add rsgroup support for backup
Parent: 72d5a46899
Commit: fb742807ea
@@ -42,7 +42,7 @@ public interface BackupAdmin extends Closeable {
 * @return the backup Id
 */

-String backupTables(final BackupRequest userRequest) throws IOException;
+BackupInfo backupTables(final BackupRequest userRequest) throws IOException;

 /**
 * Restore backup
@@ -519,7 +519,7 @@ public class BackupAdminImpl implements BackupAdmin {
 }

 @Override
-public String backupTables(BackupRequest request) throws IOException {
+public BackupInfo backupTables(BackupRequest request) throws IOException {
 BackupType type = request.getBackupType();
 String targetRootDir = request.getTargetRootDir();
 List<TableName> tableList = request.getTableList();
@@ -602,7 +602,7 @@ public class BackupAdminImpl implements BackupAdmin {

 client.execute();

-return backupId;
+return client.backupInfo;
 }

 private List<TableName> excludeNonExistingTables(List<TableName> tableList,
@@ -342,8 +342,9 @@ public final class BackupCommands {
 tables != null ? Lists.newArrayList(BackupUtils.parseTableNames(tables)) : null)
 .withTargetRootDir(targetBackupDir).withTotalTasks(workers)
 .withBandwidthPerTasks(bandwidth).withBackupSetName(setName).build();
-String backupId = admin.backupTables(request);
-System.out.println("Backup session " + backupId + " finished. Status: SUCCESS");
+BackupInfo backupInfo = admin.backupTables(request);
+System.out
+.println("Backup session " + backupInfo.getBackupId() + " finished. Status: SUCCESS");
 } catch (IOException e) {
 System.out.println("Backup session finished. Status: FAILURE");
 throw e;
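The hunks above change the return type of BackupAdmin#backupTables from the backup id String to the full BackupInfo. A minimal caller-side sketch of the adjusted contract, assuming the builder calls shown in the BackupCommands hunk and a configuration already in scope (variable names are illustrative only, not part of the patch):

    // conf, tables and backupRootDir are assumed to be defined by the caller
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BackupAdmin admin = new BackupAdminImpl(conn)) {
      BackupRequest request = new BackupRequest.Builder().withBackupType(BackupType.FULL)
        .withTableList(tables).withTargetRootDir(backupRootDir).build();
      BackupInfo info = admin.backupTables(request); // previously returned a String backup id
      String backupId = info.getBackupId();          // the id is still available via BackupInfo
      System.out.println("Backup " + backupId + " state: " + info.getState());
    }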
@@ -20,8 +20,11 @@ package org.apache.hadoop.hbase.backup.impl;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -33,7 +36,9 @@ import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -93,13 +98,36 @@ public class IncrementalBackupManager extends BackupManager {
 }
 newTimestamps = readRegionServerLastLogRollResult();

-logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode);
+logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode,
+getParticipatingServerNames(backupInfo.getTables()));
 logList = excludeProcV2WALs(logList);
 backupInfo.setIncrBackupFileList(logList);

 return newTimestamps;
 }

+private Set<String> getParticipatingServerNames(Set<TableName> tables) throws IOException {
+Set<Address> participatingServers = new HashSet<>();
+boolean flag = false;
+for (TableName table : tables) {
+RSGroupInfo rsGroupInfo = conn.getAdmin().getRSGroup(table);
+if (rsGroupInfo != null && !rsGroupInfo.getServers().isEmpty()) {
+LOG.info("Participating servers for table {}, rsgroup Name: {} are: {}", table,
+rsGroupInfo.getName(), rsGroupInfo.getServers());
+participatingServers.addAll(rsGroupInfo.getServers());
+} else {
+LOG.warn(
+"Rsgroup isn't available for table {}, all servers in the cluster will be participating ",
+table);
+flag = true;
+}
+}
+
+return flag
+? new HashSet<>()
+: participatingServers.stream().map(a -> a.toString()).collect(Collectors.toSet());
+}
+
 private List<String> excludeProcV2WALs(List<String> logList) {
 List<String> list = new ArrayList<>();
 for (int i = 0; i < logList.size(); i++) {
@@ -126,8 +154,8 @@ public class IncrementalBackupManager extends BackupManager {
 * @throws IOException exception
 */
 private List<String> getLogFilesForNewBackup(Map<String, Long> olderTimestamps,
-Map<String, Long> newestTimestamps, Configuration conf, String savedStartCode)
-throws IOException {
+Map<String, Long> newestTimestamps, Configuration conf, String savedStartCode,
+Set<String> servers) throws IOException {
 LOG.debug("In getLogFilesForNewBackup()\n" + "olderTimestamps: " + olderTimestamps
 + "\n newestTimestamps: " + newestTimestamps);

@@ -160,7 +188,7 @@ public class IncrementalBackupManager extends BackupManager {
 for (FileStatus rs : rss) {
 p = rs.getPath();
 host = BackupUtils.parseHostNameFromLogFile(p);
-if (host == null) {
+if (host == null || (!servers.isEmpty() && !servers.contains(host))) {
 continue;
 }
 FileStatus[] logs;
@@ -215,7 +243,7 @@ public class IncrementalBackupManager extends BackupManager {
 continue;
 }
 host = BackupUtils.parseHostFromOldLog(p);
-if (host == null) {
+if (host == null || (!servers.isEmpty() && !servers.contains(host))) {
 continue;
 }
 currentLogTS = BackupUtils.getCreationTime(p);
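The two hunks above apply the same filter to WAL and old-log paths: an empty server set (the fallback used when any backed-up table lacks a usable RS group) means no restriction, otherwise only hosts that belong to the group's servers are kept. A sketch of that keep/skip rule, using a hypothetical helper name that is not present in the patch:

    // Equivalent form of the `host == null || (!servers.isEmpty() && !servers.contains(host))` skip checks
    static boolean isParticipating(Set<String> servers, String host) {
      // an empty set means "no RS group restriction", so every parsed host participates
      return host != null && (servers.isEmpty() || servers.contains(host));
    }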
@@ -21,8 +21,11 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -38,6 +41,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -83,6 +87,22 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
 Map<Address, Long> serverAddressToLastBackupMap = new HashMap<>();

 Map<TableName, Long> tableNameBackupInfoMap = new HashMap<>();
+Set<Address> servers = new HashSet<>();
+for (BackupInfo backupInfo : backups) {
+for (TableName table : backupInfo.getTables()) {
+RSGroupInfo rsGroupInfo = conn.getAdmin().getRSGroup(table);
+if (
+rsGroupInfo != null && rsGroupInfo.getServers() != null
+&& !rsGroupInfo.getServers().isEmpty()
+) {
+servers.addAll(rsGroupInfo.getServers());
+} else {
+servers.addAll(conn.getAdmin().getRegionServers().stream().map(s -> s.getAddress())
+.collect(Collectors.toList()));
+}
+}
+}
+
 for (BackupInfo backupInfo : backups) {
 for (TableName table : backupInfo.getTables()) {
 tableNameBackupInfoMap.putIfAbsent(table, backupInfo.getStartTs());
@@ -90,7 +110,10 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
 tableNameBackupInfoMap.put(table, backupInfo.getStartTs());
 for (Map.Entry<String, Long> entry : backupInfo.getTableSetTimestampMap().get(table)
 .entrySet()) {
-serverAddressToLastBackupMap.put(Address.fromString(entry.getKey()), entry.getValue());
+if (servers.contains(Address.fromString(entry.getKey()))) {
+serverAddressToLastBackupMap.put(Address.fromString(entry.getKey()),
+entry.getValue());
+}
 }
 }
 }
@@ -17,14 +17,18 @@
 */
 package org.apache.hadoop.hbase.backup;

+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Objects;
+import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -57,6 +61,9 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
 import org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner;
+import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
+import org.apache.hadoop.hbase.rsgroup.RSGroupUtil;
 import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.access.SecureTestUtil;
@@ -84,6 +91,15 @@ public class TestBackupBase {
 protected static Configuration conf1;
 protected static Configuration conf2;

+protected static final int RSGROUP_RS_NUM = 5;
+protected static final int NUM_REGIONSERVERS = 3;
+protected static final String RSGROUP_NAME = "rsgroup1";
+protected static final String RSGROUP_NAMESPACE = "rsgroup_ns";
+protected static final TableName RSGROUP_TABLE_1 =
+TableName.valueOf(RSGROUP_NAMESPACE + ":rsgroup_table1");
+protected static final TableName RSGROUP_TABLE_2 =
+TableName.valueOf(RSGROUP_NAMESPACE + ":rsgroup_table2");
+
 protected static TableName table1 = TableName.valueOf("table1");
 protected static TableDescriptor table1Desc;
 protected static TableName table2 = TableName.valueOf("table2");
@@ -105,6 +121,7 @@ public class TestBackupBase {

 protected static boolean autoRestoreOnFailure;
 protected static boolean useSecondCluster;
+protected static boolean enableRSgroup;

 static class IncrementalTableBackupClientForTest extends IncrementalTableBackupClient {
 public IncrementalTableBackupClientForTest() {
@@ -257,6 +274,22 @@ public class TestBackupBase {
 }
 }

+private static RSGroupInfo addGroup(String groupName, int serverCount) throws IOException {
+Admin admin = TEST_UTIL.getAdmin();
+RSGroupInfo defaultInfo = admin.getRSGroup(RSGroupInfo.DEFAULT_GROUP);
+admin.addRSGroup(groupName);
+Set<Address> set = new HashSet<>();
+for (Address server : defaultInfo.getServers()) {
+if (set.size() == serverCount) {
+break;
+}
+set.add(server);
+}
+admin.moveServersToRSGroup(set, groupName);
+RSGroupInfo result = admin.getRSGroup(groupName);
+return result;
+}
+
 public static void setUpHelper() throws Exception {
 BACKUP_ROOT_DIR = Path.SEPARATOR + "backupUT";
 BACKUP_REMOTE_ROOT_DIR = Path.SEPARATOR + "backupUT";
@@ -279,7 +312,13 @@

 // Set MultiWAL (with 2 default WAL files per RS)
 conf1.set(WALFactory.WAL_PROVIDER, provider);
+if (enableRSgroup) {
+conf1.setBoolean(RSGroupUtil.RS_GROUP_ENABLED, true);
+TEST_UTIL.startMiniCluster(RSGROUP_RS_NUM + NUM_REGIONSERVERS);
+addGroup(RSGROUP_NAME, RSGROUP_RS_NUM);
+} else {
 TEST_UTIL.startMiniCluster();
+}

 if (useSecondCluster) {
 conf2 = HBaseConfiguration.create(conf1);
@@ -317,6 +356,7 @@ public class TestBackupBase {
 public static void setUp() throws Exception {
 TEST_UTIL = new HBaseTestingUtil();
 conf1 = TEST_UTIL.getConfiguration();
+enableRSgroup = false;
 autoRestoreOnFailure = true;
 useSecondCluster = false;
 setUpHelper();
@@ -342,6 +382,7 @@ public class TestBackupBase {
 }
 TEST_UTIL.shutdownMiniCluster();
 TEST_UTIL.shutdownMiniMapReduceCluster();
+enableRSgroup = false;
 autoRestoreOnFailure = true;
 useSecondCluster = false;
 }
@@ -366,16 +407,16 @@ public class TestBackupBase {
 return request;
 }

-protected String backupTables(BackupType type, List<TableName> tables, String path)
+protected BackupInfo backupTables(BackupType type, List<TableName> tables, String path)
 throws IOException {
 Connection conn = null;
 BackupAdmin badmin = null;
-String backupId;
+BackupInfo backupInfo;
 try {
 conn = ConnectionFactory.createConnection(conf1);
 badmin = new BackupAdminImpl(conn);
 BackupRequest request = createBackupRequest(type, tables, path);
-backupId = badmin.backupTables(request);
+backupInfo = badmin.backupTables(request);
 } finally {
 if (badmin != null) {
 badmin.close();
@@ -384,14 +425,14 @@ public class TestBackupBase {
 conn.close();
 }
 }
-return backupId;
+return backupInfo;
 }

-protected String fullTableBackup(List<TableName> tables) throws IOException {
+protected BackupInfo fullTableBackup(List<TableName> tables) throws IOException {
 return backupTables(BackupType.FULL, tables, BACKUP_ROOT_DIR);
 }

-protected String incrementalTableBackup(List<TableName> tables) throws IOException {
+protected BackupInfo incrementalTableBackup(List<TableName> tables) throws IOException {
 return backupTables(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
 }

@@ -439,6 +480,23 @@ public class TestBackupBase {
 table.close();
 ha.close();
 conn.close();
+
+if (enableRSgroup) {
+ha.createNamespace(NamespaceDescriptor.create(RSGROUP_NAMESPACE)
+.addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, RSGROUP_NAME).build());
+
+ha.createTable(TableDescriptorBuilder.newBuilder(RSGROUP_TABLE_1)
+.setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)).build());
+table = ConnectionFactory.createConnection(conf1).getTable(RSGROUP_TABLE_1);
+loadTable(table);
+table.close();
+
+ha.createTable(TableDescriptorBuilder.newBuilder(RSGROUP_TABLE_2)
+.setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)).build());
+table = ConnectionFactory.createConnection(conf1).getTable(RSGROUP_TABLE_2);
+loadTable(table);
+table.close();
+}
 }

 protected boolean checkSucceeded(String backupId) throws IOException {
@@ -461,7 +519,7 @@ public class TestBackupBase {
 return status.getState() == BackupState.FAILED;
 }

-private BackupInfo getBackupInfo(String backupId) throws IOException {
+protected BackupInfo getBackupInfo(String backupId) throws IOException {
 try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
 BackupInfo status = table.readBackupInfo(backupId);
 return status;
@@ -498,6 +556,26 @@ public class TestBackupBase {
 return logFiles;
 }

+protected Set<Address> getRsgroupServers(String rsgroupName) throws IOException {
+RSGroupInfo rsGroupInfo = TEST_UTIL.getAdmin().getRSGroup(rsgroupName);
+if (
+rsGroupInfo != null && rsGroupInfo.getServers() != null && !rsGroupInfo.getServers().isEmpty()
+) {
+return new HashSet<>(rsGroupInfo.getServers());
+}
+return new HashSet<>();
+}
+
+protected void checkIfWALFilesBelongToRsgroup(List<String> walFiles, String rsgroupName)
+throws IOException {
+for (String file : walFiles) {
+Address walServerAddress =
+Address.fromString(BackupUtils.parseHostNameFromLogFile(new Path(file)));
+assertTrue("Backed WAL files should be from RSGroup " + rsgroupName,
+getRsgroupServers(rsgroupName).contains(walServerAddress));
+}
+}
+
 protected void dumpBackupDir() throws IOException {
 // Dump Backup Dir
 FileSystem fs = FileSystem.get(conf1);
@@ -58,7 +58,7 @@ public class TestBackupDelete extends TestBackupBase {
 public void testBackupDelete() throws Exception {
 LOG.info("test backup delete on a single table with data");
 List<TableName> tableList = Lists.newArrayList(table1);
-String backupId = fullTableBackup(tableList);
+String backupId = fullTableBackup(tableList).getBackupId();
 assertTrue(checkSucceeded(backupId));
 LOG.info("backup complete");
 String[] backupIds = new String[] { backupId };
@@ -85,7 +85,7 @@ public class TestBackupDelete extends TestBackupBase {
 public void testBackupDeleteCommand() throws Exception {
 LOG.info("test backup delete on a single table with data: command-line");
 List<TableName> tableList = Lists.newArrayList(table1);
-String backupId = fullTableBackup(tableList);
+String backupId = fullTableBackup(tableList).getBackupId();
 assertTrue(checkSucceeded(backupId));
 LOG.info("backup complete");
 ByteArrayOutputStream baos = new ByteArrayOutputStream();
@@ -117,7 +117,7 @@ public class TestBackupDelete extends TestBackupBase {
 return System.currentTimeMillis() - 2 * 24 * 3600 * 1000;
 }
 });
-String backupId = fullTableBackup(tableList);
+String backupId = fullTableBackup(tableList).getBackupId();
 assertTrue(checkSucceeded(backupId));

 EnvironmentEdgeManager.reset();
@@ -56,7 +56,7 @@ public class TestBackupDeleteRestore extends TestBackupBase {
 LOG.info("test full restore on a single table empty table");

 List<TableName> tables = Lists.newArrayList(table1);
-String backupId = fullTableBackup(tables);
+String backupId = fullTableBackup(tables).getBackupId();
 assertTrue(checkSucceeded(backupId));
 LOG.info("backup complete");
 int numRows = TEST_UTIL.countRows(table1);
@@ -142,7 +142,7 @@ public class TestBackupDeleteWithFailures extends TestBackupBase {
 throws Exception {
 LOG.info("test repair backup delete on a single table with data and failures " + failures[0]);
 List<TableName> tableList = Lists.newArrayList(table1);
-String backupId = fullTableBackup(tableList);
+String backupId = fullTableBackup(tableList).getBackupId();
 assertTrue(checkSucceeded(backupId));
 LOG.info("backup complete");
 String[] backupIds = new String[] { backupId };
@@ -82,7 +82,7 @@ public class TestBackupDescribe extends TestBackupBase {
 LOG.info("test backup describe on a single table with data: command-line");

 List<TableName> tableList = Lists.newArrayList(table1);
-String backupId = fullTableBackup(tableList);
+String backupId = fullTableBackup(tableList).getBackupId();

 LOG.info("backup complete");
 assertTrue(checkSucceeded(backupId));
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+@Category(MediumTests.class)
+public class TestBackupLogCleanerWithRsgroup extends TestBackupBase {
+
+@ClassRule
+public static final HBaseClassTestRule CLASS_RULE =
+HBaseClassTestRule.forClass(TestBackupLogCleanerWithRsgroup.class);
+
+private static final Logger LOG = LoggerFactory.getLogger(TestBackupLogCleanerWithRsgroup.class);
+
+@BeforeClass
+public static void setUp() throws Exception {
+TEST_UTIL = new HBaseTestingUtil();
+conf1 = TEST_UTIL.getConfiguration();
+enableRSgroup = true;
+autoRestoreOnFailure = true;
+useSecondCluster = false;
+setUpHelper();
+}
+
+@Test
+public void testBackupLogCleanerRsgroup() throws Exception {
+// #1 - create full backup for all tables
+LOG.info("create full backup image for all tables");
+List<TableName> tableSetFullList = Lists.newArrayList(RSGROUP_TABLE_1);
+
+try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
+// Verify that we have no backup sessions yet
+assertFalse(systemTable.hasBackupSessions());
+
+List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+BackupLogCleaner cleaner = new BackupLogCleaner();
+cleaner.setConf(TEST_UTIL.getConfiguration());
+Map<String, Object> params = new HashMap<>();
+params.put(HMaster.MASTER, TEST_UTIL.getHBaseCluster().getMaster());
+cleaner.init(params);
+cleaner.setConf(TEST_UTIL.getConfiguration());
+
+Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
+// We can delete all files because we do not have yet recorded backup sessions
+assertTrue(Iterables.size(deletable) == walFiles.size());
+String backupIdFull = fullTableBackup(tableSetFullList).getBackupId();
+assertTrue(checkSucceeded(backupIdFull));
+
+// Check one more time
+deletable = cleaner.getDeletableFiles(walFiles);
+assertTrue(Iterables.size(deletable) == walFiles.size());
+
+Connection conn = ConnectionFactory.createConnection(conf1);
+// #2 - insert some data to table
+Table t1 = conn.getTable(RSGROUP_TABLE_1);
+Put p1;
+Random rnd = new Random();
+for (int i = 0; i < 5000; i++) {
+p1 = new Put(Bytes.toBytes(1000000 + rnd.nextInt(9000000)));
+p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+t1.put(p1);
+}
+t1.close();
+
+List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+// New list of wal files is greater than the previous one,
+// because new wal per RS have been opened after full backup
+assertTrue(walFiles.size() < newWalFiles.size());
+
+deletable = cleaner.getDeletableFiles(newWalFiles);
+assertTrue(newWalFiles.size() > Iterables.size(deletable));
+
+// #3 - incremental backup
+List<TableName> tableSetIncList = Lists.newArrayList(RSGROUP_TABLE_1);
+String backupIdIncMultiple =
+backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR).getBackupId();
+assertTrue(checkSucceeded(backupIdIncMultiple));
+
+deletable = cleaner.getDeletableFiles(newWalFiles);
+assertTrue(Iterables.size(deletable) == newWalFiles.size());
+
+conn.close();
+}
+}
+}
@@ -62,7 +62,7 @@ public class TestBackupMerge extends TestBackupBase {
 BackupAdminImpl client = new BackupAdminImpl(conn);

 BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-String backupIdFull = client.backupTables(request);
+String backupIdFull = client.backupTables(request).getBackupId();

 assertTrue(checkSucceeded(backupIdFull));

@@ -83,7 +83,7 @@ public class TestBackupMerge extends TestBackupBase {
 // #3 - incremental backup for multiple tables
 tables = Lists.newArrayList(table1, table2);
 request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-String backupIdIncMultiple = client.backupTables(request);
+String backupIdIncMultiple = client.backupTables(request).getBackupId();

 assertTrue(checkSucceeded(backupIdIncMultiple));

@@ -95,7 +95,7 @@ public class TestBackupMerge extends TestBackupBase {

 // #3 - incremental backup for multiple tables
 request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-String backupIdIncMultiple2 = client.backupTables(request);
+String backupIdIncMultiple2 = client.backupTables(request).getBackupId();
 assertTrue(checkSucceeded(backupIdIncMultiple2));

 try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
@@ -63,7 +63,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
 Admin admin = conn.getAdmin();
 BackupAdmin client = new BackupAdminImpl(conn);
 BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-String backupIdFull = client.backupTables(request);
+String backupIdFull = client.backupTables(request).getBackupId();
 assertTrue(checkSucceeded(backupIdFull));
 // #2 - insert some data to table table1
 Table t1 = conn.getTable(table1);
@@ -78,7 +78,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
 // #3 - incremental backup for table1
 tables = Lists.newArrayList(table1);
 request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-String backupIdInc1 = client.backupTables(request);
+String backupIdInc1 = client.backupTables(request).getBackupId();
 assertTrue(checkSucceeded(backupIdInc1));
 // #4 - insert some data to table table2
 Table t2 = conn.getTable(table2);
@@ -91,7 +91,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
 // #5 - incremental backup for table1, table2
 tables = Lists.newArrayList(table1, table2);
 request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-String backupIdInc2 = client.backupTables(request);
+String backupIdInc2 = client.backupTables(request).getBackupId();
 assertTrue(checkSucceeded(backupIdInc2));
 // #6 - insert some data to table table1
 t1 = conn.getTable(table1);
@@ -103,7 +103,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
 // #7 - incremental backup for table1
 tables = Lists.newArrayList(table1);
 request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-String backupIdInc3 = client.backupTables(request);
+String backupIdInc3 = client.backupTables(request).getBackupId();
 assertTrue(checkSucceeded(backupIdInc3));
 // #8 - insert some data to table table2
 t2 = conn.getTable(table2);
@@ -115,17 +115,17 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
 // #9 - incremental backup for table1, table2
 tables = Lists.newArrayList(table1, table2);
 request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-String backupIdInc4 = client.backupTables(request);
+String backupIdInc4 = client.backupTables(request).getBackupId();
 assertTrue(checkSucceeded(backupIdInc4));
 // #10 full backup for table3
 tables = Lists.newArrayList(table3);
 request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-String backupIdFull2 = client.backupTables(request);
+String backupIdFull2 = client.backupTables(request).getBackupId();
 assertTrue(checkSucceeded(backupIdFull2));
 // #11 - incremental backup for table3
 tables = Lists.newArrayList(table3);
 request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-String backupIdInc5 = client.backupTables(request);
+String backupIdInc5 = client.backupTables(request).getBackupId();
 assertTrue(checkSucceeded(backupIdInc5));
 LOG.error("Delete backupIdInc2");
 client.deleteBackups(new String[] { backupIdInc2 });
@@ -68,7 +68,7 @@ public class TestBackupShowHistory extends TestBackupBase {
 LOG.info("test backup history on a single table with data");

 List<TableName> tableList = Lists.newArrayList(table1);
-String backupId = fullTableBackup(tableList);
+String backupId = fullTableBackup(tableList).getBackupId();
 assertTrue(checkSucceeded(backupId));
 LOG.info("backup complete");

@@ -92,7 +92,7 @@ public class TestBackupShowHistory extends TestBackupBase {
 assertTrue(output.indexOf(backupId) > 0);

 tableList = Lists.newArrayList(table2);
-String backupId2 = fullTableBackup(tableList);
+String backupId2 = fullTableBackup(tableList).getBackupId();
 assertTrue(checkSucceeded(backupId2));
 LOG.info("backup complete: " + table2);
 BackupInfo.Filter tableNameFilter = image -> {
@@ -53,7 +53,7 @@ public class TestBackupStatusProgress extends TestBackupBase {
 LOG.info("test backup status/progress on a single table with data");

 List<TableName> tableList = Lists.newArrayList(table1);
-String backupId = fullTableBackup(tableList);
+String backupId = fullTableBackup(tableList).getBackupId();
 LOG.info("backup complete");
 assertTrue(checkSucceeded(backupId));

@@ -70,7 +70,7 @@ public class TestBackupStatusProgress extends TestBackupBase {
 LOG.info("test backup status/progress on a single table with data: command-line");

 List<TableName> tableList = Lists.newArrayList(table1);
-String backupId = fullTableBackup(tableList);
+String backupId = fullTableBackup(tableList).getBackupId();
 LOG.info("backup complete");
 assertTrue(checkSucceeded(backupId));
 ByteArrayOutputStream baos = new ByteArrayOutputStream();
@@ -55,7 +55,7 @@ public class TestFullRestore extends TestBackupBase {
 LOG.info("test full restore on a single table empty table");

 List<TableName> tables = Lists.newArrayList(table1);
-String backupId = fullTableBackup(tables);
+String backupId = fullTableBackup(tables).getBackupId();
 assertTrue(checkSucceeded(backupId));

 LOG.info("backup complete");
@@ -76,7 +76,7 @@ public class TestFullRestore extends TestBackupBase {
 LOG.info("test full restore on a single table empty table: command-line");

 List<TableName> tables = Lists.newArrayList(table1);
-String backupId = fullTableBackup(tables);
+String backupId = fullTableBackup(tables).getBackupId();
 LOG.info("backup complete");
 assertTrue(checkSucceeded(backupId));
 // restore <backup_root_path> <backup_id> <tables> [tableMapping]
@@ -97,7 +97,7 @@ public class TestFullRestore extends TestBackupBase {
 LOG.info("test full restore on a single table: command-line, check only");

 List<TableName> tables = Lists.newArrayList(table1);
-String backupId = fullTableBackup(tables);
+String backupId = fullTableBackup(tables).getBackupId();
 LOG.info("backup complete");
 assertTrue(checkSucceeded(backupId));
 // restore <backup_root_path> <backup_id> <tables> [tableMapping]
@@ -119,7 +119,7 @@ public class TestFullRestore extends TestBackupBase {
 public void testFullRestoreMultiple() throws Exception {
 LOG.info("create full backup image on multiple tables");
 List<TableName> tables = Lists.newArrayList(table2, table3);
-String backupId = fullTableBackup(tables);
+String backupId = fullTableBackup(tables).getBackupId();
 assertTrue(checkSucceeded(backupId));

 TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -143,7 +143,7 @@ public class TestFullRestore extends TestBackupBase {
 public void testFullRestoreMultipleCommand() throws Exception {
 LOG.info("create full backup image on multiple tables: command-line");
 List<TableName> tables = Lists.newArrayList(table2, table3);
-String backupId = fullTableBackup(tables);
+String backupId = fullTableBackup(tables).getBackupId();
 assertTrue(checkSucceeded(backupId));

 TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -172,7 +172,7 @@ public class TestFullRestore extends TestBackupBase {
 public void testFullRestoreSingleOverwrite() throws Exception {
 LOG.info("test full restore on a single table empty table");
 List<TableName> tables = Lists.newArrayList(table1);
-String backupId = fullTableBackup(tables);
+String backupId = fullTableBackup(tables).getBackupId();
 assertTrue(checkSucceeded(backupId));

 LOG.info("backup complete");
@@ -191,7 +191,7 @@ public class TestFullRestore extends TestBackupBase {
 public void testFullRestoreSingleOverwriteCommand() throws Exception {
 LOG.info("test full restore on a single table empty table: command-line");
 List<TableName> tables = Lists.newArrayList(table1);
-String backupId = fullTableBackup(tables);
+String backupId = fullTableBackup(tables).getBackupId();
 assertTrue(checkSucceeded(backupId));
 LOG.info("backup complete");
 TableName[] tableset = new TableName[] { table1 };
@@ -216,7 +216,7 @@ public class TestFullRestore extends TestBackupBase {
 LOG.info("create full backup image on multiple tables");

 List<TableName> tables = Lists.newArrayList(table2, table3);
-String backupId = fullTableBackup(tables);
+String backupId = fullTableBackup(tables).getBackupId();
 assertTrue(checkSucceeded(backupId));

 TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -234,7 +234,7 @@ public class TestFullRestore extends TestBackupBase {
 LOG.info("create full backup image on multiple tables: command-line");

 List<TableName> tables = Lists.newArrayList(table2, table3);
-String backupId = fullTableBackup(tables);
+String backupId = fullTableBackup(tables).getBackupId();
 assertTrue(checkSucceeded(backupId));

 TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -259,7 +259,7 @@ public class TestFullRestore extends TestBackupBase {
 public void testFullRestoreSingleDNE() throws Exception {
 LOG.info("test restore fails on a single table that does not exist");
 List<TableName> tables = Lists.newArrayList(table1);
-String backupId = fullTableBackup(tables);
+String backupId = fullTableBackup(tables).getBackupId();
 assertTrue(checkSucceeded(backupId));

 LOG.info("backup complete");
@@ -279,7 +279,7 @@ public class TestFullRestore extends TestBackupBase {
 public void testFullRestoreSingleDNECommand() throws Exception {
 LOG.info("test restore fails on a single table that does not exist: command-line");
 List<TableName> tables = Lists.newArrayList(table1);
-String backupId = fullTableBackup(tables);
+String backupId = fullTableBackup(tables).getBackupId();
 assertTrue(checkSucceeded(backupId));

 LOG.info("backup complete");
@@ -302,7 +302,7 @@ public class TestFullRestore extends TestBackupBase {
 LOG.info("test restore fails on multiple tables that do not exist");

 List<TableName> tables = Lists.newArrayList(table2, table3);
-String backupId = fullTableBackup(tables);
+String backupId = fullTableBackup(tables).getBackupId();
 assertTrue(checkSucceeded(backupId));

 TableName[] restore_tableset =
@@ -322,7 +322,7 @@ public class TestFullRestore extends TestBackupBase {
 LOG.info("test restore fails on multiple tables that do not exist: command-line");

 List<TableName> tables = Lists.newArrayList(table2, table3);
-String backupId = fullTableBackup(tables);
+String backupId = fullTableBackup(tables).getBackupId();
 assertTrue(checkSucceeded(backupId));

 TableName[] restore_tableset =
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

@@ -98,7 +99,7 @@ public class TestIncrementalBackup extends TestBackupBase {
       Admin admin = conn.getAdmin();
       BackupAdminImpl client = new BackupAdminImpl(conn);
       BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-      String backupIdFull = client.backupTables(request);
+      String backupIdFull = client.backupTables(request).getBackupId();
       assertTrue(checkSucceeded(backupIdFull));

       // #2 - insert some data to table

@@ -146,8 +147,11 @@ public class TestIncrementalBackup extends TestBackupBase {
       // #3 - incremental backup for multiple tables
       tables = Lists.newArrayList(table1, table2);
       request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-      String backupIdIncMultiple = client.backupTables(request);
+      BackupInfo backupInfoIncMultiple = client.backupTables(request);
+      String backupIdIncMultiple = backupInfoIncMultiple.getBackupId();
       assertTrue(checkSucceeded(backupIdIncMultiple));
+      checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple.getIncrBackupFileList(),
+        RSGroupInfo.DEFAULT_GROUP);

       // add column family f2 to table1
       // drop column family f3

@@ -166,8 +170,11 @@ public class TestIncrementalBackup extends TestBackupBase {

       // #4 - additional incremental backup for multiple tables
       request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-      String backupIdIncMultiple2 = client.backupTables(request);
+      BackupInfo backupInfoIncMultiple2 = client.backupTables(request);
+      String backupIdIncMultiple2 = backupInfoIncMultiple2.getBackupId();
       assertTrue(checkSucceeded(backupIdIncMultiple2));
+      checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple2.getIncrBackupFileList(),
+        RSGroupInfo.DEFAULT_GROUP);

       // #5 - restore full backup for all tables
       TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
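Note: the hunks above show the pattern repeated throughout these tests: BackupAdmin.backupTables now returns a BackupInfo instead of the bare backup id, and the incremental tests read the copied WAL list off that object. A minimal sketch of the calling pattern follows; the helper name runBackup, the null guard and the println are illustrative only, while the BackupAdmin/BackupInfo calls are the ones visible in the diff (imports match those already present in the touched test files).

    // Sketch only, assuming the new BackupInfo return type shown in the hunks above.
    static String runBackup(Connection conn, BackupRequest request) throws IOException {
      try (BackupAdmin client = new BackupAdminImpl(conn)) {
        BackupInfo info = client.backupTables(request);        // previously returned the String id
        List<String> walFiles = info.getIncrBackupFileList();  // WAL list the new rsgroup checks rely on
        int walCount = walFiles == null ? 0 : walFiles.size(); // full backups may not carry a WAL list
        System.out.println("backup " + info.getBackupId() + " copied " + walCount + " WAL files");
        return info.getBackupId();
      }
    }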
@@ -65,7 +65,7 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
       BackupAdminImpl client = new BackupAdminImpl(conn);

       BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-      String backupIdFull = client.backupTables(request);
+      String backupIdFull = client.backupTables(request).getBackupId();

       assertTrue(checkSucceeded(backupIdFull));

@@ -88,7 +88,7 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
       // #3 - incremental backup for table1
       tables = Lists.newArrayList(table1);
       request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-      String backupIdIncMultiple = client.backupTables(request);
+      String backupIdIncMultiple = client.backupTables(request).getBackupId();
       assertTrue(checkSucceeded(backupIdIncMultiple));

       // #4 - restore full backup for all tables, without overwrite
@@ -240,7 +240,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
       BackupAdminImpl client = new BackupAdminImpl(conn);

       BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-      String backupIdFull = client.backupTables(request);
+      String backupIdFull = client.backupTables(request).getBackupId();

       assertTrue(checkSucceeded(backupIdFull));

@@ -261,7 +261,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
       // #3 - incremental backup for multiple tables
       tables = Lists.newArrayList(table1, table2);
       request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-      String backupIdIncMultiple = client.backupTables(request);
+      String backupIdIncMultiple = client.backupTables(request).getBackupId();

       assertTrue(checkSucceeded(backupIdIncMultiple));

@@ -273,7 +273,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {

       // #3 - incremental backup for multiple tables
       request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-      String backupIdIncMultiple2 = client.backupTables(request);
+      String backupIdIncMultiple2 = client.backupTables(request).getBackupId();
       assertTrue(checkSucceeded(backupIdIncMultiple2));
       // #4 Merge backup images with failures

@@ -70,7 +70,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
       BackupAdminImpl client = new BackupAdminImpl(conn);

       BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-      String backupIdFull = client.backupTables(request);
+      String backupIdFull = client.backupTables(request).getBackupId();

       assertTrue(checkSucceeded(backupIdFull));

@@ -97,7 +97,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
       // #3 - incremental backup for table1
       tables = Lists.newArrayList(table1);
       request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-      String backupIdIncMultiple = client.backupTables(request);
+      String backupIdIncMultiple = client.backupTables(request).getBackupId();
       assertTrue(checkSucceeded(backupIdIncMultiple));
       // #4 bulk load again
       LOG.debug("bulk loading into " + testName);

@@ -110,7 +110,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
       // #5 - incremental backup for table1
       tables = Lists.newArrayList(table1);
       request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-      String backupIdIncMultiple1 = client.backupTables(request);
+      String backupIdIncMultiple1 = client.backupTables(request).getBackupId();
       assertTrue(checkSucceeded(backupIdIncMultiple1));
       // Delete all data in table1
       TEST_UTIL.deleteTableData(table1);

@@ -125,7 +125,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
       Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2 + actual + actual1);
       request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);

-      backupIdFull = client.backupTables(request);
+      backupIdFull = client.backupTables(request).getBackupId();
       try (final BackupSystemTable table = new BackupSystemTable(conn)) {
         Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>,
           List<byte[]>> pair = table.readBulkloadRows(tables);
@@ -95,7 +95,7 @@ public class TestIncrementalBackupWithFailures extends TestBackupBase {
       BackupAdminImpl client = new BackupAdminImpl(conn);

       BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-      String backupIdFull = client.backupTables(request);
+      String backupIdFull = client.backupTables(request).getBackupId();

       assertTrue(checkSucceeded(backupIdFull));

@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+@RunWith(Parameterized.class)
+public class TestIncrementalBackupWithRsgroup extends TestBackupBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestIncrementalBackupWithRsgroup.class);
+
+  private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackupWithRsgroup.class);
+
+  public TestIncrementalBackupWithRsgroup(Boolean b) {
+  }
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    List<Object[]> params = new ArrayList<>();
+    params.add(new Object[] { Boolean.TRUE });
+    return params;
+  }
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    TEST_UTIL = new HBaseTestingUtil();
+    conf1 = TEST_UTIL.getConfiguration();
+    enableRSgroup = true;
+    autoRestoreOnFailure = true;
+    useSecondCluster = false;
+    setUpHelper();
+  }
+
+  // implement all test cases in 1 test since incremental
+  // backup/restore has dependencies
+  @Test
+  public void TestIncBackupRestore() throws Exception {
+    int ADD_ROWS = 99;
+
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+    List<TableName> tables = Lists.newArrayList(RSGROUP_TABLE_1, RSGROUP_TABLE_2);
+    final byte[] fam3Name = Bytes.toBytes("f3");
+    final byte[] mobName = Bytes.toBytes("mob");
+
+    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(RSGROUP_TABLE_1)
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName))
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name))
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(mobName).setMobEnabled(true)
+        .setMobThreshold(5L).build())
+      .build();
+    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
+
+    try (Connection conn = ConnectionFactory.createConnection(conf1)) {
+      int NB_ROWS_FAM3 = 6;
+      insertIntoTable(conn, RSGROUP_TABLE_1, fam3Name, 3, NB_ROWS_FAM3).close();
+      insertIntoTable(conn, RSGROUP_TABLE_1, mobName, 3, NB_ROWS_FAM3).close();
+      Admin admin = conn.getAdmin();
+      BackupAdminImpl client = new BackupAdminImpl(conn);
+      BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+      String backupIdFull = client.backupTables(request).getBackupId();
+      assertTrue(checkSucceeded(backupIdFull));
+
+      // #2 - insert some data to table
+      Table t1 = insertIntoTable(conn, RSGROUP_TABLE_1, famName, 1, ADD_ROWS);
+      LOG.debug("writing " + ADD_ROWS + " rows to " + RSGROUP_TABLE_1);
+      Assert.assertEquals(HBaseTestingUtil.countRows(t1),
+        NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
+      LOG.debug("written " + ADD_ROWS + " rows to " + RSGROUP_TABLE_1);
+      // additionally, insert rows to MOB cf
+      int NB_ROWS_MOB = 111;
+      insertIntoTable(conn, RSGROUP_TABLE_1, mobName, 3, NB_ROWS_MOB);
+      LOG.debug("written " + NB_ROWS_MOB + " rows to " + RSGROUP_TABLE_1 + " to Mob enabled CF");
+      t1.close();
+      Assert.assertEquals(HBaseTestingUtil.countRows(t1),
+        NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB);
+      Table t2 = conn.getTable(RSGROUP_TABLE_2);
+      Put p2;
+      for (int i = 0; i < 5; i++) {
+        p2 = new Put(Bytes.toBytes("row-t2" + i));
+        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+        t2.put(p2);
+      }
+      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(t2));
+      t2.close();
+      LOG.debug("written " + 5 + " rows to " + RSGROUP_TABLE_2);
+      // split RSGROUP_TABLE_1
+      SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+      List<HRegion> regions = cluster.getRegions(RSGROUP_TABLE_1);
+      byte[] name = regions.get(0).getRegionInfo().getRegionName();
+      long startSplitTime = EnvironmentEdgeManager.currentTime();
+      try {
+        admin.splitRegionAsync(name).get();
+      } catch (Exception e) {
+        // although split fail, this may not affect following check in current API,
+        // exception will be thrown.
+        LOG.debug("region is not splittable, because " + e);
+      }
+      while (!admin.isTableAvailable(RSGROUP_TABLE_1)) {
+        Thread.sleep(100);
+      }
+      long endSplitTime = EnvironmentEdgeManager.currentTime();
+      // split finished
+      LOG.debug("split finished in =" + (endSplitTime - startSplitTime));
+
+      // #3 - incremental backup for multiple tables
+      tables = Lists.newArrayList(RSGROUP_TABLE_1, RSGROUP_TABLE_2);
+      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+      BackupInfo backupInfoIncMultiple = client.backupTables(request);
+      String backupIdIncMultiple = backupInfoIncMultiple.getBackupId();
+      assertTrue(checkSucceeded(backupIdIncMultiple));
+      checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple.getIncrBackupFileList(), RSGROUP_NAME);
+
+      // add column family f2 to RSGROUP_TABLE_1
+      // drop column family f3
+      final byte[] fam2Name = Bytes.toBytes("f2");
+      newTable1Desc = TableDescriptorBuilder.newBuilder(newTable1Desc)
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name)).removeColumnFamily(fam3Name)
+        .build();
+      TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
+
+      int NB_ROWS_FAM2 = 7;
+      Table t3 = insertIntoTable(conn, RSGROUP_TABLE_1, fam2Name, 2, NB_ROWS_FAM2);
+      t3.close();
+
+      // Wait for 5 sec to make sure that old WALs were deleted
+      Thread.sleep(5000);
+
+      // #4 - additional incremental backup for multiple tables
+      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+      BackupInfo backupInfoIncMultiple2 = client.backupTables(request);
+      String backupIdIncMultiple2 = backupInfoIncMultiple2.getBackupId();
+      assertTrue(checkSucceeded(backupIdIncMultiple2));
+      checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple2.getIncrBackupFileList(), RSGROUP_NAME);
+
+      // #5 - restore full backup for all tables
+      TableName[] tablesRestoreFull = new TableName[] { RSGROUP_TABLE_1, RSGROUP_TABLE_2 };
+      TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
+
+      LOG.debug("Restoring full " + backupIdFull);
+      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
+        tablesRestoreFull, tablesMapFull, true));
+
+      // #6.1 - check tables for full restore
+      Admin hAdmin = TEST_UTIL.getAdmin();
+      assertTrue(hAdmin.tableExists(table1_restore));
+      assertTrue(hAdmin.tableExists(table2_restore));
+      hAdmin.close();
+
+      // #6.2 - checking row count of tables for full restore
+      Table hTable = conn.getTable(table1_restore);
+      Assert.assertEquals(HBaseTestingUtil.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
+      hTable.close();
+
+      hTable = conn.getTable(table2_restore);
+      Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtil.countRows(hTable));
+      hTable.close();
+
+      // #7 - restore incremental backup for multiple tables, with overwrite
+      TableName[] tablesRestoreIncMultiple = new TableName[] { RSGROUP_TABLE_1, RSGROUP_TABLE_2 };
+      TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
+      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
+        tablesRestoreIncMultiple, tablesMapIncMultiple, true));
+      hTable = conn.getTable(table1_restore);
+
+      LOG.debug("After incremental restore: " + hTable.getDescriptor());
+      int countFamName = TEST_UTIL.countRows(hTable, famName);
+      LOG.debug("f1 has " + countFamName + " rows");
+      Assert.assertEquals(countFamName, NB_ROWS_IN_BATCH + ADD_ROWS);
+
+      int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name);
+      LOG.debug("f2 has " + countFam2Name + " rows");
+      Assert.assertEquals(countFam2Name, NB_ROWS_FAM2);
+
+      int countMobName = TEST_UTIL.countRows(hTable, mobName);
+      LOG.debug("mob has " + countMobName + " rows");
+      Assert.assertEquals(countMobName, NB_ROWS_MOB);
+      hTable.close();
+
+      hTable = conn.getTable(table2_restore);
+      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(hTable));
+      hTable.close();
+      admin.close();
+    }
+  }
+}
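The new test above asserts, via checkIfWALFilesBelongToRsgroup(...), that every WAL picked up by an incremental backup was written by a region server of the expected rsgroup. That helper lives in TestBackupBase (changed elsewhere in this commit) and is not shown here; the following is only a hypothetical sketch of what such a check could look like, assuming the standard WALs/<host>,<port>,<startcode>/ directory layout, with Path from org.apache.hadoop.fs and Address from org.apache.hadoop.hbase.net.

    // Hypothetical sketch only -- illustrates the assertion, not the actual TestBackupBase helper.
    static void assertWalsBelongToGroup(List<String> walFiles, RSGroupInfo group) {
      Set<String> groupHosts = new HashSet<>();
      for (Address a : group.getServers()) {
        groupHosts.add(a.toString().split(":")[0]);             // Address prints as host:port
      }
      for (String wal : walFiles) {
        String serverDir = new Path(wal).getParent().getName(); // assumed .../WALs/<host>,<port>,<startcode>/<wal>
        String host = serverDir.split(",")[0];
        assertTrue("WAL " + wal + " not written by a server of group " + group.getName(),
          groupHosts.contains(host));
      }
    }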
@@ -116,7 +116,8 @@ public class TestRemoteBackup extends TestBackupBase {

     latch.countDown();
     String backupId =
-      backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR);
+      backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR)
+        .getBackupId();
     assertTrue(checkSucceeded(backupId));

     LOG.info("backup complete " + backupId);

@@ -69,7 +69,8 @@ public class TestRemoteRestore extends TestBackupBase {
   public void testFullRestoreRemote() throws Exception {
     LOG.info("test remote full backup on a single table");
     String backupId =
-      backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR);
+      backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR)
+        .getBackupId();
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = new TableName[] { table1_restore };

@@ -90,7 +91,8 @@ public class TestRemoteRestore extends TestBackupBase {
   public void testFullRestoreRemoteWithAlternateRestoreOutputDir() throws Exception {
     LOG.info("test remote full backup on a single table with alternate restore output dir");
     String backupId =
-      backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR);
+      backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR)
+        .getBackupId();
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = new TableName[] { table1_restore };
@@ -50,7 +50,7 @@ public class TestRepairAfterFailedDelete extends TestBackupBase {
   public void testRepairBackupDelete() throws Exception {
     LOG.info("test repair backup delete on a single table with data");
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     String[] backupIds = new String[] { backupId };

@@ -47,7 +47,7 @@ public class TestRestoreBoundaryTests extends TestBackupBase {
   @Test
   public void testFullRestoreSingleEmpty() throws Exception {
     LOG.info("test full restore on a single table empty table");
-    String backupId = fullTableBackup(toList(table1.getNameAsString()));
+    String backupId = fullTableBackup(toList(table1.getNameAsString())).getBackupId();
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = new TableName[] { table1_restore };

@@ -67,7 +67,7 @@ public class TestRestoreBoundaryTests extends TestBackupBase {
     LOG.info("create full backup image on multiple tables");

     List<TableName> tables = toList(table2.getNameAsString(), table3.getNameAsString());
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     TableName[] restore_tableset = new TableName[] { table2, table3 };
     TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
     getBackupAdmin().restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
@@ -83,7 +83,7 @@ public class TestBackupLogCleaner extends TestBackupBase {
       // We can delete all files because we do not have yet recorded backup sessions
       assertTrue(size == walFiles.size());

-      String backupIdFull = fullTableBackup(tableSetFullList);
+      String backupIdFull = fullTableBackup(tableSetFullList).getBackupId();
       assertTrue(checkSucceeded(backupIdFull));
       // Check one more time
       deletable = cleaner.getDeletableFiles(walFiles);

@@ -123,7 +123,7 @@ public class TestBackupLogCleaner extends TestBackupBase {

       List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
       String backupIdIncMultiple =
-        backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR);
+        backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR).getBackupId();
       assertTrue(checkSucceeded(backupIdIncMultiple));
       deletable = cleaner.getDeletableFiles(newWalFiles);

@@ -21,6 +21,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.NavigableSet;
 import java.util.Objects;
 import java.util.Set;
 import java.util.SortedSet;

@@ -40,7 +41,7 @@ public class RSGroupInfo {

   private final String name;
   // Keep servers in a sorted set so has an expected ordering when displayed.
-  private final SortedSet<Address> servers;
+  private final NavigableSet<Address> servers;
   // Keep tables sorted too.

   /**

@@ -100,8 +101,10 @@ public class RSGroupInfo {
     return servers.contains(hostPort);
   }

-  /** Get list of servers. */
-  public Set<Address> getServers() {
+  /**
+   * Get list of servers.
+   */
+  public NavigableSet<Address> getServers() {
     return servers;
   }

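Widening getServers() from Set<Address> to NavigableSet<Address> makes the existing sorted contract visible to callers. A small illustrative sketch of what the wider type allows, using only standard NavigableSet methods; rsGroupInfo stands for any existing RSGroupInfo instance.

    // Sketch only: the NavigableSet view gives callers ordered access without copying.
    NavigableSet<Address> servers = rsGroupInfo.getServers();
    if (!servers.isEmpty()) {
      Address lowest = servers.first();            // deterministic first server in sort order
      Address highest = servers.last();
      for (Address a : servers.descendingSet()) {  // reverse-order iteration over the same set
        System.out.println(a + " (between " + lowest + " and " + highest + ")");
      }
    }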
@@ -229,7 +229,7 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase {
   }

   private String backup(BackupRequest request, BackupAdmin client) throws IOException {
-    String backupId = client.backupTables(request);
+    String backupId = client.backupTables(request).getBackupId();
     return backupId;
   }
