diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
index 25055fd5e8e..0dd0aeb8de6 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
@@ -42,7 +42,7 @@ public interface BackupAdmin extends Closeable {
    * @return the backup Id
    */
-  String backupTables(final BackupRequest userRequest) throws IOException;
+  BackupInfo backupTables(final BackupRequest userRequest) throws IOException;
 
   /**
    * Restore backup
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
index f580fb0c47b..a802627b4eb 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
@@ -519,7 +519,7 @@ public class BackupAdminImpl implements BackupAdmin {
   }
 
   @Override
-  public String backupTables(BackupRequest request) throws IOException {
+  public BackupInfo backupTables(BackupRequest request) throws IOException {
     BackupType type = request.getBackupType();
    String targetRootDir = request.getTargetRootDir();
     List<TableName> tableList = request.getTableList();
@@ -602,7 +602,7 @@ public class BackupAdminImpl implements BackupAdmin {
     }
 
     client.execute();
 
-    return backupId;
+    return client.backupInfo;
   }
 
   private List<TableName> excludeNonExistingTables(List<TableName> tableList,
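Since backupTables now returns the full BackupInfo instead of the bare backup id, existing callers keep working by appending getBackupId(). A minimal sketch of the adjusted call pattern (the request values here are illustrative, not part of the patch):

    try (Connection conn = ConnectionFactory.createConnection(conf);
      BackupAdmin admin = new BackupAdminImpl(conn)) {
      BackupRequest request = new BackupRequest.Builder().withBackupType(BackupType.INCREMENTAL)
        .withTableList(Lists.newArrayList(TableName.valueOf("table1")))
        .withTargetRootDir("/backupUT").build();
      BackupInfo info = admin.backupTables(request);
      String backupId = info.getBackupId(); // the value the old API returned
      List<String> backedUpWALs = info.getIncrBackupFileList(); // metadata now reachable directly
    }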
Status: FAILURE"); throw e; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java index c92c0747e83..d35b3d903e1 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -20,8 +20,11 @@ package org.apache.hadoop.hbase.backup.impl; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -33,7 +36,9 @@ import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.yetus.audience.InterfaceAudience; @@ -93,13 +98,36 @@ public class IncrementalBackupManager extends BackupManager { } newTimestamps = readRegionServerLastLogRollResult(); - logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode); + logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode, + getParticipatingServerNames(backupInfo.getTables())); logList = excludeProcV2WALs(logList); backupInfo.setIncrBackupFileList(logList); return newTimestamps; } + private Set getParticipatingServerNames(Set tables) throws IOException { + Set
participatingServers = new HashSet<>(); + boolean flag = false; + for (TableName table : tables) { + RSGroupInfo rsGroupInfo = conn.getAdmin().getRSGroup(table); + if (rsGroupInfo != null && !rsGroupInfo.getServers().isEmpty()) { + LOG.info("Participating servers for table {}, rsgroup Name: {} are: {}", table, + rsGroupInfo.getName(), rsGroupInfo.getServers()); + participatingServers.addAll(rsGroupInfo.getServers()); + } else { + LOG.warn( + "Rsgroup isn't available for table {}, all servers in the cluster will be participating ", + table); + flag = true; + } + } + + return flag + ? new HashSet<>() + : participatingServers.stream().map(a -> a.toString()).collect(Collectors.toSet()); + } + private List excludeProcV2WALs(List logList) { List list = new ArrayList<>(); for (int i = 0; i < logList.size(); i++) { @@ -126,8 +154,8 @@ public class IncrementalBackupManager extends BackupManager { * @throws IOException exception */ private List getLogFilesForNewBackup(Map olderTimestamps, - Map newestTimestamps, Configuration conf, String savedStartCode) - throws IOException { + Map newestTimestamps, Configuration conf, String savedStartCode, + Set servers) throws IOException { LOG.debug("In getLogFilesForNewBackup()\n" + "olderTimestamps: " + olderTimestamps + "\n newestTimestamps: " + newestTimestamps); @@ -160,7 +188,7 @@ public class IncrementalBackupManager extends BackupManager { for (FileStatus rs : rss) { p = rs.getPath(); host = BackupUtils.parseHostNameFromLogFile(p); - if (host == null) { + if (host == null || (!servers.isEmpty() && !servers.contains(host))) { continue; } FileStatus[] logs; @@ -215,7 +243,7 @@ public class IncrementalBackupManager extends BackupManager { continue; } host = BackupUtils.parseHostFromOldLog(p); - if (host == null) { + if (host == null || (!servers.isEmpty() && !servers.contains(host))) { continue; } currentLogTS = BackupUtils.getCreationTime(p); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java index f3ddda499b0..e47634c8501 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java @@ -21,8 +21,11 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -38,6 +41,7 @@ import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate; import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -83,6 +87,22 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate { Map serverAddressToLastBackupMap = new HashMap<>(); Map tableNameBackupInfoMap = new HashMap<>(); + Set
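The two host == null guards above now also drop WALs from non-participating servers. The contract: an empty server set disables filtering (at least one table had no usable rsgroup, so every region server's WALs stay candidates), while a non-empty set admits only hosts inside it. The predicate in isolation (hypothetical helper, for illustration only):

    /** Whether a WAL written by {@code host} belongs in this incremental backup. */
    static boolean isParticipating(Set<String> servers, String host) {
      // Empty set: rsgroup info was unavailable for some table,
      // so fall back to collecting WALs from every region server.
      return host != null && (servers.isEmpty() || servers.contains(host));
    }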
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
index f3ddda499b0..e47634c8501 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
@@ -21,8 +21,11 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -38,6 +41,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -83,6 +87,22 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
     Map<Address, Long> serverAddressToLastBackupMap = new HashMap<>();
 
     Map<TableName, Long> tableNameBackupInfoMap = new HashMap<>();
+    Set<Address> servers = new HashSet<>();
+    for (BackupInfo backupInfo : backups) {
+      for (TableName table : backupInfo.getTables()) {
+        RSGroupInfo rsGroupInfo = conn.getAdmin().getRSGroup(table);
+        if (
+          rsGroupInfo != null && rsGroupInfo.getServers() != null
+            && !rsGroupInfo.getServers().isEmpty()
+        ) {
+          servers.addAll(rsGroupInfo.getServers());
+        } else {
+          servers.addAll(conn.getAdmin().getRegionServers().stream().map(s -> s.getAddress())
+            .collect(Collectors.toList()));
+        }
+      }
+    }
+
     for (BackupInfo backupInfo : backups) {
       for (TableName table : backupInfo.getTables()) {
         tableNameBackupInfoMap.putIfAbsent(table, backupInfo.getStartTs());
@@ -90,7 +110,10 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
           tableNameBackupInfoMap.put(table, backupInfo.getStartTs());
           for (Map.Entry<String, Long> entry : backupInfo.getTableSetTimestampMap().get(table)
             .entrySet()) {
-            serverAddressToLastBackupMap.put(Address.fromString(entry.getKey()), entry.getValue());
+            if (servers.contains(Address.fromString(entry.getKey()))) {
+              serverAddressToLastBackupMap.put(Address.fromString(entry.getKey()),
+                entry.getValue());
+            }
           }
         }
       }
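Design note: the cleaner builds the servers set from the rsgroup of every backed-up table, falling back to all region servers when a table has no usable rsgroup, and then only keeps last-backup timestamps for addresses in that set. Per table this amounts to roughly the following (a sketch, assuming the Admin rsgroup API used in the hunk above; the helper name is hypothetical):

    static Set<Address> relevantServers(Admin admin, TableName table) throws IOException {
      RSGroupInfo group = admin.getRSGroup(table);
      if (group != null && group.getServers() != null && !group.getServers().isEmpty()) {
        return new HashSet<>(group.getServers()); // table is pinned to an rsgroup
      }
      // No rsgroup: stay conservative and consider the whole cluster.
      return admin.getRegionServers().stream().map(ServerName::getAddress)
        .collect(Collectors.toSet());
    }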
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index 7b5095a897e..613d1f9ee0f 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -17,14 +17,18 @@
  */
 package org.apache.hadoop.hbase.backup;
 
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Objects;
+import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -57,6 +61,9 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
 import org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner;
+import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
+import org.apache.hadoop.hbase.rsgroup.RSGroupUtil;
 import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.access.SecureTestUtil;
@@ -84,6 +91,15 @@ public class TestBackupBase {
   protected static Configuration conf1;
   protected static Configuration conf2;
 
+  protected static final int RSGROUP_RS_NUM = 5;
+  protected static final int NUM_REGIONSERVERS = 3;
+  protected static final String RSGROUP_NAME = "rsgroup1";
+  protected static final String RSGROUP_NAMESPACE = "rsgroup_ns";
+  protected static final TableName RSGROUP_TABLE_1 =
+    TableName.valueOf(RSGROUP_NAMESPACE + ":rsgroup_table1");
+  protected static final TableName RSGROUP_TABLE_2 =
+    TableName.valueOf(RSGROUP_NAMESPACE + ":rsgroup_table2");
+
   protected static TableName table1 = TableName.valueOf("table1");
   protected static TableDescriptor table1Desc;
   protected static TableName table2 = TableName.valueOf("table2");
@@ -105,6 +121,7 @@ public class TestBackupBase {
 
   protected static boolean autoRestoreOnFailure;
   protected static boolean useSecondCluster;
+  protected static boolean enableRSgroup;
 
   static class IncrementalTableBackupClientForTest extends IncrementalTableBackupClient {
     public IncrementalTableBackupClientForTest() {
@@ -257,6 +274,22 @@ public class TestBackupBase {
     }
   }
 
+  private static RSGroupInfo addGroup(String groupName, int serverCount) throws IOException {
+    Admin admin = TEST_UTIL.getAdmin();
+    RSGroupInfo defaultInfo = admin.getRSGroup(RSGroupInfo.DEFAULT_GROUP);
+    admin.addRSGroup(groupName);
+    Set<Address> set = new HashSet<>();
+    for (Address server : defaultInfo.getServers()) {
+      if (set.size() == serverCount) {
+        break;
+      }
+      set.add(server);
+    }
+    admin.moveServersToRSGroup(set, groupName);
+    RSGroupInfo result = admin.getRSGroup(groupName);
+    return result;
+  }
+
   public static void setUpHelper() throws Exception {
     BACKUP_ROOT_DIR = Path.SEPARATOR + "backupUT";
     BACKUP_REMOTE_ROOT_DIR = Path.SEPARATOR + "backupUT";
@@ -279,7 +312,13 @@ public class TestBackupBase {
     // Set MultiWAL (with 2 default WAL files per RS)
     conf1.set(WALFactory.WAL_PROVIDER, provider);
 
-    TEST_UTIL.startMiniCluster();
+    if (enableRSgroup) {
+      conf1.setBoolean(RSGroupUtil.RS_GROUP_ENABLED, true);
+      TEST_UTIL.startMiniCluster(RSGROUP_RS_NUM + NUM_REGIONSERVERS);
+      addGroup(RSGROUP_NAME, RSGROUP_RS_NUM);
+    } else {
+      TEST_UTIL.startMiniCluster();
+    }
 
     if (useSecondCluster) {
       conf2 = HBaseConfiguration.create(conf1);
@@ -317,6 +356,7 @@ public class TestBackupBase {
   public static void setUp() throws Exception {
     TEST_UTIL = new HBaseTestingUtil();
     conf1 = TEST_UTIL.getConfiguration();
+    enableRSgroup = false;
     autoRestoreOnFailure = true;
     useSecondCluster = false;
     setUpHelper();
@@ -342,6 +382,7 @@ public class TestBackupBase {
     }
     TEST_UTIL.shutdownMiniCluster();
     TEST_UTIL.shutdownMiniMapReduceCluster();
+    enableRSgroup = false;
     autoRestoreOnFailure = true;
     useSecondCluster = false;
   }
@@ -366,16 +407,16 @@ public class TestBackupBase {
     return request;
   }
 
-  protected String backupTables(BackupType type, List<TableName> tables, String path)
+  protected BackupInfo backupTables(BackupType type, List<TableName> tables, String path)
     throws IOException {
     Connection conn = null;
     BackupAdmin badmin = null;
-    String backupId;
+    BackupInfo backupInfo;
     try {
       conn = ConnectionFactory.createConnection(conf1);
       badmin = new BackupAdminImpl(conn);
       BackupRequest request = createBackupRequest(type, tables, path);
-      backupId = badmin.backupTables(request);
+      backupInfo = badmin.backupTables(request);
     } finally {
       if (badmin != null) {
         badmin.close();
@@ -384,14 +425,14 @@ public class TestBackupBase {
         conn.close();
       }
     }
-    return backupId;
+    return backupInfo;
   }
 
-  protected String fullTableBackup(List<TableName> tables) throws IOException {
+  protected BackupInfo fullTableBackup(List<TableName> tables) throws IOException {
     return backupTables(BackupType.FULL, tables, BACKUP_ROOT_DIR);
   }
 
-  protected String incrementalTableBackup(List<TableName> tables) throws IOException {
+  protected BackupInfo incrementalTableBackup(List<TableName> tables) throws IOException {
     return backupTables(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
   }
 
@@ -439,6 +480,23 @@ public class TestBackupBase {
     table.close();
     ha.close();
     conn.close();
+
+    if (enableRSgroup) {
+      ha.createNamespace(NamespaceDescriptor.create(RSGROUP_NAMESPACE)
+        .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, RSGROUP_NAME).build());
+
+      ha.createTable(TableDescriptorBuilder.newBuilder(RSGROUP_TABLE_1)
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)).build());
+      table = ConnectionFactory.createConnection(conf1).getTable(RSGROUP_TABLE_1);
+      loadTable(table);
+      table.close();
+
+      ha.createTable(TableDescriptorBuilder.newBuilder(RSGROUP_TABLE_2)
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)).build());
+      table = ConnectionFactory.createConnection(conf1).getTable(RSGROUP_TABLE_2);
+      loadTable(table);
+      table.close();
+    }
   }
 
   protected boolean checkSucceeded(String backupId) throws IOException {
@@ -461,7 +519,7 @@ public class TestBackupBase {
     return status.getState() == BackupState.FAILED;
   }
 
-  private BackupInfo getBackupInfo(String backupId) throws IOException {
+  protected BackupInfo getBackupInfo(String backupId) throws IOException {
     try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
       BackupInfo status = table.readBackupInfo(backupId);
       return status;
@@ -498,6 +556,26 @@ public class TestBackupBase {
     return logFiles;
   }
 
+  protected Set<Address> getRsgroupServers(String rsgroupName) throws IOException {
+    RSGroupInfo rsGroupInfo = TEST_UTIL.getAdmin().getRSGroup(rsgroupName);
+    if (
+      rsGroupInfo != null && rsGroupInfo.getServers() != null && !rsGroupInfo.getServers().isEmpty()
+    ) {
+      return new HashSet<>(rsGroupInfo.getServers());
+    }
+    return new HashSet<>();
+  }
+
+  protected void checkIfWALFilesBelongToRsgroup(List<String> walFiles, String rsgroupName)
+    throws IOException {
+    for (String file : walFiles) {
+      Address walServerAddress =
+        Address.fromString(BackupUtils.parseHostNameFromLogFile(new Path(file)));
+      assertTrue("Backed WAL files should be from RSGroup " + rsgroupName,
+        getRsgroupServers(rsgroupName).contains(walServerAddress));
+    }
+  }
+
   protected void dumpBackupDir() throws IOException {
     // Dump Backup Dir
     FileSystem fs = FileSystem.get(conf1);
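The two new base-class helpers give rsgroup-aware tests a one-line assertion that every WAL captured by a backup came from the group's servers. Typical usage inside a test (a sketch, mirroring the existing tests' client and request setup):

    BackupInfo info = client.backupTables(
      createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR));
    assertTrue(checkSucceeded(info.getBackupId()));
    // Every file in the incremental WAL list must parse to a server in rsgroup1.
    checkIfWALFilesBelongToRsgroup(info.getIncrBackupFileList(), RSGROUP_NAME);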
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
index 0c4d44d489d..d852550dcf1 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
@@ -58,7 +58,7 @@ public class TestBackupDelete extends TestBackupBase {
   public void testBackupDelete() throws Exception {
     LOG.info("test backup delete on a single table with data");
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     String[] backupIds = new String[] { backupId };
@@ -85,7 +85,7 @@ public class TestBackupDelete extends TestBackupBase {
   public void testBackupDeleteCommand() throws Exception {
     LOG.info("test backup delete on a single table with data: command-line");
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
@@ -117,7 +117,7 @@ public class TestBackupDelete extends TestBackupBase {
         return System.currentTimeMillis() - 2 * 24 * 3600 * 1000;
       }
     });
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     EnvironmentEdgeManager.reset();
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
index 2798e1a16f0..9499957c568 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
@@ -56,7 +56,7 @@ public class TestBackupDeleteRestore extends TestBackupBase {
     LOG.info("test full restore on a single table empty table");
 
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     int numRows = TEST_UTIL.countRows(table1);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java
index 12c8d5c4065..254783346cd 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java
@@ -142,7 +142,7 @@ public class TestBackupDeleteWithFailures extends TestBackupBase {
     throws Exception {
     LOG.info("test repair backup delete on a single table with data and failures " + failures[0]);
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     String[] backupIds = new String[] { backupId };
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
index 7ce039fd666..489f17e289d 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
@@ -82,7 +82,7 @@ public class TestBackupDescribe extends TestBackupBase {
     LOG.info("test backup describe on a single table with data: command-line");
 
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
 
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleanerWithRsgroup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleanerWithRsgroup.java
new file mode 100644
index 00000000000..3a42b7a6b8f
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleanerWithRsgroup.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+@Category(MediumTests.class)
+public class TestBackupLogCleanerWithRsgroup extends TestBackupBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestBackupLogCleanerWithRsgroup.class);
+
+  private static final Logger LOG = LoggerFactory.getLogger(TestBackupLogCleanerWithRsgroup.class);
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    TEST_UTIL = new HBaseTestingUtil();
+    conf1 = TEST_UTIL.getConfiguration();
+    enableRSgroup = true;
+    autoRestoreOnFailure = true;
+    useSecondCluster = false;
+    setUpHelper();
+  }
+
+  @Test
+  public void testBackupLogCleanerRsgroup() throws Exception {
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+    List<TableName> tableSetFullList = Lists.newArrayList(RSGROUP_TABLE_1);
+
+    try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
+      // Verify that we have no backup sessions yet
+      assertFalse(systemTable.hasBackupSessions());
+
+      List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+      BackupLogCleaner cleaner = new BackupLogCleaner();
+      cleaner.setConf(TEST_UTIL.getConfiguration());
+      Map<String, Object> params = new HashMap<>();
+      params.put(HMaster.MASTER, TEST_UTIL.getHBaseCluster().getMaster());
+      cleaner.init(params);
+      cleaner.setConf(TEST_UTIL.getConfiguration());
+
+      Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
+      // We can delete all files because we do not have yet recorded backup sessions
+      assertTrue(Iterables.size(deletable) == walFiles.size());
+
+      String backupIdFull = fullTableBackup(tableSetFullList).getBackupId();
+      assertTrue(checkSucceeded(backupIdFull));
+
+      // Check one more time
+      deletable = cleaner.getDeletableFiles(walFiles);
+      assertTrue(Iterables.size(deletable) == walFiles.size());
+
+      Connection conn = ConnectionFactory.createConnection(conf1);
+      // #2 - insert some data to table
+      Table t1 = conn.getTable(RSGROUP_TABLE_1);
+      Put p1;
+      Random rnd = new Random();
+      for (int i = 0; i < 5000; i++) {
+        p1 = new Put(Bytes.toBytes(1000000 + rnd.nextInt(9000000)));
+        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+        t1.put(p1);
+      }
+      t1.close();
+
+      List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+      // New list of wal files is greater than the previous one,
+      // because new wal per RS have been opened after full backup
+      assertTrue(walFiles.size() < newWalFiles.size());
+
+      deletable = cleaner.getDeletableFiles(newWalFiles);
+      assertTrue(newWalFiles.size() > Iterables.size(deletable));
+
+      // #3 - incremental backup
+      List<TableName> tableSetIncList = Lists.newArrayList(RSGROUP_TABLE_1);
+      String backupIdIncMultiple =
+        backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR).getBackupId();
+      assertTrue(checkSucceeded(backupIdIncMultiple));
+
+      deletable = cleaner.getDeletableFiles(newWalFiles);
+      assertTrue(Iterables.size(deletable) == newWalFiles.size());
+
+      conn.close();
+    }
+  }
+}
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
index c34f6be43b5..194e03351f2 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
@@ -62,7 +62,7 @@ public class TestBackupMerge extends TestBackupBase {
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -83,7 +83,7 @@ public class TestBackupMerge extends TestBackupBase {
     // #3 - incremental backup for multiple tables
     tables = Lists.newArrayList(table1, table2);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
@@ -95,7 +95,7 @@ public class TestBackupMerge extends TestBackupBase {
 
     // #3 - incremental backup for multiple tables
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple2 = client.backupTables(request);
+    String backupIdIncMultiple2 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple2));
 
     try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
index 36cecd3faf5..5149880820d 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
@@ -63,7 +63,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     Admin admin = conn.getAdmin();
     BackupAdmin client = new BackupAdminImpl(conn);
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdFull));
     // #2 - insert some data to table table1
     Table t1 = conn.getTable(table1);
@@ -78,7 +78,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     // #3 - incremental backup for table1
     tables = Lists.newArrayList(table1);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc1 = client.backupTables(request);
+    String backupIdInc1 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdInc1));
     // #4 - insert some data to table table2
     Table t2 = conn.getTable(table2);
@@ -91,7 +91,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     // #5 - incremental backup for table1, table2
     tables = Lists.newArrayList(table1, table2);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc2 = client.backupTables(request);
+    String backupIdInc2 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdInc2));
     // #6 - insert some data to table table1
     t1 = conn.getTable(table1);
@@ -103,7 +103,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     // #7 - incremental backup for table1
     tables = Lists.newArrayList(table1);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc3 = client.backupTables(request);
+    String backupIdInc3 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdInc3));
     // #8 - insert some data to table table2
     t2 = conn.getTable(table2);
@@ -115,17 +115,17 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     // #9 - incremental backup for table1, table2
     tables = Lists.newArrayList(table1, table2);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc4 = client.backupTables(request);
+    String backupIdInc4 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdInc4));
     // #10 full backup for table3
     tables = Lists.newArrayList(table3);
     request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull2 = client.backupTables(request);
+    String backupIdFull2 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdFull2));
     // #11 - incremental backup for table3
     tables = Lists.newArrayList(table3);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc5 = client.backupTables(request);
+    String backupIdInc5 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdInc5));
     LOG.error("Delete backupIdInc2");
     client.deleteBackups(new String[] { backupIdInc2 });
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
index fa624250929..4d39ec175f2 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
@@ -68,7 +68,7 @@ public class TestBackupShowHistory extends TestBackupBase {
     LOG.info("test backup history on a single table with data");
 
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     LOG.info("backup complete");
@@ -92,7 +92,7 @@ public class TestBackupShowHistory extends TestBackupBase {
     assertTrue(output.indexOf(backupId) > 0);
 
     tableList = Lists.newArrayList(table2);
-    String backupId2 = fullTableBackup(tableList);
+    String backupId2 = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId2));
     LOG.info("backup complete: " + table2);
     BackupInfo.Filter tableNameFilter = image -> {
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
index 1a1e5dbf1cc..93f99c72f0a 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
@@ -53,7 +53,7 @@ public class TestBackupStatusProgress extends TestBackupBase {
     LOG.info("test backup status/progress on a single table with data");
 
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
 
@@ -70,7 +70,7 @@ public class TestBackupStatusProgress extends TestBackupBase {
     LOG.info("test backup status/progress on a single table with data: command-line");
 
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
index 385a6b3c519..8c9afcd8a00 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
@@ -55,7 +55,7 @@ public class TestFullRestore extends TestBackupBase {
     LOG.info("test full restore on a single table empty table");
 
    List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     LOG.info("backup complete");
@@ -76,7 +76,7 @@ public class TestFullRestore extends TestBackupBase {
     LOG.info("test full restore on a single table empty table: command-line");
 
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
     // restore <backup_root_path> <backup_id> <tables> [tableMapping]
@@ -97,7 +97,7 @@ public class TestFullRestore extends TestBackupBase {
     LOG.info("test full restore on a single table: command-line, check only");
 
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
     // restore <backup_root_path> <backup_id> <tables> [tableMapping]
@@ -119,7 +119,7 @@ public class TestFullRestore extends TestBackupBase {
   public void testFullRestoreMultiple() throws Exception {
     LOG.info("create full backup image on multiple tables");
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -143,7 +143,7 @@ public class TestFullRestore extends TestBackupBase {
   public void testFullRestoreMultipleCommand() throws Exception {
     LOG.info("create full backup image on multiple tables: command-line");
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -172,7 +172,7 @@ public class TestFullRestore extends TestBackupBase {
   public void testFullRestoreSingleOverwrite() throws Exception {
     LOG.info("test full restore on a single table empty table");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     LOG.info("backup complete");
@@ -191,7 +191,7 @@ public class TestFullRestore extends TestBackupBase {
   public void testFullRestoreSingleOverwriteCommand() throws Exception {
     LOG.info("test full restore on a single table empty table: command-line");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
@@ -216,7 +216,7 @@ public class TestFullRestore extends TestBackupBase {
     LOG.info("create full backup image on multiple tables");
 
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -234,7 +234,7 @@ public class TestFullRestore extends TestBackupBase {
     LOG.info("create full backup image on multiple tables: command-line");
 
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -259,7 +259,7 @@ public class TestFullRestore extends TestBackupBase {
   public void testFullRestoreSingleDNE() throws Exception {
     LOG.info("test restore fails on a single table that does not exist");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     LOG.info("backup complete");
@@ -279,7 +279,7 @@ public class TestFullRestore extends TestBackupBase {
   public void testFullRestoreSingleDNECommand() throws Exception {
     LOG.info("test restore fails on a single table that does not exist: command-line");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     LOG.info("backup complete");
@@ -302,7 +302,7 @@ public class TestFullRestore extends TestBackupBase {
     LOG.info("test restore fails on multiple tables that do not exist");
 
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset =
@@ -322,7 +322,7 @@ public class TestFullRestore extends TestBackupBase {
     LOG.info("test restore fails on multiple tables that do not exist: command-line");
 
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset =
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index 90fbba2bf0a..a42dc255c90 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -98,7 +99,7 @@ public class TestIncrementalBackup extends TestBackupBase {
       Admin admin = conn.getAdmin();
       BackupAdminImpl client = new BackupAdminImpl(conn);
       BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-      String backupIdFull = client.backupTables(request);
+      String backupIdFull = client.backupTables(request).getBackupId();
       assertTrue(checkSucceeded(backupIdFull));
 
       // #2 - insert some data to table
@@ -146,8 +147,11 @@ public class TestIncrementalBackup extends TestBackupBase {
       // #3 - incremental backup for multiple tables
       tables = Lists.newArrayList(table1, table2);
       request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-      String backupIdIncMultiple = client.backupTables(request);
+      BackupInfo backupInfoIncMultiple = client.backupTables(request);
+      String backupIdIncMultiple = backupInfoIncMultiple.getBackupId();
       assertTrue(checkSucceeded(backupIdIncMultiple));
+      checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple.getIncrBackupFileList(),
+        RSGroupInfo.DEFAULT_GROUP);
 
       // add column family f2 to table1
       // drop column family f3
@@ -166,8 +170,11 @@ public class TestIncrementalBackup extends TestBackupBase {
 
       // #4 - additional incremental backup for multiple tables
       request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-      String backupIdIncMultiple2 = client.backupTables(request);
+      BackupInfo backupInfoIncMultiple2 = client.backupTables(request);
+      String backupIdIncMultiple2 = backupInfoIncMultiple2.getBackupId();
       assertTrue(checkSucceeded(backupIdIncMultiple2));
+      checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple2.getIncrBackupFileList(),
+        RSGroupInfo.DEFAULT_GROUP);
 
       // #5 - restore full backup for all tables
       TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
index a5eec87fb06..0d7d5528558 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
@@ -65,7 +65,7 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -88,7 +88,7 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
     // #3 - incremental backup for table1
     tables = Lists.newArrayList(table1);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
     // #4 - restore full backup for all tables, without overwrite
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
index 1ece1770489..0e4b3f32cbf 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
@@ -240,7 +240,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -261,7 +261,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
     // #3 - incremental backup for multiple tables
     tables = Lists.newArrayList(table1, table2);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
@@ -273,7 +273,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
 
     // #3 - incremental backup for multiple tables
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple2 = client.backupTables(request);
+    String backupIdIncMultiple2 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple2));
 
     // #4 Merge backup images with failures
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
index a182144a8ab..581be7720c0 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
@@ -70,7 +70,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -97,7 +97,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
     // #3 - incremental backup for table1
     tables = Lists.newArrayList(table1);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple));
     // #4 bulk load again
     LOG.debug("bulk loading into " + testName);
@@ -110,7 +110,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
     // #5 - incremental backup for table1
     tables = Lists.newArrayList(table1);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple1 = client.backupTables(request);
+    String backupIdIncMultiple1 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple1));
     // Delete all data in table1
     TEST_UTIL.deleteTableData(table1);
@@ -125,7 +125,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
     Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2 + actual + actual1);
 
     request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    backupIdFull = client.backupTables(request);
+    backupIdFull = client.backupTables(request).getBackupId();
     try (final BackupSystemTable table = new BackupSystemTable(conn)) {
       Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>> pair =
         table.readBulkloadRows(tables);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
index c8d53656418..f2c122632a6 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
@@ -95,7 +95,7 @@ public class TestIncrementalBackupWithFailures extends TestBackupBase {
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdFull));
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithRsgroup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithRsgroup.java
new file mode 100644
index 00000000000..f59d8dff5f9
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithRsgroup.java
@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+@RunWith(Parameterized.class)
+public class TestIncrementalBackupWithRsgroup extends TestBackupBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestIncrementalBackupWithRsgroup.class);
+
+  private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackupWithRsgroup.class);
+
+  public TestIncrementalBackupWithRsgroup(Boolean b) {
+  }
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    List<Object[]> params = new ArrayList<>();
+    params.add(new Object[] { Boolean.TRUE });
+    return params;
+  }
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    TEST_UTIL = new HBaseTestingUtil();
+    conf1 = TEST_UTIL.getConfiguration();
+    enableRSgroup = true;
+    autoRestoreOnFailure = true;
+    useSecondCluster = false;
+    setUpHelper();
+  }
+
+  // implement all test cases in 1 test since incremental
+  // backup/restore has dependencies
+  @Test
+  public void TestIncBackupRestore() throws Exception {
+    int ADD_ROWS = 99;
+
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+    List<TableName> tables = Lists.newArrayList(RSGROUP_TABLE_1, RSGROUP_TABLE_2);
+    final byte[] fam3Name = Bytes.toBytes("f3");
+    final byte[] mobName = Bytes.toBytes("mob");
+
+    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(RSGROUP_TABLE_1)
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName))
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name))
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(mobName).setMobEnabled(true)
+        .setMobThreshold(5L).build())
+      .build();
+    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
+
+    try (Connection conn = ConnectionFactory.createConnection(conf1)) {
+      int NB_ROWS_FAM3 = 6;
+      insertIntoTable(conn, RSGROUP_TABLE_1, fam3Name, 3, NB_ROWS_FAM3).close();
+      insertIntoTable(conn, RSGROUP_TABLE_1, mobName, 3, NB_ROWS_FAM3).close();
+      Admin admin = conn.getAdmin();
+      BackupAdminImpl client = new BackupAdminImpl(conn);
+      BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+      String backupIdFull = client.backupTables(request).getBackupId();
+      assertTrue(checkSucceeded(backupIdFull));
+
+      // #2 - insert some data to table
+      Table t1 = insertIntoTable(conn, RSGROUP_TABLE_1, famName, 1, ADD_ROWS);
+      LOG.debug("writing " + ADD_ROWS + " rows to " + RSGROUP_TABLE_1);
+      Assert.assertEquals(HBaseTestingUtil.countRows(t1),
+        NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
+      LOG.debug("written " + ADD_ROWS + " rows to " + RSGROUP_TABLE_1);
+      // additionally, insert rows to MOB cf
+      int NB_ROWS_MOB = 111;
+      insertIntoTable(conn, RSGROUP_TABLE_1, mobName, 3, NB_ROWS_MOB);
+      LOG.debug("written " + NB_ROWS_MOB + " rows to " + RSGROUP_TABLE_1 + " to Mob enabled CF");
+      t1.close();
+      Assert.assertEquals(HBaseTestingUtil.countRows(t1),
+        NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB);
+      Table t2 = conn.getTable(RSGROUP_TABLE_2);
+      Put p2;
+      for (int i = 0; i < 5; i++) {
+        p2 = new Put(Bytes.toBytes("row-t2" + i));
+        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+        t2.put(p2);
+      }
+      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(t2));
+      t2.close();
+      LOG.debug("written " + 5 + " rows to " + RSGROUP_TABLE_2);
+      // split RSGROUP_TABLE_1
+      SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+      List<HRegion> regions = cluster.getRegions(RSGROUP_TABLE_1);
+      byte[] name = regions.get(0).getRegionInfo().getRegionName();
+      long startSplitTime = EnvironmentEdgeManager.currentTime();
+      try {
+        admin.splitRegionAsync(name).get();
+      } catch (Exception e) {
+        // although split fail, this may not affect following check in current API,
+        // exception will be thrown.
+        LOG.debug("region is not splittable, because " + e);
+      }
+      while (!admin.isTableAvailable(RSGROUP_TABLE_1)) {
+        Thread.sleep(100);
+      }
+      long endSplitTime = EnvironmentEdgeManager.currentTime();
+      // split finished
+      LOG.debug("split finished in =" + (endSplitTime - startSplitTime));
+
+      // #3 - incremental backup for multiple tables
+      tables = Lists.newArrayList(RSGROUP_TABLE_1, RSGROUP_TABLE_2);
+      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+      BackupInfo backupInfoIncMultiple = client.backupTables(request);
+      String backupIdIncMultiple = backupInfoIncMultiple.getBackupId();
+      assertTrue(checkSucceeded(backupIdIncMultiple));
+      checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple.getIncrBackupFileList(), RSGROUP_NAME);
+
+      // add column family f2 to RSGROUP_TABLE_1
+      // drop column family f3
+      final byte[] fam2Name = Bytes.toBytes("f2");
+      newTable1Desc = TableDescriptorBuilder.newBuilder(newTable1Desc)
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name)).removeColumnFamily(fam3Name)
+        .build();
+      TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
+
+      int NB_ROWS_FAM2 = 7;
+      Table t3 = insertIntoTable(conn, RSGROUP_TABLE_1, fam2Name, 2, NB_ROWS_FAM2);
+      t3.close();
+
+      // Wait for 5 sec to make sure that old WALs were deleted
+      Thread.sleep(5000);
+
+      // #4 - additional incremental backup for multiple tables
+      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+      BackupInfo backupInfoIncMultiple2 = client.backupTables(request);
+      String backupIdIncMultiple2 = backupInfoIncMultiple2.getBackupId();
+      assertTrue(checkSucceeded(backupIdIncMultiple2));
+      checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple2.getIncrBackupFileList(), RSGROUP_NAME);
+
+      // #5 - restore full backup for all tables
+      TableName[] tablesRestoreFull = new TableName[] { RSGROUP_TABLE_1, RSGROUP_TABLE_2 };
+      TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
+
+      LOG.debug("Restoring full " + backupIdFull);
+      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
+        tablesRestoreFull, tablesMapFull, true));
+
+      // #6.1 - check tables for full restore
+      Admin hAdmin = TEST_UTIL.getAdmin();
+      assertTrue(hAdmin.tableExists(table1_restore));
+      assertTrue(hAdmin.tableExists(table2_restore));
+      hAdmin.close();
+
+      // #6.2 - checking row count of tables for full restore
+      Table hTable = conn.getTable(table1_restore);
+      Assert.assertEquals(HBaseTestingUtil.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
+      hTable.close();
+
+      hTable = conn.getTable(table2_restore);
+      Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtil.countRows(hTable));
+      hTable.close();
+
+      // #7 - restore incremental backup for multiple tables, with overwrite
+      TableName[] tablesRestoreIncMultiple = new TableName[] { RSGROUP_TABLE_1, RSGROUP_TABLE_2 };
+      TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
+      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
+        tablesRestoreIncMultiple, tablesMapIncMultiple, true));
+      hTable = conn.getTable(table1_restore);
+
+      LOG.debug("After incremental restore: " + hTable.getDescriptor());
+      int countFamName = TEST_UTIL.countRows(hTable, famName);
+      LOG.debug("f1 has " + countFamName + " rows");
+      Assert.assertEquals(countFamName, NB_ROWS_IN_BATCH + ADD_ROWS);
+
+      int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name);
+      LOG.debug("f2 has " + countFam2Name + " rows");
+      Assert.assertEquals(countFam2Name, NB_ROWS_FAM2);
+
+      int countMobName = TEST_UTIL.countRows(hTable, mobName);
+      LOG.debug("mob has " + countMobName + " rows");
+      Assert.assertEquals(countMobName, NB_ROWS_MOB);
+      hTable.close();
+
+      hTable = conn.getTable(table2_restore);
+      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(hTable));
+      hTable.close();
+      admin.close();
+    }
+  }
+}
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
index a148ab232dc..3743c1916a4 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
@@ -116,7 +116,8 @@ public class TestRemoteBackup extends TestBackupBase {
     latch.countDown();
 
     String backupId =
-      backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR);
+      backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR)
+        .getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     LOG.info("backup complete " + backupId);
+
+      // add column family f2 to RSGROUP_TABLE_1
+      // drop column family f3
+      final byte[] fam2Name = Bytes.toBytes("f2");
+      newTable1Desc = TableDescriptorBuilder.newBuilder(newTable1Desc)
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name)).removeColumnFamily(fam3Name)
+        .build();
+      TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
+
+      int NB_ROWS_FAM2 = 7;
+      Table t3 = insertIntoTable(conn, RSGROUP_TABLE_1, fam2Name, 2, NB_ROWS_FAM2);
+      t3.close();
+
+      // Wait for 5 sec to make sure that old WALs were deleted
+      Thread.sleep(5000);
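+      // The next incremental image therefore has to capture both the schema change
+      // (f2 added, f3 dropped) and the newly written f2 rows.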
+
+      // #4 - additional incremental backup for multiple tables
+      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+      BackupInfo backupInfoIncMultiple2 = client.backupTables(request);
+      String backupIdIncMultiple2 = backupInfoIncMultiple2.getBackupId();
+      assertTrue(checkSucceeded(backupIdIncMultiple2));
+      checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple2.getIncrBackupFileList(), RSGROUP_NAME);
+
+      // #5 - restore full backup for all tables
+      TableName[] tablesRestoreFull = new TableName[] { RSGROUP_TABLE_1, RSGROUP_TABLE_2 };
+      TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
+
+      LOG.debug("Restoring full " + backupIdFull);
+      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
+        tablesRestoreFull, tablesMapFull, true));
+
+      // #6.1 - check tables for full restore
+      Admin hAdmin = TEST_UTIL.getAdmin();
+      assertTrue(hAdmin.tableExists(table1_restore));
+      assertTrue(hAdmin.tableExists(table2_restore));
+      hAdmin.close();
+
+      // #6.2 - checking row count of tables for full restore
+      Table hTable = conn.getTable(table1_restore);
+      Assert.assertEquals(HBaseTestingUtil.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
+      hTable.close();
+
+      hTable = conn.getTable(table2_restore);
+      Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtil.countRows(hTable));
+      hTable.close();
+
+      // #7 - restore incremental backup for multiple tables, with overwrite
+      TableName[] tablesRestoreIncMultiple = new TableName[] { RSGROUP_TABLE_1, RSGROUP_TABLE_2 };
+      TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
+      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
+        tablesRestoreIncMultiple, tablesMapIncMultiple, true));
+      hTable = conn.getTable(table1_restore);
+
+      LOG.debug("After incremental restore: " + hTable.getDescriptor());
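+      // f3 was dropped before the second incremental backup, so the restored table is
+      // expected to hold only f1, f2 and the mob family; the per-family counts below
+      // reflect the state at the time of that backup.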
+      int countFamName = TEST_UTIL.countRows(hTable, famName);
+      LOG.debug("f1 has " + countFamName + " rows");
+      Assert.assertEquals(countFamName, NB_ROWS_IN_BATCH + ADD_ROWS);
+
+      int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name);
+      LOG.debug("f2 has " + countFam2Name + " rows");
+      Assert.assertEquals(countFam2Name, NB_ROWS_FAM2);
+
+      int countMobName = TEST_UTIL.countRows(hTable, mobName);
+      LOG.debug("mob has " + countMobName + " rows");
+      Assert.assertEquals(countMobName, NB_ROWS_MOB);
+      hTable.close();
+
+      hTable = conn.getTable(table2_restore);
+      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(hTable));
+      hTable.close();
+      admin.close();
+    }
+  }
+}
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
index a148ab232dc..3743c1916a4 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
@@ -116,7 +116,8 @@ public class TestRemoteBackup extends TestBackupBase {
     latch.countDown();
     String backupId =
-      backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR);
+      backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR)
+        .getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete " + backupId);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
index b3a2872c709..0041dc11da7 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
@@ -69,7 +69,8 @@ public class TestRemoteRestore extends TestBackupBase {
   public void testFullRestoreRemote() throws Exception {
     LOG.info("test remote full backup on a single table");
     String backupId =
-      backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR);
+      backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR)
+        .getBackupId();
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = new TableName[] { table1_restore };
@@ -90,7 +91,8 @@ public class TestRemoteRestore extends TestBackupBase {
   public void testFullRestoreRemoteWithAlternateRestoreOutputDir() throws Exception {
     LOG.info("test remote full backup on a single table with alternate restore output dir");
     String backupId =
-      backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR);
+      backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR)
+        .getBackupId();
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = new TableName[] { table1_restore };
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
index 93345fd1705..d6110eefb84 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
@@ -50,7 +50,7 @@ public class TestRepairAfterFailedDelete extends TestBackupBase {
   public void testRepairBackupDelete() throws Exception {
     LOG.info("test repair backup delete on a single table with data");
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     String[] backupIds = new String[] { backupId };
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
index 7b49558031e..9411e433d18 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
@@ -47,7 +47,7 @@ public class TestRestoreBoundaryTests extends TestBackupBase {
   @Test
   public void testFullRestoreSingleEmpty() throws Exception {
     LOG.info("test full restore on a single table empty table");
-    String backupId = fullTableBackup(toList(table1.getNameAsString()));
+    String backupId = fullTableBackup(toList(table1.getNameAsString())).getBackupId();
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = new TableName[] { table1_restore };
@@ -67,7 +67,7 @@
     LOG.info("create full backup image on multiple tables");
     List<String> tables = toList(table2.getNameAsString(), table3.getNameAsString());
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     TableName[] restore_tableset = new TableName[] { table2, table3 };
     TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
     getBackupAdmin().restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
index 2b0f9c0cba5..545b11c2c4c 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
@@ -83,7 +83,7 @@ public class TestBackupLogCleaner extends TestBackupBase {
     // We can delete all files because we do not have yet recorded backup sessions
     assertTrue(size == walFiles.size());
-    String backupIdFull = fullTableBackup(tableSetFullList);
+    String backupIdFull = fullTableBackup(tableSetFullList).getBackupId();
     assertTrue(checkSucceeded(backupIdFull));
     // Check one more time
     deletable = cleaner.getDeletableFiles(walFiles);
@@ -123,7 +123,7 @@
     List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
     String backupIdIncMultiple =
-      backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR);
+      backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple));
     deletable = cleaner.getDeletableFiles(newWalFiles);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
index 94ea6f5845c..4bac3f8fa9a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
@@ -21,6 +21,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.NavigableSet;
 import java.util.Objects;
 import java.util.Set;
 import java.util.SortedSet;
@@ -40,7 +41,7 @@ public class RSGroupInfo {
   private final String name;
   // Keep servers in a sorted set so has an expected ordering when displayed.
-  private final SortedSet<Address> servers;
+  private final NavigableSet<Address> servers;
   // Keep tables sorted too.
   /**
@@ -100,8 +101,10 @@ public class RSGroupInfo {
     return servers.contains(hostPort);
   }
-  /** Get list of servers. */
-  public Set<Address> getServers() {
+  /**
+   * Get list of servers.
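+   * The returned set is the backing collection (not a copy) and is sorted by Address.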
+   */
+  public NavigableSet<Address> getServers() {
     return servers;
   }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
index 1a0446381ae..71381004a73 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
@@ -229,7 +229,7 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase {
   }

   private String backup(BackupRequest request, BackupAdmin client) throws IOException {
-    String backupId = client.backupTables(request);
+    String backupId = client.backupTables(request).getBackupId();
     return backupId;
   }