HBASE-19441: Implement retry logic around starting exclusive backup operation
Signed-off-by: tedyu <yuzhihong@gmail.com>
parent 8ab7b20f48
commit 91075276e7
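The retry window introduced by this commit is governed by a new configuration key, hbase.backup.exclusive.op.timeout.seconds (default 3600 seconds), added to BackupManager in the diff below. A minimal sketch of how an operator might raise it programmatically; the 7200-second override is hypothetical, only the key name and default come from the diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TuneBackupLockTimeout {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Wait up to two hours (instead of the default one hour) for a
        // concurrent backup/delete/merge to release the exclusive lock.
        conf.setInt("hbase.backup.exclusive.op.timeout.seconds", 7200);
      }
    }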
org/apache/hadoop/hbase/backup/impl/BackupManager.java

@@ -1,4 +1,5 @@
 /**
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -15,7 +16,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.backup.impl;
 
 import java.io.Closeable;
@@ -47,18 +47,22 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.procedure.ProcedureManagerHost;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-
 /**
- * Handles backup requests, creates backup info records in backup system table to
- * keep track of backup sessions, dispatches backup request.
+ * Handles backup requests, creates backup info records in backup system table to keep track of
+ * backup sessions, dispatches backup request.
  */
 @InterfaceAudience.Private
 public class BackupManager implements Closeable {
+  // in seconds
+  public final static String BACKUP_EXCLUSIVE_OPERATION_TIMEOUT_SECONDS_KEY =
+      "hbase.backup.exclusive.op.timeout.seconds";
+  // In seconds
+  private final static int DEFAULT_BACKUP_EXCLUSIVE_OPERATION_TIMEOUT = 3600;
   private static final Logger LOG = LoggerFactory.getLogger(BackupManager.class);
 
   protected Configuration conf = null;
@@ -112,8 +116,8 @@ public class BackupManager implements Closeable {
     if (classes == null) {
       conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, masterProcedureClass);
     } else if (!classes.contains(masterProcedureClass)) {
-      conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, classes + ","
-          + masterProcedureClass);
+      conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY,
+        classes + "," + masterProcedureClass);
     }
 
     if (LOG.isDebugEnabled()) {
@@ -138,16 +142,16 @@ public class BackupManager implements Closeable {
     if (classes == null) {
       conf.set(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY, regionProcedureClass);
     } else if (!classes.contains(regionProcedureClass)) {
-      conf.set(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY, classes + ","
-          + regionProcedureClass);
+      conf.set(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY,
+        classes + "," + regionProcedureClass);
     }
     String coproc = conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY);
     String regionObserverClass = BackupObserver.class.getName();
-    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, (coproc == null ? "" : coproc + ",") +
-        regionObserverClass);
+    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+      (coproc == null ? "" : coproc + ",") + regionObserverClass);
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Added region procedure manager: " + regionProcedureClass +
-          ". Added region observer: " + regionObserverClass);
+      LOG.debug("Added region procedure manager: " + regionProcedureClass
+          + ". Added region observer: " + regionObserverClass);
     }
   }
 
@@ -223,8 +227,7 @@ public class BackupManager implements Closeable {
     }
 
     // there are one or more tables in the table list
-    backupInfo =
-        new BackupInfo(backupId, type, tableList.toArray(new TableName[tableList.size()]),
-            targetRootDir);
+    backupInfo = new BackupInfo(backupId, type, tableList.toArray(new TableName[tableList.size()]),
+      targetRootDir);
     backupInfo.setBandwidth(bandwidth);
     backupInfo.setWorkers(workers);
@@ -286,8 +289,7 @@ public class BackupManager implements Closeable {
 
     BackupImage.Builder builder = BackupImage.newBuilder();
 
-    BackupImage image =
-        builder.withBackupId(backup.getBackupId()).withType(backup.getType())
+    BackupImage image = builder.withBackupId(backup.getBackupId()).withType(backup.getType())
         .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames())
         .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
 
@@ -319,9 +321,9 @@ public class BackupManager implements Closeable {
       BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage();
       ancestors.add(lastIncrImage);
 
-      LOG.debug("Last dependent incremental backup image: " + "{BackupID="
-          + lastIncrImage.getBackupId() + "," + "BackupDir=" + lastIncrImage.getRootDir()
-          + "}");
+      LOG.debug(
+        "Last dependent incremental backup image: " + "{BackupID=" + lastIncrImage.getBackupId()
+            + "," + "BackupDir=" + lastIncrImage.getRootDir() + "}");
     }
   }
 }
@@ -369,7 +371,36 @@ public class BackupManager implements Closeable {
    * @throws IOException if active session already exists
    */
   public void startBackupSession() throws IOException {
-    systemTable.startBackupExclusiveOperation();
+    long startTime = System.currentTimeMillis();
+    long timeout = conf.getInt(BACKUP_EXCLUSIVE_OPERATION_TIMEOUT_SECONDS_KEY,
+      DEFAULT_BACKUP_EXCLUSIVE_OPERATION_TIMEOUT) * 1000L;
+    long lastWarningOutputTime = 0;
+    while (System.currentTimeMillis() - startTime < timeout) {
+      try {
+        systemTable.startBackupExclusiveOperation();
+        return;
+      } catch (IOException e) {
+        if (e instanceof ExclusiveOperationException) {
+          // sleep, then repeat
+          try {
+            Thread.sleep(1000);
+          } catch (InterruptedException e1) {
+            // Restore the interrupted status
+            Thread.currentThread().interrupt();
+          }
+          if (lastWarningOutputTime == 0
+              || (System.currentTimeMillis() - lastWarningOutputTime) > 60000) {
+            lastWarningOutputTime = System.currentTimeMillis();
+            LOG.warn("Waiting to acquire backup exclusive lock for "
+                + (lastWarningOutputTime - startTime) / 1000 + "s");
+          }
+        } else {
+          throw e;
+        }
+      }
+    }
+    throw new IOException(
+      "Failed to acquire backup system table exclusive lock after " + timeout / 1000 + "s");
   }
 
   /**
@@ -483,7 +514,6 @@ public class BackupManager implements Closeable {
 
   /**
    * Get WAL files iterator.
-   *
    * @return WAL files iterator from backup system table
    * @throws IOException if getting the WAL files iterator fails
    */
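The startBackupSession() change above is a bounded retry: only ExclusiveOperationException is retried (once per second), the waiting message is logged at most once per minute, and the method gives up with an IOException once the configured timeout elapses. A distilled, self-contained sketch of the same pattern, with a generic operation standing in for systemTable.startBackupExclusiveOperation() (all names here are hypothetical):

    import java.io.IOException;

    public final class BoundedRetrySketch {
      interface Attempt {
        void run() throws IOException;
      }

      // Stand-in for the retryable "lock is busy" failure type.
      static class RetryableException extends IOException {
      }

      // Retry `attempt` every `sleepMs` until it succeeds or `timeoutMs` elapses;
      // only RetryableException is retried, anything else propagates immediately.
      static void untilTimeout(Attempt attempt, long timeoutMs, long sleepMs) throws IOException {
        long start = System.currentTimeMillis();
        while (System.currentTimeMillis() - start < timeoutMs) {
          try {
            attempt.run();
            return; // acquired
          } catch (RetryableException e) {
            try {
              Thread.sleep(sleepMs);
            } catch (InterruptedException ie) {
              Thread.currentThread().interrupt(); // restore interrupt status, keep waiting
            }
          }
        }
        throw new IOException("gave up after " + timeoutMs + "ms");
      }
    }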
org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java

@@ -1,4 +1,5 @@
 /**
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -64,6 +65,8 @@ import org.apache.hadoop.hbase.client.SnapshotDescription;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
@@ -71,26 +74,25 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-
 /**
  * This class provides API to access backup system table<br>
- *
  * Backup system table schema:<br>
- * <p><ul>
+ * <p>
+ * <ul>
  * <li>1. Backup sessions rowkey= "session:"+backupId; value =serialized BackupInfo</li>
 * <li>2. Backup start code rowkey = "startcode:"+backupRoot; value = startcode</li>
 * <li>3. Incremental backup set rowkey="incrbackupset:"+backupRoot; value=[list of tables]</li>
- * <li>4. Table-RS-timestamp map rowkey="trslm:"+backupRoot+table_name;
- * value = map[RS-> last WAL timestamp]</li>
+ * <li>4. Table-RS-timestamp map rowkey="trslm:"+backupRoot+table_name; value = map[RS-> last WAL
+ * timestamp]</li>
 * <li>5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last WAL timestamp</li>
- * <li>6. WALs recorded rowkey="wals:"+WAL unique file name;
- * value = backupId and full WAL file name</li>
- * </ul></p>
+ * <li>6. WALs recorded rowkey="wals:"+WAL unique file name; value = backupId and full WAL file
+ * name</li>
+ * </ul>
+ * </p>
  */
 @InterfaceAudience.Private
 public final class BackupSystemTable implements Closeable {
 
   private static final Logger LOG = LoggerFactory.getLogger(BackupSystemTable.class);
 
   static class WALItem {
@@ -128,10 +130,9 @@ public final class BackupSystemTable implements Closeable {
   private TableName tableName;
 
   /**
-   * Backup System table name for bulk loaded files.
-   * We keep all bulk loaded file references in a separate table
-   * because we have to isolate general backup operations: create, merge etc
-   * from activity of RegionObserver, which controls process of a bulk loading
+   * Backup System table name for bulk loaded files. We keep all bulk loaded file references in a
+   * separate table because we have to isolate general backup operations: create, merge etc from
+   * activity of RegionObserver, which controls process of a bulk loading
    * {@link org.apache.hadoop.hbase.backup.BackupObserver}
    */
   private TableName bulkLoadTableName;
@@ -198,13 +199,11 @@ public final class BackupSystemTable implements Closeable {
       verifyNamespaceExists(admin);
       Configuration conf = connection.getConfiguration();
       if (!admin.tableExists(tableName)) {
-        TableDescriptor backupHTD =
-            BackupSystemTable.getSystemTableDescriptor(conf);
+        TableDescriptor backupHTD = BackupSystemTable.getSystemTableDescriptor(conf);
         admin.createTable(backupHTD);
       }
       if (!admin.tableExists(bulkLoadTableName)) {
-        TableDescriptor blHTD =
-            BackupSystemTable.getSystemTableForBulkLoadedDataDescriptor(conf);
+        TableDescriptor blHTD = BackupSystemTable.getSystemTableForBulkLoadedDataDescriptor(conf);
         admin.createTable(blHTD);
       }
       waitForSystemTable(admin, tableName);
@@ -237,11 +236,11 @@ public final class BackupSystemTable implements Closeable {
       } catch (InterruptedException e) {
       }
       if (EnvironmentEdgeManager.currentTime() - startTime > TIMEOUT) {
-        throw new IOException("Failed to create backup system table "+
-          tableName +" after " + TIMEOUT + "ms");
+        throw new IOException(
+          "Failed to create backup system table " + tableName + " after " + TIMEOUT + "ms");
       }
     }
-    LOG.debug("Backup table "+tableName+" exists and available");
+    LOG.debug("Backup table " + tableName + " exists and available");
   }
 
   @Override
@@ -344,7 +343,6 @@ public final class BackupSystemTable implements Closeable {
     }
   }
 
-
   /**
    * Deletes backup status from backup system table table
    * @param backupId backup id
@@ -389,8 +387,8 @@ public final class BackupSystemTable implements Closeable {
   public void writeFilesForBulkLoadPreCommit(TableName tabName, byte[] region, final byte[] family,
       final List<Pair<Path, Path>> pairs) throws IOException {
     if (LOG.isDebugEnabled()) {
-      LOG.debug("write bulk load descriptor to backup " + tabName + " with " + pairs.size()
-          + " entries");
+      LOG.debug(
+        "write bulk load descriptor to backup " + tabName + " with " + pairs.size() + " entries");
     }
     try (Table table = connection.getTable(bulkLoadTableName)) {
       List<Put> puts =
@@ -426,6 +424,7 @@ public final class BackupSystemTable implements Closeable {
    */
  public Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>>
      readBulkloadRows(List<TableName> tableList) throws IOException {
+
    Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> map = new HashMap<>();
    List<byte[]> rows = new ArrayList<>();
    for (TableName tTable : tableList) {
@@ -504,9 +503,8 @@ public final class BackupSystemTable implements Closeable {
         byte[] fam = entry.getKey();
         List<Path> paths = entry.getValue();
         for (Path p : paths) {
-          Put put =
-              BackupSystemTable.createPutForBulkLoadedFile(tn, fam, p.toString(), backupId, ts,
-                cnt++);
+          Put put = BackupSystemTable.createPutForBulkLoadedFile(tn, fam, p.toString(), backupId,
+            ts, cnt++);
           puts.add(put);
         }
       }
@@ -580,8 +578,7 @@ public final class BackupSystemTable implements Closeable {
   }
 
   /**
-   * Exclusive operations are:
-   * create, delete, merge
+   * Exclusive operations are: create, delete, merge
    * @throws IOException if a table operation fails or an active backup exclusive operation is
    *           already underway
    */
@@ -596,7 +593,7 @@ public final class BackupSystemTable implements Closeable {
       // Row exists, try to put if value == ACTIVE_SESSION_NO
       if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
           .ifEquals(ACTIVE_SESSION_NO).thenPut(put)) {
-        throw new IOException("There is an active backup exclusive operation");
+        throw new ExclusiveOperationException();
       }
     }
   }
@@ -696,8 +693,7 @@ public final class BackupSystemTable implements Closeable {
 
   /**
    * Get first n backup history records
-   * @param n number of records, if n== -1 - max number
-   *        is ignored
+   * @param n number of records, if n== -1 - max number is ignored
    * @return list of records
    * @throws IOException if getting the backup history fails
    */
@@ -711,8 +707,7 @@ public final class BackupSystemTable implements Closeable {
 
   /**
    * Get backup history records filtered by list of filters.
-   * @param n max number of records, if n == -1 , then max number
-   *        is ignored
+   * @param n max number of records, if n == -1 , then max number is ignored
    * @param filters list of filters
    * @return backup records
    * @throws IOException if getting the backup history fails
@@ -917,8 +912,8 @@ public final class BackupSystemTable implements Closeable {
       Map<String, Long> map) {
     BackupProtos.TableServerTimestamp.Builder tstBuilder =
         BackupProtos.TableServerTimestamp.newBuilder();
-    tstBuilder.setTableName(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil
-        .toProtoTableName(table));
+    tstBuilder
+        .setTableName(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toProtoTableName(table));
 
     for (Entry<String, Long> entry : map.entrySet()) {
       BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder();
@@ -934,8 +929,9 @@ public final class BackupSystemTable implements Closeable {
     return tstBuilder.build();
   }
 
-  private HashMap<String, Long> fromTableServerTimestampProto(
-      BackupProtos.TableServerTimestamp proto) {
+  private HashMap<String, Long>
+      fromTableServerTimestampProto(BackupProtos.TableServerTimestamp proto) {
+
     HashMap<String, Long> map = new HashMap<>();
     List<BackupProtos.ServerTimestamp> list = proto.getServerTimestampList();
     for (BackupProtos.ServerTimestamp st : list) {
@@ -1106,8 +1102,8 @@ public final class BackupSystemTable implements Closeable {
   /**
    * Check if WAL file is eligible for deletion using multi-get
    * @param files names of a file to check
-   * @return map of results
-   *         (key: FileStatus object. value: true if the file is deletable, false otherwise)
+   * @return map of results (key: FileStatus object. value: true if the file is deletable, false
+   *         otherwise)
    * @throws IOException exception
    */
   public Map<FileStatus, Boolean> areWALFilesDeletable(Iterable<FileStatus> files)
@@ -1223,8 +1219,8 @@ public final class BackupSystemTable implements Closeable {
 
       res.advance();
       String[] tables = cellValueToBackupSet(res.current());
-      return Arrays.asList(tables).stream().map(item -> TableName.valueOf(item)).
-          collect(Collectors.toList());
+      return Arrays.asList(tables).stream().map(item -> TableName.valueOf(item))
+          .collect(Collectors.toList());
     } finally {
       if (table != null) {
         table.close();
@@ -1266,8 +1262,8 @@ public final class BackupSystemTable implements Closeable {
    */
   public void removeFromBackupSet(String name, String[] toRemove) throws IOException {
     if (LOG.isTraceEnabled()) {
-      LOG.trace(" Backup set remove from : " + name + " tables [" + StringUtils.join(toRemove, " ")
-          + "]");
+      LOG.trace(
+        " Backup set remove from : " + name + " tables [" + StringUtils.join(toRemove, " ") + "]");
     }
     String[] disjoint;
     String[] tables;
@@ -1342,16 +1338,14 @@ public final class BackupSystemTable implements Closeable {
     ColumnFamilyDescriptor colSessionsDesc = colBuilder.build();
     builder.setColumnFamily(colSessionsDesc);
 
-    colBuilder =
-        ColumnFamilyDescriptorBuilder.newBuilder(META_FAMILY);
+    colBuilder = ColumnFamilyDescriptorBuilder.newBuilder(META_FAMILY);
     colBuilder.setTimeToLive(ttl);
     builder.setColumnFamily(colBuilder.build());
     return builder.build();
   }
 
   public static TableName getTableName(Configuration conf) {
-    String name =
-        conf.get(BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_KEY,
+    String name = conf.get(BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_KEY,
       BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_DEFAULT);
     return TableName.valueOf(name);
   }
@@ -1381,8 +1375,7 @@ public final class BackupSystemTable implements Closeable {
     colBuilder.setTimeToLive(ttl);
     ColumnFamilyDescriptor colSessionsDesc = colBuilder.build();
     builder.setColumnFamily(colSessionsDesc);
-    colBuilder =
-        ColumnFamilyDescriptorBuilder.newBuilder(META_FAMILY);
+    colBuilder = ColumnFamilyDescriptorBuilder.newBuilder(META_FAMILY);
     colBuilder.setTimeToLive(ttl);
     builder.setColumnFamily(colBuilder.build());
     return builder.build();
@@ -1393,6 +1386,7 @@ public final class BackupSystemTable implements Closeable {
       BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_DEFAULT) + "_bulk";
     return TableName.valueOf(name);
   }
+
   /**
    * Creates Put operation for a given backup info object
    * @param context backup info
@@ -1622,16 +1616,15 @@ public final class BackupSystemTable implements Closeable {
           String file = path.toString();
           int lastSlash = file.lastIndexOf("/");
           String filename = file.substring(lastSlash + 1);
-          Put put =
-              new Put(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM,
+          Put put = new Put(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM,
             Bytes.toString(region), BLK_LD_DELIM, filename));
           put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, table.getName());
           put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, entry.getKey());
           put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, file.getBytes());
           put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_COMMIT);
           puts.add(put);
-          LOG.debug("writing done bulk path " + file + " for " + table + " "
-              + Bytes.toString(region));
+          LOG.debug(
+            "writing done bulk path " + file + " for " + table + " " + Bytes.toString(region));
         }
       }
       return puts;
@@ -1658,8 +1651,8 @@ public final class BackupSystemTable implements Closeable {
         // Snapshot does not exists, i.e completeBackup failed after
         // deleting backup system table snapshot
         // In this case we log WARN and proceed
-        LOG.warn("Could not restore backup system table. Snapshot " + snapshotName
-            + " does not exists.");
+        LOG.warn(
+          "Could not restore backup system table. Snapshot " + snapshotName + " does not exists.");
       }
     }
   }
@@ -1695,17 +1688,16 @@ public final class BackupSystemTable implements Closeable {
   /*
    * Creates Put's for bulk load resulting from running LoadIncrementalHFiles
   */
-  static List<Put> createPutForPreparedBulkload(TableName table, byte[] region,
-      final byte[] family, final List<Pair<Path, Path>> pairs) {
+  static List<Put> createPutForPreparedBulkload(TableName table, byte[] region, final byte[] family,
+      final List<Pair<Path, Path>> pairs) {
     List<Put> puts = new ArrayList<>(pairs.size());
     for (Pair<Path, Path> pair : pairs) {
       Path path = pair.getSecond();
       String file = path.toString();
       int lastSlash = file.lastIndexOf("/");
       String filename = file.substring(lastSlash + 1);
-      Put put =
-          new Put(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM, Bytes.toString(region),
-            BLK_LD_DELIM, filename));
+      Put put = new Put(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM,
        Bytes.toString(region), BLK_LD_DELIM, filename));
       put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, table.getName());
       put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, family);
       put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, file.getBytes());
@@ -1899,9 +1891,8 @@ public final class BackupSystemTable implements Closeable {
    */
   static Scan createScanForBulkLoadedFiles(String backupId) {
     Scan scan = new Scan();
-    byte[] startRow =
-        backupId == null ? BULK_LOAD_PREFIX_BYTES : rowkey(BULK_LOAD_PREFIX, backupId
-            + BLK_LD_DELIM);
+    byte[] startRow = backupId == null ? BULK_LOAD_PREFIX_BYTES
+        : rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM);
     byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
     stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
     scan.setStartRow(startRow);
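The exclusive lock behind all of this is a single row in the backup system table, flipped atomically with checkAndMutate; the change at line 596 makes a contended flip surface as the new, distinguishable ExclusiveOperationException rather than a generic IOException, which is what the retry loop in BackupManager keys on. A minimal sketch of the same compare-and-set idiom against an arbitrary table (the row, family, and qualifier names below are made up for illustration):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class RowLockSketch {
      private static final byte[] ROW = Bytes.toBytes("activesession:");
      private static final byte[] FAMILY = Bytes.toBytes("session");
      private static final byte[] COL = Bytes.toBytes("c");
      private static final byte[] NO = Bytes.toBytes("no");
      private static final byte[] YES = Bytes.toBytes("yes");

      // Succeeds only if the flag is currently NO; the check and the put are one
      // atomic server-side operation, so two clients can never both "acquire".
      static void acquire(Connection conn, TableName lockTable) throws IOException {
        try (Table table = conn.getTable(lockTable)) {
          Put put = new Put(ROW);
          put.addColumn(FAMILY, COL, YES);
          if (!table.checkAndMutate(ROW, FAMILY).qualifier(COL).ifEquals(NO).thenPut(put)) {
            throw new IOException("exclusive operation already in progress");
          }
        }
      }
    }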
org/apache/hadoop/hbase/backup/impl/ExclusiveOperationException.java (new file)

@@ -0,0 +1,33 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+@SuppressWarnings("serial")
+public class ExclusiveOperationException extends IOException {
+
+  public ExclusiveOperationException() {
+    super();
+  }
+
+}
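ExclusiveOperationException carries no state; its whole purpose is to be a type the caller can test for, so "the lock is busy" is distinguishable from a real I/O failure. A hedged sketch of that dispatch, with tryOnce() as a hypothetical stand-in for systemTable.startBackupExclusiveOperation():

    import java.io.IOException;
    import org.apache.hadoop.hbase.backup.impl.ExclusiveOperationException;

    public final class RetryDispatchSketch {
      interface Op {
        void tryOnce() throws IOException;
      }

      // Returns true when the caller should sleep and retry (lock contention),
      // mirroring the instanceof branch in BackupManager.startBackupSession().
      static boolean shouldRetry(Op op) throws IOException {
        try {
          op.tryOnce();
          return false; // lock acquired, nothing to retry
        } catch (ExclusiveOperationException e) {
          return true; // another exclusive operation holds the lock
        }
        // any other IOException propagates to the caller unchanged
      }
    }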
org/apache/hadoop/hbase/backup/TestBackupManager.java (new file)

@@ -0,0 +1,137 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicLongArray;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.Uninterruptibles;
+
+@Category(MediumTests.class)
+public class TestBackupManager {
+
+  private static final Logger LOG = LoggerFactory.getLogger(TestBackupManager.class);
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestBackupManager.class);
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  protected static Configuration conf = UTIL.getConfiguration();
+  protected static MiniHBaseCluster cluster;
+  protected static Connection conn;
+  protected BackupManager backupManager;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
+    BackupManager.decorateMasterConfiguration(conf);
+    BackupManager.decorateRegionServerConfiguration(conf);
+    cluster = UTIL.startMiniCluster();
+    conn = UTIL.getConnection();
+  }
+
+  @AfterClass
+  public static void tearDown() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Before
+  public void before() throws IOException {
+    backupManager = new BackupManager(conn, conn.getConfiguration());
+  }
+
+  @After
+  public void after() {
+    backupManager.close();
+  }
+
+  AtomicLongArray startTimes = new AtomicLongArray(2);
+  AtomicLongArray stopTimes = new AtomicLongArray(2);
+
+  @Test
+  public void testStartBackupExclusiveOperation() {
+
+    long sleepTime = 2000;
+    Runnable r = new Runnable() {
+      @Override
+      public void run() {
+        try {
+          backupManager.startBackupSession();
+          boolean result = startTimes.compareAndSet(0, 0, System.currentTimeMillis());
+          if (!result) {
+            result = startTimes.compareAndSet(1, 0, System.currentTimeMillis());
+            if (!result) {
+              throw new IOException("PANIC! Unreachable code");
+            }
+          }
+          Thread.sleep(sleepTime);
+          result = stopTimes.compareAndSet(0, 0, System.currentTimeMillis());
+          if (!result) {
+            result = stopTimes.compareAndSet(1, 0, System.currentTimeMillis());
+            if (!result) {
+              throw new IOException("PANIC! Unreachable code");
+            }
+          }
+          backupManager.finishBackupSession();
+        } catch (IOException | InterruptedException e) {
+          fail("Unexpected exception: " + e.getMessage());
+        }
+      }
+    };
+
+    Thread[] workers = new Thread[2];
+    for (int i = 0; i < workers.length; i++) {
+      workers[i] = new Thread(r);
+      workers[i].start();
+    }
+
+    for (int i = 0; i < workers.length; i++) {
+      Uninterruptibles.joinUninterruptibly(workers[i]);
+    }
+    LOG.info("Diff start time=" + (startTimes.get(1) - startTimes.get(0)) + "ms");
+    LOG.info("Diff finish time=" + (stopTimes.get(1) - stopTimes.get(0)) + "ms");
+    assertTrue(startTimes.get(1) - startTimes.get(0) >= sleepTime);
+    assertTrue(stopTimes.get(1) - stopTimes.get(0) >= sleepTime);
+
+  }
+
+}
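The final assertions encode the mutual-exclusion invariant rather than any scheduling detail: each worker holds the lock for sleepTime ms between recording its start and stop times, so if worker A acquires at t0 and releases at t0 + sleepTime, worker B's acquisition time t1 must satisfy t1 >= t0 + sleepTime, hence startTimes.get(1) - startTimes.get(0) >= sleepTime (and symmetrically for stopTimes). The retry loop added to startBackupSession() is what lets worker B wait for the lock instead of failing outright, which is the behavior this test exercises end to end.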