HBASE-18870 Hbase Backup should set the details to MR job name (Vishal Khandelwal)

commit 6712f8f632
parent a79b66b32b
BackupRestoreConstants.java

@@ -93,6 +93,7 @@ public interface BackupRestoreConstants {
   public static final String OPTION_YARN_QUEUE_NAME_DESC = "Yarn queue name to run backup create command on";
   public static final String OPTION_YARN_QUEUE_NAME_RESTORE_DESC = "Yarn queue name to run backup restore command on";
 
+  public final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
 
   public static final String BACKUP_CONFIG_STRING = BackupRestoreConstants.BACKUP_ENABLE_KEY + "=true\n"
       + "hbase.master.logcleaner.plugins="
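Note: the new JOB_NAME_CONF_KEY is simply the standard Hadoop property
"mapreduce.job.name". Any MapReduce Job created from a Configuration that
carries this key inherits it as the job name shown in YARN and the job
history UI. A minimal sketch of that propagation (hypothetical demo class;
the property value is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobNameDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Job copies the Configuration it is given, so the name must be
        // set before the Job is created and submitted.
        conf.set("mapreduce.job.name", "Full-Backup_backup_123_demotable");
        Job job = Job.getInstance(conf);
        System.out.println(job.getJobName()); // prints the value set above
      }
    }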
FullTableBackupClient.java

@@ -22,6 +22,7 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.BACKUP_ATTEM
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.BACKUP_MAX_ATTEMPTS_KEY;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.DEFAULT_BACKUP_MAX_ATTEMPTS;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY;
 
 import java.io.IOException;
 import java.util.HashMap;
@@ -92,8 +93,15 @@ public class FullTableBackupClient extends TableBackupClient {
       args[2] = "-copy-to";
       args[3] = backupInfo.getTableBackupDir(table);
 
+      String jobname = "Full-Backup_" + backupInfo.getBackupId() + "_" + table.getNameAsString();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Setting snapshot copy job name to : " + jobname);
+      }
+      conf.set(JOB_NAME_CONF_KEY, jobname);
+
       LOG.debug("Copy snapshot " + args[1] + " to " + args[3]);
       res = copyService.copy(backupInfo, backupManager, conf, BackupType.FULL, args);
+
       // if one snapshot export failed, do not continue for remained snapshots
       if (res != 0) {
         LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + ".");
@@ -101,6 +109,8 @@ public class FullTableBackupClient extends TableBackupClient {
         throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3]
             + " with reason code " + res);
       }
+
+      conf.unset(JOB_NAME_CONF_KEY);
       LOG.info("Snapshot copy " + args[1] + " finished.");
     }
   }
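Note: the full-backup client follows a set / submit / unset pattern: name the
job before copyService.copy() launches it, then clear the key so later jobs
created from the same Configuration do not inherit a stale name. As committed,
the unset runs only on the success path, since the failure path throws first.
A self-contained sketch of the pattern (hypothetical demo class; runCopy() is
a stand-in for copyService.copy()):

    import org.apache.hadoop.conf.Configuration;

    public class JobNamePattern {
      static final String JOB_NAME_CONF_KEY = "mapreduce.job.name";

      // Stand-in for copyService.copy(): just reports the name an MR job
      // created from this Configuration would receive.
      static void runCopy(Configuration conf) {
        System.out.println("submitting job: " + conf.get(JOB_NAME_CONF_KEY));
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(JOB_NAME_CONF_KEY, "Full-Backup_backup_123_demotable");
        try {
          runCopy(conf);
        } finally {
          // A finally block would also clear a stale name when the copy
          // throws; the committed code unsets only after success.
          conf.unset(JOB_NAME_CONF_KEY);
        }
      }
    }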
IncrementalTableBackupClient.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hbase.backup.impl;
 
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY;
+
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -296,6 +298,13 @@ public class IncrementalTableBackupClient extends TableBackupClient {
     String[] strArr = new String[files.length + 1];
     System.arraycopy(files, 0, strArr, 0, files.length);
     strArr[strArr.length - 1] = backupDest;
+
+    String jobname = "Incremental_Backup-HFileCopy-" + backupInfo.getBackupId();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Setting incremental copy HFiles job name to : " + jobname);
+    }
+    conf.set(JOB_NAME_CONF_KEY, jobname);
+
     BackupCopyJob copyService = BackupRestoreFactory.getBackupCopyJob(conf);
     int res = copyService.copy(backupInfo, backupManager, conf, BackupType.INCREMENTAL, strArr);
     if (res != 0) {
|
@ -353,10 +362,12 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
// a Map task for each file. We use ';' as separator
|
||||
// because WAL file names contains ','
|
||||
String dirs = StringUtils.join(dirPaths, ';');
|
||||
String jobname = "Incremental_Backup-" + backupId + "-" + tableName.getNameAsString();
|
||||
|
||||
Path bulkOutputPath = getBulkOutputDirForTable(tableName);
|
||||
conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString());
|
||||
conf.set(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ";");
|
||||
conf.set(JOB_NAME_CONF_KEY, jobname);
|
||||
String[] playerArgs = { dirs, tableName.getNameAsString() };
|
||||
|
||||
try {
|
||||
|
@@ -366,6 +377,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
         throw new IOException("WAL Player failed");
       }
       conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY);
+      conf.unset(JOB_NAME_CONF_KEY);
     } catch (IOException e) {
       throw e;
     } catch (Exception ee) {
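Note: alongside the job name, this path joins the WALPlayer inputs with ';'
because WAL paths embed server names that themselves contain ','. A small
sketch of why the separator matters (hypothetical demo class with
illustrative paths; uses commons-lang3 StringUtils here, while the committed
code's StringUtils import is not shown in the hunk):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.commons.lang3.StringUtils;

    public class SeparatorDemo {
      public static void main(String[] args) {
        // Server names like "host1,16020,1507227877397" put commas into
        // the WAL path, so ',' cannot delimit the input file list.
        List<String> walDirs = Arrays.asList(
            "hdfs://nn/hbase/oldWALs/host1,16020,1507227877397/wal.1",
            "hdfs://nn/hbase/oldWALs/host2,16020,1507227877397/wal.2");
        String dirs = StringUtils.join(walDirs, ';');
        System.out.println(dirs); // one ';'-separated string, commas intact
      }
    }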
RestoreTablesClient.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hbase.backup.impl;
 
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY;
+
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -150,8 +152,10 @@ public class RestoreTablesClient {
     if (manifest.getType() == BackupType.FULL) {
       LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from full" + " backup image "
           + tableBackupPath.toString());
+      conf.set(JOB_NAME_CONF_KEY, "Full_Restore-" + backupId + "-" + tTable);
       restoreTool.fullRestoreTable(conn, tableBackupPath, sTable, tTable, truncateIfExists,
         lastIncrBackupId);
+      conf.unset(JOB_NAME_CONF_KEY);
     } else { // incremental Backup
       throw new IOException("Unexpected backup type " + image.getType());
     }
@@ -175,6 +179,7 @@ public class RestoreTablesClient {
     LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from log dirs: " + dirs);
     Path[] paths = new Path[dirList.size()];
     dirList.toArray(paths);
+    conf.set(JOB_NAME_CONF_KEY, "Incremental_Restore-" + backupId + "-" + tTable);
     restoreTool.incrementalRestoreTable(conn, tableBackupPath, paths, new TableName[] { sTable },
       new TableName[] { tTable }, lastIncrBackupId);
     LOG.info(sTable + " has been successfully restored to " + tTable);
ExportSnapshot.java

@@ -108,6 +108,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool {
   private static final String CONF_BUFFER_SIZE = "snapshot.export.buffer.size";
   private static final String CONF_MAP_GROUP = "snapshot.export.default.map.group";
   private static final String CONF_BANDWIDTH_MB = "snapshot.export.map.bandwidth.mb";
+  private static final String CONF_MR_JOB_NAME = "mapreduce.job.name";
   protected static final String CONF_SKIP_TMP = "snapshot.export.skip.tmp";
 
   static class Testing {
@@ -807,8 +808,9 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool {
     conf.set(CONF_SNAPSHOT_NAME, snapshotName);
     conf.set(CONF_SNAPSHOT_DIR, snapshotDir.toString());
 
+    String jobname = conf.get(CONF_MR_JOB_NAME, "ExportSnapshot-" + snapshotName);
     Job job = new Job(conf);
-    job.setJobName("ExportSnapshot-" + snapshotName);
+    job.setJobName(jobname);
     job.setJarByClass(ExportSnapshot.class);
     TableMapReduceUtil.addDependencyJars(job);
     job.setMapperClass(ExportMapper.class);
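Note: the ExportSnapshot change is what lets the names above take effect for
full backups: the job name now comes from the Configuration, falling back to
the old "ExportSnapshot-<snapshot>" default only when the caller did not set
one. A minimal sketch of the fallback (hypothetical demo class):

    import org.apache.hadoop.conf.Configuration;

    public class DefaultNameDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false); // skip default resources
        String snapshotName = "snap1"; // illustrative snapshot name

        // Key unset: get(key, default) yields the legacy name.
        System.out.println(conf.get("mapreduce.job.name", "ExportSnapshot-" + snapshotName));

        // Key set by a caller such as FullTableBackupClient: that name wins.
        conf.set("mapreduce.job.name", "Full-Backup_backup_123_demotable");
        System.out.println(conf.get("mapreduce.job.name", "ExportSnapshot-" + snapshotName));
      }
    }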