HBASE-20586: add SyncTable support for source and target clusters in different Kerberos realms (cross-realm authentication)

Signed-off-by: Andrew Purtell <apurtell@apache.org>
Author: wellington, 2018-05-15 15:32:54 +01:00; committed by Andrew Purtell
parent 9a0daa8cbd
commit c2f48e01db
1 changed file with 19 additions and 3 deletions

@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
+import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -79,12 +80,28 @@ public class SyncTable extends Configured implements Tool {
     super(conf);
   }
 
+  private void initCredentialsForHBase(String zookeeper, Job job) throws IOException {
+    Configuration peerConf = HBaseConfiguration.createClusterConf(job
+        .getConfiguration(), zookeeper);
+    if ("kerberos".equals(peerConf.get("hbase.security.authentication"))) {
+      TableMapReduceUtil.initCredentialsForCluster(job, peerConf);
+    }
+  }
+
   public Job createSubmittableJob(String[] args) throws IOException {
     FileSystem fs = sourceHashDir.getFileSystem(getConf());
     if (!fs.exists(sourceHashDir)) {
       throw new IOException("Source hash dir not found: " + sourceHashDir);
     }
 
+    Job job = Job.getInstance(getConf(), getConf().get("mapreduce.job.name",
+        "syncTable_" + sourceTableName + "-" + targetTableName));
+    Configuration jobConf = job.getConfiguration();
+    if ("kerberos".equals(jobConf.get("hadoop.security.authentication"))) {
+      TokenCache.obtainTokensForNamenodes(job.getCredentials(),
+          new Path[] { sourceHashDir }, getConf());
+    }
+
     HashTable.TableHash tableHash = HashTable.TableHash.read(getConf(), sourceHashDir);
     LOG.info("Read source hash manifest: " + tableHash);
     LOG.info("Read " + tableHash.partitions.size() + " partition keys");
@@ -114,18 +131,17 @@
           + " found in the partitions file is " + tableHash.partitions.size());
     }
 
-    Job job = Job.getInstance(getConf(),getConf().get("mapreduce.job.name",
-        "syncTable_" + sourceTableName + "-" + targetTableName));
-    Configuration jobConf = job.getConfiguration();
     job.setJarByClass(HashTable.class);
     jobConf.set(SOURCE_HASH_DIR_CONF_KEY, sourceHashDir.toString());
     jobConf.set(SOURCE_TABLE_CONF_KEY, sourceTableName);
     jobConf.set(TARGET_TABLE_CONF_KEY, targetTableName);
     if (sourceZkCluster != null) {
       jobConf.set(SOURCE_ZK_CLUSTER_CONF_KEY, sourceZkCluster);
+      initCredentialsForHBase(sourceZkCluster, job);
     }
     if (targetZkCluster != null) {
       jobConf.set(TARGET_ZK_CLUSTER_CONF_KEY, targetZkCluster);
+      initCredentialsForHBase(targetZkCluster, job);
     }
     jobConf.setBoolean(DRY_RUN_CONF_KEY, dryRun);
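With both sets of credentials in place, a single SyncTable job can compare a source and a target table that live in different Kerberos realms, provided cross-realm trust is configured in krb5.conf so the submitting user can authenticate to both clusters. A sketch of a driver, assuming SyncTable's existing --sourcezkcluster/--targetzkcluster/--dryrun options; the driver class name, hosts, paths, and table names are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.SyncTable;
    import org.apache.hadoop.util.ToolRunner;

    public class CrossRealmSyncTableDemo {  // hypothetical driver class
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int exitCode = ToolRunner.run(conf, new SyncTable(conf), new String[] {
            "--sourcezkcluster=zk-a.example.com:2181:/hbase",  // cluster in realm A
            "--targetzkcluster=zk-b.example.com:2181:/hbase",  // cluster in realm B
            "--dryrun=true",                      // report differences, change nothing
            "hdfs://cluster-a/hashes/TestTable",  // output dir of a prior HashTable run
            "TestTable",                          // source table
            "TestTable"                           // target table
        });
        System.exit(exitCode);
      }
    }

The driver itself stays realm-agnostic: the patch's initCredentialsForHBase calls pick up delegation tokens for whichever clusters the two ZooKeeper cluster keys point at.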