HBASE-8684: Table Coprocessor can't access external HTable by default

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1490043 13f79535-47bb-0310-9956-ffa450edef68
jyates 2013-06-05 22:04:54 +00:00
parent 45da73e18e
commit 7188830e6b
2 changed files with 135 additions and 1 deletion

RegionCoprocessorHost.java

@@ -36,6 +36,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -182,7 +183,9 @@ public class RegionCoprocessorHost
       }
       if (cfgSpec != null) {
         cfgSpec = cfgSpec.substring(cfgSpec.indexOf('|') + 1);
-        Configuration newConf = new Configuration(conf);
+        // do an explicit deep copy of the passed configuration
+        Configuration newConf = new Configuration(false);
+        HBaseConfiguration.merge(newConf, conf);
         Matcher m = HConstants.CP_HTD_ATTR_VALUE_PARAM_PATTERN.matcher(cfgSpec);
         while (m.find()) {
           newConf.set(m.group(1), m.group(2));
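
For context: the change above replaces the Configuration copy constructor with an explicit deep copy. A fresh Configuration(false) loads no default resources, HBaseConfiguration.merge copies every entry of the passed conf into it, and the key/value pairs parsed from cfgSpec are then set on top. A minimal standalone sketch of that pattern; the class and method names here are illustrative and not part of the patch:

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CoprocessorConfCopy {

  /**
   * Build a per-coprocessor configuration: deep-copy the source configuration
   * and overlay the key/value pairs declared for this coprocessor.
   */
  static Configuration copyWithOverrides(Configuration source, Map<String, String> overrides) {
    Configuration copy = new Configuration(false); // start empty, load no *-default.xml / *-site.xml
    HBaseConfiguration.merge(copy, source);        // explicit deep copy of every entry
    for (Map.Entry<String, String> entry : overrides.entrySet()) {
      copy.set(entry.getKey(), entry.getValue());  // per-coprocessor settings win
    }
    return copy;
  }
}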

TestOpenTableInCoprocessor.java

@@ -0,0 +1,131 @@
/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test that a coprocessor can open a connection and write to another table, inside a hook.
 */
@Category(MediumTests.class)
public class TestOpenTableInCoprocessor {

  private static final byte[] otherTable = Bytes.toBytes("otherTable");
  private static final byte[] family = new byte[] { 'f' };
  private static boolean completed = false;

  /**
   * Custom coprocessor that just copies the write to another table.
   */
  public static class SendToOtherTableCoprocessor extends BaseRegionObserver {

    @Override
    public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit,
        final Durability durability) throws IOException {
      // open the other table through the coprocessor environment and mirror the edit into it
      HTableInterface table = e.getEnvironment().getTable(otherTable);
      Put p = new Put(new byte[] { 'a' });
      p.add(family, null, new byte[] { 'a' });
      table.put(p);
      table.flushCommits();
      completed = true;
      table.close();
    }
  }

  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @AfterClass
  public static void cleanup() throws Exception {
    UTIL.getHBaseAdmin().close();
  }

  @Test
  public void testCoprocessorCanCreateConnectionToRemoteTable() throws Throwable {
    HTableDescriptor primary = new HTableDescriptor("primary");
    primary.addFamily(new HColumnDescriptor(family));
    // add our coprocessor
    primary.addCoprocessor(SendToOtherTableCoprocessor.class.getName());

    HTableDescriptor other = new HTableDescriptor(otherTable);
    other.addFamily(new HColumnDescriptor(family));

    UTIL.startMiniCluster();
    HBaseAdmin admin = UTIL.getHBaseAdmin();
    admin.createTable(primary);
    admin.createTable(other);
    admin.close();

    HTable table = new HTable(UTIL.getConfiguration(), "primary");
    Put p = new Put(new byte[] { 'a' });
    p.add(family, null, new byte[] { 'a' });
    table.put(p);
    table.flushCommits();
    table.close();

    HTable target = new HTable(UTIL.getConfiguration(), otherTable);
    assertTrue("Didn't complete update to target table!", completed);
    assertEquals("Didn't find inserted row", 1, getKeyValueCount(target));
    target.close();

    UTIL.shutdownMiniCluster();
  }

  /**
   * Count the number of keyvalues in the table. Scans all possible versions.
   * @param table table to scan
   * @return number of keyvalues over all rows in the table
   * @throws IOException
   */
  private int getKeyValueCount(HTable table) throws IOException {
    Scan scan = new Scan();
    scan.setMaxVersions(Integer.MAX_VALUE - 1);

    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (Result res : results) {
      count += res.list().size();
      System.out.println(count + ") " + res);
    }
    results.close();

    return count;
  }
}
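
Usage note: the cfgSpec path changed in RegionCoprocessorHost above is exercised when a table coprocessor is registered with per-table key/value settings on its HTableDescriptor. A hedged sketch of such a registration, assuming the four-argument HTableDescriptor.addCoprocessor overload; the helper class and the example.target.table setting are illustrative and not part of this commit:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TestOpenTableInCoprocessor.SendToOtherTableCoprocessor;

public class AttachConfiguredCoprocessor {

  /** Build a descriptor for "primary" with the observer and per-table settings attached. */
  public static HTableDescriptor primaryWithCoprocessorConfig() throws IOException {
    HTableDescriptor htd = new HTableDescriptor("primary");
    htd.addFamily(new HColumnDescriptor("f"));

    // These pairs are carried in the coprocessor table attribute, parsed back by
    // RegionCoprocessorHost as cfgSpec, and set on the merged configuration copy.
    Map<String, String> kvs = new HashMap<String, String>();
    kvs.put("example.target.table", "otherTable");

    htd.addCoprocessor(SendToOtherTableCoprocessor.class.getName(),
        null /* jar path: load from the classpath */, Coprocessor.PRIORITY_USER, kvs);
    return htd;
  }
}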